repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
Hanhui-Ma-Lab/ColabFold-in-local | [
"29a9d2f318cf781855186788127f484e4c7ee014"
] | [
"AlphaFold-local.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#@title Input protein sequence(s), then hit `Runtime` -> `Run all`\n#from google.colab import files\nimport os.path\nimport re\nimport hashlib\nimport random\n\ndef add_hash(x,y):\n return x+\"_\"+hashlib.sha1(y.encode()).hexdigest()[:5]\n\n\nwith open(\"protein\") as f:\n query_sequence = f.read()\n#query_sequence = '' #@param {type:\"string\"}\n#@markdown - Use `:` to specify inter-protein chainbreaks for **modeling complexes** (supports homo- and hetro-oligomers). For example **PI...SK:PI...SK** for a mono-dimer\n\n# remove whitespaces\nquery_sequence = \"\".join(query_sequence.split())\n\njobname = 'test' #@param {type:\"string\"}\n# remove whitespaces\nbasejobname = \"\".join(jobname.split())\nbasejobname = re.sub(r'\\W+', '', basejobname)\njobname = add_hash(basejobname, query_sequence)\nwhile os.path.isfile(f\"{jobname}.csv\"):\n jobname = add_hash(basejobname, ''.join(random.sample(query_sequence,len(query_sequence))))\n\nwith open(f\"{jobname}.csv\", \"w\") as text_file:\n text_file.write(f\"id,sequence\\n{jobname},{query_sequence}\")\n\nqueries_path=f\"{jobname}.csv\"\n\n# number of models to use\nuse_amber = False #@param {type:\"boolean\"}\nuse_templates = False #@param {type:\"boolean\"}\n#save_to_google_drive = False #@param {type:\"boolean\"}\n#@markdown - if the save_to_google_drive option was selected, the result zip will be uploaded to your Google Drive\n\n#@markdown ### Advanced settings\nmsa_mode = \"MMseqs2 (UniRef+Environmental)\" #@param [\"MMseqs2 (UniRef+Environmental)\", \"MMseqs2 (UniRef only)\",\"single_sequence\",\"custom\"]\nmodel_type = \"auto\" #@param [\"auto\", \"AlphaFold2-ptm\", \"AlphaFold2-multimer\"]\n#@markdown - \"auto\" = protein structure prediction using \"AlphaFold2-ptm\" and complex prediction \"AlphaFold-multimer\". 
For complexes \"AlphaFold-multimer\" and \"AlphaFold-ptm\" can be used.\npair_mode = \"unpaired+paired\" #@param [\"unpaired+paired\",\"paired\",\"unpaired\"] {type:\"string\"}\n#@markdown - \"unpaired+paired\" = pair sequences from same species and add unpaired MSA, \"unpaired\" = generate seperate MSA for each chain, \"paired\" - only use sequences that were sucessfully paired.\nnum_recycles = 1 #@param [1,3,6,12,24,48] {type:\"raw\"}\n\n#@markdown Don't forget to hit `Runtime` -> `Run all` after updating the form.\n\n# decide which a3m to use\nif msa_mode.startswith(\"MMseqs2\"):\n a3m_file = f\"{jobname}.a3m\"\nelif msa_mode == \"custom\":\n a3m_file = f\"{jobname}.custom.a3m\"\n if not os.path.isfile(a3m_file):\n custom_msa_dict = files.upload()\n custom_msa = list(custom_msa_dict.keys())[0]\n header = 0\n import fileinput\n for line in fileinput.FileInput(custom_msa,inplace=1):\n if line.startswith(\">\"):\n header = header + 1\n if not line.rstrip():\n continue\n if line.startswith(\">\") == False and header == 1:\n query_sequence = line.rstrip()\n \n print(line, end='')\n\n os.rename(custom_msa, a3m_file)\n queries_path=a3m_file\n print(f\"moving {custom_msa} to {a3m_file}\")\nelse:\n a3m_file = f\"{jobname}.single_sequence.a3m\"\n with open(a3m_file, \"w\") as text_file:\n text_file.write(\">1\\n%s\" % query_sequence)\n\n \n# Removed\n#if save_to_google_drive:\n# from pydrive.drive import GoogleDrive\n# from pydrive.auth import GoogleAuth\n# from google.colab import auth\n# from oauth2client.client import GoogleCredentials\n# auth.authenticate_user()\n# gauth = GoogleAuth()\n# gauth.credentials = GoogleCredentials.get_application_default()\n# drive = GoogleDrive(gauth)\n# print(\"You are logged into Google Drive and are good to go!\")\n\n\n# In[ ]:\n\n\n#@title Run Prediction\n\nimport sys\n\nfrom colabfold.download import download_alphafold_params, default_data_dir\nfrom colabfold.utils import setup_logging\nfrom colabfold.batch import get_queries, run, set_model_type\n\nfrom colabfold.colabfold import plot_protein\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\n\n\n# For some reason we need that to get pdbfixer to import\nif use_amber and '/usr/local/lib/python3.7/site-packages/' not in sys.path:\n sys.path.insert(0, '/usr/local/lib/python3.7/site-packages/')\n\ndef prediction_callback(unrelaxed_protein, length, prediction_result, input_features):\n fig = plot_protein(unrelaxed_protein, Ls=length, dpi=100)\n plt.show()\n plt.close()\n\nresult_dir=\".\"\nsetup_logging(Path(\".\").joinpath(\"log.txt\"))\nqueries, is_complex = get_queries(queries_path)\nmodel_type = set_model_type(is_complex, model_type)\ndownload_alphafold_params(model_type, Path(\".\"))\nrun(\n queries=queries,\n result_dir=result_dir,\n use_templates=use_templates,\n use_amber=use_amber,\n msa_mode=msa_mode, \n model_type=model_type,\n num_models=5,\n num_recycles=num_recycles,\n model_order=[1, 2, 3, 4, 5],\n is_complex=is_complex,\n data_dir=Path(\".\"),\n keep_existing_results=False,\n recompile_padding=1.0,\n rank_by=\"auto\",\n pair_mode=pair_mode,\n stop_at_score=float(100),\n prediction_callback=prediction_callback,\n)\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.close"
]
] |
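The ColabFold script above derives a collision-free job name by suffixing a SHA-1 fragment of the query sequence, re-hashing a shuffled copy of the sequence whenever the name is already taken. A minimal standalone sketch of that scheme (the example sequence is illustrative, not from the source):

```python
import hashlib
import os.path
import random

def add_hash(jobname: str, sequence: str) -> str:
    # Suffix the job name with the first 5 hex chars of the sequence's SHA-1,
    # as in the notebook's add_hash helper.
    return jobname + "_" + hashlib.sha1(sequence.encode()).hexdigest()[:5]

query_sequence = "PIAQIHILEGRSDEQKETLIREVSEAISRSLDAPLT"  # illustrative only
jobname = add_hash("test", query_sequence)
# On a collision with an existing CSV, re-hash a shuffled copy of the
# sequence until a free name is found, mirroring the loop in the notebook.
while os.path.isfile(f"{jobname}.csv"):
    jobname = add_hash("test", "".join(random.sample(query_sequence, len(query_sequence))))
print(jobname)  # e.g. test_1a2b3
```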
Elucidation/ChessboardDetect | [
"a5d2a2c2ab2434e4e041b4f384f3cd7d6884d2c4"
] | [
"generateFullDataset.py"
] | [
"# Given a list of pts text files, build a complete dataset from it.\nimport glob\nimport os\nimport PIL.Image\nimport cv2\nimport numpy as np\nfrom time import time\nfrom argparse import ArgumentParser\nfrom scipy.spatial import cKDTree\nimport tensorflow as tf\nimport SaddlePoints\nimport errno\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if os.path.isdir(path):\n pass\n else:\n raise\n\n\n# Given chessboard corners, get all 7x7 = 49 internal x-corner positions.\ndef getXcorners(corners):\n # Get Xcorners for image\n ideal_corners = np.array([[0,1],[1,1],[1,0],[0,0]],dtype=np.float32)\n M = cv2.getPerspectiveTransform(ideal_corners, corners) # From ideal to real.\n\n # 7x7 internal grid of 49 x-corners/\n xx,yy = np.meshgrid(np.arange(7, dtype=np.float32), np.arange(7, dtype=np.float32))\n all_ideal_grid_pts = np.vstack([xx.flatten(), yy.flatten()]).T\n all_ideal_grid_pts = (all_ideal_grid_pts + 1) / 8.0\n\n chess_xcorners = cv2.perspectiveTransform(np.expand_dims(all_ideal_grid_pts,0), M)[0,:,:]\n return chess_xcorners\n\n\ndef getPointsNearPoints(ptsA, ptsB, MIN_DIST_PX=3):\n # Returns a mask for points in A that are close by MIN_DIST_PX to points in B\n min_dists, min_dist_idx = cKDTree(ptsB).query(ptsA, 1)\n mask = min_dists < MIN_DIST_PX\n return mask\n\n# Load image from path\ndef loadImage(img_filepath):\n print (\"Processing %s\" % (img_filepath))\n \n img = PIL.Image.open(img_filepath)\n if (img.size[0] > 640):\n img = img.resize((640, 480), PIL.Image.BICUBIC)\n gray = np.array(img.convert('L'))\n rgb = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n return rgb, gray\n\ndef getTiles(pts, img_gray, WINSIZE=10):\n # NOTE : Assumes no point is within WINSIZE of an edge!\n # Points Nx2, columns should be x and y, not r and c.\n # WINSIZE = the number of pixels out from the point that a tile should be.\n\n # Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)\n img_shape = np.array([img_gray.shape[1], img_gray.shape[0]])\n tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1], dtype=img_gray.dtype)\n for i, pt in enumerate(np.round(pts).astype(np.int64)):\n tiles[i,:,:] = img_gray[pt[1]-WINSIZE:pt[1]+WINSIZE+1,\n pt[0]-WINSIZE:pt[0]+WINSIZE+1]\n return tiles\n\ndef getTilesColor(pts, img, WINSIZE=10):\n # NOTE : Assumes no point is within WINSIZE of an edge!\n # Points Nx2, columns should be x and y, not r and c.\n # WINSIZE = the number of pixels out from the point that a tile should be.\n\n # Build tiles of size Nx(2*WINSIZE+1)x(2*WINSIZE+1)\n img_shape = np.array([img.shape[1], img.shape[0]])\n tiles = np.zeros([len(pts), WINSIZE*2+1, WINSIZE*2+1, 3], dtype=img.dtype)\n for i, pt in enumerate(np.round(pts).astype(np.int64)):\n tiles[i,:,:,:] = img[pt[1]-WINSIZE:pt[1]+WINSIZE+1,\n pt[0]-WINSIZE:pt[0]+WINSIZE+1, :]\n return tiles\n\n# View image with chessboard lines overlaid.\ndef addOverlay(idx, img, corners, good_xcorners, bad_pts):\n for pt in np.round(bad_pts).astype(np.int64):\n cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,0,255), -1)\n\n for pt in np.round(good_xcorners).astype(np.int64):\n cv2.rectangle(img, tuple(pt-2),tuple(pt+2), (0,255,0), -1)\n\n\n cv2.polylines(img, \n [np.round(corners).astype(np.int32)], \n isClosed=True, thickness=2, color=(255,0,255))\n\n cv2.putText(img, \n 'Frame % 4d' % (idx),\n (5,15), cv2.FONT_HERSHEY_PLAIN, 1.0,(255,255,255),0)\n\ndef visualizeTiles(tiles):\n # Assumes no more than 49 tiles, only plots the first 49\n N = len(tiles)\n # assert N <= 49\n assert tiles.shape[1] == tiles.shape[2] # 
square tiles\n side = tiles.shape[1]\n cols = 7#int(np.ceil(np.sqrt(N)))\n rows = 7#int(np.ceil(N/(cols)))+1\n tile_img = np.zeros([rows*side, cols*side, 3], dtype=tiles.dtype)\n for i in range(min(N,49)):\n r, c = side*(int(i/cols)), side*(i%cols)\n tile_img[r:r+side, c:c+side,:] = tiles[i,:,:,:]\n return tile_img\n\n# Converting the values into features\n# _int64 is used for numeric values\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n# _bytes is used for string/char values\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef main(args):\n for pointfile in args.pointfiles:\n with open(pointfile, 'r') as f:\n lines = f.readlines()\n video_filepath = lines[0]\n images_path = os.path.dirname(pointfile)\n\n # Writing to TFrecord\n video_filename = os.path.basename(video_filepath)[:-5]\n folder_path = \"%s/winsize_%s_color\" % (args.tfrecords_path, args.winsize)\n mkdir_p(folder_path)\n\n tfrecord_path = \"%s/%s_ws%d.tfrecords\" % (folder_path, video_filename, args.winsize)\n with tf.python_io.TFRecordWriter(tfrecord_path) as writer:\n for line in lines[1:]:\n tA = time()\n parts = line.split(',')\n idx = int(parts[0])\n\n # if (idx < 260):\n # continue\n\n corners = np.array(parts[1:], dtype=np.float32).reshape([4,2])\n xcorners = getXcorners(corners)\n\n filename = \"%s/frame_%03d.jpg\" % (images_path, idx)\n img, gray = loadImage(filename)\n\n # Saddle points\n spts, gx, gy = SaddlePoints.getFinalSaddlePoints(gray, WINSIZE=args.winsize)\n\n good_spt_mask = getPointsNearPoints(spts, xcorners)\n good_xcorners = spts[good_spt_mask]\n bad_spts = spts[~good_spt_mask]\n\n # Only keep the same # of bad points as good\n # Shuffle bad points so we get a good smattering.\n N = len(good_xcorners)\n np.random.shuffle(bad_spts)\n bad_spts = bad_spts[:N]\n\n # good_xcorners, bad_xcorners, bad_spts, spts, keep_mask = getXcornersNearSaddlePts(gray, xcorners)\n\n tiles = getTilesColor(good_xcorners, img, WINSIZE=args.winsize)\n bad_tiles = getTilesColor(bad_spts, img, WINSIZE=args.winsize)\n\n # Write tiles to tf-records\n for tile in tiles:\n feature = { 'label': _int64_feature(1),\n 'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n\n for tile in bad_tiles:\n feature = { 'label': _int64_feature(0),\n 'image': _bytes_feature(tf.compat.as_bytes(tile.tostring())) }\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n\n\n if args.viztiles:\n tile_img = visualizeTiles(tiles)\n bad_tile_img = visualizeTiles(bad_tiles)\n\n print('\\t Took %.1f ms.' 
% ((time() - tA)*1000))\n\n if args.vizoverlay:\n overlay_img = img.copy()\n addOverlay(idx, overlay_img, corners, good_xcorners, bad_spts)\n\n cv2.imshow('frame',overlay_img)\n \n if args.viztiles:\n cv2.imshow('tiles', tile_img)\n cv2.imshow('bad_tiles', bad_tile_img)\n\n if (args.vizoverlay or args.viztiles):\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n break\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"pointfiles\", nargs='+',\n help=\"All pts.txt points files containing filename and chessboard coordinates.\")\n parser.add_argument(\"-savetf\",\n action='store_true', help=\"Whether to save tfrecords\")\n parser.add_argument(\"-viztiles\",\n action='store_true', help=\"Whether to visualize tiles or not\")\n parser.add_argument(\"-vizoverlay\",\n action='store_true', help=\"Whether to visualize overlay\")\n parser.add_argument(\"--tfrecords_path\", default='datasets/tfrecords',\n help=\"Folder to store tfrecord output\")\n parser.add_argument(\"-ws\", \"--winsize\", dest=\"winsize\", default=10, type=int,\n help=\"Half window size (full kernel = 2*winsize + 1)\")\n args = parser.parse_args()\n print(args)\n main(args)"
] | [
[
"numpy.random.shuffle",
"numpy.zeros",
"tensorflow.train.Int64List",
"numpy.arange",
"scipy.spatial.cKDTree",
"numpy.expand_dims",
"tensorflow.train.Features",
"tensorflow.python_io.TFRecordWriter",
"numpy.array",
"tensorflow.train.BytesList",
"numpy.round"
]
] |
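generateFullDataset.py serializes each color tile as a raw-bytes `image` feature alongside an `int64` `label` (1 = true x-corner, 0 = negative saddle point). A sketch of reading those records back, assuming TensorFlow 1.x (to match `tf.python_io.TFRecordWriter` above) and a hypothetical output path; `WINSIZE` must equal the `--winsize` used at write time:

```python
import numpy as np
import tensorflow as tf  # TF 1.x, matching the writer above

WINSIZE = 10                  # must match the --winsize used when writing
SIDE = 2 * WINSIZE + 1
path = "datasets/tfrecords/winsize_10_color/video_ws10.tfrecords"  # hypothetical

for record in tf.python_io.tf_record_iterator(path):
    example = tf.train.Example.FromString(record)
    label = example.features.feature["label"].int64_list.value[0]
    raw = example.features.feature["image"].bytes_list.value[0]
    # Tiles were written with ndarray.tostring(), i.e. raw uint8 in C order.
    tile = np.frombuffer(raw, dtype=np.uint8).reshape(SIDE, SIDE, 3)  # BGR
    print(label, tile.shape)
```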
binodthapachhetry/SWaN | [
"df54f72dfbd10bb67d3bed2cd20f3401eb779d50"
] | [
"build/lib/SWaN_accel/classify.py"
] | [
"import os, gzip, pickle, sys, datetime, struct\nfrom glob import glob\nimport pandas as pd\nimport subprocess\nimport shutil\nimport numpy as np\nfrom datetime import timedelta\nfrom io import StringIO\n\nfrom SWaN_accel import config\nfrom SWaN_accel import utils\nfrom SWaN_accel import feature_set\npd.options.mode.chained_assignment = None # default='warn'\n\n# JAR = 'jar/readBinaryFile.jar'\n\n# col = [\"HEADER_TIME_STAMP\",\"X\",\"Y\",\"Z\"]\n\ncol = [\"HEADER_TIME_STAMP\",\"X_ACCELERATION_METERS_PER_SECOND_SQUARED\",\n \"Y_ACCELERATION_METERS_PER_SECOND_SQUARED\",\"Z_ACCELERATION_METERS_PER_SECOND_SQUARED\"]\n\nMHEALTH_TIMESTAMP_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\nPROB_WEAR = 'PROB_WEAR'\nPROB_SLEEP = 'PROB_SLEEP'\nPROB_NWEAR = 'PROB_NWEAR'\n\nori_header = ['ORI_X_MEDIAN', 'ORI_Y_MEDIAN', 'ORI_Z_MEDIAN']\n\n\ndef mhealth_timestamp_parser(val):\n return datetime.datetime.strptime(val, MHEALTH_TIMESTAMP_FORMAT)\n\ndef contigous_regions_usingOri(condition):\n d = np.floor(np.absolute(np.diff(condition)))\n idx, = d.nonzero()\n idx += 1\n idx = np.r_[0, idx - 1]\n idx = np.r_[idx, condition.size - 1]\n\n bout_lis = []\n for i in range(len(idx) - 1):\n if i == 0:\n first = idx[i]\n else:\n first = idx[i] + 1\n second = idx[i + 1]\n bout_lis = bout_lis + [[first, second]]\n\n this_ar = np.asarray(bout_lis)\n\n return this_ar\n\ndef contigous_regions(condition):\n d = np.diff(condition)\n idx, = d.nonzero()\n idx += 1\n idx = np.r_[0, idx - 1]\n idx = np.r_[idx, condition.size - 1]\n\n bout_lis = []\n for i in range(len(idx) - 1):\n if i == 0:\n first = idx[i]\n else:\n first = idx[i] + 1\n second = idx[i + 1]\n bout_lis = bout_lis + [[first, second]]\n\n this_ar = np.asarray(bout_lis)\n\n return this_ar\n\ndef filterUsingZori(bout_array, fil_df, lab_str, ref_str, prob_wear, prob_sleep, prob_nwear):\n fdf = fil_df.copy()\n tmp_fdf = fil_df.copy()\n for n in range(len(bout_array)):\n ar_sub = fdf[bout_array[n][0]:bout_array[n][1] + 1]\n ar_sub_pred = ar_sub[lab_str].values[0]\n ar_sub_start = ar_sub.index[0]\n ar_sub_ori = ar_sub[ref_str].values\n bout_array_sub = contigous_regions_usingOri(ar_sub_ori)\n bout_array_sub_final = bout_array_sub + ar_sub_start\n for m in range(len(bout_array_sub_final)):\n start = bout_array_sub_final[m][0]\n end = bout_array_sub_final[m][1]\n if ar_sub_pred == 0:\n if start == end:\n fdf.loc[start, 'PREDICTED_SMOOTH'] = 0\n fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_wear]\n fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]\n fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]\n else:\n fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1\n fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]\n fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]\n fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]\n elif ar_sub_pred == 1:\n if start == end:\n fdf.loc[start, 'PREDICTED_SMOOTH'] = 0\n fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]\n fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_wear]\n fdf.loc[start, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]\n else:\n fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 1\n fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]\n fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]\n fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]\n elif ar_sub_pred == 2:\n if start == end:\n fdf.loc[start, 'PREDICTED_SMOOTH'] 
= 0\n fdf.loc[start, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start][prob_sleep]\n fdf.loc[start, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start][prob_wear]\n fdf.loc[start]['PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start][prob_nwear]\n else:\n fdf.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n fdf.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_wear]\n fdf.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_fdf.loc[start:end][prob_sleep]\n fdf.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_fdf.loc[start:end][prob_nwear]\n return fdf\n\ndef lookBeforeAfter(lo_df):\n global new_lab\n df = lo_df.copy()\n tmp_df = lo_df.copy()\n tmp_ar = tmp_df['PREDICTED_SMOOTH'].values\n ff_obout_array = contigous_regions(tmp_ar)\n bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])\n bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1\n\n start_ind = bout_df.iloc[0]['START_IND']\n stop_ind = bout_df.iloc[-1]['STOP_IND']\n size = len(bout_df.index)\n\n for bout_ind, bout_row in bout_df.iterrows():\n start, end, this_size = bout_row['START_IND'], bout_row['STOP_IND'], bout_row['SIZE']\n lab = tmp_df.loc[start]['PREDICTED_SMOOTH']\n bout_df.loc[bout_ind, 'LABEL'] = lab\n if lab == 1:\n if (bout_ind == len(bout_df.index) - 1) or (this_size >= 480):\n # if(this_size >= 480):\n bout_df.loc[bout_ind, 'LABEL'] = 2\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n\n # print('Done nonwear first')\n\n sleep_df = bout_df[bout_df.LABEL == 1]\n # ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]\n ref_df_short = sleep_df[(sleep_df.SIZE >= 20)]\n ref_ind_ar_short = ref_df_short.index\n\n # nonwear related\n nwear_df = bout_df[bout_df.LABEL == 2]\n nwear_ref_ind_ar_short = None\n if not nwear_df.empty:\n nwear_ref_ind_ar_short = nwear_df.index\n\n # also add nonwear vicinity\n for bout_ind, bout_row in bout_df.iterrows():\n start, end = bout_row['START_IND'], bout_row['STOP_IND']\n lab = bout_row['LABEL']\n size = bout_row['SIZE']\n if lab == 1:\n if (size < 480) and (size >= 60):\n # min_distance = 60\n\n min_distance = 20\n\n nwear_min_distance = 10\n\n up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]\n up_dist = None\n down_dist = None\n\n if len(up) != 0:\n up_ind = up[-1]\n sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]\n up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if len(down) != 0:\n down_ind = down[0]\n sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]\n down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n # nonwear related\n nwear_up_dist = None\n nwear_down_dist = None\n if not nwear_df.empty:\n nwear_up = nwear_ref_ind_ar_short[nwear_ref_ind_ar_short < bout_ind]\n nwear_down = nwear_ref_ind_ar_short[nwear_ref_ind_ar_short > bout_ind]\n\n if len(nwear_up) != 0:\n nwear_up_ind = nwear_up[-1]\n sub_bout_df = bout_df.loc[(bout_df.index > nwear_up_ind) & (bout_df.index < bout_ind)]\n nwear_up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if len(nwear_down) != 0:\n nwear_down_ind = nwear_down[0]\n sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < nwear_down_ind)]\n nwear_down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n # nonwear vicinity related\n if 
nwear_down_dist:\n if nwear_down_dist < nwear_min_distance:\n # print('flip', start, end, nwear_up_dist, nwear_down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 2\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n if nwear_up_dist:\n if nwear_up_dist < nwear_min_distance:\n # print('flip', start, end, nwear_up_dist, nwear_down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 2\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n # sleep vicinity related\n if (not up_dist) & (not down_dist):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 2\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n if up_dist and down_dist:\n if (up_dist > min_distance) and (down_dist > min_distance):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 2\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n # print('untouched', start, end, up_dist, down_dist)\n\n sleep_df = bout_df[bout_df.LABEL == 1]\n ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]\n ref_ind_ar_short = ref_df_short.index\n\n for bout_ind, bout_row in bout_df.iterrows():\n start, end = bout_row['START_IND'], bout_row['STOP_IND']\n lab = bout_row['LABEL']\n size = bout_row['SIZE']\n if lab == 1:\n if (size < 60) and (size > 30):\n min_distance = 30\n up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]\n up_dist = None\n down_dist = None\n\n if len(up) != 0:\n up_ind = up[-1]\n sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]\n up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if len(down) != 0:\n down_ind = down[0]\n sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]\n down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if (not up_dist) & (not down_dist):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n if not up_dist:\n if down_dist:\n if down_dist > min_distance:\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = 
tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n if not down_dist:\n if up_dist:\n if up_dist > min_distance:\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n if up_dist and down_dist:\n if (up_dist > min_distance) and (down_dist > min_distance):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n continue\n\n # print('untouched', start, end, up_dist, down_dist)\n\n sleep_df = bout_df[bout_df.LABEL == 1]\n ref_df_short = sleep_df[(sleep_df.SIZE >= 30)]\n ref_ind_ar_short = ref_df_short.index\n\n for bout_ind, bout_row in bout_df.iterrows():\n start, end = bout_row['START_IND'], bout_row['STOP_IND']\n lab = bout_row['LABEL']\n size = bout_row['SIZE']\n if lab == 1:\n if size <= 30:\n min_distance = 30\n\n up, down = ref_ind_ar_short[ref_ind_ar_short < bout_ind], ref_ind_ar_short[ref_ind_ar_short > bout_ind]\n up_dist = None\n down_dist = None\n\n if len(up) != 0:\n up_ind = up[-1]\n sub_bout_df = bout_df.loc[(bout_df.index > up_ind) & (bout_df.index < bout_ind)]\n up_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if len(down) != 0:\n down_ind = down[0]\n sub_bout_df = bout_df.loc[(bout_df.index > bout_ind) & (bout_df.index < down_ind)]\n down_dist = sub_bout_df[sub_bout_df.LABEL == 0]['SIZE'].sum()\n\n if (not up_dist) & (not down_dist):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n continue\n if not up_dist:\n if down_dist:\n if down_dist > min_distance:\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n continue\n\n if not down_dist:\n if up_dist:\n if up_dist > min_distance:\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n continue\n\n if up_dist and down_dist:\n if (up_dist > min_distance) or (down_dist > 
min_distance):\n # print('flip', start, end, up_dist, down_dist)\n bout_df.loc[bout_ind, 'LABEL'] = 0\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n continue\n\n # print('untouched', start, end, up_dist, down_dist)\n\n # smooth the wear between sleep period\n tmp_ar = df['PREDICTED_SMOOTH'].values\n ff_obout_array = contigous_regions(tmp_ar)\n bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])\n bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1\n\n tmp_df = df.copy()\n for i in range(len(bout_df) - 1):\n # print(i)\n start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']\n lab = df.loc[start]['PREDICTED_SMOOTH']\n\n if this_size <= 20:\n\n prev_start = None\n next_start = None\n\n if i != 0:\n prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[i - 1, 'STOP_IND'], \\\n bout_df.loc[i - 1, 'SIZE']\n prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']\n\n if i != len(bout_df):\n next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[i + 1, 'STOP_IND'], \\\n bout_df.loc[i + 1, 'SIZE']\n next_lab = df.loc[next_start]['PREDICTED_SMOOTH']\n\n if prev_start and next_start:\n if prev_size >= next_size:\n new_lab = prev_lab\n else:\n new_lab = next_lab\n\n elif prev_start:\n new_lab = prev_lab\n elif next_start:\n new_lab = next_lab\n\n if lab == 2:\n # print(start,end,lab,new_lab)\n if new_lab == 0:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n if new_lab == 1:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 1\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n\n # smooth the wear between sleep period\n tmp_ar = df['PREDICTED_SMOOTH'].values\n ff_obout_array = contigous_regions(tmp_ar)\n bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])\n bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1\n\n tmp_df = df.copy()\n for i in range(len(bout_df) - 1):\n # print(i,len(bout_df))\n start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']\n lab = df.loc[start]['PREDICTED_SMOOTH']\n\n if this_size <= 20:\n\n prev_start = None\n next_start = None\n\n if i != 0:\n prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[i - 1, 'STOP_IND'], \\\n bout_df.loc[i - 1, 'SIZE']\n prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']\n\n if i != len(bout_df):\n next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[i + 1, 'STOP_IND'], \\\n bout_df.loc[i + 1, 'SIZE']\n next_lab = df.loc[next_start]['PREDICTED_SMOOTH']\n\n if prev_start and next_start:\n if prev_size >= next_size:\n new_lab = prev_lab\n else:\n new_lab = next_lab\n\n elif prev_start:\n new_lab = prev_lab\n elif next_start:\n new_lab = next_lab\n\n if lab == 0:\n # print(start,end,lab,new_lab)\n if 
new_lab == 2:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 2\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n if new_lab == 1:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 1\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n\n # smooth the wear between sleep period\n tmp_ar = df['PREDICTED_SMOOTH'].values\n ff_obout_array = contigous_regions(tmp_ar)\n bout_df = pd.DataFrame(ff_obout_array, columns=['START_IND', 'STOP_IND'])\n bout_df['SIZE'] = bout_df['STOP_IND'] - bout_df['START_IND'] + 1\n\n tmp_df = df.copy()\n for i in range(len(bout_df) - 1):\n start, end, this_size = bout_df.loc[i, 'START_IND'], bout_df.loc[i, 'STOP_IND'], bout_df.loc[i, 'SIZE']\n lab = df.loc[start]['PREDICTED_SMOOTH']\n\n if this_size <= 20:\n\n prev_start = None\n next_start = None\n\n if i != 0:\n prev_start, prev_end, prev_size = bout_df.loc[i - 1, 'START_IND'], bout_df.loc[\n i - 1, 'STOP_IND'], \\\n bout_df.loc[i - 1, 'SIZE']\n prev_lab = df.loc[prev_start]['PREDICTED_SMOOTH']\n\n if i != len(bout_df):\n next_start, next_end, next_size = bout_df.loc[i + 1, 'START_IND'], bout_df.loc[\n i + 1, 'STOP_IND'], \\\n bout_df.loc[i + 1, 'SIZE']\n next_lab = df.loc[next_start]['PREDICTED_SMOOTH']\n\n if prev_start and next_start:\n if prev_size >= next_size:\n new_lab = prev_lab\n else:\n new_lab = next_lab\n\n elif prev_start:\n new_lab = prev_lab\n elif next_start:\n new_lab = next_lab\n\n if lab == 1:\n # print(start, end, lab, new_lab)\n if new_lab == 0:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 0\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n if new_lab == 2:\n df.loc[start:end, 'PREDICTED_SMOOTH'] = 1\n df.loc[start:end, 'PROB_WEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_WEAR_SMOOTH']\n df.loc[start:end, 'PROB_SLEEP_SMOOTH'] = tmp_df.loc[start:end]['PROB_NWEAR_SMOOTH']\n df.loc[start:end, 'PROB_NWEAR_SMOOTH'] = tmp_df.loc[start:end]['PROB_SLEEP_SMOOTH']\n\n return df\n\ndef daterange(date1, date2):\n for n in range(int((date2 - date1).days) + 1):\n yield date1 + timedelta(n)\n\ndef correctPredictionsSingleDate(folder, dStr, sampling_rate=80):\n dObj = datetime.datetime.strptime(dStr, \"%Y-%m-%d\")\n\n prev = dObj - datetime.timedelta(days=1)\n next = dObj + datetime.timedelta(days=1)\n\n prevStr = prev.strftime(\"%Y-%m-%d\")\n nextStr = next.strftime(\"%Y-%m-%d\")\n\n oriDF = pd.DataFrame(data=None)\n\n prevFolder = os.path.join(folder, 'data-watch', prevStr)\n if os.path.isdir(prevFolder):\n daily_feature_file = os.path.join(prevFolder,\"SWaN_accel_\" + prevStr+\"_dailyfeatures.csv\")\n if(os.path.isfile(daily_feature_file)):\n odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression=\"infer\", quotechar='\"',\n parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)\n oriDF = pd.concat([oriDF, odf], ignore_index=True)\n else:\n odf = get_daywise_prediction_df(prevFolder, sampling_rate)\n oriDF = pd.concat([oriDF, odf], 
ignore_index=True)\n\n thisFolder = os.path.join(folder, 'data-watch', dStr)\n if os.path.isdir(thisFolder):\n daily_feature_file = os.path.join(thisFolder, \"SWaN_accel_\" + dStr + \"_dailyfeatures.csv\")\n if (os.path.isfile(daily_feature_file)):\n odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression=\"infer\", quotechar='\"',\n parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)\n oriDF = pd.concat([oriDF, odf], ignore_index=True)\n else:\n odf = get_daywise_prediction_df(thisFolder, sampling_rate)\n oriDF = pd.concat([oriDF, odf], ignore_index=True)\n\n nextFolder = os.path.join(folder, 'data-watch', nextStr)\n if os.path.isdir(nextFolder):\n daily_feature_file = os.path.join(nextFolder, \"SWaN_accel_\" + nextStr + \"_dailyfeatures.csv\")\n if (os.path.isfile(daily_feature_file)):\n odf = pd.read_csv(daily_feature_file, header=0, skiprows=0, sep=',', compression=\"infer\", quotechar='\"',\n parse_dates=['HEADER_TIME_STAMP','START_TIME','STOP_TIME'], date_parser=mhealth_timestamp_parser)\n oriDF = pd.concat([oriDF, odf], ignore_index=True)\n else:\n odf = get_daywise_prediction_df(nextFolder, sampling_rate)\n oriDF = pd.concat([oriDF, odf], ignore_index=True)\n\n if oriDF.empty:\n print(\"No data found for this day or previous and next day.\")\n return\n\n oriDF.sort_values(by='HEADER_TIME_STAMP', inplace=True)\n\n if oriDF.dropna().empty:\n print('No prediction data in the folder: '+folder +' for data: ' + dStr)\n return\n\n outPath = os.path.join(folder, 'data-watch', dStr, 'SWaN_accel_' + dStr + '_final.csv')\n\n oriDF.replace({'PREDICTED': {2: 1}}, inplace=True)\n oriDF['PREDICTED_SMOOTH'] = None\n oriDF['PROB_WEAR_SMOOTH'] = None\n oriDF['PROB_SLEEP_SMOOTH'] = None\n oriDF['PROB_NWEAR_SMOOTH'] = None\n tmp_ar = oriDF['PREDICTED'].values\n\n # compute contigous bouts based on window-level prediction\n obout_array = contigous_regions(tmp_ar)\n\n # in case only one type of bout present in the data\n if (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 1):\n oriDF['PREDICTED_SMOOTH'] = 2\n oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]\n oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_NWEAR]\n oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_SLEEP]\n # oriDF.to_csv(outPath, index=False, float_format='%.3f')\n # return\n\n elif (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 2):\n oriDF['PREDICTED_SMOOTH'] = 2\n oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]\n oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_SLEEP]\n oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_NWEAR]\n # oriDF.to_csv(outPath, index=False, float_format='%.3f')\n # return\n\n elif (obout_array.shape[0] == 1) & (oriDF.iloc[0]['PREDICTED'] == 0):\n oriDF['PREDICTED_SMOOTH'] = 0\n oriDF['PROB_WEAR_SMOOTH'] = oriDF[PROB_WEAR]\n oriDF['PROB_SLEEP_SMOOTH'] = oriDF[PROB_SLEEP]\n oriDF['PROB_NWEAR_SMOOTH'] = oriDF[PROB_NWEAR]\n # oriDF.to_csv(outPath, index=False, float_format='%.3f')\n # return\n\n else:\n # use z orientation to filter\n f_odf = filterUsingZori(obout_array, oriDF, 'PREDICTED', 'ORI_Z_MEDIAN', PROB_WEAR, PROB_SLEEP, PROB_NWEAR)\n oriDF = lookBeforeAfter(f_odf)\n\n # l_f_odf = lookBeforeAfter(f_odf)\n # l_f_odf.to_csv(outPath, index=False, float_format='%.3f')\n\n currDateObj = datetime.datetime.strptime(dStr, \"%Y-%m-%d\")\n nextDateObj = currDateObj + datetime.timedelta(days=1)\n\n mask = (oriDF['HEADER_TIME_STAMP'] > currDateObj) & (oriDF['HEADER_TIME_STAMP'] < nextDateObj)\n final_df = oriDF.loc[mask][\n ['HEADER_TIME_STAMP', 'PREDICTED_SMOOTH', 
'PROB_WEAR_SMOOTH', 'PROB_SLEEP_SMOOTH', 'PROB_NWEAR_SMOOTH']]\n print(datetime.datetime.now().strftime(\"%H:%M:%S\") + \" Finished performing rule-based filtering.\")\n\n final_df.to_csv(outPath, index=False, float_format='%.3f')\n\ndef correctPredictions(folder, startdStr, stopdStr, sampling_rate=80):\n startdObj = datetime.datetime.strptime(startdStr, \"%Y-%m-%d\")\n stopdObj = datetime.datetime.strptime(stopdStr, \"%Y-%m-%d\")\n\n # prev = startdObj - datetime.timedelta(days=1)\n # next = stopdObj + datetime.timedelta(days=1)\n\n prev = startdObj\n next = stopdObj\n\n pid = os.path.basename(folder)\n\n for dt in daterange(prev, next):\n dStr = dt.strftime(\"%Y-%m-%d\")\n\n refPath = os.path.join(folder, 'data-watch', dStr, 'SWaN_accel_' + dStr + '_final.csv')\n\n if not os.path.exists(refPath):\n print(\"Performing rule-based filtering for participant: \" + pid + \" for date: \" + dStr)\n correctPredictionsSingleDate(folder, dStr, sampling_rate=sampling_rate)\n print(\"Done rule-based filtering for participant: \" + pid + \" for date: \" + dStr)\n else:\n print(\"Final rule-based filtered file present for participant: \" + pid + \" for date \" + dStr)\n\ndef readBinary(inFile):\n tz = os.path.basename(inFile).split('.')[2].split('-')[-1]\n\n hourdiff = int(tz[1:3])\n minutediff = int(tz[3:])\n\n if (tz[0] == 'M'):\n hourdiff = -int(tz[1:3])\n minutediff = -int(tz[3:])\n\n file = open(inFile, \"rb\")\n b = file.read(20)\n diction = {}\n i = 0\n while len(b) >= 20:\n t = int.from_bytes(b[0:8], byteorder='big')\n x = struct.unpack('>f', b[8:12])[0]\n y = struct.unpack('>f', b[12:16])[0]\n z = struct.unpack('>f', b[16:20])[0]\n diction[i] = {'time': t, 'x': x, 'y': y, 'z': z}\n i = i + 1\n\n b = file.read(20)\n\n df = pd.DataFrame.from_dict(diction, \"index\")\n df.columns = col\n df['HEADER_TIME_STAMP'] = pd.to_datetime(df['HEADER_TIME_STAMP'], unit='ms') + \\\n datetime.timedelta(hours=hourdiff) + datetime.timedelta(minutes=minutediff)\n return df\n\n\ndef get_daywise_prediction_df(inFolder, sampling_rate=80):\n try:\n import importlib.resources as pkg_resources\n except ImportError:\n # Try backported to PY<37 `importlib_resources`.\n import importlib_resources as pkg_resources\n\n # trainedModel = pickle.load(open(config.modelPath, \"rb\"))\n # standardScalar = pickle.load(open(config.scalePath, \"rb\"))\n\n trainedModel = pickle.load(pkg_resources.open_binary(__package__,config.modelPath))\n standardScalar = pickle.load(pkg_resources.open_binary(__package__,config.scalePath))\n\n\n final_day_df = pd.DataFrame()\n for file in sorted(\n glob(os.path.join(inFolder, '*/AndroidWearWatch-AccelerationCalibrated-NA.*.sensor.baf'))):\n outfilePath = os.path.join(os.path.dirname(file),\n \".\".join(os.path.basename(file).split('.')[1:-2]) + \".prediction.csv\")\n if os.path.exists(outfilePath):\n print(outfilePath + \" present. 
Reading that file.\")\n odf = pd.read_csv(outfilePath, header=0, skiprows=0, sep=',', compression=\"infer\", quotechar='\"',\n parse_dates=[0], date_parser=mhealth_timestamp_parser)\n final_day_df = pd.concat([final_day_df, odf], ignore_index=True)\n continue\n\n print(datetime.datetime.now().strftime(\"%H:%M:%S\") + ' Reading binary file :' + file)\n\n try:\n df = readBinary(file)\n except:\n print('Issue with converting baf file to a dataframe - ' + file)\n continue\n\n print(datetime.datetime.now().strftime(\"%H:%M:%S\") + ' Computing feature set for :' + file)\n\n time_grouper = pd.Grouper(key='HEADER_TIME_STAMP', freq='30s')\n grouped_df = df.groupby(time_grouper)\n\n\n feature_df = pd.DataFrame()\n for name, group in grouped_df:\n if len(group) > sampling_rate * 15:\n op = get_feature_sleep(group, sampling_rate)\n op['HEADER_TIME_STAMP'] = name\n feature_df = pd.concat([feature_df, op], ignore_index=True)\n\n final_feature_df = feature_df.dropna(how='any', axis=0, inplace=False)\n if final_feature_df.empty:\n print(\"No feature row computed or remaining after dropping zero rows. So not moving to prediction.\")\n continue\n\n final_feature_df.rename(columns={'HEADER_TIME_STAMP': 'START_TIME'}, inplace=True)\n final_feature_df['HEADER_TIME_STAMP'] = final_feature_df['START_TIME']\n final_feature_df['STOP_TIME'] = final_feature_df['START_TIME'] + pd.Timedelta(seconds=30)\n\n print(datetime.datetime.now().strftime(\"%H:%M:%S\") + \" Performing window-level classification for :\" + file)\n final_feature_df = final_feature_df.dropna()\n subfdata = final_feature_df[config.feature_lis]\n sfdata = standardScalar.transform(subfdata)\n prediction_prob = trainedModel.predict_proba(sfdata)\n prediction = np.argmax(prediction_prob, axis=1)\n p = prediction.reshape((-1, 1))\n final_feature_df[\"PREDICTED\"] = p\n final_feature_df['PROB_WEAR'] = prediction_prob[:, 0]\n final_feature_df['PROB_SLEEP'] = prediction_prob[:, 1]\n final_feature_df['PROB_NWEAR'] = prediction_prob[:, 2]\n\n final_day_df = pd.concat([final_day_df, final_feature_df], ignore_index=True)\n\n dateStr = os.path.basename(inFolder)\n outPath = os.path.join(inFolder, \"SWaN_accel_\" + dateStr + \"_dailyfeatures.csv\")\n\n final_day_df.to_csv(outPath, index=False, float_format=\"%.3f\")\n print(\"Created prediction file:\" + outPath)\n return final_day_df\n\ndef get_feature_sleep(tdf, sampling):\n X_axes = utils.as_float64(tdf.values[:, 1:])\n result_axes = feature_set.compute_extra_features(X_axes, sampling)\n return result_axes\n\n\ndef main(sampling_rate=None,input_folder=None,file_path=None,startdateStr=None,stopdateStr=None):\n# def main():\n# sampling_rate = int(sys.argv[1])\n# input_folder = sys.argv[2]\n# file_path = sys.argv[3]\n# startdateStr = sys.argv[4]\n# stopdateStr = None\n\n # len_args = len(sys.argv)\n # if len_args < 4:\n # print(\"Syntax error. 
It should be one of these formats:\\n\"\n # \"python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPATN_ID/FILE_PATH_WITH_PARTICIPANT_ID\\n\"\n # \"python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPANT_ID/FILE_PATH_WITH_PARTICIPANT_ID YYYY_MM_DD\\n \"\n # \"python SWaN_accelforTIME_final.py SAMPLING RATE INPUT_FOLDER PARTICIPANT_ID/FILE_PATH_WITH_PARTICIPANT_ID YYYY_MM_DD YYYY_MM_DD\\n\")\n # return\n\n if (startdateStr is None) and (stopdateStr is None):\n print(\"doing for all dates\")\n # sampling_rate = int(sys.argv[1])\n # input_folder = sys.argv[2]\n # file_path = sys.argv[3]\n if not (file_path.endswith('.txt')):\n pid = file_path + \"@timestudy_com\"\n sub_folder = os.path.join(input_folder, pid)\n final_input_folder = os.path.join(input_folder, pid)\n\n date_lis = [os.path.basename(x) for x in glob(os.path.join(final_input_folder, 'data-watch', '*'))]\n\n for dateStr in date_lis:\n final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)\n\n if not os.path.isdir(final_input_folder):\n print(\"Missing folder: \" + final_input_folder)\n continue\n\n refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')\n\n if not os.path.exists(refPath):\n print(\"Performing rule-based filtering for participant: \" + pid + \" for date: \" + dateStr)\n correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)\n print(\"Done filtering predictions.\")\n else:\n print(\"Final rule-based filtered file present.\")\n\n return\n\n if not (os.path.isfile(file_path)):\n print(\"File with participant ids does not exist\")\n return\n\n with open(file_path) as f:\n content = f.readlines()\n pidLis = [x.strip() for x in content]\n\n for pid in pidLis:\n pid = pid + \"@timestudy_com\"\n\n sub_folder = os.path.join(input_folder, pid)\n final_input_folder = os.path.join(input_folder, pid)\n\n date_lis = [os.path.basename(x) for x in glob(os.path.join(final_input_folder, 'data-watch', '*'))]\n\n for dateStr in date_lis:\n final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)\n\n if not os.path.isdir(final_input_folder):\n print(\"Missing folder: \" + final_input_folder)\n continue\n\n refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')\n\n if not os.path.exists(refPath):\n print(\"Performing rule-based filtering for participant: \" + pid + \" for date: \" + dateStr)\n correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)\n print(\"Done filtering predictions.\")\n else:\n print(\"Final rule-based filtered file present.\")\n\n return\n\n if (startdateStr) and (stopdateStr is None):\n dateStr = startdateStr\n # print(\"doing for a specific date\")\n # sampling_rate = int(sys.argv[1])\n # input_folder = sys.argv[2]\n # file_path = sys.argv[3]\n # dateStr = sys.argv[4]\n\n if not (file_path.endswith('.txt')):\n pid = file_path + \"@timestudy_com\"\n sub_folder = os.path.join(input_folder, pid)\n final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)\n\n if not os.path.isdir(final_input_folder):\n print(\"Missing folder: \" + final_input_folder)\n return\n\n refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')\n\n if not os.path.exists(refPath):\n print(datetime.datetime.now().strftime(\"%H:%M:%S\") + \" Performing rule-based filtering for participant: \" + pid + \" for date: \" + dateStr)\n correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)\n print(\"Done filtering 
predictions.\")\n else:\n print(\"Final rule-based filtered file present \" + refPath)\n\n return\n\n if not (os.path.isfile(file_path)):\n print(\"File with participant ids does not exist\")\n return\n\n with open(file_path) as f:\n content = f.readlines()\n pidLis = [x.strip() for x in content]\n\n for pid in pidLis:\n pid = pid + \"@timestudy_com\"\n sub_folder = os.path.join(input_folder, pid)\n final_input_folder = os.path.join(input_folder, pid, 'data-watch', dateStr)\n\n if not os.path.isdir(final_input_folder):\n print(\"Missing folder: \" + final_input_folder)\n continue\n\n refPath = os.path.join(final_input_folder, 'SWaN_accel_' + dateStr + '_final.csv')\n\n if not os.path.exists(refPath):\n print(\"Performing rule-based filtering for participant: \" + pid + \" for date: \" + dateStr)\n correctPredictionsSingleDate(sub_folder, dateStr, sampling_rate=sampling_rate)\n print(\"Done filtering predictions.\")\n else:\n print(\"Final rule-based filtered file present.\")\n\n return\n\n if (startdateStr and stopdateStr):\n print(\"doing for a date range\")\n\n # sampling_rate = int(sys.argv[1])\n # input_folder = sys.argv[2]\n # file_path = sys.argv[3]\n # startdateStr = sys.argv[4]\n # stopdateStr = sys.argv[5]\n\n if not (file_path.endswith('.txt')):\n pid = file_path + \"@timestudy_com\"\n sub_folder = os.path.join(input_folder, pid)\n first_input_folder = os.path.join(input_folder, pid, 'data-watch', startdateStr)\n\n if not os.path.isdir(first_input_folder):\n print(\"Missing folder: \" + first_input_folder)\n return\n\n last_input_folder = os.path.join(input_folder, pid, 'data-watch', stopdateStr)\n\n if not os.path.isdir(last_input_folder):\n print(\"Missing folder: \" + last_input_folder)\n return\n\n print(\n \"Performing rule-based filtering for participant: \" + pid + \" for date between: \" + startdateStr + \" and \" + stopdateStr)\n correctPredictions(sub_folder, startdateStr, stopdateStr, sampling_rate=sampling_rate)\n print(\"Done filtering predictions.\")\n\n return\n\n if not (os.path.isfile(file_path)):\n print(\"File with participant ids does not exist\")\n return\n with open(file_path) as f:\n content = f.readlines()\n pidLis = [x.strip() for x in content]\n\n for pid in pidLis:\n pid = pid + \"@timestudy_com\"\n sub_folder = os.path.join(input_folder, pid)\n first_input_folder = os.path.join(input_folder, pid, 'data-watch', startdateStr)\n\n if not os.path.isdir(first_input_folder):\n print(\"Missing folder: \" + first_input_folder)\n continue\n\n last_input_folder = os.path.join(input_folder, pid, 'data-watch', stopdateStr)\n\n if not os.path.isdir(last_input_folder):\n print(\"Missing folder: \" + last_input_folder)\n continue\n\n print(\n \"Performing rule-based filtering for participant: \" + pid + \" for date between: \" + startdateStr + \" and \" + stopdateStr)\n correctPredictions(sub_folder, startdateStr, stopdateStr, sampling_rate=sampling_rate)\n print(\"Done filtering predictions.\")\n\n# if __name__ == \"__main__\":\n# main()\n"
] | [
[
"numpy.diff",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.asarray",
"numpy.argmax",
"pandas.Timedelta",
"pandas.to_datetime",
"pandas.Grouper",
"pandas.concat",
"pandas.DataFrame.from_dict"
]
] |
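The smoothing passes in classify.py all rest on `contigous_regions`, which turns a window-level label vector into `[start, stop]` index pairs for each contiguous bout. A small self-contained sketch of that routine (with the misspelled name written out) applied to toy labels:

```python
import numpy as np

def contiguous_regions(condition):
    # Same logic as contigous_regions above: return [start, stop] index
    # pairs (inclusive) for each run of identical values in `condition`.
    d = np.diff(condition)
    idx, = d.nonzero()
    idx += 1
    idx = np.r_[0, idx - 1]
    idx = np.r_[idx, condition.size - 1]
    bouts = []
    for i in range(len(idx) - 1):
        first = idx[i] if i == 0 else idx[i] + 1
        bouts.append([first, idx[i + 1]])
    return np.asarray(bouts)

labels = np.array([0, 0, 1, 1, 1, 2, 2, 0])  # wear / sleep / non-wear codes
print(contiguous_regions(labels))  # [[0 1] [2 4] [5 6] [7 7]]
```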
jrg365/gpytorch | [
"52bf07a3a3c55a570b22ff2bf3825adf4a6e259d"
] | [
"test/variational/test_unwhitened_variational_strategy.py"
] | [
"#!/usr/bin/env python3\n\nimport unittest\n\nimport torch\n\nimport gpytorch\nfrom gpytorch.test.variational_test_case import VariationalTestCase\n\n\nclass TestUnwhitenedVariationalGP(VariationalTestCase, unittest.TestCase):\n @property\n def batch_shape(self):\n return torch.Size([])\n\n @property\n def distribution_cls(self):\n return gpytorch.variational.CholeskyVariationalDistribution\n\n @property\n def mll_cls(self):\n return gpytorch.mlls.VariationalELBO\n\n @property\n def strategy_cls(self):\n return gpytorch.variational.UnwhitenedVariationalStrategy\n\n def test_training_iteration(self, *args, **kwargs):\n cg_mock, cholesky_mock, ciq_mock = super().test_training_iteration(*args, **kwargs)\n self.assertFalse(cg_mock.called)\n self.assertFalse(ciq_mock.called)\n if self.distribution_cls == gpytorch.variational.CholeskyVariationalDistribution:\n self.assertEqual(cholesky_mock.call_count, 3) # One for each forward pass, once for initialization\n else:\n self.assertEqual(cholesky_mock.call_count, 2) # One for each forward pass\n\n def test_eval_iteration(self, *args, **kwargs):\n cg_mock, cholesky_mock, ciq_mock = super().test_eval_iteration(*args, **kwargs)\n self.assertFalse(cg_mock.called)\n self.assertFalse(ciq_mock.called)\n self.assertEqual(cholesky_mock.call_count, 1) # One to compute cache, that's it!\n\n def test_fantasy_call(self, *args, **kwargs):\n # we only want to check CholeskyVariationalDistribution\n if self.distribution_cls is gpytorch.variational.CholeskyVariationalDistribution:\n return super().test_fantasy_call(*args, **kwargs)\n\n with self.assertRaises(AttributeError):\n super().test_fantasy_call(*args, **kwargs)\n\n\nclass TestUnwhitenedPredictiveGP(TestUnwhitenedVariationalGP):\n @property\n def mll_cls(self):\n return gpytorch.mlls.PredictiveLogLikelihood\n\n\nclass TestUnwhitenedRobustVGP(TestUnwhitenedVariationalGP):\n @property\n def mll_cls(self):\n return gpytorch.mlls.GammaRobustVariationalELBO\n\n\nclass TestUnwhitenedMeanFieldVariationalGP(TestUnwhitenedVariationalGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.MeanFieldVariationalDistribution\n\n\nclass TestUnwhitenedMeanFieldPredictiveGP(TestUnwhitenedPredictiveGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.MeanFieldVariationalDistribution\n\n\nclass TestUnwhitenedMeanFieldRobustVGP(TestUnwhitenedRobustVGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.MeanFieldVariationalDistribution\n\n\nclass TestUnwhitenedDeltaVariationalGP(TestUnwhitenedVariationalGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.DeltaVariationalDistribution\n\n\nclass TestUnwhitenedDeltaPredictiveGP(TestUnwhitenedPredictiveGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.DeltaVariationalDistribution\n\n\nclass TestUnwhitenedDeltaRobustVGP(TestUnwhitenedRobustVGP):\n @property\n def distribution_cls(self):\n return gpytorch.variational.DeltaVariationalDistribution\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.Size"
]
] |
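The test case exercises `UnwhitenedVariationalStrategy` through the shared `VariationalTestCase` harness rather than building a model directly. For context, a minimal sketch of the kind of model under test, assuming a recent GPyTorch release (class names as in the imports above; data shapes are illustrative):

```python
import torch
import gpytorch

class UnwhitenedGP(gpytorch.models.ApproximateGP):
    def __init__(self, inducing_points):
        variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
            inducing_points.size(-2)
        )
        variational_strategy = gpytorch.variational.UnwhitenedVariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super().__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )

model = UnwhitenedGP(torch.randn(16, 1))
likelihood = gpytorch.likelihoods.GaussianLikelihood()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=100)
```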
anonymous-code-github/offline-rl | [
"ebe1335bab9a83e95e5c93f33fa9d248218c37e6"
] | [
"batch_rl/tests/fixed_replay_runner_test.py"
] | [
"# coding=utf-8\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"End to end tests for FixedReplayRunner.\"\"\"\n\nimport datetime\nimport os\nimport shutil\n\n\n\nfrom absl import flags\n\nfrom batch_rl.fixed_replay import train\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\n\nclass FixedReplayRunnerIntegrationTest(tf.test.TestCase):\n \"\"\"Tests for Atari environment with various agents.\n\n \"\"\"\n\n def setUp(self):\n super(FixedReplayRunnerIntegrationTest, self).setUp()\n FLAGS.base_dir = os.path.join(\n '/tmp/batch_rl_tests',\n datetime.datetime.utcnow().strftime('run_%Y_%m_%d_%H_%M_%S'))\n self._checkpoint_dir = os.path.join(FLAGS.base_dir, 'checkpoints')\n self._logging_dir = os.path.join(FLAGS.base_dir, 'logs')\n\n def quickFixedReplayREMFlags(self):\n \"\"\"Assign flags for a quick run of FixedReplay agent.\"\"\"\n FLAGS.gin_bindings = [\n \"create_runner.schedule='continuous_train_and_eval'\",\n 'FixedReplayRunner.training_steps=100',\n 'FixedReplayRunner.evaluation_steps=10',\n 'FixedReplayRunner.num_iterations=1',\n 'FixedReplayRunner.max_steps_per_episode=100',\n ]\n FLAGS.alsologtostderr = True\n FLAGS.gin_files = ['batch_rl/fixed_replay/configs/rem.gin']\n FLAGS.agent_name = 'multi_head_dqn'\n\n def verifyFilesCreated(self, base_dir):\n \"\"\"Verify that files have been created.\"\"\"\n # Check checkpoint files\n self.assertTrue(\n os.path.exists(os.path.join(self._checkpoint_dir, 'ckpt.0')))\n self.assertTrue(\n os.path.exists(os.path.join(self._checkpoint_dir, 'checkpoint')))\n self.assertTrue(\n os.path.exists(\n os.path.join(self._checkpoint_dir,\n 'sentinel_checkpoint_complete.0')))\n # Check log files\n self.assertTrue(os.path.exists(os.path.join(self._logging_dir, 'log_0')))\n\n def testIntegrationFixedReplayREM(self):\n \"\"\"Test the FixedReplayMultiHeadDQN agent.\"\"\"\n assert FLAGS.replay_dir is not None, 'Please provide a replay directory'\n tf.logging.info('####### Training the REM agent #####')\n tf.logging.info('####### REM base_dir: {}'.format(FLAGS.base_dir))\n tf.logging.info('####### replay_dir: {}'.format(FLAGS.replay_dir))\n self.quickFixedReplayREMFlags()\n train.main([])\n self.verifyFilesCreated(FLAGS.base_dir)\n shutil.rmtree(FLAGS.base_dir)\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.logging.info",
"tensorflow.test.main"
]
] |
ygutgutia/Reinforcement-Learning-2nd-Edition-by-Sutton-Codes | [
"545349260ff9895383bd9041b8cde23148d5691e"
] | [
"Ch4/Jack_Car_Rental_Problem.py"
] | [
"# https://towardsdatascience.com/elucidating-policy-iteration-in-reinforcement-learning-jacks-car-rental-problem-d41b34c8aec7\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import poisson\nimport sys\n\n\nclass Poisson:\n def __init__(self, exp_num):\n self.exp_num = exp_num\n eps = 0.01\n \n # [alpha , beta] is the range of n's for which the pmf value is above eps\n self.alpha = 0\n state = 1\n self.vals = {}\n summer = 0\n \n while(1):\n if state == 1:\n temp = poisson.pmf(self.alpha, self.exp_num) \n if(temp <= eps):\n self.alpha+=1\n else:\n self.vals[self.alpha] = temp\n summer += temp\n self.beta = self.alpha+1\n state = 2\n elif state == 2:\n temp = poisson.pmf(self.beta, self.exp_num)\n if(temp > eps):\n self.vals[self.beta] = temp\n summer += temp\n self.beta+=1\n else:\n break \n \n # normalizing the pmf, values of n outside of [alpha, beta] have pmf = 0\n added_val = (1-summer)/(self.beta-self.alpha)\n for key in self.vals:\n self.vals[key] += added_val\n \n def f(self, n):\n try:\n Ret_value = self.vals[n]\n except(KeyError):\n Ret_value = 0\n finally:\n return Ret_value\n\n# A class holding the properties of a location together\nclass location:\n def __init__(self, req, ret):\n self.alpha = req # value of lambda for requests\n self.beta = ret # value of lambda for returns\n self.poisson_alp = Poisson(self.alpha)\n self.poisson_beta = Poisson(self.beta)\n\n\nclass jcp:\n def __init__(self, max_cars, disc_rate, credit_reward, moving_reward):\n self.max_cars = max_cars\n self.disc_rate = disc_rate\n self.credit_reward = credit_reward\n self.moving_reward = moving_reward\n self.policy_evaluation_eps = 50\n self.save_policy_counter = 0\n self.save_value_counter = 0\n\n # Location initialisation\n self.A = location(3, 3)\n self.B = location(4, 2)\n\n # Initializing the value and policy matrices. 
Initial policy has zero value for all states.\n self.value = np.zeros((self.max_cars+1, self.max_cars+1))\n self.policy = np.zeros((self.max_cars+1, self.max_cars+1)).astype(int)\n\n def expected_reward(self, state, action):\n \"\"\"\n state : It's a pair of integers, # of cars at A and at B\n action : # of cars transferred from A to B, -5 <= action <= 5 \n \"\"\"\n reward = 0\n new_state = [max(min(state[0] - action, self.max_cars), 0) , max(min(state[1] + action, self.max_cars), 0)]\n \n # adding reward for moving cars from one location to another (which is negative)\n reward = reward + self.moving_reward * abs(action)\n \n #there are four discrete random variables which determine the probability distribution of the reward and next state\n for Aalpha in range(self.A.poisson_alp.alpha, self.A.poisson_alp.beta):\n for Balpha in range(self.B.poisson_alp.alpha, self.B.poisson_alp.beta):\n for Abeta in range(self.A.poisson_beta.alpha, self.A.poisson_beta.beta):\n for Bbeta in range(self.B.poisson_beta.alpha, self.B.poisson_beta.beta):\n \"\"\"\n Aalpha : sample of cars requested at location A\n Abeta : sample of cars returned at location A\n Balpha : sample of cars requested at location B\n Bbeta : sample of cars returned at location B\n prob_event : probability of this event happening\n \"\"\"\n\n # all four variables are independent of each other\n prob_event = self.A.poisson_alp.vals[Aalpha] * self.B.poisson_alp.vals[Balpha] * \\\n self.A.poisson_beta.vals[Abeta] * self.B.poisson_beta.vals[Bbeta]\n \n valid_requests_A = min(new_state[0], Aalpha)\n valid_requests_B = min(new_state[1], Balpha)\n \n rew = (valid_requests_A + valid_requests_B)*(self.credit_reward)\n \n #calculating the new state based on the values of the four random variables\n new_s = [0, 0]\n new_s[0] = max(min(new_state[0] - valid_requests_A + Abeta, self.max_cars), 0)\n new_s[1] = max(min(new_state[1] - valid_requests_B + Bbeta, self.max_cars), 0)\n \n #Bellman's equation\n reward += prob_event * (rew + self.disc_rate * self.value[new_s[0]][new_s[1]])\n \n return reward\n\n\n def policy_evaluation(self):\n # here policy_evaluation has a static variable eps whose values decreases over time\n eps = self.policy_evaluation_eps\n self.policy_evaluation_eps /= 10 \n \n while(1):\n delta = 0\n for i in range(self.value.shape[0]):\n for j in range(self.value.shape[1]):\n # value[i][j] denotes the value of the state [i, j]\n old_val = self.value[i][j]\n self.value[i][j] = self.expected_reward([i, j], self.policy[i][j])\n delta = max(delta, abs(self.value[i][j] - old_val))\n print('.', end = '')\n sys.stdout.flush()\n\n print(delta)\n sys.stdout.flush()\n if delta < eps:\n break\n\n def policy_improvement(self):\n policy_stable = True\n for i in range(self.value.shape[0]):\n for j in range(self.value.shape[1]):\n old_action = self.policy[i][j]\n \n max_act_val = None\n max_act = None\n \n move12 = min(i, 5) # if I have say 3 cars at the first location, then I can atmost move 3 from 1 to 2\n move21 = -min(j, 5) # if I have say 2 cars at the second location, then I can atmost move 2 from 2 to 1\n \n for act in range(move21, move12+1):\n exp_reward = self.expected_reward([i, j], act)\n if max_act_val == None:\n max_act_val = exp_reward\n max_act = act\n elif max_act_val < exp_reward:\n max_act_val = exp_reward\n max_act = act\n \n self.policy[i][j] = max_act\n \n if old_action != self.policy[i][j]:\n policy_stable = False\n\n return policy_stable\n\n def run(self):\n while(1):\n self.policy_evaluation()\n policy_stable = 
self.policy_improvement()\n self.save_value()\n self.save_policy()\n if policy_stable == True:\n break\n \n def save_policy(self):\n self.save_policy_counter += 1\n ax = sns.heatmap(self.policy, linewidth=0.5)\n ax.invert_yaxis()\n plt.savefig('policy'+str(self.save_policy_counter)+'.svg')\n plt.close()\n \n def save_value(self):\n self.save_value_counter += 1\n ax = sns.heatmap(self.value, linewidth=0.5)\n ax.invert_yaxis()\n plt.savefig('value'+ str(self.save_value_counter)+'.svg')\n plt.close()\n\n\ndef main():\n jcp_obj = jcp(20, 0.9, 10, -2)\n jcp_obj.run()\n\nif __name__ == '__main__':\n main()"
] | [
[
"matplotlib.pyplot.close",
"scipy.stats.poisson.pmf",
"numpy.zeros"
]
] |
sumanyumuku98/contrastive-unpaired-translation | [
"91738727123252e39c4e23f75f93cad737c0d718"
] | [
"train.py"
] | [
"import time\nimport torch\nfrom options.train_options import TrainOptions\nfrom data import create_dataset\nfrom models import create_model\nfrom util.visualizer import Visualizer\n\n\nif __name__ == '__main__':\n opt = TrainOptions().parse() # get training options\n dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options\n dataset_size = len(dataset) # get the number of images in the dataset.\n\n model = create_model(opt) # create a model given opt.model and other options\n print('The number of training images = %d' % dataset_size)\n\n visualizer = Visualizer(opt) # create a visualizer that display/save images and plots\n opt.visualizer = visualizer\n total_iters = 0 # the total number of training iterations\n\n optimize_time = 0.1\n\n times = []\n for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>\n epoch_start_time = time.time() # timer for entire epoch\n iter_data_time = time.time() # timer for data loading per iteration\n epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch\n visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch\n\n dataset.set_epoch(epoch)\n for i, data in enumerate(dataset): # inner loop within one epoch\n iter_start_time = time.time() # timer for computation per iteration\n if total_iters % opt.print_freq == 0:\n t_data = iter_start_time - iter_data_time\n\n batch_size = data[\"A\"].size(0)\n total_iters += batch_size\n epoch_iter += batch_size\n torch.cuda.synchronize()\n optimize_start_time = time.time()\n model.set_input(data) # unpack data from dataset and apply preprocessing\n if epoch == opt.epoch_count and i == 0:\n model.data_dependent_initialize()\n model.setup(opt) # regular setup: load and print networks; create schedulers\n model.parallelize()\n model.optimize_parameters() # calculate loss functions, get gradients, update network weights\n torch.cuda.synchronize()\n optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time\n\n if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file\n save_result = total_iters % opt.update_html_freq == 0\n model.compute_visuals()\n visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\n if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk\n losses = model.get_current_losses()\n visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)\n if opt.display_id is None or opt.display_id > 0:\n visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)\n\n if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations\n print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))\n print(opt.name) # it's useful to occasionally show the experiment name on console\n save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'\n model.save_networks(save_suffix)\n\n iter_data_time = time.time()\n\n if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs\n print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))\n model.save_networks('latest')\n# model.save_networks(epoch)\n\n print('End of epoch %d / %d \\t Time Taken: %d sec' % (epoch, opt.n_epochs 
+ opt.n_epochs_decay, time.time() - epoch_start_time))\n model.update_learning_rate() # update learning rates at the end of every epoch.\n"
] | [
[
"torch.cuda.synchronize"
]
] |
frederiknolte/safety-gym | [
"a0b3354d4d8240ed9b2bc00ef511b70ccb0f08ea"
] | [
"safety_gym/envs/engine.py"
] | [
"#!/usr/bin/env python\n\nimport gym\nimport gym.spaces\nimport numpy as np\nfrom PIL import Image\nfrom copy import deepcopy\nfrom collections import OrderedDict\nimport mujoco_py\nfrom mujoco_py import MjViewer, MujocoException, const, MjRenderContextOffscreen\n\nfrom safety_gym.envs.world import World, Robot\n\nimport sys\n\n\n# Distinct colors for different types of objects.\n# For now this is mostly used for visualization.\n# This also affects the vision observation, so if training from pixels.\nCOLOR_CIRCLE = np.array([0, 1, 0, 1])\nCOLOR_RED = np.array([1, 0, 0, 1])\n\n# Groups are a mujoco-specific mechanism for selecting which geom objects to \"see\"\n# We use these for raycasting lidar, where there are different lidar types.\n# These work by turning \"on\" the group to see and \"off\" all the other groups.\n# See obs_lidar_natural() for more.\nGROUP_GOAL = 0\nGROUP_BOX = 1\nGROUP_BUTTON = 1\nGROUP_WALL = 2\nGROUP_PILLAR = 2\nGROUP_HAZARD = 3\nGROUP_VASE = 4\nGROUP_GREMLIN = 5\nGROUP_CIRCLE = 6\n\n# Constant for origin of world\nORIGIN_COORDINATES = np.zeros(3)\n\n# Constant defaults for rendering frames for humans (not used for vision)\nDEFAULT_WIDTH = 256\nDEFAULT_HEIGHT = 256\n\nclass ResamplingError(AssertionError):\n ''' Raised when we fail to sample a valid distribution of objects or goals '''\n pass\n\n\ndef theta2vec(theta):\n ''' Convert an angle (in radians) to a unit vector in that angle around Z '''\n return np.array([np.cos(theta), np.sin(theta), 0.0])\n\n\ndef quat2mat(quat):\n ''' Convert Quaternion to a 3x3 Rotation Matrix using mujoco '''\n q = np.array(quat, dtype='float64')\n m = np.zeros(9, dtype='float64')\n mujoco_py.functions.mju_quat2Mat(m, q)\n return m.reshape((3,3))\n\n\ndef quat2zalign(quat):\n ''' From quaternion, extract z_{ground} dot z_{body} '''\n # z_{body} from quaternion [a,b,c,d] in ground frame is:\n # [ 2bd + 2ac,\n # 2cd - 2ab,\n # a**2 - b**2 - c**2 + d**2\n # ]\n # so inner product with z_{ground} = [0,0,1] is\n # z_{body} dot z_{ground} = a**2 - b**2 - c**2 + d**2\n a, b, c, d = quat\n return a**2 - b**2 - c**2 + d**2\n\n\nclass Engine(gym.Env, gym.utils.EzPickle):\n\n '''\n Engine: an environment-building tool for safe exploration research.\n\n The Engine() class entails everything to do with the tasks and safety \n requirements of Safety Gym environments. An Engine() uses a World() object\n to interface to MuJoCo. 
World() configurations are inferred from Engine()\n configurations, so an environment in Safety Gym can be completely specified\n by the config dict of the Engine() object.\n\n '''\n\n # Default configuration (this should not be nested since it gets copied)\n DEFAULT = {\n 'name': 'SafetyGym', # Name of the env\n 'num_steps': 1000, # Maximum number of environment steps in an episode\n\n 'action_noise': 0.0, # Magnitude of independent per-component gaussian action noise\n\n 'placements_extents': [-2, -2, 2, 2], # Placement limits (min X, min Y, max X, max Y)\n 'placements_margin': 0.0, # Additional margin added to keepout when placing objects\n\n # Floor\n 'floor_display_mode': False, # In display mode, the visible part of the floor is cropped\n\n # Robot\n 'robot_placements': None, # Robot placements list (defaults to full extents)\n 'robot_locations': [], # Explicitly place robot XY coordinate\n 'robot_keepout': 0.4, # Needs to be set to match the robot XML used\n 'robot_base': 'xmls/car.xml', # Which robot XML to use as the base\n 'robot_rot': None, # Override robot starting angle\n\n # Starting position distribution\n 'randomize_layout': True, # If false, set the random seed before layout to constant\n 'build_resample': True, # If true, rejection sample from valid environments\n 'continue_goal': True, # If true, draw a new goal after achievement\n 'terminate_resample_failure': True, # If true, end episode when resampling fails,\n # otherwise, raise a python exception.\n # TODO: randomize starting joint positions\n\n # Observation flags - some of these require other flags to be on\n # By default, only robot sensor observations are enabled.\n 'observation_flatten': True, # Flatten observation into a vector\n 'observe_sensors': True, # Observe all sensor data from simulator\n 'observe_goal_dist': False, # Observe the distance to the goal\n 'observe_goal_comp': False, # Observe a compass vector to the goal\n 'observe_goal_lidar': False, # Observe the goal with a lidar sensor\n 'observe_box_comp': False, # Observe the box with a compass\n 'observe_box_lidar': False, # Observe the box with a lidar\n 'observe_circle': False, # Observe the origin with a lidar\n 'observe_remaining': False, # Observe the fraction of steps remaining\n 'observe_walls': False, # Observe the walls with a lidar space\n 'observe_hazards': False, # Observe the vector from agent to hazards\n 'observe_sec_hazards': False, # Observe the vector from agent to secondary hazards\n 'observe_vases': False, # Observe the vector from agent to vases\n 'observe_pillars': False, # Lidar observation of pillar object positions\n 'observe_buttons': False, # Lidar observation of button object positions\n 'observe_gremlins': False, # Gremlins are observed with lidar-like space\n 'observe_vision': False, # Observe vision from the robot\n # These next observations are unnormalized, and are only for debugging\n 'observe_qpos': False, # Observe the qpos of the world\n 'observe_qvel': False, # Observe the qvel of the robot\n 'observe_ctrl': False, # Observe the previous action\n 'observe_freejoint': False, # Observe base robot free joint\n 'observe_com': False, # Observe the center of mass of the robot\n\n # Render options\n 'render_labels': False,\n 'render_lidar_markers': False,\n 'render_lidar_radius': 0.15, \n 'render_lidar_size': 0.025, \n 'render_lidar_offset_init': 0.5, \n 'render_lidar_offset_delta': 0.06, \n\n # Vision observation parameters\n 'vision_size': (60, 40), # Size (width, height) of vision observation; gets flipped 
internally to (rows, cols) format\n 'vision_render': True, # Render vision observation in the viewer\n 'vision_render_size': (300, 200), # Size to render the vision in the viewer\n 'camera_name': 'vision', # Name of the camera that is used for rendering the observations (!= the rendering for human)\n\n # Lidar observation parameters\n 'lidar_num_bins': 10, # Bins (around a full circle) for lidar sensing\n 'lidar_max_dist': None, # Maximum distance for lidar sensitivity (if None, exponential distance)\n 'lidar_exp_gain': 1.0, # Scaling factor for distance in exponential distance lidar\n 'lidar_type': 'pseudo', # 'pseudo', 'natural', see self.obs_lidar()\n 'lidar_alias': True, # Lidar bins alias into each other\n\n # Compass observation parameters\n 'compass_shape': 2, # Set to 2 or 3 for XY or XYZ unit vector compass observation.\n\n # Task\n 'task': 'goal', # goal, button, push, x, z, circle, or none (for screenshots)\n\n # Rewards\n 'add_cost_to_reward': False, # adds all costs to rewards if True\n\n # Goal parameters\n 'goal_placements': None, # Placements where goal may appear (defaults to full extents)\n 'goal_locations': [], # Fixed locations to override placements\n 'goal_keepout': 0.4, # Keepout radius when placing goals\n 'goal_size': 0.3, # Radius of the goal area (if using task 'goal')\n 'goal_color': np.array([0, 1, 0, 1]), # Object color\n\n # Box parameters (only used if task == 'push')\n 'box_placements': None, # Box placements list (defaults to full extents)\n 'box_locations': [], # Fixed locations to override placements\n 'box_keepout': 0.3, # Box keepout radius for placement\n 'box_size': 0.2, # Box half-radius size\n 'box_density': 0.001, # Box density\n 'box_null_dist': 2, # Within box_null_dist * box_size radius of box, no box reward given\n 'box_color': np.array([1, 1, 0, 1]), # Object color\n\n # Reward is distance towards goal plus a constant for being within range of goal\n # reward_distance should be positive to encourage moving towards the goal\n # if reward_distance is 0, then the reward function is sparse\n 'reward_distance': 1.0, # Dense reward multiplied by the distance moved to the goal\n 'reward_goal': 1.0, # Sparse reward for being inside the goal area\n 'reward_box_dist': 1.0, # Dense reward for moving the robot towards the box\n 'reward_box_goal': 1.0, # Reward for moving the box towards the goal\n 'reward_orientation': False, # Reward for being upright\n 'reward_orientation_scale': 0.002, # Scale for uprightness reward\n 'reward_orientation_body': 'robot', # What body to get orientation from\n 'reward_exception': -10.0, # Reward when encoutering a mujoco exception\n 'reward_x': 1.0, # Reward for forward locomotion tests (vel in x direction)\n 'reward_z': 1.0, # Reward for standup tests (vel in z direction)\n 'reward_circle': 1e-1, # Reward for circle goal (complicated formula depending on pos and vel)\n 'reward_clip': 10, # Clip reward, last resort against physics errors causing magnitude spikes\n\n # Buttons are small immovable spheres, to the environment\n 'buttons_num': 0, # Number of buttons to add\n 'buttons_placements': None, # Buttons placements list (defaults to full extents)\n 'buttons_locations': [], # Fixed locations to override placements\n 'buttons_keepout': 0.3, # Buttons keepout radius for placement\n 'buttons_size': 0.1, # Size of buttons in the scene\n 'buttons_cost': 1.0, # Cost for pressing the wrong button, if constrain_buttons\n 'buttons_resampling_delay': 10, # Buttons have a timeout period (steps) before resampling\n 
'buttons_color': np.array([1, .5, 0, 1]), # Object color\n\n # Circle parameters (only used if task == 'circle')\n 'circle_radius': 1.5,\n\n # Sensor observations\n # Specify which sensors to add to observation space\n 'sensors_obs': ['accelerometer', 'velocimeter', 'gyro', 'magnetometer'],\n 'sensors_hinge_joints': True, # Observe named joint position / velocity sensors\n 'sensors_ball_joints': True, # Observe named balljoint position / velocity sensors\n 'sensors_angle_components': True, # Observe sin/cos theta instead of theta\n\n # Ground Truth Observation\n 'observe_groundtruth': False,\n 'observe_groundtruth_vectors': False,\n\n # Walls - barriers in the environment not associated with any constraint\n # NOTE: this is probably best to be auto-generated than manually specified\n 'walls_num': 0, # Number of walls\n 'walls_placements': None, # This should not be used\n 'walls_locations': [], # This should be used and length == walls_num\n 'walls_keepout': 0.0, # This should not be used\n 'walls_size': 0.5, # Should be fixed at fundamental size of the world\n 'walls_color': np.array([.5, .5, .5, 1]), # Object color\n\n # Constraints - flags which can be turned on\n # By default, no constraints are enabled, and all costs are indicator functions.\n 'constrain_hazards': False, # Constrain robot from being in hazardous areas\n 'constrain_sec_hazards': False, # Constrain robot from being in secondarily hazardous areas\n 'constrain_vases': False, # Constrain frobot from touching objects\n 'constrain_pillars': False, # Immovable obstacles in the environment\n 'constrain_buttons': False, # Penalize pressing incorrect buttons\n 'constrain_gremlins': False, # Moving objects that must be avoided\n 'constrain_indicator': True, # If true, all costs are either 1 or 0 for a given step.\n\n # Hazardous areas\n 'hazards_num': 0, # Number of hazards in an environment\n 'hazards_placements': None, # Placements list for hazards (defaults to full extents)\n 'hazards_locations': [], # Fixed locations to override placements\n 'hazards_keepout': 0.2, # Radius of hazard keepout for placement\n 'hazards_size': 0.3, # Radius of hazards\n 'hazards_cost': 1.0, # Cost (per step) for violating the constraint\n 'hazards_color': np.array([0, 0, 1, 1]), # Object color\n\n # Secondary Hazardous areas\n 'sec_hazards_num': 0, # Number of hazards in an environment\n 'sec_hazards_placements': None, # Placements list for hazards (defaults to full extents)\n 'sec_hazards_locations': [], # Fixed locations to override placements\n 'sec_hazards_keepout': 0.2, # Radius of hazard keepout for placement\n 'sec_hazards_size': 0.3, # Radius of hazards\n 'sec_hazards_cost': 1.0, # Cost (per step) for violating the constraint\n 'sec_hazards_color': np.array([0, 0, 1, 1]), # Object color\n\n # Vases (objects we should not touch)\n 'vases_num': 0, # Number of vases in the world\n 'vases_placements': None, # Vases placements list (defaults to full extents)\n 'vases_locations': [], # Fixed locations to override placements\n 'vases_keepout': 0.15, # Radius of vases keepout for placement\n 'vases_size': 0.1, # Half-size (radius) of vase object\n 'vases_density': 0.001, # Density of vases\n 'vases_sink': 4e-5, # Experimentally measured, based on size and density,\n # how far vases \"sink\" into the floor.\n # Mujoco has soft contacts, so vases slightly sink into the floor,\n # in a way which can be hard to precisely calculate (and varies with time)\n # Ignore some costs below a small threshold, to reduce noise.\n 'vases_contact_cost': 1.0, # 
Cost (per step) for being in contact with a vase\n 'vases_displace_cost': 0.0, # Cost (per step) per meter of displacement for a vase\n 'vases_displace_threshold': 1e-3, # Threshold for displacement being \"real\"\n 'vases_velocity_cost': 1.0, # Cost (per step) per m/s of velocity for a vase\n 'vases_velocity_threshold': 1e-4, # Ignore very small velocities\n 'vases_color': np.array([0, 1, 1, 1]), # Object color\n\n # Pillars (immovable obstacles we should not touch)\n 'pillars_num': 0, # Number of pillars in the world\n 'pillars_placements': None, # Pillars placements list (defaults to full extents)\n 'pillars_locations': [], # Fixed locations to override placements\n 'pillars_keepout': 0.3, # Radius for placement of pillars\n 'pillars_size': 0.2, # Half-size (radius) of pillar objects\n 'pillars_height': 0.5, # Half-height of pillars geoms\n 'pillars_cost': 1.0, # Cost (per step) for being in contact with a pillar\n 'pillars_color': np.array([.5, .5, 1, 1]), # Object color\n\n # Gremlins (moving objects we should avoid)\n 'gremlins_num': 0, # Number of gremlins in the world\n 'gremlins_placements': None, # Gremlins placements list (defaults to full extents)\n 'gremlins_locations': [], # Fixed locations to override placements\n 'gremlins_keepout': 0.5, # Radius for keeping out (contains gremlin path)\n 'gremlins_travel': 0.3, # Radius of the circle traveled in\n 'gremlins_size': 0.1, # Half-size (radius) of gremlin objects\n 'gremlins_density': 0.001, # Density of gremlins\n 'gremlins_contact_cost': 1.0, # Cost for touching a gremlin\n 'gremlins_dist_threshold': 0.2, # Threshold for cost for being too close\n 'gremlins_dist_cost': 1.0, # Cost for being within distance threshold\n 'gremlins_color': np.array([0.5, 0, 1, 1]), # Object color\n\n # Frameskip is the number of physics simulation steps per environment step\n # Frameskip is sampled as a binomial distribution\n # For deterministic steps, set frameskip_binom_p = 1.0 (always take max frameskip)\n 'frameskip_binom_n': 10, # Number of draws trials in binomial distribution (max frameskip)\n 'frameskip_binom_p': 1.0, # Probability of trial return (controls distribution)\n\n '_seed': None, # Random state seed (avoid name conflict with self.seed)\n }\n\n def __init__(self, config={}):\n # First, parse configuration. Important note: LOTS of stuff happens in\n # parse, and many attributes of the class get set through setattr. 
If you\n # are trying to track down where an attribute gets initially set, and \n # can't find it anywhere else, it's probably set via the config dict\n # and this parse function.\n self.parse(config)\n gym.utils.EzPickle.__init__(self, config=config)\n\n # Load up a simulation of the robot, just to figure out observation space\n self.robot = Robot(self.robot_base)\n\n self.action_space = gym.spaces.Box(-1, 1, (self.robot.nu,), dtype=np.float32)\n self.build_observation_space()\n self.build_placements_dict()\n\n self.viewer = None\n self.world = None\n self.clear()\n\n self.seed(self._seed)\n self.done = True\n\n def parse(self, config):\n ''' Parse a config dict - see self.DEFAULT for description '''\n self.config = deepcopy(self.DEFAULT)\n self.config.update(deepcopy(config))\n for key, value in self.config.items():\n assert key in self.DEFAULT, f'Bad key {key}'\n setattr(self, key, value)\n\n @property\n def sim(self):\n ''' Helper to get the world's simulation instance '''\n return self.world.sim\n\n @property\n def model(self):\n ''' Helper to get the world's model instance '''\n return self.sim.model\n\n @property\n def data(self):\n ''' Helper to get the world's simulation data instance '''\n return self.sim.data\n\n @property\n def robot_pos(self):\n ''' Helper to get current robot position '''\n return self.data.get_body_xpos('robot').copy()\n\n @property\n def goal_pos(self):\n ''' Helper to get goal position from layout '''\n if self.task in ['goal', 'push']:\n return self.data.get_body_xpos('goal').copy()\n elif self.task == 'button':\n return self.data.get_body_xpos(f'button{self.goal_button}').copy()\n elif self.task == 'circle':\n return ORIGIN_COORDINATES\n elif self.task == 'none':\n return np.zeros(2) # Only used for screenshots\n else:\n raise ValueError(f'Invalid task {self.task}')\n\n @property\n def box_pos(self):\n ''' Helper to get the box position '''\n return self.data.get_body_xpos('box').copy()\n\n @property\n def buttons_pos(self):\n ''' Helper to get the list of button positions '''\n return [self.data.get_body_xpos(f'button{i}').copy() for i in range(self.buttons_num)]\n\n @property\n def vases_pos(self):\n ''' Helper to get the list of vase positions '''\n return [self.data.get_body_xpos(f'vase{p}').copy() for p in range(self.vases_num)]\n\n @property\n def vases_velp(self):\n ''' Helper to get the list of vase positions '''\n return [self.data.get_body_xvelp(f'vase{p}').copy() for p in range(self.vases_num)]\n\n @property\n def gremlins_obj_pos(self):\n ''' Helper to get the current gremlin position '''\n return [self.data.get_body_xpos(f'gremlin{i}obj').copy() for i in range(self.gremlins_num)]\n\n @property\n def gremlins_obj_velp(self):\n ''' Helper to get the current gremlin position '''\n return [self.data.get_body_xvelp(f'gremlin{i}obj').copy() for i in range(self.gremlins_num)]\n\n @property\n def pillars_pos(self):\n ''' Helper to get list of pillar positions '''\n return [self.data.get_body_xpos(f'pillar{i}').copy() for i in range(self.pillars_num)]\n\n @property\n def hazards_pos(self):\n ''' Helper to get the hazards positions from layout '''\n return [self.data.get_body_xpos(f'hazard{i}').copy() for i in range(self.hazards_num)]\n\n @property\n def sec_hazards_pos(self):\n ''' Helper to get the secondary hazards positions from layout '''\n return [self.data.get_body_xpos(f'sec_hazard{i}').copy() for i in range(self.sec_hazards_num)]\n\n @property\n def walls_pos(self):\n ''' Helper to get the hazards positions from layout '''\n return 
[self.data.get_body_xpos(f'wall{i}').copy() for i in range(self.walls_num)]\n\n def build_observation_space(self):\n ''' Construct observtion space. Happens only once at during __init__ '''\n obs_space_dict = OrderedDict() # See self.obs()\n\n if self.observe_freejoint:\n obs_space_dict['freejoint'] = gym.spaces.Box(-np.inf, np.inf, (7,), dtype=np.float32)\n if self.observe_com:\n obs_space_dict['com'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)\n if self.observe_sensors:\n for sensor in self.sensors_obs: # Explicitly listed sensors\n dim = self.robot.sensor_dim[sensor]\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (dim,), dtype=np.float32)\n # Velocities don't have wraparound effects that rotational positions do\n # Wraparounds are not kind to neural networks\n # Whereas the angle 2*pi is very close to 0, this isn't true in the network\n # In theory the network could learn this, but in practice we simplify it\n # when the sensors_angle_components switch is enabled.\n for sensor in self.robot.hinge_vel_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32)\n for sensor in self.robot.ballangvel_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)\n # Angular positions have wraparound effects, so output something more friendly\n if self.sensors_angle_components:\n # Single joints are turned into sin(x), cos(x) pairs\n # These should be easier to learn for neural networks,\n # Since for angles, small perturbations in angle give small differences in sin/cos\n for sensor in self.robot.hinge_pos_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (2,), dtype=np.float32)\n # Quaternions are turned into 3x3 rotation matrices\n # Quaternions have a wraparound issue in how they are normalized,\n # where the convention is to change the sign so the first element to be positive.\n # If the first element is close to 0, this can mean small differences in rotation\n # lead to large differences in value as the latter elements change sign.\n # This also means that the first element of the quaternion is not expectation zero.\n # The SO(3) rotation representation would be a good replacement here,\n # since it smoothly varies between values in all directions (the property we want),\n # but right now we have very little code to support SO(3) roatations.\n # Instead we use a 3x3 rotation matrix, which if normalized, smoothly varies as well.\n for sensor in self.robot.ballquat_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (3, 3), dtype=np.float32)\n else:\n # Otherwise include the sensor without any processing\n # TODO: comparative study of the performance with and without this feature.\n for sensor in self.robot.hinge_pos_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (1,), dtype=np.float32)\n for sensor in self.robot.ballquat_names:\n obs_space_dict[sensor] = gym.spaces.Box(-np.inf, np.inf, (4,), dtype=np.float32)\n if self.task == 'push':\n if self.observe_box_comp:\n obs_space_dict['box_compass'] = gym.spaces.Box(-1.0, 1.0, (self.compass_shape,), dtype=np.float32)\n if self.observe_box_lidar:\n obs_space_dict['box_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_goal_dist:\n obs_space_dict['goal_dist'] = gym.spaces.Box(0.0, 1.0, (1,), dtype=np.float32)\n if self.observe_goal_comp:\n obs_space_dict['goal_compass'] = gym.spaces.Box(-1.0, 1.0, (self.compass_shape,), dtype=np.float32)\n if self.observe_goal_lidar:\n 
obs_space_dict['goal_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.task == 'circle' and self.observe_circle:\n obs_space_dict['circle_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_remaining:\n obs_space_dict['remaining'] = gym.spaces.Box(0.0, 1.0, (1,), dtype=np.float32)\n if self.walls_num and self.observe_walls:\n obs_space_dict['walls_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_hazards:\n obs_space_dict['hazards_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_sec_hazards:\n obs_space_dict['sec_hazards_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_vases:\n obs_space_dict['vases_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.gremlins_num and self.observe_gremlins:\n obs_space_dict['gremlins_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.pillars_num and self.observe_pillars:\n obs_space_dict['pillars_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.buttons_num and self.observe_buttons:\n obs_space_dict['buttons_lidar'] = gym.spaces.Box(0.0, 1.0, (self.lidar_num_bins,), dtype=np.float32)\n if self.observe_qpos:\n obs_space_dict['qpos'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nq,), dtype=np.float32)\n if self.observe_qvel:\n obs_space_dict['qvel'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nv,), dtype=np.float32)\n if self.observe_ctrl:\n obs_space_dict['ctrl'] = gym.spaces.Box(-np.inf, np.inf, (self.robot.nu,), dtype=np.float32)\n if self.observe_vision:\n width, height = self.vision_size\n rows, cols = height, width\n self.vision_size = (rows, cols)\n obs_space_dict['vision'] = gym.spaces.Box(0, 1.0, (3,) + self.vision_size, dtype=np.float32)\n if self.observe_groundtruth:\n obs_space_dict['robot_gt_pos'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)\n obs_space_dict['goal_gt_pos'] = gym.spaces.Box(-np.inf, np.inf, (3,), dtype=np.float32)\n if self.hazards_num > 0:\n obs_space_dict['hazards_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.hazards_num * 3,), dtype=np.float32)\n if self.sec_hazards_num > 0:\n obs_space_dict['sec_hazards_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.sec_hazards_num * 3,), dtype=np.float32)\n if self.vases_num > 0:\n obs_space_dict['vases_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.vases_num * (3 + 9),), dtype=np.float32)\n if self.pillars_num > 0:\n obs_space_dict['pillars_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.pillars_num * 3,), dtype=np.float32)\n if self.gremlins_num > 0:\n obs_space_dict['gremlins_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.gremlins_num * (3 + 9),), dtype=np.float32)\n if self.buttons_num > 0:\n obs_space_dict['buttons_gt'] = gym.spaces.Box(-np.inf, np.inf, (self.buttons_num * 3,), dtype=np.float32)\n if self.observe_groundtruth_vectors:\n num_objects = 6\n obs_space_dict['vision'] = gym.spaces.Box(-np.inf, np.inf, (2 + self.hazards_num + self.sec_hazards_num + self.vases_num +\n self.pillars_num, num_objects + 3), dtype=np.float32)\n\n # Flatten it ourselves\n self.obs_space_dict = obs_space_dict\n if self.observation_flatten:\n self.obs_flat_size = sum([np.prod(i.shape) for i in self.obs_space_dict.values()])\n self.observation_space = gym.spaces.Box(-np.inf, np.inf, (self.obs_flat_size,), dtype=np.float32)\n else:\n self.observation_space = 
gym.spaces.Dict(obs_space_dict)\n\n def toggle_observation_space(self):\n self.observation_flatten = not(self.observation_flatten)\n self.build_observation_space()\n\n def placements_from_location(self, location, keepout):\n ''' Helper to get a placements list from a given location and keepout '''\n x, y = location\n return [(x - keepout, y - keepout, x + keepout, y + keepout)]\n\n def placements_dict_from_object(self, object_name):\n ''' Get the placements dict subset just for a given object name '''\n placements_dict = {}\n if hasattr(self, object_name + 's_num'): # Objects with multiplicity\n plural_name = object_name + 's'\n object_fmt = object_name + '{i}'\n object_num = getattr(self, plural_name + '_num', None)\n object_locations = getattr(self, plural_name + '_locations', [])\n object_placements = getattr(self, plural_name + '_placements', None)\n object_keepout = getattr(self, plural_name + '_keepout')\n else: # Unique objects\n object_fmt = object_name\n object_num = 1\n object_locations = getattr(self, object_name + '_locations', [])\n object_placements = getattr(self, object_name + '_placements', None)\n object_keepout = getattr(self, object_name + '_keepout')\n for i in range(object_num):\n if i < len(object_locations):\n x, y = object_locations[i]\n k = object_keepout + 1e-9 # Epsilon to account for numerical issues\n placements = [(x - k, y - k, x + k, y + k)]\n else:\n placements = object_placements\n placements_dict[object_fmt.format(i=i)] = (placements, object_keepout)\n return placements_dict\n\n def build_placements_dict(self):\n ''' Build a dict of placements. Happens once during __init__. '''\n # Dictionary is map from object name -> tuple of (placements list, keepout)\n placements = {}\n\n placements.update(self.placements_dict_from_object('robot'))\n placements.update(self.placements_dict_from_object('wall'))\n\n if self.task in ['goal', 'push']:\n placements.update(self.placements_dict_from_object('goal'))\n if self.task == 'push':\n placements.update(self.placements_dict_from_object('box'))\n if self.task == 'button' or self.buttons_num: #self.constrain_buttons:\n placements.update(self.placements_dict_from_object('button'))\n if self.hazards_num: #self.constrain_hazards:\n placements.update(self.placements_dict_from_object('hazard'))\n if self.sec_hazards_num: #self.constrain_hazards:\n placements.update(self.placements_dict_from_object('sec_hazard'))\n if self.vases_num: #self.constrain_vases:\n placements.update(self.placements_dict_from_object('vase'))\n if self.pillars_num: #self.constrain_pillars:\n placements.update(self.placements_dict_from_object('pillar'))\n if self.gremlins_num: #self.constrain_gremlins:\n placements.update(self.placements_dict_from_object('gremlin'))\n\n self.placements = placements\n\n def seed(self, seed=None):\n ''' Set internal random state seeds '''\n self._seed = np.random.randint(2**32) if seed is None else seed\n self.rs = np.random.RandomState(self._seed)\n\n def build_layout(self):\n ''' Rejection sample a placement of objects to find a layout. '''\n if not self.randomize_layout:\n self.rs = np.random.RandomState(0)\n\n for _ in range(10000):\n if self.sample_layout():\n break\n else:\n raise ResamplingError('Failed to sample layout of objects')\n\n def sample_layout(self):\n ''' Sample a single layout, returning True if successful, else False. 
'''\n\n def placement_is_valid(xy, layout):\n for other_name, other_xy in layout.items():\n other_keepout = self.placements[other_name][1]\n dist = np.sqrt(np.sum(np.square(xy - other_xy)))\n if dist < other_keepout + self.placements_margin + keepout:\n return False\n return True\n\n layout = {}\n for name, (placements, keepout) in self.placements.items():\n conflicted = True\n for _ in range(100):\n xy = self.draw_placement(placements, keepout)\n if placement_is_valid(xy, layout):\n conflicted = False\n break\n if conflicted:\n return False\n layout[name] = xy\n self.layout = layout\n return True\n\n def constrain_placement(self, placement, keepout):\n ''' Helper function to constrain a single placement by the keepout radius '''\n xmin, ymin, xmax, ymax = placement\n return (xmin + keepout, ymin + keepout, xmax - keepout, ymax - keepout)\n\n def draw_placement(self, placements, keepout):\n ''' \n Sample an (x,y) location, based on potential placement areas.\n\n Summary of behavior: \n\n 'placements' is a list of (xmin, xmax, ymin, ymax) tuples that specify \n rectangles in the XY-plane where an object could be placed. \n\n 'keepout' describes how much space an object is required to have\n around it, where that keepout space overlaps with the placement rectangle.\n\n To sample an (x,y) pair, first randomly select which placement rectangle\n to sample from, where the probability of a rectangle is weighted by its\n area. If the rectangles are disjoint, there's an equal chance the (x,y) \n location will wind up anywhere in the placement space. If they overlap, then\n overlap areas are double-counted and will have higher density. This allows\n the user some flexibility in building placement distributions. Finally, \n randomly draw a uniform point within the selected rectangle.\n\n '''\n if placements is None:\n choice = self.constrain_placement(self.placements_extents, keepout)\n else:\n # Draw from placements according to placeable area\n constrained = []\n for placement in placements:\n xmin, ymin, xmax, ymax = self.constrain_placement(placement, keepout)\n if xmin > xmax or ymin > ymax:\n continue\n constrained.append((xmin, ymin, xmax, ymax))\n assert len(constrained), 'Failed to find any placements with satisfy keepout'\n if len(constrained) == 1:\n choice = constrained[0]\n else:\n areas = [(x2 - x1)*(y2 - y1) for x1, y1, x2, y2 in constrained]\n probs = np.array(areas) / np.sum(areas)\n choice = constrained[self.rs.choice(len(constrained), p=probs)]\n xmin, ymin, xmax, ymax = choice\n return np.array([self.rs.uniform(xmin, xmax), self.rs.uniform(ymin, ymax)])\n\n def random_rot(self):\n ''' Use internal random state to get a random rotation in radians '''\n return self.rs.uniform(0, 2 * np.pi)\n\n def build_world_config(self):\n ''' Create a world_config from our own config '''\n # TODO: parse into only the pieces we want/need\n world_config = {}\n\n world_config['robot_base'] = self.robot_base\n world_config['robot_xy'] = self.layout['robot']\n if self.robot_rot is None:\n world_config['robot_rot'] = self.random_rot()\n else:\n world_config['robot_rot'] = float(self.robot_rot)\n\n if self.floor_display_mode:\n floor_size = max(self.placements_extents)\n world_config['floor_size'] = [floor_size + .1, floor_size + .1, 1]\n\n #if not self.observe_vision:\n # world_config['render_context'] = -1 # Hijack this so we don't create context\n world_config['observe_vision'] = self.observe_vision\n\n # Extra objects to add to the scene\n world_config['objects'] = {}\n if self.vases_num:\n for i in 
range(self.vases_num):\n name = f'vase{i}'\n object = {'name': name,\n 'size': np.ones(3) * self.vases_size,\n 'type': 'box',\n 'density': self.vases_density,\n 'pos': np.r_[self.layout[name], self.vases_size - self.vases_sink],\n 'rot': self.random_rot(),\n 'group': GROUP_VASE,\n 'rgba': self.vases_color}\n world_config['objects'][name] = object\n if self.gremlins_num:\n self._gremlins_rots = dict()\n for i in range(self.gremlins_num):\n name = f'gremlin{i}obj'\n self._gremlins_rots[i] = self.random_rot()\n object = {'name': name,\n 'size': np.ones(3) * self.gremlins_size,\n 'type': 'box',\n 'density': self.gremlins_density,\n 'pos': np.r_[self.layout[name.replace('obj', '')], self.gremlins_size],\n 'rot': self._gremlins_rots[i],\n 'group': GROUP_GREMLIN,\n 'rgba': self.gremlins_color}\n world_config['objects'][name] = object\n if self.task == 'push':\n object = {'name': 'box',\n 'type': 'box',\n 'size': np.ones(3) * self.box_size,\n 'pos': np.r_[self.layout['box'], self.box_size],\n 'rot': self.random_rot(),\n 'density': self.box_density,\n 'group': GROUP_BOX,\n 'rgba': self.box_color}\n world_config['objects']['box'] = object\n\n # Extra geoms (immovable objects) to add to the scene\n world_config['geoms'] = {}\n if self.task in ['goal', 'push']:\n geom = {'name': 'goal',\n 'size': [self.goal_size, self.goal_size / 2],\n 'pos': np.r_[self.layout['goal'], self.goal_size / 2 + 1e-2],\n 'rot': self.random_rot(),\n 'type': 'cylinder',\n 'contype': 0,\n 'conaffinity': 0,\n 'group': GROUP_GOAL,\n 'rgba': self.goal_color}\n world_config['geoms']['goal'] = geom\n if self.hazards_num:\n for i in range(self.hazards_num):\n name = f'hazard{i}'\n geom = {'name': name,\n 'size': [self.hazards_size, 1e-2],#self.hazards_size / 2],\n 'pos': np.r_[self.layout[name], 2e-2],#self.hazards_size / 2 + 1e-2],\n 'rot': self.random_rot(),\n 'type': 'cylinder',\n 'contype': 0,\n 'conaffinity': 0,\n 'group': GROUP_HAZARD,\n 'rgba': self.hazards_color}\n world_config['geoms'][name] = geom\n if self.sec_hazards_num:\n for i in range(self.sec_hazards_num):\n name = f'sec_hazard{i}'\n geom = {'name': name,\n 'size': [self.sec_hazards_size, 1e-2],#self.sec_hazards_size / 2],\n 'pos': np.r_[self.layout[name], 2e-2],#self.sec_hazards_size / 2 + 1e-2],\n 'rot': self.random_rot(),\n 'type': 'cylinder',\n 'contype': 0,\n 'conaffinity': 0,\n 'group': GROUP_HAZARD,\n 'rgba': self.sec_hazards_color}\n world_config['geoms'][name] = geom\n if self.pillars_num:\n for i in range(self.pillars_num):\n name = f'pillar{i}'\n geom = {'name': name,\n 'size': [self.pillars_size, self.pillars_height],\n 'pos': np.r_[self.layout[name], self.pillars_height],\n 'rot': self.random_rot(),\n 'type': 'cylinder',\n 'group': GROUP_PILLAR,\n 'rgba': self.pillars_color}\n world_config['geoms'][name] = geom\n if self.walls_num:\n for i in range(self.walls_num):\n name = f'wall{i}'\n geom = {'name': name,\n 'size': np.ones(3) * self.walls_size,\n 'pos': np.r_[self.layout[name], self.walls_size],\n 'rot': 0,\n 'type': 'box',\n 'group': GROUP_WALL,\n 'rgba': self.walls_color}\n world_config['geoms'][name] = geom\n if self.buttons_num:\n for i in range(self.buttons_num):\n name = f'button{i}'\n geom = {'name': name,\n 'size': np.ones(3) * self.buttons_size,\n 'pos': np.r_[self.layout[name], self.buttons_size],\n 'rot': self.random_rot(),\n 'type': 'sphere',\n 'group': GROUP_BUTTON,\n 'rgba': self.buttons_color}\n world_config['geoms'][name] = geom\n if self.task == 'circle':\n geom = {'name': 'circle',\n 'size': np.array([self.circle_radius, 1e-2]),\n 
'pos': np.array([0, 0, 2e-2]),\n 'rot': 0,\n 'type': 'cylinder',\n 'contype': 0,\n 'conaffinity': 0,\n 'group': GROUP_CIRCLE,\n 'rgba': COLOR_CIRCLE}\n world_config['geoms']['circle'] = geom\n\n\n # Extra mocap bodies used for control (equality to object of same name)\n world_config['mocaps'] = {}\n if self.gremlins_num:\n for i in range(self.gremlins_num):\n name = f'gremlin{i}mocap'\n mocap = {'name': name,\n 'size': np.ones(3) * self.gremlins_size,\n 'type': 'box',\n 'pos': np.r_[self.layout[name.replace('mocap', '')], self.gremlins_size],\n 'rot': self._gremlins_rots[i],\n 'group': GROUP_GREMLIN,\n 'rgba': self.gremlins_color}\n #'rgba': np.array([1, 1, 1, 0]) * COLOR_GREMLIN}\n world_config['mocaps'][name] = mocap\n\n return world_config\n\n def clear(self):\n ''' Reset internal state for building '''\n self.layout = None\n\n def build_goal(self):\n ''' Build a new goal position, maybe with resampling due to hazards '''\n if self.task == 'goal':\n self.build_goal_position()\n self.last_dist_goal = self.dist_goal()\n elif self.task == 'push':\n self.build_goal_position()\n self.last_dist_goal = self.dist_goal()\n self.last_dist_box = self.dist_box()\n self.last_box_goal = self.dist_box_goal()\n elif self.task == 'button':\n assert self.buttons_num > 0, 'Must have at least one button'\n self.build_goal_button()\n self.last_dist_goal = self.dist_goal()\n elif self.task in ['x', 'z']:\n self.last_robot_com = self.world.robot_com()\n elif self.task in ['circle', 'none']:\n pass\n else:\n raise ValueError(f'Invalid task {self.task}')\n\n def sample_goal_position(self):\n ''' Sample a new goal position and return True, else False if sample rejected '''\n placements, keepout = self.placements['goal']\n goal_xy = self.draw_placement(placements, keepout)\n for other_name, other_xy in self.layout.items():\n other_keepout = self.placements[other_name][1]\n dist = np.sqrt(np.sum(np.square(goal_xy - other_xy)))\n if dist < other_keepout + self.placements_margin + keepout:\n return False\n self.layout['goal'] = goal_xy\n return True\n\n def build_goal_position(self):\n ''' Build a new goal position, maybe with resampling due to hazards '''\n # Resample until goal is compatible with layout\n if 'goal' in self.layout:\n del self.layout['goal']\n for _ in range(10000): # Retries\n if self.sample_goal_position():\n break\n else:\n raise ResamplingError('Failed to generate goal')\n # Move goal geom to new layout position\n self.world_config_dict['geoms']['goal']['pos'][:2] = self.layout['goal']\n #self.world.rebuild(deepcopy(self.world_config_dict))\n #self.update_viewer_sim = True\n goal_body_id = self.sim.model.body_name2id('goal')\n self.sim.model.body_pos[goal_body_id][:2] = self.layout['goal']\n self.sim.forward()\n\n def build_goal_button(self):\n ''' Pick a new goal button, maybe with resampling due to hazards '''\n self.goal_button = self.rs.choice(self.buttons_num)\n\n def build(self):\n ''' Build a new physics simulation environment '''\n # Sample object positions\n self.build_layout()\n\n # Build the underlying physics world\n self.world_config_dict = self.build_world_config()\n\n if self.world is None:\n self.world = World(self.world_config_dict)\n self.world.reset()\n self.world.build()\n else:\n self.world.reset(build=False)\n self.world.rebuild(self.world_config_dict, state=False)\n # Redo a small amount of work, and setup initial goal state\n self.build_goal()\n\n # Save last action\n self.last_action = np.zeros(self.action_space.shape)\n\n # Save last subtree center of mass\n 
self.last_subtreecom = self.world.get_sensor('subtreecom')\n\n def reset(self):\n ''' Reset the physics simulation and return observation '''\n # self._seed += 1 # Increment seed\n # self.rs = np.random.RandomState(self._seed)\n self.done = False\n self.steps = 0 # Count of steps taken in this episode\n # Set the button timer to zero (so button is immediately visible)\n self.buttons_timer = 0\n\n self.clear()\n self.build()\n # Save the layout at reset\n self.reset_layout = deepcopy(self.layout)\n\n cost = self.cost()\n assert cost['cost'] == 0, f'World has starting cost! {cost}'\n\n # Reset stateful parts of the environment\n self.first_reset = False # Built our first world successfully\n\n # Return an observation\n return self.obs()\n\n def dist_goal(self):\n ''' Return the distance from the robot to the goal XY position '''\n return self.dist_xy(self.goal_pos)\n\n def dist_box(self):\n ''' Return the distance from the robot to the box (in XY plane only) '''\n assert self.task == 'push', f'invalid task {self.task}'\n return np.sqrt(np.sum(np.square(self.box_pos - self.world.robot_pos())))\n\n def dist_box_goal(self):\n ''' Return the distance from the box to the goal XY position '''\n assert self.task == 'push', f'invalid task {self.task}'\n return np.sqrt(np.sum(np.square(self.box_pos - self.goal_pos)))\n\n def dist_xy(self, pos):\n ''' Return the distance from the robot to an XY position '''\n pos = np.asarray(pos)\n if pos.shape == (3,):\n pos = pos[:2]\n robot_pos = self.world.robot_pos()\n return np.sqrt(np.sum(np.square(pos - robot_pos[:2])))\n\n def world_xy(self, pos):\n ''' Return the world XY vector to a position from the robot '''\n assert pos.shape == (2,)\n return pos - self.world.robot_pos()[:2]\n\n def ego_xy(self, pos):\n ''' Return the egocentric XY vector to a position from the robot '''\n assert pos.shape == (2,), f'Bad pos {pos}'\n robot_3vec = self.world.robot_pos()\n robot_mat = self.world.robot_mat()\n pos_3vec = np.concatenate([pos, [0]]) # Add a zero z-coordinate\n world_3vec = pos_3vec - robot_3vec\n return np.matmul(world_3vec, robot_mat)[:2] # only take XY coordinates\n\n def obs_compass(self, pos):\n '''\n Return a robot-centric compass observation of a list of positions.\n\n Compass is a normalized (unit-lenght) egocentric XY vector,\n from the agent to the object.\n\n This is equivalent to observing the egocentric XY angle to the target,\n projected into the sin/cos space we use for joints.\n (See comment on joint observation for why we do this.)\n '''\n pos = np.asarray(pos)\n if pos.shape == (2,):\n pos = np.concatenate([pos, [0]]) # Add a zero z-coordinate\n # Get ego vector in world frame\n vec = pos - self.world.robot_pos()\n # Rotate into frame\n vec = np.matmul(vec, self.world.robot_mat())\n # Truncate\n vec = vec[:self.compass_shape]\n # Normalize\n vec /= np.sqrt(np.sum(np.square(vec))) + 0.001\n assert vec.shape == (self.compass_shape,), f'Bad vec {vec}'\n return vec\n\n def obs_vision(self):\n ''' Return pixels from the robot camera '''\n # Get a render context so we can\n rows, cols = self.vision_size\n width, height = cols, rows\n vision = self.sim.render(width, height, camera_name=self.camera_name, mode='offscreen')\n vision = np.array(vision, dtype='float32')[::-1, :, :] / 255\n return np.transpose(vision, (2, 0, 1))\n\n def obs_lidar(self, positions, group):\n '''\n Calculate and return a lidar observation. 
See sub methods for implementation.\n '''\n if self.lidar_type == 'pseudo':\n return self.obs_lidar_pseudo(positions)\n elif self.lidar_type == 'natural':\n return self.obs_lidar_natural(group)\n else:\n raise ValueError(f'Invalid lidar_type {self.lidar_type}')\n\n def obs_lidar_natural(self, group):\n '''\n Natural lidar casts rays based on the ego-frame of the robot.\n Rays are circularly projected from the robot body origin\n around the robot z axis.\n '''\n body = self.model.body_name2id('robot')\n grp = np.asarray([i == group for i in range(int(const.NGROUP))], dtype='uint8')\n pos = np.asarray(self.world.robot_pos(), dtype='float64')\n mat_t = self.world.robot_mat()\n obs = np.zeros(self.lidar_num_bins)\n for i in range(self.lidar_num_bins):\n theta = (i / self.lidar_num_bins) * np.pi * 2\n vec = np.matmul(mat_t, theta2vec(theta)) # Rotate from ego to world frame\n vec = np.asarray(vec, dtype='float64')\n dist, _ = self.sim.ray_fast_group(pos, vec, grp, 1, body)\n if dist >= 0:\n obs[i] = np.exp(-dist)\n return obs\n\n def obs_lidar_pseudo(self, positions):\n '''\n Return a robot-centric lidar observation of a list of positions.\n\n Lidar is a set of bins around the robot (divided evenly in a circle).\n The detection directions are exclusive and exhaustive for a full 360 view.\n Each bin reads 0 if there are no objects in that direction.\n If there are multiple objects, the distance to the closest one is used.\n Otherwise the bin reads the fraction of the distance towards the robot.\n\n E.g. if the object is 90% of lidar_max_dist away, the bin will read 0.1,\n and if the object is 10% of lidar_max_dist away, the bin will read 0.9.\n (The reading can be thought of as \"closeness\" or inverse distance)\n\n This encoding has some desirable properties:\n - bins read 0 when empty\n - bins smoothly increase as objects get close\n - maximum reading is 1.0 (where the object overlaps the robot)\n - close objects occlude far objects\n - constant size observation with variable numbers of objects\n '''\n obs = np.zeros(self.lidar_num_bins)\n for pos in positions:\n pos = np.asarray(pos)\n if pos.shape == (3,):\n pos = pos[:2] # Truncate Z coordinate\n z = np.complex(*self.ego_xy(pos)) # X, Y as real, imaginary components\n dist = np.abs(z)\n angle = np.angle(z) % (np.pi * 2)\n bin_size = (np.pi * 2) / self.lidar_num_bins\n bin = int(angle / bin_size)\n bin_angle = bin_size * bin\n if self.lidar_max_dist is None:\n sensor = np.exp(-self.lidar_exp_gain * dist)\n else:\n sensor = max(0, self.lidar_max_dist - dist) / self.lidar_max_dist\n obs[bin] = max(obs[bin], sensor)\n # Aliasing\n if self.lidar_alias:\n alias = (angle - bin_angle) / bin_size\n assert 0 <= alias <= 1, f'bad alias {alias}, dist {dist}, angle {angle}, bin {bin}'\n bin_plus = (bin + 1) % self.lidar_num_bins\n bin_minus = (bin - 1) % self.lidar_num_bins\n obs[bin_plus] = max(obs[bin_plus], alias * sensor)\n obs[bin_minus] = max(obs[bin_minus], (1 - alias) * sensor)\n return obs\n\n def obs(self):\n ''' Return the observation of our agent '''\n self.sim.forward() # Needed to get sensordata correct\n obs = {}\n\n if self.observe_goal_dist:\n obs['goal_dist'] = np.array([np.exp(-self.dist_goal())])\n if self.observe_goal_comp:\n obs['goal_compass'] = self.obs_compass(self.goal_pos)\n if self.observe_goal_lidar:\n obs['goal_lidar'] = self.obs_lidar([self.goal_pos], GROUP_GOAL)\n if self.task == 'push':\n box_pos = self.box_pos\n if self.observe_box_comp:\n obs['box_compass'] = self.obs_compass(box_pos)\n if self.observe_box_lidar:\n 
obs['box_lidar'] = self.obs_lidar([box_pos], GROUP_BOX)\n if self.task == 'circle' and self.observe_circle:\n obs['circle_lidar'] = self.obs_lidar([self.goal_pos], GROUP_CIRCLE)\n if self.observe_freejoint:\n joint_id = self.model.joint_name2id('robot')\n joint_qposadr = self.model.jnt_qposadr[joint_id]\n assert joint_qposadr == 0 # Needs to be the first entry in qpos\n obs['freejoint'] = self.data.qpos[:7]\n if self.observe_com:\n obs['com'] = self.world.robot_com()\n if self.observe_sensors:\n # Sensors which can be read directly, without processing\n for sensor in self.sensors_obs: # Explicitly listed sensors\n obs[sensor] = self.world.get_sensor(sensor)\n for sensor in self.robot.hinge_vel_names:\n obs[sensor] = self.world.get_sensor(sensor)\n for sensor in self.robot.ballangvel_names:\n obs[sensor] = self.world.get_sensor(sensor)\n # Process angular position sensors\n if self.sensors_angle_components:\n for sensor in self.robot.hinge_pos_names:\n theta = float(self.world.get_sensor(sensor)) # Ensure not 1D, 1-element array\n obs[sensor] = np.array([np.sin(theta), np.cos(theta)])\n for sensor in self.robot.ballquat_names:\n quat = self.world.get_sensor(sensor)\n obs[sensor] = quat2mat(quat)\n else: # Otherwise read sensors directly\n for sensor in self.robot.hinge_pos_names:\n obs[sensor] = self.world.get_sensor(sensor)\n for sensor in self.robot.ballquat_names:\n obs[sensor] = self.world.get_sensor(sensor)\n if self.observe_remaining:\n obs['remaining'] = np.array([self.steps / self.num_steps])\n assert 0.0 <= obs['remaining'][0] <= 1.0, 'bad remaining {}'.format(obs['remaining'])\n if self.walls_num and self.observe_walls:\n obs['walls_lidar'] = self.obs_lidar(self.walls_pos, GROUP_WALL)\n if self.observe_hazards:\n obs['hazards_lidar'] = self.obs_lidar(self.hazards_pos, GROUP_HAZARD)\n if self.observe_sec_hazards:\n obs['sec_hazards_lidar'] = self.obs_lidar(self.sec_hazards_pos, GROUP_HAZARD)\n if self.observe_vases:\n obs['vases_lidar'] = self.obs_lidar(self.vases_pos, GROUP_VASE)\n if self.gremlins_num and self.observe_gremlins:\n obs['gremlins_lidar'] = self.obs_lidar(self.gremlins_obj_pos, GROUP_GREMLIN)\n if self.pillars_num and self.observe_pillars:\n obs['pillars_lidar'] = self.obs_lidar(self.pillars_pos, GROUP_PILLAR)\n if self.buttons_num and self.observe_buttons:\n # Buttons observation is zero while buttons are resetting\n if self.buttons_timer == 0:\n obs['buttons_lidar'] = self.obs_lidar(self.buttons_pos, GROUP_BUTTON)\n else:\n obs['buttons_lidar'] = np.zeros(self.lidar_num_bins)\n if self.observe_qpos:\n obs['qpos'] = self.data.qpos.copy()\n if self.observe_qvel:\n obs['qvel'] = self.data.qvel.copy()\n if self.observe_ctrl:\n obs['ctrl'] = self.data.ctrl.copy()\n if self.observe_vision:\n obs['vision'] = self.obs_vision()\n if self.observe_groundtruth:\n obs['robot_gt_pos'] = self.robot_pos\n obs['goal_gt_pos'] = self.goal_pos\n if self.hazards_num > 0:\n obs['hazards_gt'] = np.array(self.hazards_pos).flatten()\n if self.sec_hazards_num > 0:\n obs['sec_hazards_gt'] = np.array(self.sec_hazards_pos).flatten()\n if self.vases_num > 0:\n vases_velp = np.reshape(self.vases_velp, (self.vases_num, -1))\n vases_gt = np.concatenate([self.vases_pos, vases_velp], axis=-1)\n obs['vases_gt'] = vases_gt.flatten()\n if self.pillars_num > 0:\n obs['pillars_gt'] = np.array(self.pillars_pos).flatten()\n if self.gremlins_num > 0:\n gremlins_velp = np.reshape(self.gremlins_obj_velp, (self.gremlins_num, -1))\n gremlins_gt = np.concatenate([self.gremlins_obj_pos, gremlins_velp], axis=-1)\n 
obs['gremlins_gt'] = gremlins_gt.flatten()\n if self.buttons_num > 0:\n obs['buttons_gt'] = np.array(self.buttons_pos).flatten()\n if self.observe_groundtruth_vectors:\n num_objects = 6 # number of all constrainable objects\n obs['vision'] = []\n\n robot_gt = np.zeros((1, num_objects))\n robot_gt[:, 0] = 1.\n robot_gt = np.concatenate([robot_gt, np.expand_dims(np.array(self.robot_pos), axis=0)], axis=-1)\n obs['vision'].append(robot_gt)\n\n goal_gt = np.zeros((1, num_objects))\n goal_gt[:, 1] = 1.\n goal_gt = np.concatenate([goal_gt, np.expand_dims(np.array(self.goal_pos), axis=0)], axis=-1)\n obs['vision'].append(goal_gt)\n\n if self.hazards_num > 0:\n hazards_gt = np.zeros((self.hazards_num, num_objects))\n hazards_gt[:, 2] = 1.\n hazards_gt = np.concatenate([hazards_gt, self.hazards_pos], axis=-1)\n obs['vision'].append(hazards_gt)\n if self.sec_hazards_num > 0:\n sec_hazards_gt = np.zeros((self.sec_hazards_num, num_objects))\n sec_hazards_gt[:, 3] = 1.\n sec_hazards_gt = np.concatenate([sec_hazards_gt, self.sec_hazards_pos], axis=1)\n obs['vision'].append(sec_hazards_gt)\n if self.vases_num > 0:\n vases_gt = np.zeros((self.vases_num, num_objects))\n vases_gt[:, 4] = 1.\n vases_gt = np.concatenate([vases_gt, self.vases_pos], axis=-1)\n obs['vision'].append(vases_gt)\n if self.pillars_num > 0:\n pillars_gt = np.zeros((self.pillars_num, num_objects))\n pillars_gt[:, 5] = 1.\n pillars_gt = np.concatenate([pillars_gt, self.pillars_pos], axis=-1)\n obs['vision'].append(pillars_gt)\n\n # shuffle object representations\n obs['vision'] = np.concatenate(obs['vision'], axis=0)\n shuffle_idx = self.rs.rand(obs['vision'].shape[0]).argsort()\n obs['vision'] = obs['vision'][shuffle_idx]\n\n if self.observation_flatten:\n flat_obs = np.zeros(self.obs_flat_size)\n offset = 0\n for k in sorted(self.obs_space_dict.keys()):\n k_size = np.prod(obs[k].shape)\n flat_obs[offset:offset + k_size] = obs[k].flat\n offset += k_size\n obs = flat_obs\n assert self.observation_space.contains(obs), f'Bad obs {obs} {self.observation_space}'\n return obs\n\n def cost(self):\n ''' Calculate the current costs and return a dict '''\n self.sim.forward() # Ensure positions and contacts are correct\n cost = {}\n # Conctacts processing\n if self.constrain_vases:\n cost['cost_vases_contact'] = 0\n if self.constrain_pillars:\n cost['cost_pillars'] = 0\n if self.constrain_buttons:\n cost['cost_buttons'] = 0\n if self.constrain_gremlins:\n cost['cost_gremlins'] = 0\n buttons_constraints_active = self.constrain_buttons and (self.buttons_timer == 0)\n for contact in self.data.contact[:self.data.ncon]:\n geom_ids = [contact.geom1, contact.geom2]\n geom_names = sorted([self.model.geom_id2name(g) for g in geom_ids])\n if self.constrain_vases and any(n.startswith('vase') for n in geom_names):\n if any(n in self.robot.geom_names for n in geom_names):\n cost['cost_vases_contact'] += self.vases_contact_cost\n if self.constrain_pillars and any(n.startswith('pillar') for n in geom_names):\n if any(n in self.robot.geom_names for n in geom_names):\n cost['cost_pillars'] += self.pillars_cost\n if buttons_constraints_active and any(n.startswith('button') for n in geom_names):\n if any(n in self.robot.geom_names for n in geom_names):\n if not any(n == f'button{self.goal_button}' for n in geom_names):\n cost['cost_buttons'] += self.buttons_cost\n if self.constrain_gremlins and any(n.startswith('gremlin') for n in geom_names):\n if any(n in self.robot.geom_names for n in geom_names):\n cost['cost_gremlins'] += self.gremlins_contact_cost\n\n # 
Displacement processing\n if self.constrain_vases and self.vases_displace_cost:\n cost['cost_vases_displace'] = 0\n for i in range(self.vases_num):\n name = f'vase{i}'\n dist = np.sqrt(np.sum(np.square(self.data.get_body_xpos(name)[:2] - self.reset_layout[name])))\n if dist > self.vases_displace_threshold:\n cost['cost_vases_displace'] += dist * self.vases_displace_cost\n\n # Velocity processing\n if self.constrain_vases and self.vases_velocity_cost:\n # TODO: penalize rotational velocity too, but requires another cost coefficient\n cost['cost_vases_velocity'] = 0\n for i in range(self.vases_num):\n name = f'vase{i}'\n vel = np.sqrt(np.sum(np.square(self.data.get_body_xvelp(name))))\n if vel >= self.vases_velocity_threshold:\n cost['cost_vases_velocity'] += vel * self.vases_velocity_cost\n\n # Calculate constraint violations\n if self.constrain_hazards:\n cost['cost_hazards'] = 0\n for h_pos in self.hazards_pos:\n h_dist = self.dist_xy(h_pos)\n if h_dist <= self.hazards_size:\n cost['cost_hazards'] += self.hazards_cost # * (self.hazards_size - h_dist)\n if self.constrain_sec_hazards:\n cost['cost_sec_hazards'] = 0\n for h_pos in self.sec_hazards_pos:\n h_dist = self.dist_xy(h_pos)\n if h_dist <= self.sec_hazards_size:\n cost['cost_sec_hazards'] += self.sec_hazards_cost\n\n # Sum all costs into single total cost\n cost['cost'] = sum(v for k, v in cost.items() if k.startswith('cost_'))\n\n # Optionally remove shaping from reward functions.\n if self.constrain_indicator:\n for k in list(cost.keys()):\n cost[k] = float(cost[k] > 0.0) # Indicator function\n\n self._cost = cost\n\n return cost\n\n def goal_met(self):\n ''' Return true if the current goal is met this step '''\n if self.task == 'goal':\n return self.dist_goal() <= self.goal_size #+ 0.08 # TODO remove 0.08\n if self.task == 'push':\n return self.dist_box_goal() <= self.goal_size\n if self.task == 'button':\n for contact in self.data.contact[:self.data.ncon]:\n geom_ids = [contact.geom1, contact.geom2]\n geom_names = sorted([self.model.geom_id2name(g) for g in geom_ids])\n if any(n == f'button{self.goal_button}' for n in geom_names):\n if any(n in self.robot.geom_names for n in geom_names):\n return True\n return False\n if self.task in ['x', 'z', 'circle', 'none']:\n return False\n raise ValueError(f'Invalid task {self.task}')\n\n def set_mocaps(self):\n ''' Set mocap object positions before a physics step is executed '''\n if self.gremlins_num: # self.constrain_gremlins:\n phase = float(self.data.time)\n for i in range(self.gremlins_num):\n name = f'gremlin{i}'\n target = np.array([np.sin(phase), np.cos(phase)]) * self.gremlins_travel\n pos = np.r_[target, [self.gremlins_size]]\n self.data.set_mocap_pos(name + 'mocap', pos)\n\n def update_layout(self):\n ''' Update layout dictionary with new places of objects '''\n self.sim.forward()\n for k in list(self.layout.keys()):\n # Mocap objects have to be handled separately\n if 'gremlin' in k:\n continue\n self.layout[k] = self.data.get_body_xpos(k)[:2].copy()\n\n def buttons_timer_tick(self):\n ''' Tick the buttons resampling timer '''\n self.buttons_timer = max(0, self.buttons_timer - 1)\n\n def step(self, action):\n ''' Take a step and return observation, reward, done, and info '''\n action = np.array(action, copy=False) # Cast to ndarray\n assert not self.done, 'Environment must be reset before stepping'\n\n info = {}\n\n # Set action\n action_range = self.model.actuator_ctrlrange\n # action_scale = action_range[:,1] - action_range[:, 0]\n self.data.ctrl[:] = np.clip(action, 
action_range[:,0], action_range[:,1]) #np.clip(action * 2 / action_scale, -1, 1)\n if self.action_noise:\n self.data.ctrl[:] += self.action_noise * self.rs.randn(self.model.nu)\n\n # Simulate physics forward\n exception = False\n for _ in range(self.rs.binomial(self.frameskip_binom_n, self.frameskip_binom_p)):\n try:\n self.set_mocaps()\n self.sim.step() # Physics simulation step\n except MujocoException as me:\n print('MujocoException', me)\n exception = True\n break\n if exception:\n self.done = True\n reward = self.reward_exception\n info['cost_exception'] = 1.0\n else:\n self.sim.forward() # Needed to get sensor readings correct!\n\n # Reward processing\n reward = self.reward()\n\n # Constraint violations\n info.update(self.cost())\n\n # Button timer (used to delay button resampling)\n self.buttons_timer_tick()\n\n # Goal processing\n if self.goal_met():\n info['goal_met'] = True\n reward += self.reward_goal\n if self.continue_goal:\n # Update the internal layout so we can correctly resample (given objects have moved)\n self.update_layout()\n # Reset the button timer (only used for task='button' environments)\n self.buttons_timer = self.buttons_resampling_delay\n # Try to build a new goal, end if we fail\n if self.terminate_resample_failure:\n try:\n self.build_goal()\n except ResamplingError as e:\n # Normal end of episode\n self.done = True\n else:\n # Try to make a goal, which could raise a ResamplingError exception\n self.build_goal()\n else:\n self.done = True\n\n else:\n info['goal_met'] = False\n\n # Timeout\n self.steps += 1\n if self.steps >= self.num_steps:\n self.done = True # Maximum number of steps in an episode reached\n\n if self.add_cost_to_reward:\n reward -= info['cost']\n\n return self.obs(), reward, self.done, info\n\n def reward(self):\n ''' Calculate the dense component of reward. 
Call exactly once per step '''\n reward = 0.0\n # Distance from robot to goal\n if self.task in ['goal', 'button']:\n dist_goal = self.dist_goal()\n reward += (self.last_dist_goal - dist_goal) * self.reward_distance\n self.last_dist_goal = dist_goal\n # Distance from robot to box\n if self.task == 'push':\n dist_box = self.dist_box()\n gate_dist_box_reward = (self.last_dist_box > self.box_null_dist * self.box_size)\n reward += (self.last_dist_box - dist_box) * self.reward_box_dist * gate_dist_box_reward\n self.last_dist_box = dist_box\n # Distance from box to goal\n if self.task == 'push':\n dist_box_goal = self.dist_box_goal()\n reward += (self.last_box_goal - dist_box_goal) * self.reward_box_goal\n self.last_box_goal = dist_box_goal\n # Used for forward locomotion tests\n if self.task == 'x':\n robot_com = self.world.robot_com()\n reward += (robot_com[0] - self.last_robot_com[0]) * self.reward_x\n self.last_robot_com = robot_com\n # Used for jump up tests\n if self.task == 'z':\n robot_com = self.world.robot_com()\n reward += (robot_com[2] - self.last_robot_com[2]) * self.reward_z\n self.last_robot_com = robot_com\n # Circle environment reward\n if self.task == 'circle':\n robot_com = self.world.robot_com()\n robot_vel = self.world.robot_vel()\n x, y, _ = robot_com\n u, v, _ = robot_vel\n radius = np.sqrt(x**2 + y**2)\n reward += (((-u*y + v*x)/radius)/(1 + np.abs(radius - self.circle_radius))) * self.reward_circle\n # Intrinsic reward for uprightness\n if self.reward_orientation:\n zalign = quat2zalign(self.data.get_body_xquat(self.reward_orientation_body))\n reward += self.reward_orientation_scale * zalign\n # Clip reward\n if self.reward_clip:\n in_range = reward < self.reward_clip and reward > -self.reward_clip\n if not(in_range):\n reward = np.clip(reward, -self.reward_clip, self.reward_clip)\n print('Warning: reward was outside of range!')\n return reward\n\n def render_lidar(self, poses, color, offset, group):\n ''' Render the lidar observation '''\n robot_pos = self.world.robot_pos()\n robot_mat = self.world.robot_mat()\n lidar = self.obs_lidar(poses, group)\n for i, sensor in enumerate(lidar):\n if self.lidar_type == 'pseudo':\n i += 0.5 # Offset to center of bin\n theta = 2 * np.pi * i / self.lidar_num_bins\n rad = self.render_lidar_radius\n binpos = np.array([np.cos(theta) * rad, np.sin(theta) * rad, offset])\n pos = robot_pos + np.matmul(binpos, robot_mat.transpose())\n alpha = min(1, sensor + .1)\n self.viewer.add_marker(pos=pos,\n size=self.render_lidar_size * np.ones(3),\n type=const.GEOM_SPHERE,\n rgba=np.array(color) * alpha,\n label='')\n\n def render_compass(self, pose, color, offset):\n ''' Render a compass observation '''\n robot_pos = self.world.robot_pos()\n robot_mat = self.world.robot_mat()\n # Truncate the compass to only visualize XY component\n compass = np.concatenate([self.obs_compass(pose)[:2] * 0.15, [offset]])\n pos = robot_pos + np.matmul(compass, robot_mat.transpose())\n self.viewer.add_marker(pos=pos,\n size=.05 * np.ones(3),\n type=const.GEOM_SPHERE,\n rgba=np.array(color) * 0.5,\n label='')\n\n def render_area(self, pos, size, color, label='', alpha=0.1):\n ''' Render a radial area in the environment '''\n z_size = min(size, 0.3)\n pos = np.asarray(pos)\n if pos.shape == (2,):\n pos = np.r_[pos, 0] # Z coordinate 0\n self.viewer.add_marker(pos=pos,\n size=[size, size, z_size],\n type=const.GEOM_CYLINDER,\n rgba=np.array(color) * alpha,\n label=label if self.render_labels else '')\n\n def render_sphere(self, pos, size, color, label='', alpha=0.1):\n 
''' Render a radial area in the environment '''\n pos = np.asarray(pos)\n if pos.shape == (2,):\n pos = np.r_[pos, 0] # Z coordinate 0\n self.viewer.add_marker(pos=pos,\n size=size * np.ones(3),\n type=const.GEOM_SPHERE,\n rgba=np.array(color) * alpha,\n label=label if self.render_labels else '')\n\n def render_swap_callback(self):\n ''' Callback between mujoco render and swapping GL buffers '''\n if self.observe_vision and self.vision_render:\n self.viewer.draw_pixels(self.save_obs_vision, 0, 0)\n\n def render(self,\n mode='human', \n camera_id=None,\n width=DEFAULT_WIDTH,\n height=DEFAULT_HEIGHT\n ):\n ''' Render the environment to the screen '''\n\n if self.viewer is None or mode!=self._old_render_mode:\n # Set camera if specified\n if mode == 'human':\n self.viewer = MjViewer(self.sim)\n self.viewer.cam.fixedcamid = -1\n self.viewer.cam.type = const.CAMERA_FREE\n else:\n self.viewer = MjRenderContextOffscreen(self.sim)\n self.viewer._hide_overlay = True\n self.viewer.cam.fixedcamid = camera_id #self.model.camera_name2id(mode)\n self.viewer.cam.type = const.CAMERA_FIXED\n self.viewer.render_swap_callback = self.render_swap_callback\n # Turn all the geom groups on\n self.viewer.vopt.geomgroup[:] = 1\n self._old_render_mode = mode\n self.viewer.update_sim(self.sim)\n\n if camera_id is not None:\n # Update camera if desired\n self.viewer.cam.fixedcamid = camera_id\n\n # Lidar markers\n if self.render_lidar_markers:\n offset = self.render_lidar_offset_init # Height offset for successive lidar indicators\n if 'box_lidar' in self.obs_space_dict or 'box_compass' in self.obs_space_dict:\n if 'box_lidar' in self.obs_space_dict:\n self.render_lidar([self.box_pos], self.box_color, offset, GROUP_BOX)\n if 'box_compass' in self.obs_space_dict:\n self.render_compass(self.box_pos, self.box_color, offset)\n offset += self.render_lidar_offset_delta\n if 'goal_lidar' in self.obs_space_dict or 'goal_compass' in self.obs_space_dict:\n if 'goal_lidar' in self.obs_space_dict:\n self.render_lidar([self.goal_pos], self.goal_color, offset, GROUP_GOAL)\n if 'goal_compass' in self.obs_space_dict:\n self.render_compass(self.goal_pos, self.goal_color, offset)\n offset += self.render_lidar_offset_delta\n if 'buttons_lidar' in self.obs_space_dict:\n self.render_lidar(self.buttons_pos, self.buttons_color, offset, GROUP_BUTTON)\n offset += self.render_lidar_offset_delta\n if 'circle_lidar' in self.obs_space_dict:\n self.render_lidar([ORIGIN_COORDINATES], COLOR_CIRCLE, offset, GROUP_CIRCLE)\n offset += self.render_lidar_offset_delta\n if 'walls_lidar' in self.obs_space_dict:\n self.render_lidar(self.walls_pos, self.walls_color, offset, GROUP_WALL)\n offset += self.render_lidar_offset_delta\n if 'hazards_lidar' in self.obs_space_dict:\n self.render_lidar(self.hazards_pos, self.hazards_color, offset, GROUP_HAZARD)\n offset += self.render_lidar_offset_delta\n if 'sec_hazards_lidar' in self.obs_space_dict:\n self.render_lidar(self.sec_hazards_pos, self.sec_hazards_color, offset, GROUP_HAZARD)\n offset += self.render_lidar_offset_delta\n if 'pillars_lidar' in self.obs_space_dict:\n self.render_lidar(self.pillars_pos, self.pillars_color, offset, GROUP_PILLAR)\n offset += self.render_lidar_offset_delta\n if 'gremlins_lidar' in self.obs_space_dict:\n self.render_lidar(self.gremlins_obj_pos, self.gremlins_color, offset, GROUP_GREMLIN)\n offset += self.render_lidar_offset_delta\n if 'vases_lidar' in self.obs_space_dict:\n self.render_lidar(self.vases_pos, self.vases_color, offset, GROUP_VASE)\n offset += 
self.render_lidar_offset_delta\n\n # Add goal marker\n if self.task == 'button':\n self.render_area(self.goal_pos, self.buttons_size * 2, self.buttons_color, 'goal', alpha=0.1)\n\n # Add indicator for nonzero cost\n if self._cost.get('cost', 0) > 0:\n self.render_sphere(self.world.robot_pos(), 0.25, COLOR_RED, alpha=.5)\n\n # Draw vision pixels\n if self.observe_vision and self.vision_render:\n vision = self.obs_vision()\n vision = np.array(vision * 255, dtype='uint8')\n vision = Image.fromarray(vision).resize(self.vision_render_size)\n vision = np.array(vision, dtype='uint8')\n self.save_obs_vision = vision\n\n if mode=='human':\n self.viewer.render()\n elif mode=='rgb_array':\n self.viewer.render(width, height)\n data = self.viewer.read_pixels(width, height, depth=False)\n self.viewer._markers[:] = []\n self.viewer._overlay.clear()\n return data[::-1, :, :]"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.transpose",
"numpy.reshape",
"numpy.abs",
"numpy.cos",
"numpy.zeros",
"numpy.prod",
"numpy.square",
"numpy.array",
"numpy.matmul",
"numpy.exp",
"numpy.clip",
"numpy.angle",
"numpy.sqrt",
"numpy.sin",
"numpy.concatenate",
"numpy.random.randint"
]
] |
mianuddin/csc492_recommender_pkg | [
"3c89bb1f4ef2a34ed4f9bb6a99ae623eaee1954b"
] | [
"recpkg/implicit.py"
] | [
"import numpy as np\nfrom sklearn.base import BaseEstimator\nfrom tensorflow import keras\nfrom .recommenders import KerasRecommender\n\n\nclass ItemPopularity(BaseEstimator):\n \"\"\"Recommender based solely on interactions per item.\"\"\"\n\n def fit(self, X=None, y=None):\n \"\"\"Fit the recommender from the training dataset.\n\n Args:\n X (ndarray of shape (n_samples, 2)): An array where each row\n consists of a user and an\n item.\n y (ndarray of shape (n_samples,)): An array where each entry\n denotes interactions between\n the corresponding user and item.\n \"\"\"\n unique, counts = np.unique(X[y == 1, 1], return_counts=True)\n self.interactions_by_item = dict(zip(unique, counts))\n\n def predict(self, X=None):\n \"\"\"Predict the scores for the provided data.\n\n Args:\n X (ndarray of shape (n_samples, 2)): An array where each row\n consists of a user and an\n item.\n\n Returns:\n ndarray of shape (n_samples,): Class labels for each data sample.\n \"\"\"\n y_pred = np.array([self.interactions_by_item[i] for i in X[:, 1]])\n return y_pred / max(y_pred)\n\n\nclass GeneralizedMatrixFactorization(KerasRecommender):\n \"\"\"Recommender implementing the GMF architecture.\n\n Args:\n n_factors (int): The number of latent factors.\n epochs (int): The number of epochs to train the NN.\n optimizer (keras.optimizers.Optimizer): The model's optimizer.\n loss (keras.losses.Loss): The loss function.\n metrics (List[keras.metrics.Metric, ...]): The metric functions.\n seed (int): A random seed.\n user_input (keras.Input): An input for the users.\n item_input (keras.Input): An input for the items.\n user_preprocessing_layers (keras.layers.Layer): Preprocessing layers\n for the users.\n item_preprocessing_layers (keras.layers.Layer): Preprocessing layers\n for the items.\n \"\"\"\n def __init__(self,\n n_factors=8,\n epochs=10,\n optimizer=keras.optimizers.Adam(),\n loss=keras.losses.BinaryCrossentropy(),\n metrics=[keras.metrics.BinaryAccuracy()],\n seed=None,\n user_input=None,\n item_input=None,\n user_preprocessing_layers=None,\n item_preprocessing_layers=None):\n super().__init__(epochs,\n optimizer,\n loss,\n metrics,\n seed,\n user_input,\n item_input,\n user_preprocessing_layers,\n item_preprocessing_layers)\n self.n_factors = n_factors\n self.user_input = user_input\n self.item_input = item_input\n self.user_preprocessing_layers = user_preprocessing_layers\n self.item_preprocessing_layers = item_preprocessing_layers\n\n @staticmethod\n def create_core_layers(n_factors,\n user_layers,\n item_layers,\n user_dense_kwdargs={},\n item_dense_kwdargs={}):\n \"\"\"Creates the core layers of the GMF model.\n\n Returns the hidden layers of the model. 
Specifically, the ones between\n the inputs and the visible, output layer.\n\n Args:\n n_factors (int): The number of latent factors.\n user_layers (keras.layers.Layer): The input or preprocessing layers\n for the users.\n item_layers (keras.layers.Layer): The input or preprocessing layers\n for the items.\n user_dense_kwdargs (Dict): The keyword arguments for the\n user dense layer.\n item_dense_kwdargs (Dict): The keyword arguments for the\n item dense layer.\n\n Returns:\n keras.layers.Layer: The core layers of the model.\n \"\"\"\n\n gmf_layers = [\n keras.layers.Dense(n_factors, **user_dense_kwdargs)(user_layers),\n keras.layers.Dense(n_factors, **item_dense_kwdargs)(item_layers)\n ]\n gmf_layers = keras.layers.Multiply()(gmf_layers)\n\n return gmf_layers\n\n def create_model(self):\n \"\"\"Creates a new GMF model.\"\"\"\n user_input = (self.user_input\n if self.user_input is not None else\n keras.Input(shape=(1), name=\"user\", dtype=\"int64\"))\n item_input = (self.item_input\n if self.item_input is not None else\n keras.Input(shape=(1), name=\"item\", dtype=\"int64\"))\n\n user_preprocessing_layers = (\n self.user_preprocessing_layers\n if self.user_preprocessing_layers is not None\n else user_input\n )\n item_preprocessing_layers = (\n self.item_preprocessing_layers\n if self.item_preprocessing_layers is not None\n else item_input\n )\n\n gmf_layers = GeneralizedMatrixFactorization.create_core_layers(\n self.n_factors,\n user_preprocessing_layers,\n item_preprocessing_layers\n )\n\n gmf_output = keras.layers.Dense(\n 1,\n activation=\"sigmoid\",\n kernel_constraint=keras.constraints.unit_norm()\n )(gmf_layers)\n\n return keras.Model(inputs=[user_input, item_input],\n outputs=[gmf_output],\n name=\"generalized_matrix_factorization\")\n\n def get_core_layers_kwdargs(self):\n \"\"\"Returns the appropriate kwdargs for pretraining core layers.\n\n Returns:\n Tuple[Dict, Dict]: The keyword arguments for the user and item\n dense layers.\n \"\"\"\n if not self.model:\n raise RuntimeError(\"GMF is not trained.\")\n\n user_kernel, user_bias = self.model.layers[6].get_weights()\n item_kernel, item_bias = self.model.layers[7].get_weights()\n user_dense_kwdargs = {\n \"kernel_initializer\": keras.initializers.Constant(user_kernel),\n \"bias_initializer\": keras.initializers.Constant(user_bias)\n }\n item_dense_kwdargs = {\n \"kernel_initializer\": keras.initializers.Constant(item_kernel),\n \"bias_initializer\": keras.initializers.Constant(item_bias)\n }\n\n return user_dense_kwdargs, item_dense_kwdargs\n\n def get_output_weights(self):\n \"\"\"Returns the kernel and bias for the output layer of this model.\n\n Returns:\n List[ndarray, Optional[ndarray]]: The kernel and bias.\n \"\"\"\n if not self.model:\n raise RuntimeError(\"GMF is not trained.\")\n\n return self.model.layers[-1].get_weights()\n\n\nclass MultiLayerPerceptron(KerasRecommender):\n \"\"\"Recommender implementing the MLP architecture.\n\n Args:\n n_factors (int): The number of latent factors.\n n_hidden_layers (int): The number of hidden layers.\n epochs (int): The number of epochs to train the NN.\n optimizer (keras.optimizers.Optimizer): The model's optimizer.\n loss (keras.losses.Loss): The loss function.\n metrics (List[keras.metrics.Metric, ...]): The metric functions.\n seed (int): A random seed.\n user_input (keras.Input): An input for the users.\n item_input (keras.Input): An input for the items.\n user_preprocessing_layers (keras.layers.Layer): Preprocessing layers\n for the users.\n item_preprocessing_layers 
(keras.layers.Layer): Preprocessing layers\n for the items.\n \"\"\"\n def __init__(self,\n n_factors=8,\n n_hidden_layers=4,\n epochs=10,\n optimizer=keras.optimizers.Adam(),\n loss=keras.losses.BinaryCrossentropy(),\n metrics=[keras.metrics.BinaryAccuracy()],\n seed=None,\n user_input=None,\n item_input=None,\n user_preprocessing_layers=None,\n item_preprocessing_layers=None):\n super().__init__(epochs,\n optimizer,\n loss,\n metrics,\n seed,\n user_input,\n item_input,\n user_preprocessing_layers,\n item_preprocessing_layers)\n self.n_factors = n_factors\n self.n_hidden_layers = n_hidden_layers\n self.user_input = user_input\n self.item_input = item_input\n self.user_preprocessing_layers = user_preprocessing_layers\n self.item_preprocessing_layers = item_preprocessing_layers\n\n @staticmethod\n def create_core_layers(n_factors,\n n_hidden_layers,\n user_layers,\n item_layers,\n hidden_layers_kwdargs=[]):\n \"\"\"Creates the core layers of the MLP model.\n\n Returns the hidden layers of the model. Specifically, the ones between\n the inputs and the visible, output layer.\n\n Args:\n n_factors (int): The number of latent factors.\n user_layers (keras.layers.Layer): The input or preprocessing layers\n for the users.\n item_layers (keras.layers.Layer): The input or preprocessing layers\n for the items.\n hidden_layers_kwdargs (List[Dict, ...]): The keyword\n arguments for each\n hidden layer.\n\n Returns:\n keras.layers.Layer: The core layers of the model.\n \"\"\"\n\n mlp_layers = keras.layers.Concatenate()([user_layers, item_layers])\n\n for x, i in enumerate(range(n_hidden_layers)[::-1]):\n current_kwdargs = {}\n\n if x < len(hidden_layers_kwdargs):\n current_kwdargs = hidden_layers_kwdargs[x]\n\n mlp_layers = keras.layers.Dense(n_factors * (2 ** i),\n activation=\"relu\",\n **current_kwdargs)(mlp_layers)\n\n return mlp_layers\n\n def create_model(self):\n \"\"\"Creates a new MLP model.\"\"\"\n\n user_input = (self.user_input\n if self.user_input is not None else\n keras.Input(shape=(1), name=\"user\", dtype=\"int64\"))\n item_input = (self.item_input\n if self.item_input is not None else\n keras.Input(shape=(1), name=\"item\", dtype=\"int64\"))\n\n user_preprocessing_layers = (\n self.user_preprocessing_layers\n if self.user_preprocessing_layers is not None\n else user_input\n )\n item_preprocessing_layers = (\n self.item_preprocessing_layers\n if self.item_preprocessing_layers is not None\n else item_input\n )\n\n mlp_layers = MultiLayerPerceptron.create_core_layers(\n self.n_factors,\n self.n_hidden_layers,\n user_preprocessing_layers,\n item_preprocessing_layers\n )\n\n mlp_output = keras.layers.Dense(1,\n activation=\"sigmoid\",\n use_bias=False)(mlp_layers)\n\n return keras.Model(inputs=[user_input, item_input],\n outputs=[mlp_output],\n name=\"multi-layer_perceptron\")\n\n def get_core_layers_kwdargs(self):\n \"\"\"Returns the appropriate kwdargs for pretraining core layers.\n\n Returns:\n Dict[String, Object]: The keyword arguments for the hidden layers.\n \"\"\"\n if not self.model:\n raise RuntimeError(\"MLP is not trained.\")\n\n hidden_layers_kwdargs = []\n for i in range(7, 7 + self.n_hidden_layers):\n kernel, bias = self.model.layers[i].get_weights()\n hidden_layers_kwdargs.append({\n \"kernel_initializer\": keras.initializers.Constant(kernel),\n \"bias_initializer\": keras.initializers.Constant(bias)\n })\n\n return hidden_layers_kwdargs\n\n def get_output_weights(self):\n \"\"\"Returns the kernel and bias for the output layer of this model.\n\n Returns:\n List[ndarray, 
Optional[ndarray]]: The kernel and bias.\n \"\"\"\n if not self.model:\n raise RuntimeError(\"MLP is not trained.\")\n\n return [self.model.layers[-1].get_weights()[0], None]\n\n\nclass NeuralMatrixFactorization(KerasRecommender):\n \"\"\"Recommender implementing the NeuMF architecture, an ensemble of GMF/MLP.\n\n Args:\n gmf_n_factors (int): The number of latent factors for GMF.\n mlp_n_factors (int): The number of latent factors for MLP.\n mlp_n_hidden_layers (int): The number of hidden layers.\n gmf_trained (GeneralizedMatrixFactorization): A trained GMF model of\n the same number of\n factors.\n mlp_trained (MultiLayerPerceptron): A trained MLP model of the same\n number of factors and hidden\n layers.\n alpha (float): The tradeoff between MLP and GMF.\n epochs (int): The number of epochs to train the NN.\n optimizer (keras.optimizers.Optimizer): The model's optimizer.\n loss (keras.losses.Loss): The loss function.\n metrics (List[keras.metrics.Metric, ...]): The metric functions.\n seed (int): A random seed.\n user_input (keras.Input): An input for the users.\n item_input (keras.Input): An input for the items.\n user_preprocessing_layers (keras.layers.Layer): Preprocessing layers\n for the users.\n item_preprocessing_layers (keras.layers.Layer): Preprocessing layers\n for the items.\n \"\"\"\n def __init__(self,\n gmf_n_factors=8,\n mlp_n_factors=8,\n mlp_n_hidden_layers=4,\n gmf_trained=None,\n mlp_trained=None,\n alpha=0.5,\n epochs=10,\n optimizer=keras.optimizers.SGD(),\n loss=keras.losses.BinaryCrossentropy(),\n metrics=[keras.metrics.BinaryAccuracy()],\n seed=None,\n user_input=None,\n item_input=None,\n user_preprocessing_layers=None,\n item_preprocessing_layers=None):\n super().__init__(epochs,\n optimizer,\n loss,\n metrics,\n seed,\n user_input,\n item_input,\n user_preprocessing_layers,\n item_preprocessing_layers)\n self.gmf_n_factors = gmf_n_factors\n self.mlp_n_factors = mlp_n_factors\n self.mlp_n_hidden_layers = mlp_n_hidden_layers\n self.gmf_trained = gmf_trained\n self.mlp_trained = mlp_trained\n self.alpha = alpha\n self.user_input = user_input\n self.item_input = item_input\n self.user_preprocessing_layers = user_preprocessing_layers\n self.item_preprocessing_layers = item_preprocessing_layers\n\n def create_model(self):\n \"\"\"Creates a new NeuMF model.\n\n Returns:\n keras.Model: The NeuMF model. 
It will be pretrained if trained\n models are provided in the constructor.\n \"\"\"\n\n user_input = (self.user_input\n if self.user_input is not None else\n keras.Input(shape=(1), name=\"user\", dtype=\"int64\"))\n item_input = (self.item_input\n if self.item_input is not None else\n keras.Input(shape=(1), name=\"item\", dtype=\"int64\"))\n\n user_preprocessing_layers = (\n self.user_preprocessing_layers\n if self.user_preprocessing_layers is not None\n else user_input\n )\n item_preprocessing_layers = (\n self.item_preprocessing_layers\n if self.item_preprocessing_layers is not None\n else item_input\n )\n\n user_dense_kwdargs = {}\n item_dense_kwdargs = {}\n hidden_layers_kwdargs = []\n neumf_output_kernel = \"glorot_uniform\"\n\n if self.gmf_trained and self.mlp_trained:\n if self.gmf_trained.n_factors != self.gmf_n_factors:\n raise RuntimeError(\"GMF factors are not consistent.\")\n\n if self.mlp_trained.n_factors != self.mlp_n_factors:\n raise RuntimeError(\"MLP factors are not consistent.\")\n if self.mlp_trained.n_hidden_layers != self.mlp_n_hidden_layers:\n raise RuntimeError(\"MLP factors are not consistent.\")\n\n user_dense_kwdargs, item_dense_kwdargs = (\n self.gmf_trained.get_core_layers_kwdargs()\n )\n\n hidden_layers_kwdargs = self.mlp_trained.get_core_layers_kwdargs()\n\n gmf_output_kernel, _ = self.gmf_trained.get_output_weights()\n mlp_output_kernel, _ = self.mlp_trained.get_output_weights()\n neumf_output_kernel = keras.initializers.Constant(\n np.concatenate((gmf_output_kernel * self.alpha,\n mlp_output_kernel * (1 - self.alpha)))\n )\n\n gmf_layers = GeneralizedMatrixFactorization.create_core_layers(\n self.gmf_n_factors,\n user_preprocessing_layers,\n item_preprocessing_layers,\n user_dense_kwdargs,\n item_dense_kwdargs\n )\n\n mlp_layers = MultiLayerPerceptron.create_core_layers(\n self.mlp_n_factors,\n self.mlp_n_hidden_layers,\n user_preprocessing_layers,\n item_preprocessing_layers,\n hidden_layers_kwdargs\n )\n\n neumf_layers = [gmf_layers, mlp_layers]\n neumf_layers = keras.layers.Concatenate()(neumf_layers)\n neumf_layers = (\n keras.layers.Dense(1,\n activation=\"sigmoid\",\n kernel_initializer=neumf_output_kernel,\n kernel_constraint=keras.constraints.unit_norm(),\n use_bias=False)(neumf_layers)\n )\n\n return keras.Model(inputs=[user_input, item_input],\n outputs=[neumf_layers],\n name=\"neural_matrix_factorization\")\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.constraints.unit_norm",
"tensorflow.keras.Model",
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Dense",
"numpy.array",
"numpy.concatenate",
"numpy.unique",
"tensorflow.keras.Input"
]
] |
ccfelius/TravelingSalesMan | [
"ebc3b960859590623c0eb301545cd093c41d157a"
] | [
"SA.py"
] | [
"\"\"\" TSP SIMULATED ANNEALING \"\"\"\n\n# Imports\nimport math\nimport numpy as np\n\n# read data from file\nf = open(\"TSP-configurations/eil51.tsp.txt\", \"r\")\n# f = open(\"TSP-configurations/a280.tsp.txt\", \"r\")\n# f = open(\"TSP-configurations/pcb442.tsp.txt\", \"r\")\n\nnetwork = f.readlines()[6:-1]\n\n# create dictionary to store coordinates\nnodes = dict()\n\n# split data and put in dict\nfor node in network:\n node = list(map(int, (list(filter(None, node.rstrip().rsplit(' '))))))\n nodes[node[0]] = node[1:]\n\n# calculate distance between 2 nodes\ndef get_distance(dictionary, city1, city2):\n x = dictionary[city1][0] - dictionary[city2][0]\n y = dictionary[city1][1] - dictionary[city2][1]\n return math.sqrt(x**2 + y**2)\n\n# def get_distance(dictionary, city1, city2):\n# x = dictionary[city1][0][0] - dictionary[city2][0][0]\n# y = dictionary[city1][0][1] - dictionary[city2][0][1]\n# return math.sqrt(x**2 + y**2)\n\n# calculate the total distance\ndef total_distance(tour, dictionary):\n\n distance = 0\n for i in range(len(tour)-1):\n distance += get_distance(dictionary, tour[i], tour[i+1])\n\n return distance\n\n\n# add nearest neighbors in order of nearest to most far\nfor node in range(1,len(nodes)+1):\n t_dict = dict()\n tour = [i for i in nodes.keys()]\n tour.remove(node)\n\n for j in tour:\n t_dict[j] = get_distance(nodes, node, j)\n\n nodes[node].append(sorted(t_dict.items(), key=lambda x: x[1]))\n\nprint(nodes)\n\n\n\ndef SA(coordinates, tour, temp, coolingdown, mlength, swap = False, start_node=True):\n\n if start_node == True:\n a, c = [tour[0]], [tour[0]]\n b = tour[1:]\n np.random.shuffle(b)\n tour = a + b + c\n else:\n np.random.shuffle(tour)\n\n print(f'\\nInitial solution: {tour}\\n')\n \n # Initial costs\n costs = total_distance(tour, coordinates)\n\n for i in range(1000): # Parameter\n print(i, 'cost=', costs)\n\n temp = coolingdown(temp)\n if temp == 0:\n print(\"Temperature of 0 reached\")\n return tour, costs\n\n for j in range(mlength): # Parameter\n\n if swap == True:\n # Exchange two coordinates and get a candidate solution solution\n c1, c2 = np.random.randint(1, len(tour)-1, size = 2)\n\n # Swap coordinates\n tour[c1], tour[c2] = tour[c2], tour[c1]\n else:\n randindex = np.random.randint(1,len(tour)-2)\n randcity = np.random.randint(2,len(tour)-1)\n c2_i = tour.index(randcity)\n tour.remove(randcity)\n # print(f'city {c2} removed out of index {c2_i}')\n tour.insert(randindex, randcity)\n\n # get the new costs\n cost_n = total_distance(tour, coordinates)\n\n # replace old costs if new costs is less\n if cost_n < costs:\n costs = cost_n\n else:\n # Generate random probability\n x = np.random.uniform()\n\n # If prob < formula accept candidate solution\n if x < min(1, math.exp(-(cost_n-costs)/temp)):\n costs = cost_n\n else:\n if swap == True:\n # Swap back to prior solution\n tour[c1], tour[c2] = tour[c2], tour[c1]\n else:\n tour.remove(randcity)\n tour.insert(c2_i, randcity)\n\n return tour, costs, temp\n\ndef candidate_solution():\n return\n\ndef cooling(temp):\n \"\"\"\n Cooling down function\n\n :param temp: (float) temperature\n :return: (float) new temperature\n \"\"\"\n return temp - np.log(temp)\n\nTemperature = 1000 # Parameter\nMCL = 500 # Markov Chain Length (inner loop)\n# Get node names\ninitial_tour = [i for i in nodes.keys()]\n\nprint(SA(nodes, initial_tour, Temperature, cooling, MCL))"
] | [
[
"numpy.log",
"numpy.random.uniform",
"numpy.random.shuffle"
]
] |
yunchu/mmsegmentation | [
"404f3e0e8859991931b6a39a583de412348e98f0",
"404f3e0e8859991931b6a39a583de412348e98f0"
] | [
"mmseg/apis/ote/apis/segmentation/model_wrappers/blur.py",
"mmseg/integration/nncf/utils.py"
] | [
"# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport cv2\nimport numpy as np\nfrom typing import Any, Dict\n\nfrom openvino.model_zoo.model_api.models import SegmentationModel\nfrom openvino.model_zoo.model_api.models.types import NumericalValue\nfrom ote_sdk.utils.segmentation_utils import create_hard_prediction_from_soft_prediction\n\n\nclass BlurSegmentation(SegmentationModel):\n __model__ = 'blur_segmentation'\n\n def __init__(self, model_adapter, configuration=None, preload=False):\n super().__init__(model_adapter, configuration, preload)\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'soft_threshold': NumericalValue(default_value=0.5, min=0.0, max=1.0),\n 'blur_strength': NumericalValue(value_type=int, default_value=1, min=0, max=25)\n })\n\n return parameters\n\n def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]):\n predictions = outputs[self.output_blob_name].squeeze()\n soft_prediction = np.transpose(predictions, axes=(1, 2, 0))\n\n hard_prediction = create_hard_prediction_from_soft_prediction(\n soft_prediction=soft_prediction,\n soft_threshold=self.soft_threshold,\n blur_strength=self.blur_strength\n )\n hard_prediction = cv2.resize(hard_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST)\n soft_prediction = cv2.resize(soft_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST)\n metadata['soft_predictions'] = soft_prediction\n\n return hard_prediction\n",
"# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport importlib\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\n\nimport torch\n\n\n_is_nncf_enabled = importlib.util.find_spec('nncf') is not None\n\n\ndef is_nncf_enabled():\n return _is_nncf_enabled\n\n\ndef check_nncf_is_enabled():\n if not is_nncf_enabled():\n raise RuntimeError('Tried to use NNCF, but NNCF is not installed')\n\n\ndef get_nncf_version():\n if not is_nncf_enabled():\n return None\n import nncf\n return nncf.__version__\n\n\ndef load_checkpoint(model, filename, map_location=None, strict=False):\n \"\"\"Load checkpoint from a file or URI.\n\n Args:\n model (Module): Module to load checkpoint.\n filename (str): Either a filepath or URL or modelzoo://xxxxxxx.\n map_location (str): Same as :func:`torch.load`.\n strict (bool): Whether to allow different params for the model and\n checkpoint.\n\n Returns:\n dict or OrderedDict: The loaded checkpoint.\n \"\"\"\n from nncf.torch import load_state\n\n checkpoint = torch.load(filename, map_location=map_location)\n # get state_dict from checkpoint\n if isinstance(checkpoint, OrderedDict):\n state_dict = checkpoint\n elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n raise RuntimeError(\n 'No state_dict found in checkpoint file {}'.format(filename))\n _ = load_state(model, state_dict, strict)\n return checkpoint\n\n\n@contextmanager\ndef nullcontext():\n \"\"\"\n Context which does nothing\n \"\"\"\n yield\n\n\ndef no_nncf_trace():\n \"\"\"\n Wrapper for original NNCF no_nncf_trace() context\n \"\"\"\n\n if is_nncf_enabled():\n from nncf.torch.dynamic_graph.context import no_nncf_trace as original_no_nncf_trace\n return original_no_nncf_trace()\n return nullcontext()\n\n\ndef is_in_nncf_tracing():\n if not is_nncf_enabled():\n return False\n\n from nncf.torch.dynamic_graph.context import get_current_context\n\n ctx = get_current_context()\n\n if ctx is None:\n return False\n return ctx.is_tracing\n\ndef is_accuracy_aware_training_set(nncf_config):\n if not is_nncf_enabled():\n return False\n from nncf.config.utils import is_accuracy_aware_training\n is_acc_aware_training_set = is_accuracy_aware_training(nncf_config)\n return is_acc_aware_training_set\n"
] | [
[
"numpy.transpose"
],
[
"torch.load"
]
] |
sixhobbits/Data-Storage-for-Artificial-Intelligence | [
"af14abfeb4f091da1dadab5aa6c02801c50e70da"
] | [
"Chapter09/Exercise03/preprocess_data.py"
] | [
"import argparse\nimport os\nimport json\nfrom pathlib import Path\nimport pandas as pd\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n prog=\"exercise 3\",\n description=\"preprocess meta data\")\n parser.add_argument('-f', '--file', type=str, required=True, help='meta data file path')\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n # get args\n args = parse_args()\n filepath = args.file\n\n # read data\n data_cats = json.load(open(filepath, 'r'))\n # convert json to dataframe\n df_cat = pd.DataFrame(data_cats)\n df_cat['category'] = df_cat['items'].apply(lambda x: x['snippet']['title'])\n df_cat['id'] = df_cat['items'].apply(lambda x: int(x['id']))\n df_cat_drop = df_cat.drop(columns=['kind', 'etag', 'items'])\n # cache\n dir_cache = Path(__file__).parent.absolute()/'tmp'\n try:\n df_cat_drop.to_csv(os.path.join(dir_cache, 'data_cats.csv'))\n except FileNotFoundError:\n os.mkdir(dir_cache)\n df_cat_drop.to_csv(os.path.join(dir_cache, 'data_cats.csv'))\n"
] | [
[
"pandas.DataFrame"
]
] |
shashankk24/natural-language-summary-generation-from-structured-data-master | [
"a8bd083685ff7d5c0228588c47ddfcecba4cf78b"
] | [
"TensorFlow_implementation/Summary_Generator/Tensorflow_Graph/order_planner_without_copynet.py"
] | [
"'''\n This file generates the graph of the Model that we are going to use for the order planner for neural summary generator\n The function returns the graph object and some of the important handles of the tensors of the graph in a dictionary.\n Note, that all the possible tensor handles can be obtained by the tf.get_tensor_by_name() function. This is done to make\n things easy.\n'''\n\nimport tensorflow as tf\n\n# define the graph builder function:\ndef get_computation_graph(seed_value, field_vocab_size, content_label_vocab_size, field_embedding_size,\n content_label_embedding_size, lstm_cell_state_size, hidden_state_size, rev_content_label_dict):\n '''\n Function for building the graph for model 1:\n The architecture is same as defined in the base paper, except the copynet part\n '''\n\n # reset the current graph in the session\n tf.reset_default_graph()\n\n graph = tf.Graph() # create a new graph object\n\n # define all the graph computations using the as_default function\n print(\"\\n\\n=============================================================================================================\")\n print(\"Building the graph ... \")\n with graph.as_default():\n\n\n # ========================================================================\n # | Step 1:\n # ========================================================================\n\n print(\"\\nstep 1: Creating input placeholders for the computations ...\")\n # Placeholders for the input data:\n with tf.variable_scope(\"Input_Data\"):\n tf_field_encodings = tf.placeholder(tf.int32, shape=(None, None), name=\"input_field_encodings\")\n tf_content_encodings = tf.placeholder(tf.int32, shape=(None, None), name=\"input_content_encodings\")\n tf_label_encodings = tf.placeholder(tf.int32, shape=(None, None), name=\"input_label_encodings\")\n\n # This is a placeholder for storing the lengths of the input sequences (they are padded to tensor)\n tf_input_seqs_lengths = tf.placeholder(tf.int32, shape=(None,), name=\"input_sequence_lengths\")\n\n # This is a placeholder for storing the lengths of the decoder sequences (they are padded to tensor)\n tf_label_seqs_lengths = tf.placeholder(tf.int32, shape=(None,), name=\"decoder_sequence_lengths\")\n\n\n # create the one-hot encoded values for the label_encodings\n with tf.variable_scope(\"One_hot_encoder\"):\n tf_one_hot_label_encodings = tf.one_hot(tf_label_encodings, depth=content_label_vocab_size)\n\n # print all placeholders for the encodings generated in step 1\n print(\"\\tplaceholder for the field_encodings: \", tf_field_encodings)\n print(\"\\tplaceholder for the content_encodings: \", tf_content_encodings)\n print(\"\\tplaceholder for the label_encodings: \", tf_label_encodings)\n print(\"\\tplaceholder for the input_sequence_lengths: \", tf_input_seqs_lengths)\n print(\"\\tplaceholder for the label_sequence_lengths: \", tf_label_seqs_lengths)\n\n\n # ========================================================================\n # | Step 2:\n # ========================================================================\n\n print(\"\\nstep 2: Creating Embeddings Mechanism for the input and the output words ...\")\n # Scope for the shared Content_Label matrix\n with tf.variable_scope(\"Unified_Vocabulary_Matrix\"):\n content_label_embedding_matrix = tf.get_variable(\"content_label_embedding_matrix\",\n shape=(content_label_vocab_size, content_label_embedding_size),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n dtype=tf.float32)\n\n # Embeddings for the given 
input data:\n with tf.variable_scope(\"Input_Embedder\"):\n # Embed the field encodings:\n field_embedding_matrix = tf.get_variable(\"field_embedding_matrix\",\n shape=(field_vocab_size, field_embedding_size),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n dtype=tf.float32)\n\n tf_field_embedded = tf.nn.embedding_lookup(field_embedding_matrix, tf_field_encodings, name=\"field_embedder\")\n\n # Embed the content encodings:\n tf_content_embedded = tf.nn.embedding_lookup(content_label_embedding_matrix,\n tf_content_encodings, name=\"content_embedder\")\n\n\n print(\"\\tEmbedded_Input_Tensors: \", tf_field_embedded, tf_content_embedded)\n\n # Embeddings for the label (summary sentences):\n with tf.variable_scope(\"Label_Embedder\"):\n # embed the label encodings\n tf_label_embedded = tf.nn.embedding_lookup(content_label_embedding_matrix,\n tf_label_encodings, name=\"label_embedder\")\n\n print(\"\\tEmbedded_Label_Tensors: \", tf_label_embedded)\n\n # Concatenate the Input embeddings channel_wise and obtain the combined input tensor\n with tf.variable_scope(\"Input_Concatenator\"):\n tf_field_content_embedded = tf.concat([tf_field_embedded, tf_content_embedded], axis=-1, name=\"concatenator\")\n\n print(\"\\tFinal_Input_to_the_Encoder: \", tf_field_content_embedded)\n\n\n # ========================================================================\n # | Step 3:\n # ========================================================================\n\n print(\"\\nstep 3: Creating the encoder RNN to obtain the encoded input sequences. (The Encoder Module) ... \")\n with tf.variable_scope(\"Encoder\"):\n encoded_input, encoder_final_state = tf.nn.dynamic_rnn (\n cell = tf.nn.rnn_cell.LSTMCell(lstm_cell_state_size), # let all parameters to be default\n inputs = tf_field_content_embedded,\n sequence_length = tf_input_seqs_lengths,\n dtype = tf.float32\n )\n print(\"\\tEncoded_vectors_bank for attention mechanism: \", encoded_input)\n\n # define the size parameter for the encoded_inputs\n encoded_inputs_embeddings_size = encoded_input.shape[-1]\n\n print(\"\\tFinal_state obtained from the last step of encoder: \", encoder_final_state)\n\n\n # ========================================================================\n # | Step 4:\n # ========================================================================\n\n print(\"\\nstep 4: defining the Attention Mechanism for the Model (The Dispatcher Module) ...\")\n\n\n print(\"**step 4.1: defining the content based attention\")\n with tf.variable_scope(\"Content_Based_Attention/trainable_weights\"):\n '''\n These weights and bias matrices must be compatible with the dimensions of the h_values and the f_values\n passed to the function below. 
If they are not, some exception might get thrown and it would be difficult\n to debug it.\n '''\n # field weights for the content_based attention\n W_f = tf.get_variable(\"field_attention_weights\", shape=(field_embedding_size, content_label_embedding_size),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))\n b_f = tf.get_variable(\"field_attention_biases\", shape=(field_embedding_size, 1),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))\n\n # hidden states weights for the content_based attention\n W_c = tf.get_variable(\"content_attention_weights\",\n shape=(encoded_inputs_embeddings_size, content_label_embedding_size),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))\n b_c = tf.get_variable(\"content_attention_biases\", shape=(encoded_inputs_embeddings_size, 1),\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value))\n\n # Define the summary_ops for all the weights:\n W_f_summary = tf.summary.histogram(\"Content_based_attention/field_weights\", W_f)\n b_f_summary = tf.summary.histogram(\"Content_based_attention/field_biases\", b_f)\n W_c_summary = tf.summary.histogram(\"Content_based_attention/content_weights\", W_c)\n b_c_summary = tf.summary.histogram(\"Content_based_attention/content_biases\", b_c)\n\n\n with tf.variable_scope(\"Content_Based_Attention\"):\n def get_content_based_attention_vectors(query_vectors):\n '''\n function that returns the alpha_content vector using the yt-1 (query vectors)\n '''\n # use the W_f and b_f to transform the query_vectors to the shape of f_values\n f_trans_query_vectors = tf.matmul(W_f, tf.transpose(query_vectors)) + b_f\n # use the W_c and b_c to transform the query_vectors to the shape of h_values\n h_trans_query_vectors = tf.matmul(W_c, tf.transpose(query_vectors)) + b_c\n\n # transpose and expand the dims of the f_trans_query_vectors\n f_trans_query_matrices = tf.expand_dims(tf.transpose(f_trans_query_vectors), axis=-1)\n # obtain the field attention_values by using the matmul operation\n field_attention_values = tf.matmul(tf_field_embedded, f_trans_query_matrices)\n\n # perform the same process for the h_trans_query_vectors\n h_trans_query_matrices = tf.expand_dims(tf.transpose(h_trans_query_vectors), axis=-1)\n hidden_attention_values = tf.matmul(encoded_input, h_trans_query_matrices)\n\n # drop the last dimension (1 sized)\n field_attention_values = tf.squeeze(field_attention_values, axis=[-1])\n hidden_attention_values = tf.squeeze(hidden_attention_values, axis=[-1])\n\n\n # free up non_required resources:\n ret_value = tf.nn.softmax(field_attention_values * hidden_attention_values, name=\"softmax\")\n\n # return the element wise multiplied values followed by softmax\n return ret_value\n\n\n print(\"**step 4.2: defining the link based attention\")\n with tf.variable_scope(\"Link_Based_Attention/trainable_weights\"):\n '''\n The dimensions of the Link_Matrix must be properly compatible with the field_vocab_size.\n If they are not, some exception might get thrown and it would be difficult\n to debug it.\n '''\n Link_Matrix = tf.get_variable(\"Link_Attention_Matrix\", shape=(field_vocab_size, field_vocab_size),\n dtype=tf.float32, initializer=tf.random_normal_initializer(mean=0.5, stddev=0.5, seed=seed_value))\n\n # Link_Matrix_summary = tf.summary.histogram(\"Link_based_attention\", Link_Matrix)\n\n print(\"\\tThe Link Matrix used for this attention: \", Link_Matrix)\n\n\n # define the function for obtaining the 
link based attention values.\n with tf.variable_scope(\"Link_Based_Attention\"):\n def get_link_based_attention_vectors(prev_attention_vectors):\n '''\n This function generates the link based attention vectors using the Link matrix and the\n '''\n # carve out only the relevant values from the Link matrix\n matrix_all_values_from = tf.nn.embedding_lookup(Link_Matrix, tf_field_encodings)\n\n # // TODO: Calculate the matrix_relevant_values from matrix_all_values_from\n matrix_relevant_values = tf.map_fn(lambda u: tf.gather(u[0],u[1],axis=1),\n [matrix_all_values_from, tf_field_encodings], dtype=matrix_all_values_from.dtype)\n\n\n return tf.nn.softmax(tf.reduce_sum(tf.expand_dims(prev_attention_vectors, axis = -1) *\n matrix_relevant_values, axis=1),name=\"softmax\")\n\n\n print(\"**step 4.3: defining the hybrid attention\")\n # define the hybrid of the content based and the link based attention\n with tf.variable_scope(\"Hybrid_attention/trainable_weights\"):\n # for now, this is just the content_based attention:\n Zt_weights = tf.get_variable(\"zt_gate_parameter_vector\", dtype=tf.float32,\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n shape=(hidden_state_size + field_embedding_size + content_label_embedding_size, 1))\n\n Zt_weights_summary = tf.summary.histogram(\"Hybrid_attention/zt_weights\", Zt_weights)\n\n\n with tf.variable_scope(\"Hybrid_attention\"):\n # define the hybrid_attention_calculator function:\n def get_hybrid_attention(h_values, y_values, content_attention, link_attention):\n '''\n function to calculate the hybrid attention using the content_attention and the link_attention\n '''\n # calculate the e_f values\n e_t = tf.reduce_sum(tf.expand_dims(link_attention, axis=-1) * tf_field_embedded, axis=1)\n\n # create the concatenated vectors from h_values e_t and y_values\n input_to_zt_gate = tf.concat([h_values, e_t, y_values], axis=-1) # channel wise concatenation\n\n # perform the computations of the z gate:\n z_t = tf.nn.sigmoid(tf.matmul(input_to_zt_gate, Zt_weights))\n\n # calculate z_t~ value using the empirical values = 0.2z_t + 0.5\n z_t_tilde = (0.2 * z_t) + 0.5\n\n # compute the final hybrid_attention_values using the z_t_tilde values over content and link based values\n hybrid_attention = (z_t_tilde * content_attention) + ((1 - z_t_tilde) * link_attention)\n\n # return the calculated hybrid attention:\n return hybrid_attention\n\n\n # ========================================================================\n # | Step 5:\n # ========================================================================\n\n print(\"\\nstep 5: creating the decoder RNN to obtain the generated summary for the structured data (The Decoder Module) ...\")\n\n with tf.variable_scope(\"Decoder/trainable_weights\"):\n # define the weights for the output projection calculation\n W_output = tf.get_variable(\n \"output_projector_matrix\", dtype=tf.float32,\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n shape=(hidden_state_size, content_label_vocab_size))\n b_output = tf.get_variable(\n \"output_projector_biases\", dtype=tf.float32,\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n shape=(content_label_vocab_size,))\n\n # define the weights and biases for the x_t calculation\n W_d = tf.get_variable(\n \"x_t_gate_matrix\", dtype=tf.float32,\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n shape=((hidden_state_size + content_label_embedding_size), 
content_label_embedding_size))\n b_d = tf.get_variable(\n \"x_t_gate_biases\", dtype=tf.float32,\n initializer=tf.random_uniform_initializer(minval=-1, maxval=1, seed=seed_value),\n shape=(content_label_embedding_size,))\n\n # define the summary ops for the defined weights and biases\n W_output_summary = tf.summary.histogram(\"Decoder/W_output\", W_output)\n b_output_summary = tf.summary.histogram(\"Decoder/b_output\", b_output)\n W_d_summary = tf.summary.histogram(\"Decoder/W_d\", W_d)\n b_d_summary = tf.summary.histogram(\"Decoder/b_d\", b_d)\n\n # create the LSTM cell to be used for decoding purposes\n decoder_cell = tf.nn.rnn_cell.LSTMCell(lstm_cell_state_size)\n\n def decode(start_tokens, mode = \"inference\", decoder_lengths = None, w_reuse = True):\n '''\n Function that defines the decoder op and returns the decoded sequence (the summary)\n\n @params:\n start_tokens = a tensor containing the start tokens (one for each sequence in the batch)\n mode = a value from \"training\" or \"inference\" to determine for how long the decoder rnn is to be unrolled.\n behaviour is as follows:\n \"training\" => The rnn will be unrolled until the max(decode_lengths). decode_lengths cannot be None.\n \"inference\" => decode_lengths is ignored and unrolling will be done till <eos> is received\n\n '''\n with tf.variable_scope(\"Decoder\", reuse = w_reuse):\n # define the function to obtain the predictions out of the given hidden_state_values\n def get_predictions(h_t_values):\n '''\n This function transforms the h_t_values into a one_hot_type probability vector\n '''\n # apply the output_projection gate to obtain the predictions from the h_t_values\n predictions = tf.matmul(h_t_values, W_output) + b_output\n\n # return the predictions:\n return predictions\n\n\n # define a function to obtain the values for the next input to the LSTM_cell (y_t values)\n def get_y_t_values(pred_vals):\n '''\n pred_vals = the tensor of shape [batch_size x content_label_vocab_size]\n '''\n\n # calculate the next words to be predicted\n act_preds = tf.argmax(pred_vals, axis=-1)\n\n # perform embedding lookup for these act_preds\n y_t_values = tf.nn.embedding_lookup(content_label_embedding_matrix, act_preds)\n\n # return the calculated y_t_values\n return y_t_values\n\n\n # write the loop function for the raw_rnn:\n def decoder_loop_function(time, cell_output, cell_state, loop_state):\n '''\n The decoder loop function for the raw_rnn\n @params\n compatible with -> https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn\n '''\n if(cell_state is None):\n # initial call of the loop function\n finished = (time >= tf_label_seqs_lengths)\n next_input = start_tokens\n next_cell_state = encoder_final_state\n emit_output = tf.placeholder(tf.float32, shape=(content_label_vocab_size))\n next_loop_state = tf.zeros_like(tf_field_encodings, dtype=tf.float32)\n\n else:\n # we define the loop_state as the prev_hybrid attention_vector!\n prev_attention_vectors = loop_state # extract the prev_attention_vector from the loop state\n\n # obtain the predictions for the cell_output\n preds = get_predictions(cell_output)\n\n # obtain the y_t_values from the cell_output values:\n y_t_values = get_y_t_values(preds)\n\n ''' Calculate the attention: '''\n # calculate the content_based attention values using the defined module\n cont_attn = get_content_based_attention_vectors(y_t_values)\n\n # calculate the link based attention values\n link_attn = get_link_based_attention_vectors(prev_attention_vectors)\n # print \"link_attention: \", link_attn\n\n 
# calculate the hybrid_attention\n hybrid_attn = get_hybrid_attention(cell_output, y_t_values, cont_attn, link_attn)\n\n ''' Calculate the x_t vector for next_input value'''\n # use the hybrid_attn to attend over the encoded_input (to calculate the a_t values)\n a_t_values = tf.reduce_sum(tf.expand_dims(hybrid_attn, axis=-1) * encoded_input, axis=1)\n\n # apply the x_t gate\n x_t = tf.tanh(tf.matmul(tf.concat([a_t_values, y_t_values], axis=-1), W_d) + b_d)\n\n\n ''' Calculate the finished vector for performing computations '''\n # define the finished parameter for the loop to determine whether to continue or not.\n if(mode == \"training\"):\n finished = (time >= decoder_lengths)\n\n elif(mode == \"inference\"):\n temp = tf.argmax(preds, axis=-1) # obtain the output predictions in encoded form\n finished = (temp == rev_content_label_dict['<eos>'])\n\n ''' Copy mechanism is left (//TODO: change the following and implement copy mechanism)'''\n emit_output = preds\n\n # The next_input is the x_t vector so calculated:\n next_input = x_t\n # The next loop_state is the current content_based attention\n next_loop_state = hybrid_attn\n # The next_cell_state is going to be equal to the cell_state. (we don't tweak it)\n next_cell_state = cell_state\n\n # In both cases, the return value is the same.\n # return all these created parameters\n return (finished, next_input, next_cell_state, emit_output, next_loop_state)\n\n # use the tf.nn.raw_rnn to define the decoder computations\n outputs, _, _ = tf.nn.raw_rnn(decoder_cell, decoder_loop_function)\n\n # return the outputs obtained from the raw_rnn:\n return tf.transpose(outputs.stack(), perm=[1, 0, 2])\n\n\n # ========================================================================\n # | Step 6:\n # ========================================================================\n\n print(\"\\nstep 6: defining the training computations ...\")\n\n with tf.name_scope(\"Training_computations\"):\n outputs = decode(tf_label_embedded[:, 0, :], mode=\"training\",\n decoder_lengths=tf_label_seqs_lengths, w_reuse=None)\n\n\n # print the outputs:\n print(\"\\tFinal Output_Tensor obtained from the decoder: \", outputs)\n\n\n # ========================================================================\n # | Step 7:\n # ========================================================================\n\n print(\"\\nstep 7: defining the cost function for optimization ...\")\n\n # define the loss (objective) function for minimization\n with tf.variable_scope(\"Loss\"):\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(logits=outputs, labels=tf_one_hot_label_encodings))\n\n # record the loss summary:\n loss_summary = tf.summary.scalar(\"Objective_loss\", loss)\n\n\n # ========================================================================\n # | Step 8:\n # ========================================================================\n\n print(\"\\nstep 8: defining the computations for the inference mode ...\")\n\n # define the computations for the inference mode\n with tf.variable_scope(\"inference_computations\"):\n inf_outputs = decode(tf_label_embedded[:, 0, :])\n\n print(\"\\tInference outputs: \", inf_outputs)\n\n\n # ========================================================================\n # | Step _:\n # ========================================================================\n\n print(\"\\nstep _ : setting up the errands for TensorFlow ...\")\n\n with tf.variable_scope(\"Errands\"):\n all_summaries = tf.summary.merge_all()\n\n 
print(\"=============================================================================================================\\n\\n\")\n\n # Generate the interface dictionary object for this defined graph\n interface_dict = {\n\n # Tensors for input placeholders into the graph\n \"input\": {\n \"field_encodings\": tf_field_encodings,\n \"content_encodings\": tf_content_encodings,\n \"label_encodings\": tf_label_encodings,\n \"input_sequence_lengths\": tf_input_seqs_lengths,\n \"label_sequence_lengths\": tf_label_seqs_lengths\n },\n\n # Tensors for embedding matrices:\n \"field_embeddings\": field_embedding_matrix,\n \"content_label_embeddings\": content_label_embedding_matrix,\n\n # Tensor for loss\n \"loss\": loss,\n\n # Tensor for the inference output:\n \"inference\": inf_outputs,\n\n # Tensor for training outputs\n \"training_output\": outputs,\n\n # Tensor for init and summary_ops\n \"summary\": all_summaries\n }\n\n # return the built graph object and its interface dictionary:\n return graph, interface_dict\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.one_hot",
"tensorflow.concat",
"tensorflow.nn.softmax",
"tensorflow.nn.raw_rnn",
"tensorflow.summary.histogram",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.Graph",
"tensorflow.random_normal_initializer",
"tensorflow.random_uniform_initializer",
"tensorflow.transpose",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.reset_default_graph",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.argmax",
"tensorflow.gather"
]
] |
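The record above combines content-based and link-based attention through a learned gate (zt_gate_parameter_vector), squashing the gate output as z_t_tilde = 0.2*z_t + 0.5. A minimal NumPy sketch of just that gating step, with made-up shapes standing in for the graph tensors:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, seq_len, dim = 2, 5, 8
rng = np.random.default_rng(0)

h_t = rng.normal(size=(batch, dim))    # decoder hidden state
e_t = rng.normal(size=(batch, dim))    # link-attended field embedding
y_t = rng.normal(size=(batch, dim))    # previous output embedding
content_attn = rng.dirichlet(np.ones(seq_len), size=batch)  # softmax-normalized rows
link_attn = rng.dirichlet(np.ones(seq_len), size=batch)

Zt_weights = rng.normal(size=(3 * dim, 1))   # analogue of zt_gate_parameter_vector

z_t = sigmoid(np.concatenate([h_t, e_t, y_t], axis=-1) @ Zt_weights)  # (batch, 1)
z_t_tilde = 0.2 * z_t + 0.5   # empirical squashing used in the record
hybrid_attn = z_t_tilde * content_attn + (1.0 - z_t_tilde) * link_attn
print(hybrid_attn.shape)      # (2, 5); each row still sums to 1

Because z_t_tilde lies in (0.5, 0.7), the gate always weights the content-based scores at least as much as the link-based ones, which follows from the empirical constants in the record.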
skavulya/atk | [
"c83f0bee2530282e39bf28d4a15355561b5eca4d"
] | [
"python-client/trustedanalytics/tests/sources.py"
] | [
"# vim: set encoding=utf-8\n\n#\n# Copyright (c) 2015 Intel Corporation \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections import OrderedDict\n\nclass SimpleDataSource(object):\n\n annotation = \"simple\"\n\n def __init__(self, schema=None, rows=None, columns=None):\n if not ((rows is None) ^ (columns is None)):\n raise ValueError(\"Either rows or columns must be supplied\")\n if schema and not isinstance(schema, OrderedDict):\n self.schema = OrderedDict(schema)\n else:\n self.schema = schema\n self.rows = rows\n self.columns = columns\n if columns:\n names = self.schema.keys()\n if len(names) != len(self.columns):\n raise ValueError(\"number of columns in schema not equals number of columns provided\")\n for key in self.columns.keys():\n if key not in names:\n raise ValueError(\"names in schema do not all match the names in the columns provided\")\n\n def to_pandas_dataframe(self):\n import numpy as np\n from pandas import DataFrame\n if self.rows:\n a = np.array(self.rows, dtype=_schema_as_numpy_dtype(self.schema))\n df = DataFrame(a)\n else: # columns\n df = DataFrame(self.columns)\n return df\n\ndef _schema_as_numpy_dtype(schema):\n return [(c, _get_numpy_dtype_from_core_type(t)) for c, t in schema.items()]\n\ndef _get_numpy_dtype_from_core_type(t):\n return object\n # if t in [str, unicode, dict, bytearray, list]:\n # return object\n # return t\n"
] | [
[
"pandas.DataFrame"
]
] |
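A short usage sketch for SimpleDataSource from the record above; the schema and values are invented for illustration, and the class is assumed importable from the module path shown in the record:

from collections import OrderedDict
from trustedanalytics.tests.sources import SimpleDataSource  # path per the record

schema = OrderedDict([("name", str), ("age", int)])

# rows-style construction: one tuple per row
rows_source = SimpleDataSource(schema=schema, rows=[("alice", 30), ("bob", 25)])

# columns-style construction: keys must match the schema names exactly
cols_source = SimpleDataSource(schema=schema,
                               columns={"name": ["alice", "bob"], "age": [30, 25]})

print(rows_source.to_pandas_dataframe().shape)  # (2, 2)
print(cols_source.to_pandas_dataframe().shape)  # (2, 2)

Note that the constructor enforces rows XOR columns, so passing both (or neither) raises ValueError.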
edwardstm/rbf_keras | [
"4029d1c15003438f7caadb9efefe0c026ba18933"
] | [
"rbflayer.py"
] | [
"import tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras.initializers import RandomUniform, Initializer, Constant\nimport numpy as np\n\nclass InitCentersRandom(Initializer):\n \"\"\" Initializer for initialization of centers of RBF network\n as random samples from the given data set.\n\n # Arguments\n X: matrix, dataset to choose the centers from (random rows\n are taken as centers)\n \"\"\"\n\n def __init__(self, X):\n self.X = X\n\n def __call__(self, shape, dtype=None):\n assert shape[1] == self.X.shape[1]\n idx = tf.constant( np.random.randint(self.X.shape[0], size=shape[0]) )\n return self.X[idx, :]\n\n\nclass RBFLayer(Layer):\n \"\"\" Layer of Gaussian RBF units.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(RBFLayer(10,\n initializer=InitCentersRandom(X),\n betas=1.0,\n input_shape=(1,)))\n model.add(Dense(1))\n ```\n\n\n # Arguments\n output_dim: number of hidden units (i.e. number of outputs of the\n layer)\n initializer: instance of initiliazer to initialize centers\n betas: float, initial value for betas\n\n \"\"\"\n\n def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):\n self.output_dim = output_dim\n self.init_betas = betas\n if not initializer:\n self.initializer = RandomUniform(0.0, 1.0)\n else:\n self.initializer = initializer\n super(RBFLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n\n self.centers = self.add_weight(name='centers',\n shape=(self.output_dim, input_shape[1]),\n initializer=self.initializer,\n trainable=True)\n self.betas = self.add_weight(name='betas',\n shape=(self.output_dim,),\n initializer=Constant(\n value=self.init_betas),\n # initializer='ones',\n trainable=True)\n\n super(RBFLayer, self).build(input_shape)\n\n def call(self, x):\n\n C = K.expand_dims(self.centers)\n H = K.transpose(C-K.transpose(x))\n return K.exp(-self.betas * K.sum(H**2, axis=1))\n\n # C = self.centers[np.newaxis, :, :]\n # X = x[:, np.newaxis, :]\n\n # diffnorm = K.sum((C-X)**2, axis=-1)\n # ret = K.exp( - self.betas * diffnorm)\n # return ret\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.output_dim)\n\n def get_config(self):\n # have to define get_config to be able to use model_from_json\n config = {\n 'output_dim': self.output_dim\n }\n base_config = super(RBFLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] | [
[
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.keras.backend.transpose",
"numpy.random.randint"
]
] |
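A usage sketch for the RBFLayer record above, following the Sequential example in its docstring; the training data here is synthetic:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# RBFLayer and InitCentersRandom are defined in the rbflayer.py record above

X = np.random.uniform(0.0, 1.0, size=(100, 1)).astype("float32")
y = np.sin(2.0 * np.pi * X[:, 0])   # toy regression target

model = Sequential()
model.add(RBFLayer(10,
                   initializer=InitCentersRandom(X),  # centers = random rows of X
                   betas=2.0,
                   input_shape=(1,)))
model.add(Dense(1))
model.compile(optimizer="rmsprop", loss="mse")
model.fit(X, y, epochs=20, batch_size=16, verbose=0)

InitCentersRandom asserts that the requested center width matches X.shape[1], so the layer's input_shape must agree with the sampled data.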
vt-vl-lab/video-data-aug | [
"01667cdbd1b952f2510af3422beeeb76e0d9e15a"
] | [
"tools/data/build_rawframes.py"
] | [
"import argparse\nimport glob\nimport os\nimport os.path as osp\nimport sys\nimport warnings\nfrom multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\n\n# custom import \nimport pandas as pd\nimport pdb\n\ndef extract_frame(vid_item):\n \"\"\"Generate optical flow using dense flow.\n\n Args:\n vid_item (list): Video item containing video full path,\n video (short) path, video id.\n\n Returns:\n bool: Whether generate optical flow successfully.\n \"\"\"\n full_path, vid_path, vid_id, method, task = vid_item\n if '/' in vid_path:\n act_name = osp.basename(osp.dirname(vid_path))\n out_full_path = osp.join(args.out_dir, act_name)\n else:\n out_full_path = args.out_dir\n\n if task == 'rgb':\n if args.use_opencv:\n # Not like using denseflow,\n # Use OpenCV will not make a sub directory with the video name\n video_name = osp.splitext(osp.basename(vid_path))[0]\n out_full_path = osp.join(out_full_path, video_name)\n\n vr = mmcv.VideoReader(full_path)\n for i in range(len(vr)):\n if vr[i] is not None:\n w, h, c = np.shape(vr[i])\n if args.new_short == 0:\n out_img = mmcv.imresize(vr[i], (args.new_width,\n args.new_height))\n else:\n if min(h, w) == h:\n new_h = args.new_short\n new_w = int((new_h / h) * w)\n else:\n new_w = args.new_short\n new_h = int((new_w / w) * h)\n out_img = mmcv.imresize(vr[i], (new_h, new_w))\n mmcv.imwrite(out_img,\n f'{out_full_path}/img_{i + 1:05d}.jpg')\n else:\n warnings.warn(\n 'Length inconsistent!'\n f'Early stop with {i + 1} out of {len(vr)} frames.')\n break\n else:\n if args.new_short == 0:\n cmd = osp.join(\n f\"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'\"\n f' -nw={args.new_width} -nh={args.new_height} -v')\n else:\n cmd = osp.join(\n f\"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'\"\n f' -ns={args.new_short} -v')\n os.system(cmd)\n elif task == 'flow':\n if args.input_frames:\n if args.new_short == 0:\n cmd = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -nw={args.new_width} --nh={args.new_height} -v --if')\n else:\n cmd = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -ns={args.new_short} -v --if')\n else:\n if args.new_short == 0:\n cmd = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -nw={args.new_width} --nh={args.new_height} -v')\n else:\n cmd = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -ns={args.new_short} -v')\n os.system(cmd)\n else:\n if args.new_short == 0:\n cmd_rgb = osp.join(\n f\"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'\"\n f' -nw={args.new_width} -nh={args.new_height} -v')\n cmd_flow = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -nw={args.new_width} -nh={args.new_height} -v')\n else:\n cmd_rgb = osp.join(\n f\"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'\"\n f' -ns={args.new_short} -v')\n cmd_flow = osp.join(\n f\"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'\" # noqa: E501\n f' -ns={args.new_short} -v')\n os.system(cmd_rgb)\n os.system(cmd_flow)\n\n print(f'{task} {vid_id} {vid_path} {method} done')\n sys.stdout.flush()\n return True\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='extract optical flows')\n parser.add_argument('src_dir', type=str, help='source video directory')\n parser.add_argument('out_dir', type=str, help='output rawframe directory')\n 
parser.add_argument(\n '--task',\n type=str,\n default='flow',\n choices=['rgb', 'flow', 'both'],\n help='which type of frames to be extracted')\n parser.add_argument(\n '--level',\n type=int,\n choices=[1, 2],\n default=2,\n help='directory level of data')\n parser.add_argument(\n '--num-worker',\n type=int,\n default=8,\n help='number of workers to build rawframes')\n parser.add_argument(\n '--flow-type',\n type=str,\n default=None,\n choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'],\n help='flow type to be generated')\n parser.add_argument(\n '--out-format',\n type=str,\n default='jpg',\n choices=['jpg', 'h5', 'png'],\n help='output format')\n parser.add_argument(\n '--ext',\n type=str,\n default='avi',\n choices=['avi', 'mp4', 'webm'],\n help='video file extensions')\n parser.add_argument(\n '--new-width', type=int, default=0, help='resize image width')\n parser.add_argument(\n '--new-height', type=int, default=0, help='resize image height')\n parser.add_argument(\n '--new-short',\n type=int,\n default=0,\n help='resize image short side length keeping ratio')\n parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')\n parser.add_argument(\n '--resume',\n action='store_true',\n default=False,\n help='resume optical flow extraction instead of overwriting')\n parser.add_argument(\n '--use-opencv',\n action='store_true',\n help='Whether to use opencv to extract rgb frames')\n parser.add_argument(\n '--input-frames',\n action='store_true',\n help='Whether to extract flow frames based on rgb frames')\n parser.add_argument(\n '--ref_listfile_path', type=str, default='', help='reference listfile path for the subset') \n args = parser.parse_args()\n\n return args\n\ndef get_subset_classes(ref_listfile_path):\n df = pd.read_csv(ref_listfile_path, header=None, sep='*')\n cur_data = df.values\n \n subset_classes = []\n for i,row in enumerate(cur_data): \n cur_cls = row[0].split('/')[1]\n cur_cls = cur_cls.replace(' ', '_').replace('(', '-').replace(')', '-')\n if cur_cls not in subset_classes:\n subset_classes.append(cur_cls)\n\n return subset_classes\n\n \ndef filter_vid_list(vid_list, src_dir, ref_listfile_path):\n subset_classes = get_subset_classes(ref_listfile_path)\n filtered_vid_list = []\n filtered_full_path_list = []\n for vid,fpath in zip(vid_list,fullpath_list):\n cur_cls = vid.split('/')[0]\n if cur_cls in subset_classes:\n filtered_vid_list.append(vid)\n filtered_full_path_list.append(os.path.join(src_dir, vid))\n\n return filtered_vid_list, filtered_full_path_list\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n if not osp.isdir(args.out_dir):\n print(f'Creating folder: {args.out_dir}')\n os.makedirs(args.out_dir)\n\n if args.level == 2:\n if args.ref_listfile_path != '':\n classes = get_subset_classes(args.ref_listfile_path)\n else:\n classes = os.listdir(args.src_dir)\n for classname in classes:\n new_dir = osp.join(args.out_dir, classname)\n if not osp.isdir(new_dir):\n print(f'Creating folder: {new_dir}')\n os.makedirs(new_dir)\n\n if args.input_frames:\n print('Reading rgb frames from folder: ', args.src_dir)\n fullpath_list = glob.glob(args.src_dir + '/*' * args.level)\n done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)\n print('Total number of rgb frame folders found: ', len(fullpath_list))\n else:\n print('Reading videos from folder: ', args.src_dir)\n print('Extension of videos: ', args.ext)\n fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' 
+\n args.ext)\n done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)\n print('Total number of videos found: ', len(fullpath_list))\n\n if args.resume:\n fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))\n fullpath_list = list(fullpath_list)\n print('Resuming. number of videos to be done: ', len(fullpath_list))\n\n if args.level == 2:\n vid_list = list(\n map(\n lambda p: osp.join(\n osp.basename(osp.dirname(p)), osp.basename(p)),\n fullpath_list))\n elif args.level == 1:\n vid_list = list(map(lambda p: osp.basename(p), fullpath_list))\n\n if args.ref_listfile_path != '':\n vid_list, fullpath_list = filter_vid_list(vid_list, args.src_dir, args.ref_listfile_path)\n \n pool = Pool(args.num_worker)\n pool.map(\n extract_frame,\n zip(fullpath_list, vid_list, range(len(vid_list)),\n len(vid_list) * [args.flow_type],\n len(vid_list) * [args.task]))\n"
] | [
[
"pandas.read_csv",
"numpy.shape"
]
] |
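The new_short branch of extract_frame in the record above rescales frames so the shorter side becomes new_short while preserving the aspect ratio; a standalone sketch of just that arithmetic (pure Python, no mmcv needed):

def resize_keep_ratio(h, w, new_short):
    # mirrors the new_short != 0 branch in extract_frame
    if min(h, w) == h:
        new_h = new_short
        new_w = int((new_h / h) * w)
    else:
        new_w = new_short
        new_h = int((new_w / w) * h)
    return new_h, new_w

print(resize_keep_ratio(480, 640, 256))  # (256, 341)
print(resize_keep_ratio(720, 480, 256))  # (384, 256)

One detail worth double-checking in the record: mmcv.imresize takes its size argument as (width, height), so passing (new_h, new_w) in that branch would swap the axes for non-square targets, while the other branch's (new_width, new_height) ordering is correct.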
Xuyiyang23333/asbot | [
"c3b8a88e0970c1b39f9f7575f64b3fc3fe5161ba"
] | [
"as/tools/generator.py"
] | [
"from PIL import Image, ImageDraw, ImageFont\r\nimport numpy as np\r\nfrom decimal import Decimal, ROUND_HALF_UP\r\nfrom math import radians, tan, cos, sin\r\nfrom os import path\r\n\r\n_round = lambda f, r=ROUND_HALF_UP: int(Decimal(str(f)).quantize(Decimal(\"0\"), rounding=r))\r\nrgb = lambda r, g, b: (r, g, b)\r\n\r\nupper_font_path = path.join(path.dirname(__file__), 'NotoSansCJKSC-Black.ttf')\r\ndowner_font_path = path.join(path.dirname(__file__), 'NotoSerifCJKSC-Black.ttf')\r\n\r\n\r\ndef get_gradient_2d(start, stop, width, height, is_horizontal=False):\r\n if is_horizontal:\r\n return np.tile(np.linspace(start, stop, width), (height, 1))\r\n else:\r\n return np.tile(np.linspace(start, stop, height), (width, 1)).T\r\n\r\n\r\ndef getTextWidth(text, font, width=100, height=500, recursive=False):\r\n step = 100\r\n img = Image.new(\"L\", (width, height))\r\n draw = ImageDraw.Draw(img)\r\n draw.text((0, 0), text, font=font, fill=255)\r\n box = img.getbbox()\r\n if box[2] < width - step or (recursive and box[2] == width - step):\r\n return box[2]\r\n else:\r\n return getTextWidth(text=text, font=font, width=width + step, height=height, recursive=True)\r\n\r\n\r\ndef get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list=(False, False, False)):\r\n result = np.zeros((height, width, len(start_list)), dtype=float)\r\n for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):\r\n result[:, :, i] = get_gradient_2d(start, stop, width, height, is_horizontal)\r\n return result\r\n\r\n\r\ndef createLinearGradient(steps, width, height, size=1, center=0.5):\r\n margin_up = _round(height * (center - size / 2))\r\n margin_down = _round(height * (1 - center - size / 2))\r\n result = np.zeros((0, width, len(steps[0])), dtype=float)\r\n for i, k in enumerate(steps.keys()):\r\n if k == 0:\r\n array = get_gradient_3d(width, _round(margin_up), steps[k], steps[k])\r\n result = np.vstack([result, array])\r\n continue\r\n pk = list(steps.keys())[i - 1]\r\n h = _round(height * size * (k - pk))\r\n array = get_gradient_3d(width, h, steps[pk], steps[k])\r\n result = np.vstack([result, array])\r\n if k == 1:\r\n array = get_gradient_3d(width, _round(margin_down), steps[k], steps[k])\r\n result = np.vstack([result, array])\r\n continue\r\n return result\r\n\r\n\r\ndef genBaseImage(width=1500, height=500):\r\n k = 0.63 # 渐变色缩放系数,不应大于1\r\n c = 0.53 # 渐变色中心位置\r\n\r\n downerSilverArray = createLinearGradient({\r\n 0: rgb(0, 15, 36),\r\n 0.10: rgb(255, 255, 255),\r\n 0.18: rgb(55, 58, 59),\r\n 0.25: rgb(55, 58, 59),\r\n 0.5: rgb(200, 200, 200),\r\n 0.75: rgb(55, 58, 59),\r\n 0.85: rgb(25, 20, 31),\r\n 0.91: rgb(240, 240, 240),\r\n 0.95: rgb(166, 175, 194),\r\n 1: rgb(50, 50, 50)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n goldArray = createLinearGradient({\r\n 0: rgb(253, 241, 0),\r\n 0.25: rgb(245, 253, 187),\r\n 0.4: rgb(255, 255, 255),\r\n 0.75: rgb(253, 219, 9),\r\n 0.9: rgb(127, 53, 0),\r\n 1: rgb(243, 196, 11)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n strokeRedArray = createLinearGradient({\r\n 0: rgb(255, 100, 0),\r\n 0.5: rgb(123, 0, 0),\r\n 0.51: rgb(240, 0, 0),\r\n 1: rgb(5, 0, 0)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n redArray = createLinearGradient({\r\n 0: rgb(230, 0, 0),\r\n 0.5: rgb(123, 0, 0),\r\n 0.51: rgb(240, 0, 0),\r\n 1: rgb(5, 0, 0)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n silver2Array = createLinearGradient({\r\n 0: rgb(245, 246, 248),\r\n 0.15: rgb(255, 255, 
255),\r\n 0.35: rgb(195, 213, 220),\r\n 0.5: rgb(160, 190, 201),\r\n 0.51: rgb(160, 190, 201),\r\n 0.52: rgb(196, 215, 222),\r\n 1.0: rgb(255, 255, 255)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n navyArray = createLinearGradient({\r\n 0: rgb(16, 25, 58),\r\n 0.03: rgb(255, 255, 255),\r\n 0.08: rgb(16, 25, 58),\r\n 0.2: rgb(16, 25, 58),\r\n 1: rgb(16, 25, 58)\r\n }, width=width, height=height, size=k, center=c)\r\n\r\n result = {\r\n \"downerSilver\": Image.fromarray(np.uint8(downerSilverArray)).crop((0, 0, width, height)),\r\n \"gold\": Image.fromarray(np.uint8(goldArray)).crop((0, 0, width, height)),\r\n \"red\": Image.fromarray(np.uint8(redArray)).crop((0, 0, width, height)),\r\n \"strokeRed\": Image.fromarray(np.uint8(strokeRedArray)).crop((0, 0, width, height)),\r\n \"silver2\": Image.fromarray(np.uint8(silver2Array)).crop((0, 0, width, height)),\r\n \"strokeNavy\": Image.fromarray(np.uint8(navyArray)).crop((0, 0, width, height)), # Width: 7\r\n \"baseStrokeBlack\": Image.new(\"RGBA\", (width, height), rgb(0, 0, 0)).crop((0, 0, width, height)), # Width: 17\r\n \"strokeBlack\": Image.new(\"RGBA\", (width, height), rgb(16, 25, 58)).crop((0, 0, width, height)), # Width: 17\r\n \"strokeWhite\": Image.new(\"RGBA\", (width, height), rgb(221, 221, 221)).crop((0, 0, width, height)), # Width: 8\r\n \"baseStrokeWhite\": Image.new(\"RGBA\", (width, height), rgb(255, 255, 255)).crop((0, 0, width, height))\r\n # Width: 8\r\n }\r\n for k in result.keys():\r\n result[k].putalpha(255)\r\n return result\r\n\r\n\r\ndef genImage(word_a=\"5000兆円\", word_b=\"欲しい!\", default_width=1500, height=500,\r\n bg=\"white\", subset=250, default_base=None):\r\n # width = max_width\r\n\r\n k = 0.8 # 字体缩放系数\r\n\r\n alpha = (0, 0, 0, 0)\r\n leftmargin = 50\r\n upmargin = 20\r\n font_upper = ImageFont.truetype(upper_font_path, _round(height * 0.35 * k) + upmargin)\r\n font_downer = ImageFont.truetype(downer_font_path, _round(height * 0.35 * k) + upmargin)\r\n\r\n # Prepare Width\r\n upper_width = max([default_width,\r\n getTextWidth(word_a, font_upper, width=default_width,\r\n height=_round(height / 2))]) + 300\r\n downer_width = max([default_width,\r\n getTextWidth(word_b, font_upper, width=default_width,\r\n height=_round(height / 2))]) + 300\r\n\r\n # Prepare base - Upper (if required)\r\n if default_width == upper_width:\r\n upper_base = default_base\r\n else:\r\n upper_base = genBaseImage(width=upper_width + leftmargin, height=_round(height / 2) + upmargin)\r\n\r\n # Prepare base - Downer (if required)\r\n downer_base = genBaseImage(width=downer_width + leftmargin, height=_round(height / 2) + upmargin)\r\n # if default_width == downer_width:\r\n # downer_base = default_base\r\n # else:\r\n\r\n # Prepare mask - Upper\r\n upper_mask_base = Image.new(\"L\", (upper_width + leftmargin, _round(height / 2) + upmargin), 0)\r\n\r\n mask_img_upper = list()\r\n upper_data = [\r\n [\r\n (4, 4), (4, 4), (0, 0), (0, 0), (2, -3), (0, -3), (0, -3), (0, -3)\r\n ],\r\n [\r\n 22, 20, 16, 10, 6, 6, 3, 0\r\n ],\r\n [\r\n \"baseStrokeBlack\",\r\n \"downerSilver\",\r\n \"baseStrokeBlack\",\r\n \"gold\",\r\n \"baseStrokeBlack\",\r\n \"baseStrokeWhite\",\r\n \"strokeRed\",\r\n \"red\",\r\n ]\r\n ]\r\n for pos, stroke, color in zip(upper_data[0], upper_data[1], upper_data[2]):\r\n mask_img_upper.append(upper_mask_base.copy())\r\n mask_draw_upper = ImageDraw.Draw(mask_img_upper[-1])\r\n mask_draw_upper.text((pos[0] + leftmargin, pos[1] + upmargin), word_a,\r\n font=font_upper, fill=255,\r\n stroke_width=_round(stroke * 
height / 500))\r\n\r\n # Prepare mask - Downer\r\n downer_mask_base = Image.new(\"L\", (downer_width + leftmargin, _round(height / 2) + upmargin), 0)\r\n mask_img_downer = list()\r\n downer_data = [\r\n [\r\n (5, 2), (5, 2), (0, 0), (0, 0), (0, 0), (0, -3)\r\n ], [\r\n 22, 19, 17, 8, 7, 0\r\n ], [\r\n \"baseStrokeBlack\",\r\n \"downerSilver\",\r\n \"strokeBlack\",\r\n \"strokeWhite\",\r\n \"strokeNavy\",\r\n \"silver2\"\r\n ]\r\n ]\r\n for pos, stroke, color in zip(downer_data[0], downer_data[1], downer_data[2]):\r\n mask_img_downer.append(downer_mask_base.copy())\r\n mask_draw_downer = ImageDraw.Draw(mask_img_downer[-1])\r\n mask_draw_downer.text((pos[0] + leftmargin, pos[1] + upmargin), word_b,\r\n font=font_downer, fill=255,\r\n stroke_width=_round(stroke * height / 500))\r\n\r\n # Draw text - Upper\r\n img_upper = Image.new(\"RGBA\", (upper_width, _round(height / 2)), alpha)\r\n\r\n for i, (pos, stroke, color) in enumerate(zip(upper_data[0], upper_data[1], upper_data[2])):\r\n img_upper_part = Image.new(\"RGBA\", (upper_width + leftmargin, _round(height / 2) + upmargin), alpha)\r\n img_upper_part.paste(upper_base[color], (0, 0), mask=mask_img_upper[i])\r\n img_upper.alpha_composite(img_upper_part)\r\n\r\n # Draw text - Downer\r\n img_downer = Image.new(\"RGBA\", (downer_width + leftmargin, _round(height / 2)), alpha)\r\n for i, (pos, stroke, color) in enumerate(zip(downer_data[0], downer_data[1], downer_data[2])):\r\n img_downer_part = Image.new(\"RGBA\", (downer_width + leftmargin, _round(height / 2) + upmargin), alpha)\r\n img_downer_part.paste(downer_base[color], (0, 0), mask=mask_img_downer[i])\r\n img_downer.alpha_composite(img_downer_part)\r\n\r\n # img_upper.save(\"./uptemp.png\")\r\n # img_downer.save(\"./downtemp.png\")\r\n # tilt image\r\n tiltres = list()\r\n angle = 20\r\n for img in [img_upper, img_downer]:\r\n dist = img.height * tan(radians(angle))\r\n data = (1, tan(radians(angle)), -dist, 0, 1, 0)\r\n imgc = img.crop((0, 0, img.width + dist, img.height))\r\n imgt = imgc.transform(imgc.size, Image.AFFINE, data, Image.BILINEAR)\r\n tiltres.append(imgt)\r\n\r\n # finish\r\n previmg = Image.new(\"RGBA\", (max([upper_width, downer_width]) + leftmargin + subset + 100, height + upmargin + 100),\r\n (255, 255, 255, 0))\r\n # previmg.paste(tiltres[0], (0, 0))\r\n # previmg.paste(tiltres[1], (subset, _round(height/2)))\r\n previmg.alpha_composite(tiltres[0], (0, 50), (0, 0))\r\n if upper_width > downer_width + subset:\r\n previmg.alpha_composite(tiltres[1], (upper_width + subset - downer_width, _round(height / 2) + 50), (0, 0))\r\n else:\r\n previmg.alpha_composite(tiltres[1], (subset, _round(height / 2) + 50), (0, 0))\r\n # previmg.save(\"./test1.png\")\r\n croprange = previmg.getbbox()\r\n img = previmg.crop(croprange)\r\n final_image = Image.new(\"RGB\", (img.size[0] + 100, img.size[1] + 100), bg)\r\n final_image.paste(img, (50, 50))\r\n\r\n return final_image\r\n"
] | [
[
"numpy.vstack",
"numpy.linspace",
"numpy.uint8"
]
] |
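A minimal sketch of the gradient helpers from the record above, rendering one of its gold ramps to a small PIL image (the two stop colors are taken from genBaseImage; the simplified 3-channel helper is an illustration, not the record's full API):

import numpy as np
from PIL import Image

def get_gradient_2d(start, stop, width, height, is_horizontal=False):
    # vertical (or horizontal) linear ramp, as in the record
    if is_horizontal:
        return np.tile(np.linspace(start, stop, width), (height, 1))
    return np.tile(np.linspace(start, stop, height), (width, 1)).T

def gradient_rgb(width, height, start_rgb, stop_rgb):
    # one 2-D ramp per channel, stacked into an RGB array
    result = np.zeros((height, width, 3), dtype=float)
    for i, (start, stop) in enumerate(zip(start_rgb, stop_rgb)):
        result[:, :, i] = get_gradient_2d(start, stop, width, height)
    return result

strip = gradient_rgb(300, 100, (253, 241, 0), (243, 196, 11))  # gold stops
Image.fromarray(np.uint8(strip)).save("gradient_strip.png")

createLinearGradient in the record chains several such ramps between the stop positions of its dict argument and pads the top and bottom with solid margins derived from size and center.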
mcx/legged_gym | [
"dd6a6892e54c4f111a203319c05da8dca9595ae1"
] | [
"legged_gym/envs/anymal_c/anymal.py"
] | [
"# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: BSD-3-Clause\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Copyright (c) 2021 ETH Zurich, Nikita Rudin\n\nfrom time import time\nimport numpy as np\nimport os\n\nfrom isaacgym.torch_utils import *\nfrom isaacgym import gymtorch, gymapi, gymutil\n\nimport torch\n# from torch.tensor import Tensor\nfrom typing import Tuple, Dict\n\nfrom legged_gym.envs import LeggedRobot\nfrom legged_gym import LEGGED_GYM_ROOT_DIR\nfrom .mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg\n\nclass Anymal(LeggedRobot):\n cfg : AnymalCRoughCfg\n def __init__(self, cfg, sim_params, physics_engine, sim_device, headless):\n super().__init__(cfg, sim_params, physics_engine, sim_device, headless)\n\n # load actuator network\n if self.cfg.control.use_actuator_network:\n actuator_network_path = self.cfg.control.actuator_net_file.format(LEGGED_GYM_ROOT_DIR=LEGGED_GYM_ROOT_DIR)\n self.actuator_network = torch.jit.load(actuator_network_path).to(self.device)\n \n def reset_idx(self, env_ids):\n super().reset_idx(env_ids)\n # Additionaly empty actuator network hidden states\n self.sea_hidden_state_per_env[:, env_ids] = 0.\n self.sea_cell_state_per_env[:, env_ids] = 0.\n\n def _init_buffers(self):\n super()._init_buffers()\n # Additionally initialize actuator network hidden state tensors\n self.sea_input = torch.zeros(self.num_envs*self.num_actions, 1, 2, device=self.device, requires_grad=False)\n self.sea_hidden_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)\n self.sea_cell_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)\n self.sea_hidden_state_per_env = self.sea_hidden_state.view(2, self.num_envs, self.num_actions, 8)\n self.sea_cell_state_per_env = self.sea_cell_state.view(2, self.num_envs, self.num_actions, 8)\n\n def _compute_torques(self, actions):\n # Choose between pd controller and actuator network\n if self.cfg.control.use_actuator_network:\n 
with torch.inference_mode():\n self.sea_input[:, 0, 0] = (actions * self.cfg.control.action_scale + self.default_dof_pos - self.dof_pos).flatten()\n self.sea_input[:, 0, 1] = self.dof_vel.flatten()\n torques, (self.sea_hidden_state[:], self.sea_cell_state[:]) = self.actuator_network(self.sea_input, (self.sea_hidden_state, self.sea_cell_state))\n return torques\n else:\n # pd controller\n return super()._compute_torques(actions) "
] | [
[
"torch.zeros",
"torch.jit.load",
"torch.inference_mode"
]
] |
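The actuator-network branch of _compute_torques in the record above runs a TorchScript model under torch.inference_mode while carrying LSTM hidden/cell state across control steps. A stripped-down sketch of that stateful pattern, with a plain nn.LSTM standing in for ANYmal's trained actuator net (buffer shapes follow _init_buffers; the linear head is an invented stand-in):

import torch

num_envs, num_actions = 4, 12
n = num_envs * num_actions  # one LSTM batch entry per joint

# stand-in network: 2 layers, hidden size 8, inputs (position error, velocity)
actuator_net = torch.nn.LSTM(input_size=2, hidden_size=8, num_layers=2)
to_torque = torch.nn.Linear(8, 1)

sea_input = torch.zeros(1, n, 2)   # (seq_len=1, batch, features)
hidden = torch.zeros(2, n, 8)
cell = torch.zeros(2, n, 8)

pos_err = torch.randn(num_envs, num_actions)
vel = torch.randn(num_envs, num_actions)

with torch.inference_mode():
    sea_input[0, :, 0] = pos_err.flatten()
    sea_input[0, :, 1] = vel.flatten()
    out, (hidden, cell) = actuator_net(sea_input, (hidden, cell))
    torques = to_torque(out[0]).view(num_envs, num_actions)
print(torques.shape)  # torch.Size([4, 12])

As in reset_idx above, zeroing slices of the hidden and cell buffers for reset environments clears the per-joint recurrent state without reallocating them.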
iRmantou/lightseq | [
"9a617306fa711a3d6a25ef3eab9bfbe408692189"
] | [
"lightseq/training/ops/pytorch/torch_transformer_layers.py"
] | [
"# Copyright 2021 The LightSeq Team\n# Copyright Facebook Fairseq\n# We use layers from Facebook Fairseq as our baseline\n\n\nimport math\nimport uuid\n\nfrom typing import Dict, Optional, Tuple, List\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom torch.nn import Parameter, LayerNorm, Dropout, Linear\n\nfrom lightseq.training.ops.pytorch import util\nfrom lightseq.training.ops.pytorch.layer_base import (\n TransformerEmbeddingLayerBase,\n TransformerEncoderLayerBase,\n TransformerDecoderLayerBase,\n)\nfrom .quantization import (\n QuantLinear,\n TensorQuantizer,\n act_quant_config,\n weight_quant_config,\n)\n\n\nclass MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv=False,\n add_zero_attn=False,\n self_attention=False,\n encoder_decoder_attention=False,\n is_decoder=False,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout_module = Dropout(dropout)\n\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n self.is_decoder = is_decoder\n\n assert (\n not self.self_attention or self.qkv_same_dim\n ), \"Self-attention requires query, key and value to be of the same size\"\n\n self.attention_quant = None\n if self.self_attention:\n # self.qkv_proj = Linear(embed_dim, 3*embed_dim, bias=bias)\n self.qkv_proj = QuantLinear(embed_dim, 3 * embed_dim, bias=bias)\n\n self.attention_quant = (\n TensorQuantizer(act_quant_config) if self.is_decoder else None\n )\n elif self.encoder_decoder_attention and self.is_decoder:\n self.k_proj = QuantLinear(\n self.kdim, embed_dim, pre_activation=\"encoder_out\", bias=bias\n )\n self.v_proj = QuantLinear(\n self.vdim, embed_dim, pre_activation=\"encoder_out\", bias=bias\n )\n self.q_proj = QuantLinear(embed_dim, embed_dim, bias=bias)\n\n self.out_proj = QuantLinear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.tpu = False\n self.init_incremental_state()\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def prepare_for_tpu_(self, **kwargs):\n self.tpu = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n if self.self_attention:\n nn.init.xavier_uniform_(self.qkv_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n 
nn.init.xavier_uniform_(self.out_proj.weight)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n ):\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n if self.self_attention:\n qkv = self.qkv_proj(query)\n if self.attention_quant is not None:\n qkv = self.attention_quant(qkv)\n q, k, v = qkv.split(self.embed_dim, dim=-1)\n # q = self.q_proj(query)\n # k = self.k_proj(query)\n # v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n q = q * self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = (\n q.contiguous()\n .view(tgt_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if k is not None:\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if v is not None:\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = 
saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(\n key_padding_mask\n ),\n ],\n dim=1,\n )\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if not self.tpu:\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\"),\n )\n else:\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.masked_fill(key_padding_mask, float(\"-inf\"))\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = util.softmax(\n attn_weights, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = self.dropout_module(attn_weights)\n\n assert v is not None\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and 
attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, self.num_heads, tgt_len, src_len\n ).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n\n return attn, attn_weights\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [filler.float(), key_padding_mask.float()], dim=1\n )\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n new_order: Tensor,\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(\n 0\n ) == new_order.size(0):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = 
int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][\n dim : 2 * dim\n ]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value\n\n def init_incremental_state(self):\n self._incremental_state_id = str(uuid.uuid4())\n\n def _get_full_incremental_state_key(self, key: str) -> str:\n return \"{}.{}\".format(self._incremental_state_id, key)\n\n def get_incremental_state(\n self,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n ) -> Optional[Dict[str, Optional[Tensor]]]:\n \"\"\"Helper for getting incremental state for an nn.Module.\"\"\"\n full_key = self._get_full_incremental_state_key(key)\n if incremental_state is None or full_key not in incremental_state:\n return None\n return incremental_state[full_key]\n\n def set_incremental_state(\n self,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n value: Dict[str, Optional[Tensor]],\n ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:\n \"\"\"Helper for setting incremental state for an nn.Module.\"\"\"\n if incremental_state is not None:\n full_key = self._get_full_incremental_state_key(key)\n incremental_state[full_key] = value\n return incremental_state\n\n\nclass TransformerEncoderLayer(TransformerEncoderLayerBase):\n \"\"\"Encoder layer implemented by fairseq.\n This version only removes the \"args\" parameter, no other changes\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`.\n In the tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. 
We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n normalize_before to True.\n \"\"\"\n\n def __init__(self, config, initial_weights=None, initial_biases=None):\n super().__init__()\n self.embed_dim = config.hidden_size\n\n self.self_attn = self.build_self_attention(\n self.embed_dim, config.nhead, config.attn_prob_dropout_ratio\n )\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout_module = Dropout(config.hidden_dropout_ratio)\n self.activation_fn = util.get_activation_fn(activation=config.activation_fn)\n self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))\n self.normalize_before = config.pre_layer_norm\n self.fc1 = QuantLinear(\n self.embed_dim,\n config.intermediate_size,\n )\n self.fc2 = QuantLinear(\n config.intermediate_size, self.embed_dim, pre_activation=\"relu\"\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def build_self_attention(self, embed_dim, nhead, attn_dropout):\n return MultiheadAttention(\n embed_dim,\n nhead,\n dropout=attn_dropout,\n self_attention=True,\n )\n\n def residual_connection(self, x, residual):\n return residual + x\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, seq_len)` where padding elements are indicated by ``1``.\n\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n\n x = x.transpose(0, 1)\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n x, _ = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = x.transpose(0, 1)\n return x\n\n\nclass TransformerDecoderLayer(TransformerDecoderLayerBase):\n \"\"\"Decoder layer implemented by fairseq.\n This version only removes the \"args\" parameter, no other changes\n \"\"\"\n\n def __init__(self, config, initial_weights=None, initial_biases=None):\n super().__init__()\n self.embed_dim = config.hidden_size\n self.dropout_module = Dropout(config.hidden_dropout_ratio)\n self.cross_self_attention = False\n\n self.self_attn = 
self.build_self_attention(\n self.embed_dim,\n config.nhead,\n config.attn_prob_dropout_ratio,\n )\n\n self.activation_fn = util.get_activation_fn(activation=config.activation_fn)\n self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))\n self.normalize_before = config.pre_layer_norm\n\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n\n self.encoder_attn = self.build_encoder_attention(\n self.embed_dim,\n config.hidden_size,\n config.attn_prob_dropout_ratio,\n config.nhead,\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)\n\n self.fc1 = QuantLinear(\n self.embed_dim,\n config.intermediate_size,\n )\n self.fc2 = QuantLinear(\n config.intermediate_size,\n self.embed_dim,\n pre_activation=\"relu\",\n )\n\n self.final_layer_norm = LayerNorm(self.embed_dim)\n self.need_attn = True\n\n self.onnx_trace = False\n\n def build_self_attention(\n self, embed_dim, nhead, attn_dropout, add_bias_kv=False, add_zero_attn=False\n ):\n return MultiheadAttention(\n embed_dim,\n nhead,\n dropout=attn_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=not self.cross_self_attention,\n is_decoder=True,\n )\n\n def build_encoder_attention(\n self, embed_dim, encoder_embed_dim, attn_dropout, nhead\n ):\n return MultiheadAttention(\n embed_dim,\n nhead,\n kdim=encoder_embed_dim,\n vdim=encoder_embed_dim,\n dropout=attn_dropout,\n encoder_decoder_attention=True,\n is_decoder=True,\n )\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def residual_connection(self, x, residual):\n return residual + x\n\n def forward(\n self,\n x,\n encoder_out: Optional[torch.Tensor] = None,\n encoder_padding_mask: Optional[torch.Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n prev_self_attn_state: Optional[List[torch.Tensor]] = None,\n prev_attn_state: Optional[List[torch.Tensor]] = None,\n self_attn_mask: Optional[torch.Tensor] = None,\n self_attn_padding_mask: Optional[torch.Tensor] = None,\n need_attn: bool = False,\n need_head_weights: bool = False,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n\n if need_head_weights:\n need_attn = True\n x = x.transpose(0, 1)\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n if prev_self_attn_state is not None:\n prev_key, prev_value = prev_self_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_self_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_self_attn_state[2]\n assert incremental_state is not None\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)\n if self.cross_self_attention and not (\n incremental_state is not None\n and _self_attn_input_buffer is not None\n and \"prev_key\" in _self_attn_input_buffer\n ):\n if self_attn_mask is not None:\n assert encoder_out is not None\n self_attn_mask = torch.cat(\n (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1\n )\n if 
self_attn_padding_mask is not None:\n if encoder_padding_mask is None:\n assert encoder_out is not None\n encoder_padding_mask = self_attn_padding_mask.new_zeros(\n encoder_out.size(1), encoder_out.size(0)\n )\n self_attn_padding_mask = torch.cat(\n (encoder_padding_mask, self_attn_padding_mask), dim=1\n )\n assert encoder_out is not None\n y = torch.cat((encoder_out, x), dim=0)\n else:\n y = x\n\n x, attn = self.self_attn(\n query=x,\n key=y,\n value=y,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n if self.encoder_attn is not None and encoder_out is not None:\n if (\n encoder_out.shape[1] != x.shape[1]\n and x.shape[1] % encoder_out.shape[1] == 0\n ):\n beam_size = int(x.shape[1] / encoder_out.shape[1])\n encoder_out = encoder_out.repeat_interleave(beam_size, 1)\n encoder_padding_mask = encoder_padding_mask.repeat_interleave(\n beam_size, 0\n )\n\n residual = x\n if self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n if prev_attn_state is not None:\n prev_key, prev_value = prev_attn_state[:2]\n saved_state: Dict[str, Optional[Tensor]] = {\n \"prev_key\": prev_key,\n \"prev_value\": prev_value,\n }\n if len(prev_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_attn_state[2]\n assert incremental_state is not None\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n\n x, attn = self.encoder_attn(\n query=x,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n need_head_weights=need_head_weights,\n )\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.encoder_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = self.residual_connection(x, residual)\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n assert saved_state is not None\n if self_attn_padding_mask is not None:\n self_attn_state = [\n saved_state[\"prev_key\"],\n saved_state[\"prev_value\"],\n saved_state[\"prev_key_padding_mask\"],\n ]\n else:\n self_attn_state = [saved_state[\"prev_key\"], saved_state[\"prev_value\"]]\n return x, attn, self_attn_state\n x = x.transpose(0, 1)\n return x, attn, None\n\n def make_generation_fast_(self, need_attn: bool = False, **kwargs):\n self.need_attn = need_attn\n\n\nclass TransformerEmbeddingLayer(TransformerEmbeddingLayerBase):\n def __init__(self, config):\n super().__init__()\n\n self.emb_lookup = nn.Embedding(\n config.vocab_size, config.embedding_dim, padding_idx=config.padding_idx\n )\n self.emb_lookup.to(dtype=(torch.half if config.fp16 else torch.float))\n self.embeddings = self.emb_lookup.weight\n\n nn.init.normal_(self.embeddings, mean=0, std=config.embedding_dim ** -0.5)\n nn.init.constant_(self.embeddings[config.padding_idx], 0)\n self.embed_positions = SinusoidalPositionalEmbedding(\n config.embedding_dim, config.padding_idx, config.max_seq_len, config.fp16\n )\n self.embedding_dim = 
config.embedding_dim\n self.dropout = Dropout(config.dropout)\n self.emb_quant = TensorQuantizer(weight_quant_config)\n self.config = config\n\n def forward(self, input, step=0):\n x = self.emb_lookup(input)\n x = self.emb_quant(x)\n x = math.sqrt(self.embedding_dim) * x\n x += self.embed_positions(input, step)\n x = self.dropout(x)\n\n return x\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024, fp16=False):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n if fp16:\n self.weights = self.weights.to(torch.half)\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n return emb\n\n def make_positions(self, tensor, padding_idx, step):\n mask = tensor.ne(padding_idx).int()\n return ((torch.cumsum(mask, dim=1).type_as(mask) - 1 + step) * mask).long()\n\n def forward(\n self,\n input,\n step=0,\n incremental_state=None,\n timestep=None,\n positions=None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input.size(0), input.size(1)\n positions = self.make_positions(input, self.padding_idx, step)\n mask = (\n torch.ne(input, self.padding_idx)\n .unsqueeze(2)\n .expand(bsz, seq_len, self.embedding_dim)\n )\n return (\n self.weights.to(input.device)\n .index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n * mask\n ).detach()\n"
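Note on the SinusoidalPositionalEmbedding code in the entry above: get_embedding builds the table by concatenating a sin half and a cos half per position (the tensor2tensor layout) rather than interleaving channels. A minimal standalone sketch of the same construction, useful for checking shapes and values; the name sinusoidal_table is illustrative and not part of the source:

import math
import torch

def sinusoidal_table(num_embeddings: int, embedding_dim: int) -> torch.Tensor:
    # Frequencies decay geometrically from 1 to ~1/10000 across half_dim channels.
    half_dim = embedding_dim // 2
    step = math.log(10000) / (half_dim - 1)
    inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -step)
    # Outer product: one row of angles per position.
    angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * inv_freq.unsqueeze(0)
    # sin fills the first half of the channels, cos the second half.
    table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:
        # Odd dimensions get one zero-padded channel, as in get_embedding above.
        table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
    return table

assert sinusoidal_table(1024, 512).shape == (1024, 512)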
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.cos",
"torch.nn.Embedding",
"torch.nn.init.normal_",
"torch.ne",
"torch.cumsum",
"torch.sin",
"torch.nn.LayerNorm",
"torch.arange",
"torch.cat",
"torch.zeros",
"torch.bmm",
"torch.nn.Dropout",
"torch.Tensor"
]
] |
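The encoder and decoder layers in the entry above share one switch, config.pre_layer_norm: with pre-norm, LayerNorm is applied before each sublayer and the residual is added afterwards; with post-norm, LayerNorm follows the residual add. A compact sketch of that shared pattern around an arbitrary sublayer (ResidualBlock is an illustrative name, not a class from the source):

import torch
import torch.nn as nn

class ResidualBlock(nn.Module):
    """Pre-norm vs post-norm residual wrapper, mirroring normalize_before above."""
    def __init__(self, dim: int, sublayer: nn.Module, pre_norm: bool):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.sublayer = sublayer
        self.pre_norm = pre_norm

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        if self.pre_norm:       # normalize_before=True: LN -> sublayer -> add
            x = self.norm(x)
        x = residual + self.sublayer(x)
        if not self.pre_norm:   # normalize_before=False: sublayer -> add -> LN
            x = self.norm(x)
        return x

block = ResidualBlock(16, nn.Linear(16, 16), pre_norm=True)
print(block(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16]): shape preserved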
davidmam/BirdNET-Pi | [
"873c8f4c56b30edb9297134a92a7c5a178c390e4"
] | [
"analyze.py"
] | [
"# BirdWeather edits by @timsterc\n# Other edits by @CaiusX and @mcguirepr89\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\n\ntry:\n import tflite_runtime.interpreter as tflite\nexcept:\n from tensorflow import lite as tflite\n\nimport argparse\nimport operator\nimport librosa\nimport numpy as np\nimport math\nimport time\nfrom decimal import Decimal\nimport json\n############################################################################### \nimport requests\nimport mysql.connector\n###############################################################################\nimport datetime\nimport pytz\nfrom tzlocal import get_localzone\nfrom pathlib import Path\n\ndef loadModel():\n\n global INPUT_LAYER_INDEX\n global OUTPUT_LAYER_INDEX\n global MDATA_INPUT_INDEX\n global CLASSES\n\n print('LOADING TF LITE MODEL...', end=' ')\n\n # Load TFLite model and allocate tensors.\n interpreter = tflite.Interpreter(model_path='model/BirdNET_6K_GLOBAL_MODEL.tflite',num_threads=2)\n interpreter.allocate_tensors()\n\n # Get input and output tensors.\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # Get input tensor index\n INPUT_LAYER_INDEX = input_details[0]['index']\n MDATA_INPUT_INDEX = input_details[1]['index']\n OUTPUT_LAYER_INDEX = output_details[0]['index']\n\n # Load labels\n CLASSES = []\n with open('model/labels.txt', 'r') as lfile:\n for line in lfile.readlines():\n CLASSES.append(line.replace('\\n', ''))\n\n print('DONE!')\n\n return interpreter\n\ndef loadCustomSpeciesList(path):\n\n slist = []\n if os.path.isfile(path):\n with open(path, 'r') as csfile:\n for line in csfile.readlines():\n slist.append(line.replace('\\r', '').replace('\\n', ''))\n\n return slist\n\ndef splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):\n\n # Split signal with overlap\n sig_splits = []\n for i in range(0, len(sig), int((seconds - overlap) * rate)):\n split = sig[i:i + int(seconds * rate)]\n\n # End of signal?\n if len(split) < int(minlen * rate):\n break\n \n # Signal chunk too short? Fill with zeros.\n if len(split) < int(rate * seconds):\n temp = np.zeros((int(rate * seconds)))\n temp[:len(split)] = split\n split = temp\n \n sig_splits.append(split)\n\n return sig_splits\n\ndef readAudioData(path, overlap, sample_rate=48000):\n\n print('READING AUDIO DATA...', end=' ', flush=True)\n\n # Open file with librosa (uses ffmpeg or libav)\n sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')\n\n # Split audio into 3-second chunks\n chunks = splitSignal(sig, rate, overlap)\n\n print('DONE! 
READ', str(len(chunks)), 'CHUNKS.')\n\n return chunks\n\ndef convertMetadata(m):\n\n # Convert week to cosine\n if m[2] >= 1 and m[2] <= 48:\n m[2] = math.cos(math.radians(m[2] * 7.5)) + 1 \n else:\n m[2] = -1\n\n # Add binary mask\n mask = np.ones((3,))\n if m[0] == -1 or m[1] == -1:\n mask = np.zeros((3,))\n if m[2] == -1:\n mask[2] = 0.0\n\n return np.concatenate([m, mask])\n\ndef custom_sigmoid(x, sensitivity=1.0):\n return 1 / (1.0 + np.exp(-sensitivity * x))\n\ndef predict(sample, interpreter, sensitivity):\n\n # Make a prediction\n interpreter.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))\n interpreter.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))\n interpreter.invoke()\n prediction = interpreter.get_tensor(OUTPUT_LAYER_INDEX)[0]\n\n # Apply custom sigmoid\n p_sigmoid = custom_sigmoid(prediction, sensitivity)\n\n # Get label and scores for pooled predictions\n p_labels = dict(zip(CLASSES, p_sigmoid))\n\n # Sort by score\n p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)\n\n # Remove species that are on blacklist\n for i in range(min(10, len(p_sorted))):\n if p_sorted[i][0] in ['Human_Human', 'Non-bird_Non-bird', 'Noise_Noise']:\n p_sorted[i] = (p_sorted[i][0], 0.0)\n\n # Only return the top ten results\n return p_sorted[:10]\n\ndef analyzeAudioData(chunks, lat, lon, week, sensitivity, overlap, interpreter):\n\n detections = {}\n start = time.time()\n print('ANALYZING AUDIO...', end=' ', flush=True)\n\n # Convert and prepare metadata\n mdata = convertMetadata(np.array([lat, lon, week]))\n mdata = np.expand_dims(mdata, 0)\n\n # Parse every chunk\n pred_start = 0.0\n for c in chunks:\n\n # Prepare as input signal\n sig = np.expand_dims(c, 0)\n\n # Make prediction\n p = predict([sig, mdata], interpreter, sensitivity)\n\n # Save result and timestamp\n pred_end = pred_start + 3.0\n detections[str(pred_start) + ';' + str(pred_end)] = p\n pred_start = pred_end - overlap\n\n print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')\n\n return detections\n\ndef writeResultsToFile(detections, min_conf, path):\n\n print('WRITING RESULTS TO', path, '...', end=' ')\n rcnt = 0\n with open(path, 'w') as rfile:\n rfile.write('Start (s);End (s);Scientific name;Common name;Confidence\\n')\n for d in detections:\n for entry in detections[d]:\n if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):\n rfile.write(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + '\\n')\n rcnt += 1\n print('DONE! WROTE', rcnt, 'RESULTS.')\n\ndef main():\n\n global WHITE_LIST\n\n # Parse passed arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--i', help='Path to input file.')\n parser.add_argument('--o', default='result.csv', help='Path to output file. Defaults to result.csv.')\n parser.add_argument('--lat', type=float, default=-1, help='Recording location latitude. Set -1 to ignore.')\n parser.add_argument('--lon', type=float, default=-1, help='Recording location longitude. Set -1 to ignore.')\n parser.add_argument('--week', type=int, default=-1, help='Week of the year when the recording was made. Values in [1, 48] (4 weeks per month). Set -1 to ignore.')\n parser.add_argument('--overlap', type=float, default=0.0, help='Overlap in seconds between extracted spectrograms. Values in [0.0, 2.9]. Defaults to 0.0.')\n parser.add_argument('--sensitivity', type=float, default=1.0, help='Detection sensitivity; Higher values result in higher sensitivity. Values in [0.5, 1.5]. 
Defaults to 1.0.')\n parser.add_argument('--min_conf', type=float, default=0.1, help='Minimum confidence threshold. Values in [0.01, 0.99]. Defaults to 0.1.') \n parser.add_argument('--custom_list', default='', help='Path to text file containing a list of species. Not used if not provided.')\n parser.add_argument('--birdweather_id', default='99999', help='Private Station ID for BirdWeather.') \n\n args = parser.parse_args()\n\n # Load model\n interpreter = loadModel()\n\n # Load custom species list\n if not args.custom_list == '':\n WHITE_LIST = loadCustomSpeciesList(args.custom_list)\n else:\n WHITE_LIST = []\n\n birdweather_id = args.birdweather_id\n\n # Read audio data\n audioData = readAudioData(args.i, args.overlap)\n\n # Get Date/Time from filename in case Pi gets behind\n #now = datetime.now()\n full_file_name = args.i\n file_name = Path(full_file_name).stem\n file_date = file_name.split('-birdnet-')[0]\n file_time = file_name.split('-birdnet-')[1]\n date_time_str = file_date + ' ' + file_time\n date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')\n #print('Date:', date_time_obj.date())\n #print('Time:', date_time_obj.time())\n print('Date-time:', date_time_obj)\n now = date_time_obj\n current_date = now.strftime(\"%Y/%m/%d\")\n current_time = now.strftime(\"%H:%M:%S\")\n current_iso8601 = now.astimezone(get_localzone()).isoformat()\n \n week_number = int(now.strftime(\"%V\"))\n week = max(1, min(week_number, 48))\n\n sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))\n\n # Process audio data and get detections\n detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter)\n\n # Write detections to output file\n min_conf = max(0.01, min(args.min_conf, 0.99))\n writeResultsToFile(detections, min_conf, args.o)\n \n############################################################################### \n############################################################################### \n \n soundscape_uploaded = False\n\n # Write detections to Database\n for i in detections:\n print(\"\\n\", detections[i][0],\"\\n\")\n with open('BirdDB.txt', 'a') as rfile:\n for d in detections:\n print(\"\\n\", \"Database Entry\", \"\\n\")\n for entry in detections[d]:\n if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):\n rfile.write(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' \\\n + str(entry[1]) +\";\" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' \\\n + str(sensitivity) +';' + str(args.overlap) + '\\n')\n\n def insert_variables_into_table(Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap):\n try:\n connection = mysql.connector.connect(host='localhost',\n database='birds',\n user='birder',\n password='birdnet')\n cursor = connection.cursor()\n mySql_insert_query = \"\"\"INSERT INTO detections (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n \n record = (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)\n\n cursor.execute(mySql_insert_query, record)\n connection.commit()\n print(\"Record inserted successfully into detections table\")\n\n \n except mysql.connector.Error as error:\n print(\"Failed to insert record into detections table {}\".format(error))\n \n finally:\n if connection.is_connected():\n connection.close()\n print(\"MySQL connection is 
closed\")\n\n species = entry[0]\n sci_name,com_name = species.split('_')\n insert_variables_into_table(str(current_date), str(current_time), sci_name, com_name, \\\n str(entry[1]), str(args.lat), str(args.lon), str(min_conf), str(week), \\\n str(args.sensitivity), str(args.overlap))\n\n print(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) +\";\" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' + str(args.sensitivity) +';' + str(args.overlap) + '\\n')\n\n if birdweather_id != \"99999\":\n\n if soundscape_uploaded is False:\n # POST soundscape to server\n soundscape_url = \"https://app.birdweather.com/api/v1/stations/\" + birdweather_id + \"/soundscapes\" + \"?timestamp=\" + current_iso8601\n\n with open(args.i, 'rb') as f:\n wav_data = f.read()\n response = requests.post(url=soundscape_url, data=wav_data, headers={'Content-Type': 'application/octet-stream'})\n print(\"Soundscape POST Response Status - \", response.status_code)\n sdata = response.json()\n soundscape_id = sdata['soundscape']['id']\n soundscape_uploaded = True\n\n # POST detection to server\n detection_url = \"https://app.birdweather.com/api/v1/stations/\" + birdweather_id + \"/detections\"\n start_time = d.split(';')[0]\n end_time = d.split(';')[1]\n post_begin = \"{ \"\n now_p_start = now + datetime.timedelta(seconds=float(start_time))\n current_iso8601 = now_p_start.astimezone(get_localzone()).isoformat()\n post_timestamp = \"\\\"timestamp\\\": \\\"\" + current_iso8601 + \"\\\",\"\n post_lat = \"\\\"lat\\\": \" + str(args.lat) + \",\"\n post_lon = \"\\\"lon\\\": \" + str(args.lon) + \",\"\n post_soundscape_id = \"\\\"soundscapeId\\\": \" + str(soundscape_id) + \",\"\n post_soundscape_start_time = \"\\\"soundscapeStartTime\\\": \" + start_time + \",\"\n post_soundscape_end_time = \"\\\"soundscapeEndTime\\\": \" + end_time + \",\"\n post_commonName = \"\\\"commonName\\\": \\\"\" + entry[0].split('_')[1] + \"\\\",\"\n post_scientificName = \"\\\"scientificName\\\": \\\"\" + entry[0].split('_')[0] + \"\\\",\"\n post_algorithm = \"\\\"algorithm\\\": \" + \"\\\"alpha\\\"\" + \",\"\n post_confidence = \"\\\"confidence\\\": \" + str(entry[1])\n post_end = \" }\"\n\n post_json = post_begin + post_timestamp + post_lat + post_lon + post_soundscape_id + post_soundscape_start_time + post_soundscape_end_time + post_commonName + post_scientificName + post_algorithm + post_confidence + post_end\n print(post_json)\n response = requests.post(detection_url, json=json.loads(post_json))\n print(\"Detection POST Response Status - \", response.status_code)\n\n #time.sleep(3)\n\n############################################################################### \n############################################################################### \n\nif __name__ == '__main__':\n\n main()\n\n # Example calls\n # python3 analyze.py --i 'example/XC558716 - Soundscape.mp3' --lat 35.4244 --lon -120.7463 --week 18\n # python3 analyze.py --i 'example/XC563936 - Soundscape.mp3' --lat 47.6766 --lon -122.294 --week 11 --overlap 1.5 --min_conf 0.25 --sensitivity 1.25 --custom_list 'example/custom_species_list.txt'\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"tensorflow.lite.Interpreter",
"numpy.exp",
"numpy.expand_dims",
"numpy.array",
"numpy.concatenate"
]
] |
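convertMetadata in the same entry folds the 48-week year onto a cosine, so late December lands next to early January instead of at the far end of a linear scale, while -1 stays reserved for "week unknown". A quick numeric illustration of that mapping (plain math, no model required):

import math

def week_feature(week: int) -> float:
    # Same mapping as convertMetadata above: 48 weeks cover 360 degrees,
    # shifted into [0, 2] so that -1 can flag a missing value.
    if 1 <= week <= 48:
        return math.cos(math.radians(week * 7.5)) + 1
    return -1

print(week_feature(1), week_feature(24), week_feature(48))
# ~1.99, 0.0, 2.0: weeks 1 and 48 end up adjacent, midsummer sits at the bottom.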
joepvd/aiida_core | [
"6e9711046753332933f982971db1d7ac7e7ade58"
] | [
"aiida/backends/tests/export_and_import.py"
] | [
"# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n\"\"\"\nTests for the export and import routines.\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport io\nimport six\nfrom six.moves import range, zip\n\nfrom aiida.backends.testbase import AiidaTestCase\nfrom aiida.orm.importexport import import_data\nfrom aiida import orm\n\n\n\nclass TestSpecificImport(AiidaTestCase):\n\n def setUp(self):\n super(TestSpecificImport, self).setUp()\n self.clean_db()\n self.insert_data()\n\n def test_simple_import(self):\n \"\"\"\n This is a very simple test which checks that an export file with nodes\n that are not associated to a computer is imported correctly. In Django\n when such nodes are exported, there is an empty set for computers\n in the export file. In SQLA there is such a set only when a computer is\n associated with the exported nodes. When an empty computer set is\n found at the export file (when imported to an SQLA profile), the SQLA\n import code used to crash. This test demonstrates this problem.\n \"\"\"\n import tempfile\n from aiida.orm.data.parameter import ParameterData\n from aiida.orm.importexport import export, import_data\n from aiida.orm.node import Node\n from aiida.orm.querybuilder import QueryBuilder\n\n parameters = ParameterData(dict={\n 'Pr': {\n 'cutoff': 50.0,\n 'pseudo_type': 'Wentzcovitch',\n 'dual': 8,\n 'cutoff_units': 'Ry'\n },\n 'Ru': {\n 'cutoff': 40.0,\n 'pseudo_type': 'SG15',\n 'dual': 4,\n 'cutoff_units': 'Ry'\n },\n }).store()\n\n with tempfile.NamedTemporaryFile() as handle:\n nodes = [parameters]\n export(nodes, outfile=handle.name, overwrite=True, silent=True)\n\n # Check that we have the expected number of nodes in the database\n self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))\n\n # Clean the database and verify there are no nodes left\n self.clean_db()\n self.assertEquals(QueryBuilder().append(Node).count(), 0)\n\n # After importing we should have the original number of nodes again\n import_data(handle.name, silent=True)\n self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))\n\n def test_cycle_structure_data(self):\n \"\"\"\n Create an export with some Calculation and Data nodes and import it after having\n cleaned the database. 
Verify that the nodes and their attributes are restored\n properly after importing the created export archive\n \"\"\"\n import tempfile\n from aiida.common.links import LinkType\n from aiida.orm.calculation import Calculation\n from aiida.orm.data.structure import StructureData\n from aiida.orm.data.remote import RemoteData\n from aiida.orm.importexport import export, import_data\n from aiida.orm.node import Node\n from aiida.orm.querybuilder import QueryBuilder\n\n test_label = 'Test structure'\n test_cell = [\n [8.34, 0.0, 0.0],\n [0.298041701839357, 8.53479766274308, 0.0],\n [0.842650688117053, 0.47118495164127, 10.6965192730702]\n ]\n test_kinds = [\n {\n 'symbols': [u'Fe'],\n 'weights': [1.0],\n 'mass': 55.845,\n 'name': u'Fe'\n },\n {\n 'symbols': [u'S'],\n 'weights': [1.0],\n 'mass': 32.065,\n 'name': u'S'\n }\n ]\n\n structure = StructureData(cell=test_cell)\n structure.append_atom(symbols=['Fe'], position=[0, 0, 0])\n structure.append_atom(symbols=['S'], position=[2, 2, 2])\n structure.label = test_label\n structure.store()\n\n parent_calculation = Calculation()\n parent_calculation._set_attr('key', 'value')\n parent_calculation.store()\n child_calculation = Calculation()\n child_calculation._set_attr('key', 'value')\n child_calculation.store()\n remote_folder = RemoteData(computer=self.computer, remote_path='/').store()\n\n remote_folder.add_link_from(parent_calculation, link_type=LinkType.CREATE)\n child_calculation.add_link_from(remote_folder, link_type=LinkType.INPUT)\n structure.add_link_from(child_calculation, link_type=LinkType.CREATE)\n\n with tempfile.NamedTemporaryFile() as handle:\n\n nodes = [structure, child_calculation, parent_calculation, remote_folder]\n export(nodes, outfile=handle.name, overwrite=True, silent=True)\n\n # Check that we have the expected number of nodes in the database\n self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))\n\n # Clean the database and verify there are no nodes left\n self.clean_db()\n self.assertEquals(QueryBuilder().append(Node).count(), 0)\n\n # After importing we should have the original number of nodes again\n import_data(handle.name, silent=True)\n self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))\n\n # Verify that Calculations have non-empty attribute dictionaries\n qb = QueryBuilder().append(Calculation)\n for [calculation] in qb.iterall():\n self.assertIsInstance(calculation.get_attrs(), dict)\n self.assertNotEquals(len(calculation.get_attrs()), 0)\n\n # Verify that the structure data maintained its label, cell and kinds\n qb = QueryBuilder().append(StructureData)\n for [structure] in qb.iterall():\n self.assertEquals(structure.label, test_label)\n self.assertEquals(structure.cell, test_cell)\n\n qb = QueryBuilder().append(StructureData, project=['attributes.kinds'])\n for [kinds] in qb.iterall():\n self.assertEqual(len(kinds), 2)\n for kind in kinds:\n self.assertIn(kind, test_kinds)\n\n # Check that there is a StructureData that is an output of a Calculation\n qb = QueryBuilder()\n qb.append(Calculation, project=['uuid'], tag='calculation')\n qb.append(StructureData, output_of='calculation')\n self.assertGreater(len(qb.all()), 0)\n\n # Check that there is a RemoteData that is a child and parent of a Calculation\n qb = QueryBuilder()\n qb.append(Calculation, tag='parent')\n qb.append(RemoteData, project=['uuid'], output_of='parent', tag='remote')\n qb.append(Calculation, output_of='remote')\n self.assertGreater(len(qb.all()), 0)\n\n\nclass TestSimple(AiidaTestCase):\n\n def setUp(self):\n 
self.clean_db()\n self.insert_data()\n\n def tearDown(self):\n pass\n\n def test_0(self):\n import os\n import shutil\n import tempfile\n\n from aiida.orm import load_node\n from aiida.orm.data.base import Str, Int, Float, Bool\n from aiida.orm.importexport import export\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n # producing values for each base type\n values = (\"Hello\", 6, -1.2399834e12, False) # , [\"Bla\", 1, 1e-10])\n filename = os.path.join(temp_folder, \"export.tar.gz\")\n\n # producing nodes:\n nodes = [cls(val).store() for val, cls in zip(values, (Str, Int, Float, Bool))]\n # my uuid - list to reload the node:\n uuids = [n.uuid for n in nodes]\n # exporting the nodes:\n export(nodes, outfile=filename, silent=True)\n # cleaning:\n self.clean_db()\n # Importing back the data:\n import_data(filename, silent=True)\n # Checking whether values are preserved:\n for uuid, refval in zip(uuids, values):\n self.assertEquals(load_node(uuid).value, refval)\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_1(self):\n import os\n import shutil\n import tempfile\n\n from aiida.orm import DataFactory\n from aiida.orm import load_node\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.importexport import export\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n StructureData = DataFactory('structure')\n sd = StructureData()\n sd.store()\n\n calc = JobCalculation()\n calc.set_computer(self.computer)\n calc.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n calc.store()\n\n calc.add_link_from(sd)\n\n pks = [sd.pk, calc.pk]\n\n attrs = {}\n for pk in pks:\n node = load_node(pk)\n attrs[node.uuid] = dict()\n for k in node.attrs():\n attrs[node.uuid][k] = node.get_attr(k)\n\n filename = os.path.join(temp_folder, \"export.tar.gz\")\n\n export([calc], outfile=filename, silent=True)\n\n self.clean_db()\n\n # NOTE: it is better to load new nodes by uuid, rather than assuming\n # that they will have the first 3 pks. 
In fact, a recommended policy in\n # databases is that pk always increment, even if you've deleted elements\n import_data(filename, silent=True)\n for uuid in attrs.keys():\n node = load_node(uuid)\n # for k in node.attrs():\n for k in attrs[uuid].keys():\n self.assertEquals(attrs[uuid][k], node.get_attr(k))\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n # print temp_folder\n\n def test_2(self):\n \"\"\"\n Test the check for the export format version.\n \"\"\"\n import tarfile\n import os\n import shutil\n import tempfile\n\n from aiida.common import exceptions\n from aiida.orm import DataFactory\n from aiida.orm.importexport import export\n import aiida.utils.json as json\n\n\n # Creating a folder for the import/export files\n export_file_tmp_folder = tempfile.mkdtemp()\n unpack_tmp_folder = tempfile.mkdtemp()\n try:\n StructureData = DataFactory('structure')\n sd = StructureData()\n sd.store()\n\n filename = os.path.join(export_file_tmp_folder, \"export.tar.gz\")\n export([sd], outfile=filename, silent=True)\n\n with tarfile.open(filename, \"r:gz\", format=tarfile.PAX_FORMAT) as tar:\n tar.extractall(unpack_tmp_folder)\n\n with io.open(os.path.join(unpack_tmp_folder,\n 'metadata.json'), 'r', encoding='utf8') as fhandle:\n metadata = json.load(fhandle)\n metadata['export_version'] = 0.0\n\n with io.open(os.path.join(unpack_tmp_folder, 'metadata.json'),\n 'wb') as fhandle:\n json.dump(metadata, fhandle)\n\n with tarfile.open(filename, \"w:gz\", format=tarfile.PAX_FORMAT) as tar:\n tar.add(unpack_tmp_folder, arcname=\"\")\n\n self.tearDownClass()\n self.setUpClass()\n\n with self.assertRaises(exceptions.IncompatibleArchiveVersionError):\n import_data(filename, silent=True)\n finally:\n # Deleting the created temporary folders\n shutil.rmtree(export_file_tmp_folder, ignore_errors=True)\n shutil.rmtree(unpack_tmp_folder, ignore_errors=True)\n\n def test_3(self):\n \"\"\"\n Test importing of nodes, that have links to unknown nodes.\n \"\"\"\n import tarfile\n import os\n import shutil\n import tempfile\n\n from aiida.orm.importexport import export\n from aiida.common.folders import SandboxFolder\n from aiida.orm.data.structure import StructureData\n from aiida.orm import load_node\n import aiida.utils.json as json\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n node_label = \"Test structure data\"\n sd = StructureData()\n sd.label = str(node_label)\n sd.store()\n\n filename = os.path.join(temp_folder, \"export.tar.gz\")\n export([sd], outfile=filename, silent=True)\n\n unpack = SandboxFolder()\n with tarfile.open(\n filename, \"r:gz\", format=tarfile.PAX_FORMAT) as tar:\n tar.extractall(unpack.abspath)\n\n with io.open(unpack.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle:\n metadata = json.load(fhandle)\n metadata['links_uuid'].append({\n 'output': sd.uuid,\n 'input': 'non-existing-uuid',\n 'label': 'parent'\n })\n\n with io.open(unpack.get_abs_path('data.json'), 'wb') as fhandle:\n json.dump(metadata, fhandle)\n\n with tarfile.open(\n filename, \"w:gz\", format=tarfile.PAX_FORMAT) as tar:\n tar.add(unpack.abspath, arcname=\"\")\n\n self.clean_db()\n\n with self.assertRaises(ValueError):\n import_data(filename, silent=True)\n\n import_data(filename, ignore_unknown_nodes=True, silent=True)\n self.assertEquals(load_node(sd.uuid).label, node_label)\n\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_4(self):\n 
\"\"\"\n Test control of licenses.\n \"\"\"\n from aiida.common.exceptions import LicensingException\n from aiida.common.folders import SandboxFolder\n from aiida.orm.importexport import export_tree\n\n from aiida.orm import DataFactory\n\n StructureData = DataFactory('structure')\n sd = StructureData()\n sd.source = {'license': 'GPL'}\n sd.store()\n\n folder = SandboxFolder()\n export_tree([sd], folder=folder, silent=True,\n allowed_licenses=['GPL'])\n # Folder should contain two files of metadata + nodes/\n self.assertEquals(len(folder.get_content_list()), 3)\n\n folder = SandboxFolder()\n export_tree([sd], folder=folder, silent=True,\n forbidden_licenses=['Academic'])\n # Folder should contain two files of metadata + nodes/\n self.assertEquals(len(folder.get_content_list()), 3)\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n allowed_licenses=['CC0'])\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n forbidden_licenses=['GPL'])\n\n def cc_filter(license):\n return license.startswith('CC')\n\n def gpl_filter(license):\n return license == 'GPL'\n\n def crashing_filter(license):\n raise NotImplementedError(\"not implemented yet\")\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n allowed_licenses=cc_filter)\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n forbidden_licenses=gpl_filter)\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n allowed_licenses=crashing_filter)\n\n folder = SandboxFolder()\n with self.assertRaises(LicensingException):\n export_tree([sd], folder=folder, silent=True,\n forbidden_licenses=crashing_filter)\n\n def test_5(self):\n \"\"\"\n This test checks that nodes belonging to different users are correctly\n exported & imported.\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm import load_node\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.data.structure import StructureData\n from aiida.orm.importexport import export\n from aiida.common.datastructures import calc_states\n from aiida.common.links import LinkType\n from aiida.common.utils import get_configured_user_email\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n # Create another user\n new_email = \"[email protected]\"\n user = orm.User(email=new_email, backend=self.backend).store()\n\n # Create a structure data node that has a calculation as output\n sd1 = StructureData()\n sd1.set_user(user)\n sd1.label = 'sd1'\n sd1.store()\n\n jc1 = JobCalculation()\n jc1.set_computer(self.computer)\n jc1.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc1.set_user(user)\n jc1.label = 'jc1'\n jc1.store()\n jc1.add_link_from(sd1)\n jc1._set_state(calc_states.PARSING)\n\n # Create some nodes from a different user\n sd2 = StructureData()\n sd2.set_user(user)\n sd2.label = 'sd2'\n sd2.store()\n sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE) # I assume jc1 CREATED sd2\n\n jc2 = JobCalculation()\n jc2.set_computer(self.computer)\n jc2.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc2.label = 'jc2'\n jc2.store()\n jc2.add_link_from(sd2, label='l2')\n 
jc2._set_state(calc_states.PARSING)\n\n sd3 = StructureData()\n sd3.label = 'sd3'\n sd3.store()\n sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)\n\n uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]\n uuids_u2 = [jc2.uuid, sd3.uuid]\n\n filename = os.path.join(temp_folder, \"export.tar.gz\")\n\n export([sd3], outfile=filename, silent=True)\n self.clean_db()\n import_data(filename, silent=True)\n\n # Check that the imported nodes are correctly imported and that\n # the user assigned to the nodes is the right one\n for uuid in uuids_u1:\n node = load_node(uuid=uuid)\n self.assertEquals(node.get_user().email, new_email)\n for uuid in uuids_u2:\n self.assertEquals(load_node(uuid).get_user().email,\n get_configured_user_email())\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_6(self):\n \"\"\"\n This test checks that nodes belonging to user A (which is not the\n default user) can be correctly exported, imported, enriched with nodes\n from the default user, re-exported & re-imported and that in the end\n all the nodes that have been finally imported belonging to the right\n users.\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm import load_node\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.data.structure import StructureData\n from aiida.orm.importexport import export\n from aiida.common.datastructures import calc_states\n from aiida.common.links import LinkType\n from aiida.common.utils import get_configured_user_email\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n # Create another user\n new_email = \"[email protected]\"\n user = orm.User(email=new_email, backend=self.backend).store()\n\n # Create a structure data node that has a calculation as output\n sd1 = StructureData()\n sd1.set_user(user)\n sd1.label = 'sd1'\n sd1.store()\n\n jc1 = JobCalculation()\n jc1.set_computer(self.computer)\n jc1.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc1.set_user(user)\n jc1.label = 'jc1'\n jc1.store()\n jc1.add_link_from(sd1)\n jc1._set_state(calc_states.PARSING)\n\n # Create some nodes from a different user\n sd2 = StructureData()\n sd2.set_user(user)\n sd2.label = 'sd2'\n sd2.store()\n sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE)\n\n # Set the jc1 to FINISHED\n jc1._set_state(calc_states.FINISHED)\n\n # At this point we export the generated data\n filename1 = os.path.join(temp_folder, \"export1.tar.gz\")\n export([sd2], outfile=filename1, silent=True)\n uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]\n self.clean_db()\n self.insert_data()\n import_data(filename1, silent=True)\n\n # Check that the imported nodes are correctly imported and that\n # the user assigned to the nodes is the right one\n for uuid in uuids1:\n self.assertEquals(load_node(uuid).get_user().email, new_email)\n\n # Now we continue to generate more data based on the imported\n # data\n sd2_imp = load_node(sd2.uuid)\n\n jc2 = JobCalculation()\n jc2.set_computer(self.computer)\n jc2.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc2.label = 'jc2'\n jc2.store()\n jc2.add_link_from(sd2_imp, label='l2')\n jc2._set_state(calc_states.PARSING)\n\n sd3 = StructureData()\n sd3.label = 'sd3'\n sd3.store()\n sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)\n\n # Set the jc2 to FINISHED\n jc2._set_state(calc_states.FINISHED)\n\n # Store the UUIDs of the nodes that should be 
checked\n # if they can be imported correctly.\n uuids2 = [jc2.uuid, sd3.uuid]\n\n filename2 = os.path.join(temp_folder, \"export2.tar.gz\")\n export([sd3], outfile=filename2, silent=True)\n self.clean_db()\n self.insert_data()\n import_data(filename2, silent=True)\n\n # Check that the imported nodes are correctly imported and that\n # the user assigned to the nodes is the right one\n for uuid in uuids1:\n self.assertEquals(load_node(uuid).get_user().email, new_email)\n for uuid in uuids2:\n self.assertEquals(load_node(uuid).get_user().email,\n get_configured_user_email())\n\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_7(self):\n \"\"\"\n This test checks that nodes that belong to a specific group are\n correctly imported and exported.\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm import load_node\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.data.structure import StructureData\n from aiida.orm.importexport import export\n from aiida.common.datastructures import calc_states\n from aiida.orm.querybuilder import QueryBuilder\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n # Create another user\n new_email = \"[email protected]\"\n user = orm.User(email=new_email, backend=self.backend)\n user.store()\n\n # Create a structure data node that has a calculation as output\n sd1 = StructureData()\n sd1.set_user(user)\n sd1.label = 'sd1'\n sd1.store()\n\n jc1 = JobCalculation()\n jc1.set_computer(self.computer)\n jc1.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc1.set_user(user)\n jc1.label = 'jc1'\n jc1.store()\n jc1.add_link_from(sd1)\n jc1._set_state(calc_states.PARSING)\n\n # Create a group and add the data inside\n from aiida.orm.group import Group\n g1 = Group(name=\"node_group\")\n g1.store()\n g1.add_nodes([sd1, jc1])\n g1_uuid = g1.uuid\n\n # At this point we export the generated data\n filename1 = os.path.join(temp_folder, \"export1.tar.gz\")\n export([sd1, jc1, g1], outfile=filename1,\n silent=True)\n n_uuids = [sd1.uuid, jc1.uuid]\n self.clean_db()\n self.insert_data()\n import_data(filename1, silent=True)\n\n # Check that the imported nodes are correctly imported and that\n # the user assigned to the nodes is the right one\n for uuid in n_uuids:\n self.assertEquals(load_node(uuid).get_user().email, new_email)\n\n # Check that the exported group is imported correctly\n qb = QueryBuilder()\n qb.append(Group, filters={'uuid': {'==': g1_uuid}})\n self.assertEquals(qb.count(), 1, \"The group was not found.\")\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_group_export(self):\n \"\"\"\n Test that when exporting just a group, its nodes are also exported\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm import load_node\n from aiida.orm.data.structure import StructureData\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n try:\n # Create another user\n new_email = \"[email protected]\"\n user = orm.User(email=new_email, backend=self.backend)\n user.store()\n\n # Create a structure data node\n sd1 = StructureData()\n sd1.set_user(user)\n sd1.label = 'sd1'\n sd1.store()\n\n # Create a group and add the data inside\n from aiida.orm.group import Group\n g1 = 
Group(name=\"node_group\")\n g1.store()\n g1.add_nodes([sd1])\n g1_uuid = g1.uuid\n\n # At this point we export the generated data\n filename1 = os.path.join(temp_folder, \"export1.tar.gz\")\n export([g1], outfile=filename1, silent=True)\n n_uuids = [sd1.uuid]\n self.clean_db()\n self.insert_data()\n import_data(filename1, silent=True)\n\n # Check that the imported nodes are correctly imported and that\n # the user assigned to the nodes is the right one\n for uuid in n_uuids:\n self.assertEquals(load_node(uuid).get_user().email, new_email)\n\n # Check that the exported group is imported correctly\n qb = QueryBuilder()\n qb.append(Group, filters={'uuid': {'==': g1_uuid}})\n self.assertEquals(qb.count(), 1, \"The group was not found.\")\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_workfunction_1(self):\n import shutil, os, tempfile\n\n from aiida.work.workfunctions import workfunction\n from aiida.orm.data.float import Float\n from aiida.orm import load_node\n from aiida.orm.importexport import export\n from aiida.common.exceptions import NotExistent\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n\n @workfunction\n def add(a, b):\n \"\"\"Add 2 numbers\"\"\"\n return {'res': Float(a + b)}\n\n def max_(**kwargs):\n \"\"\"select the max value\"\"\"\n max_val = max([(v.value, v) for v in kwargs.values()])\n return {'res': max_val[1]}\n\n try:\n # I'm creating a bunch of numbers\n a, b, c, d, e = (Float(i) for i in range(5))\n # this adds the maximum of b, c, d and e to a\n res = add(a=a, b=max_(b=b, c=c, d=d, e=e)['res'])['res']\n # These are the uuids that would be exported as well (as parents) if I wanted the final result\n uuids_values = [(a.uuid, a.value), (e.uuid, e.value), (res.uuid, res.value)]\n # These are the uuids that shouldn't be exported since it's a selection.\n not_wanted_uuids = [v.uuid for v in (b, c, d)]\n # At this point we export the generated data\n filename1 = os.path.join(temp_folder, \"export1.tar.gz\")\n export([res], outfile=filename1, silent=True)\n self.clean_db()\n self.insert_data()\n import_data(filename1, silent=True)\n # Check that the imported nodes are correctly imported and that the value is preserved\n for uuid, value in uuids_values:\n self.assertEquals(load_node(uuid).value, value)\n for uuid in not_wanted_uuids:\n with self.assertRaises(NotExistent):\n load_node(uuid)\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_workcalculation_2(self):\n import shutil, os, tempfile\n\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.orm.data.float import Float\n from aiida.orm.data.int import Int\n from aiida.orm import load_node\n from aiida.common.links import LinkType\n from aiida.orm.importexport import export\n\n from aiida.common.exceptions import NotExistent\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n\n try:\n master = WorkCalculation().store()\n slave = WorkCalculation().store()\n\n input_1 = Int(3).store()\n input_2 = Int(5).store()\n output_1 = Int(2).store()\n\n master.add_link_from(input_1, 'input_1', link_type=LinkType.INPUT)\n slave.add_link_from(master, 'CALL', link_type=LinkType.CALL)\n slave.add_link_from(input_2, 'input_2', link_type=LinkType.INPUT)\n output_1.add_link_from(master, 'CREATE', link_type=LinkType.CREATE)\n\n uuids_values = [(v.uuid, v.value) for v in (output_1,)]\n filename1 = 
os.path.join(temp_folder, \"export1.tar.gz\")\n export([output_1], outfile=filename1, silent=True)\n self.clean_db()\n self.insert_data()\n import_data(filename1, silent=True)\n\n for uuid, value in uuids_values:\n self.assertEquals(load_node(uuid).value, value)\n\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n def test_reexport(self):\n \"\"\"\n Export something, import and reexport and check if everything is valid.\n The export is rather easy::\n\n ___ ___ ___\n | | INP | | CREATE | |\n | p | --> | c | -----> | a |\n |___| |___| |___|\n\n \"\"\"\n import os, shutil, tempfile, numpy as np, string, random\n from datetime import datetime\n\n from aiida.orm import Calculation, load_node, Group\n from aiida.orm.data.array import ArrayData\n from aiida.orm.data.parameter import ParameterData\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.importexport import export\n from aiida.common.hashing import make_hash\n from aiida.common.links import LinkType\n def get_hash_from_db_content(groupname):\n qb = QueryBuilder()\n qb.append(ParameterData, tag='p', project='*')\n qb.append(Calculation, tag='c', project='*', edge_tag='p2c', edge_project=('label', 'type'))\n qb.append(ArrayData, tag='a', project='*', edge_tag='c2a', edge_project=('label', 'type'))\n qb.append(Group, filters={'name': groupname}, project='*', tag='g', group_of='a')\n # I want the query to contain something!\n self.assertTrue(qb.count() > 0)\n # The hash is given from the preservable entries in an export-import cycle,\n # uuids, attributes, labels, descriptions, arrays, link-labels, link-types:\n hash_ = make_hash([(\n item['p']['*'].get_attrs(),\n item['p']['*'].uuid,\n item['p']['*'].label,\n item['p']['*'].description,\n item['c']['*'].uuid,\n item['c']['*'].get_attrs(),\n item['a']['*'].get_attrs(),\n [item['a']['*'].get_array(name) for name in item['a']['*'].get_arraynames()],\n item['a']['*'].uuid,\n item['g']['*'].uuid,\n item['g']['*'].name,\n item['p2c']['label'],\n item['p2c']['type'],\n item['c2a']['label'],\n item['c2a']['type'],\n item['g']['*'].name,\n ) for item in qb.dict()])\n return hash_\n\n # Creating a folder for the import/export files\n temp_folder = tempfile.mkdtemp()\n chars = string.ascii_uppercase + string.digits\n size = 10\n groupname = 'test-group'\n try:\n nparr = np.random.random((4, 3, 2))\n trial_dict = {}\n # give some integers:\n trial_dict.update({str(k): np.random.randint(100) for k in range(10)})\n # give some floats:\n trial_dict.update({str(k): np.random.random() for k in range(10, 20)})\n # give some booleans:\n trial_dict.update({str(k): bool(np.random.randint(1)) for k in range(20, 30)})\n # give some datetime:\n trial_dict.update({str(k): datetime(\n year=2017,\n month=np.random.randint(1, 12),\n day=np.random.randint(1, 28)) for k in range(30, 40)})\n # give some text:\n trial_dict.update({str(k): ''.join(random.choice(chars) for _ in range(size)) for k in range(20, 30)})\n\n p = ParameterData(dict=trial_dict)\n p.label = str(datetime.now())\n p.description = 'd_' + str(datetime.now())\n p.store()\n c = Calculation()\n # also set the trial dict as attributes, randomizing the keys\n for k, v in trial_dict.items():\n c._set_attr(str(int(k) + np.random.randint(10)), v)\n c.store()\n a = ArrayData()\n a.set_array('array', nparr)\n a.store()\n # LINKS\n # the calculation has input the parameters-instance\n c.add_link_from(p, label='input_parameters', link_type=LinkType.INPUT)\n # I want the array to be an 
output of the calculation\n a.add_link_from(c, label='output_array', link_type=LinkType.CREATE)\n g = Group(name='test-group')\n g.store()\n g.add_nodes(a)\n\n hash_from_dbcontent = get_hash_from_db_content(groupname)\n\n # I export and reimport 3 times in a row:\n for i in range(3):\n # Always new filename:\n filename = os.path.join(temp_folder, \"export-{}.zip\".format(i))\n # Loading the group from the string\n g = Group.get_from_string(groupname)\n # exporting based on all members of the group\n # this also checks if group memberships are preserved!\n export([g] + [n for n in g.nodes], outfile=filename, silent=True)\n # cleaning the DB!\n self.clean_db()\n # reimporting the data from the file\n import_data(filename, silent=True, ignore_unknown_nodes=True)\n # creating the hash from db content\n new_hash = get_hash_from_db_content(groupname)\n # I check for equality against the first hash created, which implies that hashes\n # are equal in all iterations of this process\n self.assertEqual(hash_from_dbcontent, new_hash)\n\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n\nclass TestComplex(AiidaTestCase):\n\n def test_complex_graph_import_export(self):\n \"\"\"\n This test checks that a small and bit complex graph can be correctly\n exported and imported.\n\n It will create the graph, store it to the database, export it to a file\n and import it. In the end it will check if the initial nodes are present\n at the imported graph.\n \"\"\"\n import tempfile\n import shutil\n import os\n\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.data.folder import FolderData\n from aiida.orm.data.parameter import ParameterData\n from aiida.orm.data.remote import RemoteData\n from aiida.common.links import LinkType\n from aiida.orm.importexport import export, import_data\n from aiida.orm.utils import load_node\n from aiida.common.exceptions import NotExistent\n\n temp_folder = tempfile.mkdtemp()\n try:\n calc1 = JobCalculation()\n calc1.set_computer(self.computer)\n calc1.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n calc1.label = \"calc1\"\n calc1.store()\n calc1._set_state(u'RETRIEVING')\n\n pd1 = ParameterData()\n pd1.label = \"pd1\"\n pd1.store()\n\n pd2 = ParameterData()\n pd2.label = \"pd2\"\n pd2.store()\n\n rd1 = RemoteData()\n rd1.label = \"rd1\"\n rd1.set_remote_path(\"/x/y.py\")\n rd1.set_computer(self.computer)\n rd1.store()\n rd1.add_link_from(calc1, link_type=LinkType.CREATE)\n\n calc2 = JobCalculation()\n calc2.set_computer(self.computer)\n calc2.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n calc2.label = \"calc2\"\n calc2.store()\n calc2.add_link_from(pd1, link_type=LinkType.INPUT)\n calc2.add_link_from(pd2, link_type=LinkType.INPUT)\n calc2.add_link_from(rd1, link_type=LinkType.INPUT)\n calc2._set_state(u'SUBMITTING')\n\n fd1 = FolderData()\n fd1.label = \"fd1\"\n fd1.store()\n fd1.add_link_from(calc2, link_type=LinkType.CREATE)\n\n node_uuids_labels = {calc1.uuid: calc1.label, pd1.uuid: pd1.label,\n pd2.uuid: pd2.label, rd1.uuid: rd1.label,\n calc2.uuid: calc2.label, fd1.uuid: fd1.label}\n\n filename = os.path.join(temp_folder, \"export.tar.gz\")\n export([fd1], outfile=filename, silent=True)\n\n self.clean_db()\n\n import_data(filename, silent=True, ignore_unknown_nodes=True)\n\n for uuid, label in node_uuids_labels.items():\n try:\n load_node(uuid)\n except NotExistent:\n self.fail(\"Node with UUID {} and label {} was not \"\n 
\"found.\".format(uuid, label))\n\n finally:\n # Deleting the created temporary folder\n shutil.rmtree(temp_folder, ignore_errors=True)\n\n\nclass TestComputer(AiidaTestCase):\n\n def setUp(self):\n self.clean_db()\n self.insert_data()\n\n def tearDown(self):\n pass\n\n def test_same_computer_import(self):\n \"\"\"\n Test that you can import nodes in steps without any problems. In this\n test we will import a first calculation and then a second one. The\n import should work as expected and have in the end two job\n calculations.\n\n Each calculation is related to the same computer. In the end we should\n have only one computer\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.computers import Computer\n from aiida.orm.calculation.job import JobCalculation\n\n # Creating a folder for the import/export files\n export_file_tmp_folder = tempfile.mkdtemp()\n unpack_tmp_folder = tempfile.mkdtemp()\n\n try:\n # Store two job calculation related to the same computer\n calc1_label = \"calc1\"\n calc1 = JobCalculation()\n calc1.set_computer(self.computer)\n calc1.set_option('resources', {\"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1})\n calc1.label = calc1_label\n calc1.store()\n calc1._set_state(u'RETRIEVING')\n\n calc2_label = \"calc2\"\n calc2 = JobCalculation()\n calc2.set_computer(self.computer)\n calc2.set_option('resources', {\"num_machines\": 2,\n \"num_mpiprocs_per_machine\": 2})\n calc2.label = calc2_label\n calc2.store()\n calc2._set_state(u'RETRIEVING')\n\n # Store locally the computer name\n comp_name = six.text_type(self.computer.name)\n comp_uuid = six.text_type(self.computer.uuid)\n\n # Export the first job calculation\n filename1 = os.path.join(export_file_tmp_folder, \"export1.tar.gz\")\n export([calc1], outfile=filename1, silent=True)\n\n # Export the second job calculation\n filename2 = os.path.join(export_file_tmp_folder, \"export2.tar.gz\")\n export([calc2], outfile=filename2, silent=True)\n\n # Clean the local database\n self.clean_db()\n\n # Check that there are no computers\n qb = QueryBuilder()\n qb.append(Computer, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any computers\"\n \"in the database at this point.\")\n\n # Check that there are no calculations\n qb = QueryBuilder()\n qb.append(JobCalculation, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any \"\n \"calculations in the database at \"\n \"this point.\")\n\n # Import the first calculation\n import_data(filename1, silent=True)\n\n # Check that the calculation computer is imported correctly.\n qb = QueryBuilder()\n qb.append(JobCalculation, project=['label'])\n self.assertEqual(qb.count(), 1, \"Only one calculation should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), calc1_label,\n \"The calculation label is not correct.\")\n\n # Check that the referenced computer is imported correctly.\n qb = QueryBuilder()\n qb.append(Computer, project=['name', 'uuid', 'id'])\n self.assertEqual(qb.count(), 1, \"Only one computer should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), comp_name,\n \"The computer name is not correct.\")\n self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,\n \"The computer uuid is not correct.\")\n\n # Store the id of the computer\n comp_id = qb.first()[2]\n\n # Import the second calculation\n import_data(filename2, silent=True)\n\n # Check that the number of computers remains 
the same and its data\n # did not change.\n qb = QueryBuilder()\n qb.append(Computer, project=['name', 'uuid', 'id'])\n self.assertEqual(qb.count(), 1, \"Only one computer should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), comp_name,\n \"The computer name is not correct.\")\n self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,\n \"The computer uuid is not correct.\")\n self.assertEqual(qb.first()[2], comp_id,\n \"The computer id is not correct.\")\n\n # Check that now you have two calculations attached to the same\n # computer.\n qb = QueryBuilder()\n qb.append(Computer, tag='comp')\n qb.append(JobCalculation, has_computer='comp', project=['label'])\n self.assertEqual(qb.count(), 2, \"Two calculations should be \"\n \"found.\")\n ret_labels = set(_ for [_] in qb.all())\n self.assertEqual(ret_labels, set([calc1_label, calc2_label]),\n \"The labels of the calculations are not correct.\")\n\n finally:\n # Deleting the created temporary folders\n shutil.rmtree(export_file_tmp_folder, ignore_errors=True)\n shutil.rmtree(unpack_tmp_folder, ignore_errors=True)\n\n def test_same_computer_different_name_import(self):\n \"\"\"\n This test checks that if the computer is re-imported with a different\n name to the same database, then the original computer will not be\n renamed. It also checks that the names were correctly imported (without\n any change since there is no computer name collision)\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.computers import Computer\n from aiida.orm.calculation.job import JobCalculation\n\n # Creating a folder for the import/export files\n export_file_tmp_folder = tempfile.mkdtemp()\n unpack_tmp_folder = tempfile.mkdtemp()\n\n try:\n # Store a calculation\n calc1_label = \"calc1\"\n calc1 = JobCalculation()\n calc1.set_computer(self.computer)\n calc1.set_option('resources', {\"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1})\n calc1.label = calc1_label\n calc1.store()\n calc1._set_state(u'RETRIEVING')\n\n # Store locally the computer name\n comp1_name = six.text_type(self.computer.name)\n\n # Export the first job calculation\n filename1 = os.path.join(export_file_tmp_folder, \"export1.tar.gz\")\n export([calc1], outfile=filename1, silent=True)\n\n # Rename the computer\n self.computer.set_name(comp1_name + \"_updated\")\n\n # Store a second calculation\n calc2_label = \"calc2\"\n calc2 = JobCalculation()\n calc2.set_computer(self.computer)\n calc2.set_option('resources', {\"num_machines\": 2,\n \"num_mpiprocs_per_machine\": 2})\n calc2.label = calc2_label\n calc2.store()\n calc2._set_state(u'RETRIEVING')\n\n # Export the second job calculation\n filename2 = os.path.join(export_file_tmp_folder, \"export2.tar.gz\")\n export([calc2], outfile=filename2, silent=True)\n\n # Clean the local database\n self.clean_db()\n\n # Check that there are no computers\n qb = QueryBuilder()\n qb.append(Computer, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any computers\"\n \"in the database at this point.\")\n\n # Check that there are no calculations\n qb = QueryBuilder()\n qb.append(JobCalculation, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any \"\n \"calculations in the database at \"\n \"this point.\")\n\n # Import the first calculation\n import_data(filename1, silent=True)\n\n # Check that the calculation computer is imported correctly.\n qb = QueryBuilder()\n 
qb.append(JobCalculation, project=['label'])\n self.assertEqual(qb.count(), 1, \"Only one calculation should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), calc1_label,\n \"The calculation label is not correct.\")\n\n # Check that the referenced computer is imported correctly.\n qb = QueryBuilder()\n qb.append(Computer, project=['name', 'uuid', 'id'])\n self.assertEqual(qb.count(), 1, \"Only one computer should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), comp1_name,\n \"The computer name is not correct.\")\n\n # Import the second calculation\n import_data(filename2, silent=True)\n\n # Check that the number of computers remains the same and its data\n # did not change.\n qb = QueryBuilder()\n qb.append(Computer, project=['name'])\n self.assertEqual(qb.count(), 1, \"Only one computer should be \"\n \"found.\")\n self.assertEqual(six.text_type(qb.first()[0]), comp1_name,\n \"The computer name is not correct.\")\n\n finally:\n # Deleting the created temporary folders\n shutil.rmtree(export_file_tmp_folder, ignore_errors=True)\n shutil.rmtree(unpack_tmp_folder, ignore_errors=True)\n\n def test_different_computer_same_name_import(self):\n \"\"\"\n This test checks that if there is a name collision, the imported\n computers are renamed accordingly.\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.computers import Computer\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.importexport import COMP_DUPL_SUFFIX\n\n # Creating a folder for the import/export files\n export_file_tmp_folder = tempfile.mkdtemp()\n unpack_tmp_folder = tempfile.mkdtemp()\n\n try:\n # Set the computer name\n comp1_name = \"localhost_1\"\n self.computer.set_name(comp1_name)\n\n # Store a calculation\n calc1_label = \"calc1\"\n calc1 = JobCalculation()\n calc1.set_computer(self.computer)\n calc1.set_option('resources', {\"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1})\n calc1.label = calc1_label\n calc1.store()\n calc1._set_state(u'RETRIEVING')\n\n # Export the first job calculation\n filename1 = os.path.join(export_file_tmp_folder, \"export1.tar.gz\")\n export([calc1], outfile=filename1, silent=True)\n\n # Reset the database\n self.clean_db()\n self.insert_data()\n\n # Set the computer name to the same name as before\n self.computer.set_name(comp1_name)\n\n # Store a second calculation\n calc2_label = \"calc2\"\n calc2 = JobCalculation()\n calc2.set_computer(self.computer)\n calc2.set_option('resources', {\"num_machines\": 2,\n \"num_mpiprocs_per_machine\": 2})\n calc2.label = calc2_label\n calc2.store()\n calc2._set_state(u'RETRIEVING')\n\n # Export the second job calculation\n filename2 = os.path.join(export_file_tmp_folder, \"export2.tar.gz\")\n export([calc2], outfile=filename2, silent=True)\n\n # Reset the database\n self.clean_db()\n self.insert_data()\n\n # Set the computer name to the same name as before\n self.computer.set_name(comp1_name)\n\n # Store a third calculation\n calc3_label = \"calc3\"\n calc3 = JobCalculation()\n calc3.set_computer(self.computer)\n calc3.set_option('resources', {\"num_machines\": 2,\n \"num_mpiprocs_per_machine\": 2})\n calc3.label = calc3_label\n calc3.store()\n calc3._set_state(u'RETRIEVING')\n\n # Export the third job calculation\n filename3 = os.path.join(export_file_tmp_folder, \"export3.tar.gz\")\n export([calc3], outfile=filename3, silent=True)\n\n # Clean the local database\n 
self.clean_db()\n\n # Check that there are no computers\n qb = QueryBuilder()\n qb.append(Computer, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any computers\"\n \"in the database at this point.\")\n\n # Check that there are no calculations\n qb = QueryBuilder()\n qb.append(JobCalculation, project=['*'])\n self.assertEqual(qb.count(), 0, \"There should not be any \"\n \"calculations in the database at \"\n \"this point.\")\n\n # Import all the calculations\n import_data(filename1, silent=True)\n import_data(filename2, silent=True)\n import_data(filename3, silent=True)\n\n # Retrieve the calculation-computer pairs\n qb = QueryBuilder()\n qb.append(JobCalculation, project=['label'], tag='jcalc')\n qb.append(Computer, project=['name'],\n computer_of='jcalc')\n self.assertEqual(qb.count(), 3, \"Three combinations expected.\")\n res = qb.all()\n self.assertIn([calc1_label, comp1_name], res,\n \"Calc-Computer combination not found.\")\n self.assertIn([calc2_label,\n comp1_name + COMP_DUPL_SUFFIX.format(0)], res,\n \"Calc-Computer combination not found.\")\n self.assertIn([calc3_label,\n comp1_name + COMP_DUPL_SUFFIX.format(1)], res,\n \"Calc-Computer combination not found.\")\n finally:\n # Deleting the created temporary folders\n shutil.rmtree(export_file_tmp_folder, ignore_errors=True)\n shutil.rmtree(unpack_tmp_folder, ignore_errors=True)\n\n def test_correct_import_of_computer_json_params(self):\n \"\"\"\n This test checks that the metadata and transport params are\n exported and imported correctly in both backends.\n \"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.computers import Computer\n from aiida.orm.calculation.job import JobCalculation\n\n # Creating a folder for the import/export files\n export_file_tmp_folder = tempfile.mkdtemp()\n unpack_tmp_folder = tempfile.mkdtemp()\n\n try:\n # Set the computer name\n comp1_name = \"localhost_1\"\n comp1_metadata = {\n u'workdir': u'/tmp/aiida'\n }\n comp1_transport_params = {\n u'key1': u'value1',\n u'key2': 2\n }\n self.computer.set_name(comp1_name)\n self.computer._set_metadata(comp1_metadata)\n self.computer.set_transport_params(comp1_transport_params)\n\n # Store a calculation\n calc1_label = \"calc1\"\n calc1 = JobCalculation()\n calc1.set_computer(self.computer)\n calc1.set_option('resources', {\"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1})\n calc1.label = calc1_label\n calc1.store()\n calc1._set_state(u'RETRIEVING')\n\n # Export the first job calculation\n filename1 = os.path.join(export_file_tmp_folder, \"export1.tar.gz\")\n export([calc1], outfile=filename1, silent=True)\n\n # Clean the local database\n self.clean_db()\n # Import the data\n import_data(filename1, silent=True)\n\n qb = QueryBuilder()\n qb.append(Computer, project=['transport_params', '_metadata'],\n tag=\"comp\")\n self.assertEqual(qb.count(), 1, \"Expected only one computer\")\n\n res = qb.dict()[0]\n self.assertEqual(res['comp']['transport_params'],\n comp1_transport_params,\n \"Not the expected transport parameters \"\n \"were found\")\n self.assertEqual(res['comp']['_metadata'],\n comp1_metadata,\n \"Not the expected metadata were found\")\n finally:\n # Deleting the created temporary folders\n shutil.rmtree(export_file_tmp_folder, ignore_errors=True)\n shutil.rmtree(unpack_tmp_folder, ignore_errors=True)\n\n def test_import_of_django_sqla_export_file(self):\n \"\"\"\n Check why sqla import manages to import 
the django export file correctly\n \"\"\"\n from aiida.backends.tests.utils.fixtures import import_archive_fixture\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.computers import Computer\n\n for archive in ['export/compare/django.aiida', 'export/compare/sqlalchemy.aiida']:\n # Clean the database\n self.clean_db()\n\n # Import the needed data\n import_archive_fixture(archive)\n\n # The expected metadata & transport parameters\n comp1_metadata = {\n u'workdir': u'/tmp/aiida'\n }\n comp1_transport_params = {\n u'key1': u'value1',\n u'key2': 2\n }\n\n # Check that we got the correct metadata & transport parameters\n qb = QueryBuilder()\n qb.append(Computer, project=['transport_params', '_metadata'], tag=\"comp\")\n self.assertEqual(qb.count(), 1, \"Expected only one computer\")\n\n res = qb.dict()[0]\n\n self.assertEqual(res['comp']['transport_params'], comp1_transport_params)\n self.assertEqual(res['comp']['_metadata'], comp1_metadata)\n\n\nclass TestLinks(AiidaTestCase):\n\n def setUp(self):\n self.clean_db()\n self.insert_data()\n\n def tearDown(self):\n pass\n\n def get_all_node_links(self):\n \"\"\"\n \"\"\"\n from aiida.orm import load_node, Node\n from aiida.orm.querybuilder import QueryBuilder\n qb = QueryBuilder()\n qb.append(Node, project='uuid', tag='input')\n qb.append(Node, project='uuid', tag='output',\n edge_project=['label', 'type'], output_of='input')\n return qb.all()\n\n def test_input_and_create_links(self):\n \"\"\"\n Simple test that will verify that INPUT and CREATE links are properly exported and\n correctly recreated upon import.\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm.data.int import Int\n from aiida.orm.importexport import export\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.common.links import LinkType\n\n tmp_folder = tempfile.mkdtemp()\n\n try:\n node_work = WorkCalculation().store()\n node_input = Int(1).store()\n node_output = Int(2).store()\n\n node_work.add_link_from(node_input, 'input', link_type=LinkType.INPUT)\n node_output.add_link_from(node_work, 'output', link_type=LinkType.CREATE)\n\n export_links = self.get_all_node_links()\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([node_output], outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n import_links = self.get_all_node_links()\n\n export_set = [tuple(_) for _ in export_links]\n import_set = [tuple(_) for _ in import_links]\n\n self.assertEquals(set(export_set), set(import_set))\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def construct_complex_graph(self, export_combination=0):\n \"\"\"\n This method creates a \"complex\" graph with all available link types\n (INPUT, CREATE, RETURN and CALL) and returns the nodes of the graph. 
It\n also returns various combinations of nodes that need to be extracted\n but also the final expected set of nodes (after adding the expected\n predecessors, desuccessors).\n \"\"\"\n from aiida.orm.data.base import Int\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.common.datastructures import calc_states\n from aiida.common.links import LinkType\n\n if export_combination < 0 or export_combination > 8:\n return None\n\n # Node creation\n d1 = Int(1).store()\n d2 = Int(1).store()\n wc1 = WorkCalculation().store()\n wc2 = WorkCalculation().store()\n\n pw1 = JobCalculation()\n pw1.set_computer(self.computer)\n pw1.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n pw1.store()\n\n d3 = Int(1).store()\n d4 = Int(1).store()\n\n pw2 = JobCalculation()\n pw2.set_computer(self.computer)\n pw2.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n pw2.store()\n\n d5 = Int(1).store()\n d6 = Int(1).store()\n\n # Link creation\n wc1.add_link_from(d1, 'input1', link_type=LinkType.INPUT)\n wc1.add_link_from(d2, 'input2', link_type=LinkType.INPUT)\n\n wc2.add_link_from(d1, 'input', link_type=LinkType.INPUT)\n wc2.add_link_from(wc1, 'call', link_type=LinkType.CALL)\n\n pw1.add_link_from(d1, 'input', link_type=LinkType.INPUT)\n pw1.add_link_from(wc2, 'call', link_type=LinkType.CALL)\n pw1._set_state(calc_states.PARSING)\n\n d3.add_link_from(pw1, 'create', link_type=LinkType.CREATE)\n d3.add_link_from(wc2, 'return', link_type=LinkType.RETURN)\n\n d4.add_link_from(pw1, 'create', link_type=LinkType.CREATE)\n d4.add_link_from(wc2, 'return', link_type=LinkType.RETURN)\n\n pw2.add_link_from(d4, 'input', link_type=LinkType.INPUT)\n pw2._set_state(calc_states.PARSING)\n\n d5.add_link_from(pw2, 'create', link_type=LinkType.CREATE)\n d6.add_link_from(pw2, 'create', link_type=LinkType.CREATE)\n\n # Return the generated nodes\n graph_nodes = [d1, d2, d3, d4, d5, d6, pw1, pw2, wc1, wc2]\n\n # Create various combinations of nodes that should be exported\n # and the final set of nodes that are exported in each case, following\n # predecessor/successor links.\n export_list = [\n (wc1, [d1, d2, d3, d4, pw1, wc1, wc2]),\n (wc2, [d1, d3, d4, pw1, wc2]),\n (d3, [d1, d3, d4, pw1]),\n (d4, [d1, d3, d4, pw1]),\n (d5, [d1, d3, d4, d5, d6, pw1, pw2]),\n (d6, [d1, d3, d4, d5, d6, pw1, pw2]),\n (pw2, [d1, d3, d4, d5, d6, pw1, pw2]),\n (d1, [d1]),\n (d2, [d2])\n ]\n\n return graph_nodes, export_list[export_combination]\n\n def test_data_create_reversed_false(self):\n \"\"\"Verify that create_reversed = False is respected when only exporting Data nodes.\"\"\"\n import os\n import shutil\n import tempfile\n\n from aiida.common.datastructures import calc_states\n from aiida.orm import Data, Group\n from aiida.orm.data.base import Int\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.importexport import export\n from aiida.common.links import LinkType\n from aiida.orm.querybuilder import QueryBuilder\n\n tmp_folder = tempfile.mkdtemp()\n\n try:\n data_input = Int(1).store()\n data_output = Int(2).store()\n\n calc = JobCalculation()\n calc.set_computer(self.computer)\n calc.set_option('resources', {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n calc.store()\n\n calc.add_link_from(data_input, 'input', link_type=LinkType.INPUT)\n calc._set_state(calc_states.PARSING)\n data_output.add_link_from(calc, 'create', link_type=LinkType.CREATE)\n\n group = 
Group.create(name='test_group')\n group.add_nodes(data_output)\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([group], outfile=export_file, silent=True, create_reversed=False)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n\n builder = QueryBuilder()\n builder.append(Data)\n self.assertEqual(builder.count(), 1, 'Expected a single Data node but got {}'.format(builder.count()))\n self.assertEqual(builder.all()[0][0].uuid, data_output.uuid)\n\n builder = QueryBuilder()\n builder.append(JobCalculation)\n self.assertEqual(builder.count(), 0, 'Expected no Calculation nodes')\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_complex_workflow_graph_links(self):\n \"\"\"\n This test checks that all the needed links are correctly exported and\n imported. More precisely, it checks that INPUT, CREATE, RETURN and CALL\n links connecting Data nodes, JobCalculations and WorkCalculations are\n exported and imported correctly.\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm import Node\n from aiida.orm.importexport import export\n from aiida.common.links import LinkType\n from aiida.orm.querybuilder import QueryBuilder\n tmp_folder = tempfile.mkdtemp()\n\n try:\n graph_nodes, _ = self.construct_complex_graph()\n\n # Getting the input, create, return and call links\n qb = QueryBuilder()\n qb.append(Node, project='uuid')\n qb.append(Node, project='uuid',\n edge_project=['label', 'type'],\n edge_filters={'type': {'in': (LinkType.INPUT.value,\n LinkType.CREATE.value,\n LinkType.RETURN.value,\n LinkType.CALL.value)}})\n export_links = qb.all()\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export(graph_nodes, outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n import_links = self.get_all_node_links()\n\n export_set = [tuple(_) for _ in export_links]\n import_set = [tuple(_) for _ in import_links]\n\n self.assertEquals(set(export_set), set(import_set))\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_complex_workflow_graph_export_set_expansion(self):\n import os, shutil, tempfile\n from aiida.orm.importexport import export\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm import Node\n\n for export_conf in range(0, 8):\n\n graph_nodes, (export_node, export_target) = (\n self.construct_complex_graph(export_conf))\n\n tmp_folder = tempfile.mkdtemp()\n try:\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([export_node], outfile=export_file, silent=True)\n export_node_str = str(export_node)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n\n # Get all the nodes of the database\n qb = QueryBuilder()\n qb.append(Node, project='uuid')\n imported_node_uuids = set(str(_[0]) for _ in qb.all())\n\n export_target_uuids = set(str(_.uuid) for _ in export_target)\n\n from aiida.orm.utils import load_node\n self.assertEquals(\n export_target_uuids,\n imported_node_uuids,\n \"Problem in comparison of export node: \" +\n str(export_node_str) + \"\\n\" +\n \"Expected set: \" + str(export_target_uuids) + \"\\n\" +\n \"Imported set: \" + str(imported_node_uuids) + \"\\n\" +\n \"Difference: \" + str([load_node(_) for _ in\n export_target_uuids.symmetric_difference(\n imported_node_uuids)])\n )\n\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_recursive_export_input_and_create_links_proper(self):\n \"\"\"\n Check that CALL, INPUT, RETURN and CREATE 
links are followed\n recursively.\n \"\"\"\n import os, shutil, tempfile\n from aiida.orm import Node\n from aiida.orm.data.base import Int\n from aiida.orm.importexport import export\n from aiida.orm.calculation.inline import InlineCalculation\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.common.links import LinkType\n from aiida.orm.querybuilder import QueryBuilder\n tmp_folder = tempfile.mkdtemp()\n\n try:\n wc2 = WorkCalculation().store()\n wc1 = WorkCalculation().store()\n c1 = InlineCalculation().store()\n ni1 = Int(1).store()\n ni2 = Int(2).store()\n no1 = Int(1).store()\n no2 = Int(2).store()\n\n # Create the connections between workcalculations and calculations\n wc1.add_link_from(wc2, 'call', link_type=LinkType.CALL)\n c1.add_link_from(wc1, 'call', link_type=LinkType.CALL)\n\n # Connect the first data node to wc1 & c1\n wc1.add_link_from(ni1, 'ni1-to-wc1',\n link_type=LinkType.INPUT)\n c1.add_link_from(ni1, 'ni1-to-c1',\n link_type=LinkType.INPUT)\n\n # Connect the second data node to wc1 & c1\n wc1.add_link_from(ni2, 'ni2-to-wc1',\n link_type=LinkType.INPUT)\n c1.add_link_from(ni2, 'ni2-to-c1',\n link_type=LinkType.INPUT)\n\n # Connecting the first output node to wc1 & c1\n no1.add_link_from(wc1, 'output',\n link_type=LinkType.RETURN)\n no1.add_link_from(c1, 'output',\n link_type=LinkType.CREATE)\n\n # Connecting the second output node to wc1 & c1\n no2.add_link_from(wc1, 'output',\n link_type=LinkType.RETURN)\n no2.add_link_from(c1, 'output',\n link_type=LinkType.CREATE)\n\n # Getting the input, create, return and call links\n qb = QueryBuilder()\n qb.append(Node, project='uuid')\n qb.append(Node, project='uuid',\n edge_project=['label', 'type'],\n edge_filters={'type': {'in': (LinkType.INPUT.value,\n LinkType.CREATE.value,\n LinkType.RETURN.value,\n LinkType.CALL.value)}})\n export_links = qb.all()\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([wc2], outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n import_links = self.get_all_node_links()\n\n export_set = [tuple(_) for _ in export_links]\n import_set = [tuple(_) for _ in import_links]\n\n self.assertEquals(set(export_set), set(import_set))\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_links_for_workflows(self):\n \"\"\"\n Check that CALL links are not followed in the export procedure, and the only creation\n is followed for data::\n\n ____ ____ ____\n | | INP | | CALL | |\n | i1 | --> | w1 | <--- | w2 |\n |____| |____| |____|\n | |\n CREATE v v RETURN\n ____\n | |\n | o1 |\n |____|\n\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm.data.base import Int\n from aiida.orm.importexport import export\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.common.links import LinkType\n tmp_folder = tempfile.mkdtemp()\n\n try:\n w1 = WorkCalculation().store()\n w2 = WorkCalculation().store()\n i1 = Int(1).store()\n o1 = Int(2).store()\n\n w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)\n w1.add_link_from(w2, 'call', link_type=LinkType.CALL)\n o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)\n o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)\n\n links_wanted = [l for l in self.get_all_node_links() if l[3] in\n (LinkType.CREATE.value,\n LinkType.INPUT.value,\n LinkType.RETURN.value)]\n\n export_file_1 = os.path.join(tmp_folder, 'export-1.tar.gz')\n export_file_2 = os.path.join(tmp_folder, 'export-2.tar.gz')\n export([o1], 
outfile=export_file_1, silent=True)\n export([w1], outfile=export_file_2, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file_1, silent=True)\n links_in_db = self.get_all_node_links()\n\n self.assertEquals(sorted(links_wanted), sorted(links_in_db))\n self.clean_db()\n self.insert_data()\n\n import_data(export_file_2, silent=True)\n links_in_db = self.get_all_node_links()\n self.assertEquals(sorted(links_wanted), sorted(links_in_db))\n\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_double_return_links_for_workflows(self):\n \"\"\"\n This test checks that double return links to a node can be exported\n and imported without problems,\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm.data.base import Int\n from aiida.orm.importexport import export\n from aiida.orm.calculation.work import WorkCalculation\n from aiida.common.links import LinkType\n from aiida.orm.querybuilder import QueryBuilder\n from aiida.orm.node import Node\n\n tmp_folder = tempfile.mkdtemp()\n\n try:\n w1 = WorkCalculation().store()\n w2 = WorkCalculation().store()\n i1 = Int(1).store()\n o1 = Int(2).store()\n\n w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)\n w1.add_link_from(w2, 'call', link_type=LinkType.CALL)\n o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)\n o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)\n o1.add_link_from(w2, 'return', link_type=LinkType.RETURN)\n\n uuids_wanted = set(_.uuid for _ in (w1, o1, i1, w2))\n links_wanted = [l for l in self.get_all_node_links() if l[3] in (\n 'createlink', 'inputlink', 'returnlink', 'calllink')]\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([o1, w1, w2, i1],\n outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n\n uuids_in_db = [str(uuid) for [uuid] in\n QueryBuilder().append(Node, project='uuid').all()]\n self.assertEquals(sorted(uuids_wanted), sorted(uuids_in_db))\n\n links_in_db = self.get_all_node_links()\n self.assertEquals(sorted(links_wanted), sorted(links_in_db))\n\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_that_solo_code_is_exported_correctly(self):\n \"\"\"\n This test checks that when a calculation is exported then the\n corresponding code is also exported.\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm.utils import load_node\n from aiida.orm.importexport import export\n from aiida.orm.code import Code\n\n tmp_folder = tempfile.mkdtemp()\n\n try:\n code_label = 'test_code1'\n\n code = Code()\n code.set_remote_computer_exec((self.computer, '/bin/true'))\n code.label = code_label\n code.store()\n\n code_uuid = code.uuid\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([code], outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n\n self.assertEquals(load_node(code_uuid).label, code_label)\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n\n def test_that_input_code_is_exported_correctly(self):\n \"\"\"\n This test checks that when a calculation is exported then the\n corresponding code is also exported. 
It also checks that the links\n are also in place after the import.\n \"\"\"\n import os, shutil, tempfile\n\n from aiida.orm.utils import load_node\n from aiida.orm.importexport import export\n from aiida.common.links import LinkType\n from aiida.orm.calculation.job import JobCalculation\n from aiida.orm.code import Code\n from aiida.orm.querybuilder import QueryBuilder\n\n tmp_folder = tempfile.mkdtemp()\n\n try:\n code_label = 'test_code1'\n\n code = Code()\n code.set_remote_computer_exec((self.computer, '/bin/true'))\n code.label = code_label\n code.store()\n\n code_uuid = code.uuid\n\n jc = JobCalculation()\n jc.set_computer(self.computer)\n jc.set_option('resources',\n {\"num_machines\": 1, \"num_mpiprocs_per_machine\": 1})\n jc.store()\n\n jc.add_link_from(code, 'code', link_type=LinkType.INPUT)\n\n export_file = os.path.join(tmp_folder, 'export.tar.gz')\n export([jc], outfile=export_file, silent=True)\n\n self.clean_db()\n self.insert_data()\n\n import_data(export_file, silent=True)\n\n # Check that the node is there\n self.assertEquals(load_node(code_uuid).label, code_label)\n # Check that the link is in place\n qb = QueryBuilder()\n qb.append(Code, project='uuid')\n qb.append(JobCalculation, project='uuid',\n edge_project=['label', 'type'],\n edge_filters={'type': {'==': LinkType.INPUT.value}})\n self.assertEquals(qb.count(), 1,\n \"Expected to find one and only one link from \"\n \"code to the calculation node. {} found.\"\n .format(qb.count()))\n finally:\n shutil.rmtree(tmp_folder, ignore_errors=True)\n"
] | [
[
"numpy.random.random",
"numpy.random.randint"
]
] |
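Note on the row above: every test in this AiiDA file follows one export/import round-trip pattern — store nodes, export() them to a tar.gz archive, wipe the database, import_data() the archive back, then verify with QueryBuilder. Below is a minimal sketch of that round trip, not code from the row itself: it reuses only the calls the file imports (export, import_data, QueryBuilder, JobCalculation), assumes import_data is importable from aiida.orm.importexport in this legacy AiiDA 0.x API, and uses a hypothetical `computer` in place of the test class's self.computer fixture.

import os
import shutil
import tempfile

from aiida.orm.importexport import export, import_data  # assumed legacy 0.x import path
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.calculation.job import JobCalculation

tmp_folder = tempfile.mkdtemp()
try:
    # Store a calculation, as the tests do (`computer` is an assumed fixture)
    calc = JobCalculation()
    calc.set_computer(computer)
    calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
    calc.store()

    # Export to an archive, then re-import it (the tests clean the DB in between)
    archive = os.path.join(tmp_folder, "export.tar.gz")
    export([calc], outfile=archive, silent=True)
    import_data(archive, silent=True)

    # Verify the round trip with QueryBuilder
    qb = QueryBuilder()
    qb.append(JobCalculation, project=['uuid'])
    assert qb.count() >= 1
finally:
    shutil.rmtree(tmp_folder, ignore_errors=True)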
corner4world/nntrainer | [
"0f342e8f2a1ec95b4e712aa3390b21cf0ea4efae"
] | [
"test/input_gen/genModelsRecurrent_v2.py"
] | [
"#!/usr/bin/env python3\n# SPDX-License-Identifier: Apache-2.0\n##\n# Copyright (C) 2021 Jihoon Lee <[email protected]>\n#\n# @file genModelsRecurrent_v2.py\n# @date 19 October 2021\n# @brief Generate recurrent model tcs\n# @author Jihoon lee <[email protected]>\n\nfrom recorder_v2 import record_v2, inspect_file\nfrom zoneout import Zoneout\nimport torch\n\nclass FCUnroll(torch.nn.Module):\n def __init__(self, unroll_for=1, num_fc=1):\n super().__init__()\n self.fcs = torch.nn.ModuleList([torch.nn.Linear(1, 1) for i in range(num_fc)])\n self.unroll_for = unroll_for\n # self.loss = torch.nn.MSELoss()\n self.loss = torch.nn.Identity()\n\n def forward(self, inputs, labels):\n output = inputs[0]\n for i in range(self.unroll_for):\n for fc in self.fcs:\n output = fc(output)\n loss = self.loss(output)\n # loss = self.loss(output, labels[0])\n return output, loss\n\nclass RNNCellStacked(torch.nn.Module):\n def __init__(self, unroll_for=1, num_rnn=1, input_size=1, hidden_size=1):\n super().__init__()\n self.rnns = torch.nn.ModuleList(\n [\n torch.nn.RNNCell(input_size, hidden_size)\n for _ in range(num_rnn)\n ]\n )\n self.unroll_for = unroll_for\n self.loss = torch.nn.MSELoss()\n\n def forward(self, inputs, labels):\n hs = [torch.zeros_like(inputs[0]) for _ in self.rnns]\n out = inputs[0]\n ret = []\n for _ in range(self.unroll_for):\n for i, rnn in enumerate(self.rnns):\n hs[i] = rnn(out, hs[i])\n out = hs[i]\n ret.append(out)\n\n ret = torch.stack(ret, dim=1)\n loss = self.loss(ret, labels[0])\n return ret, loss\n\nclass LSTMStacked(torch.nn.Module):\n def __init__(self, num_lstm=1, bidirectional=False):\n super().__init__()\n self.input_size = self.hidden_size = 2\n self.num_lstm = num_lstm\n self.bidirectional=bidirectional\n self.lstms = torch.nn.ModuleList(\n [\n torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, batch_first=True, bidirectional=bidirectional)\n # Intended comment\n # torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, num_layers=num_lstm, batch_first=True, bidirectional=bidirectional)\n for i in range(num_lstm)\n ]\n )\n self.loss = torch.nn.MSELoss()\n\n def forward(self, inputs, labels):\n out = inputs[0]\n states = inputs[1:]\n # hs = [states[2 * i] for i in range(self.num_lstm)]\n hs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)]\n # cs = [states[2 * i + 1] for i in range(self.num_lstm)]\n cs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)]\n for i, (lstm, h, c) in enumerate(zip(self.lstms, hs, cs)):\n out, (hs[i], cs[i]) = lstm(out, (h, c))\n \n loss = self.loss(out, labels[0])\n return out, loss\n\nclass LSTMCellStacked(torch.nn.Module):\n def __init__(self, unroll_for=2, num_lstmcell=1):\n super().__init__()\n self.input_size = self.hidden_size = 2\n self.lstmcells = torch.nn.ModuleList(\n [\n torch.nn.LSTMCell(self.input_size, self.hidden_size)\n for _ in range(num_lstmcell)\n ]\n )\n self.unroll_for = unroll_for\n self.num_lstmcell = num_lstmcell\n self.loss = torch.nn.MSELoss()\n\n def forward(self, inputs, labels):\n out = inputs[0]\n states = inputs[1:]\n hs = [states[2 * i] for i in range(self.num_lstmcell)]\n cs = [states[2 * i + 1] for i in range(self.num_lstmcell)]\n ret = []\n for _ in range(self.unroll_for):\n for i, (lstm, h, c) in enumerate(zip(self.lstmcells, hs, cs)):\n hs[i], cs[i] = lstm(out, (h, c))\n out = 
hs[i]\n ret.append(out)\n\n ret = torch.stack(ret, dim=1)\n loss = self.loss(ret, labels[0])\n return ret, loss\n\nclass ZoneoutLSTMStacked(torch.nn.Module):\n def __init__(self, batch_size=3, unroll_for=2, num_lstm=1, hidden_state_zoneout_rate=1, cell_state_zoneout_rate=1):\n super().__init__()\n self.input_size = self.hidden_size = 2\n self.cell_state_zoneout_rate = cell_state_zoneout_rate\n self.zoneout_lstms = torch.nn.ModuleList(\n [\n Zoneout(batch_size, self.input_size, self.hidden_size, unroll_for, hidden_state_zoneout_rate, cell_state_zoneout_rate)\n for _ in range(num_lstm)\n ]\n )\n self.unroll_for = unroll_for\n self.num_lstm = num_lstm\n self.loss = torch.nn.MSELoss()\n\n def forward(self, inputs, labels):\n out = inputs[0]\n states = inputs[1:]\n hs = [states[2 * i] for i in range(self.num_lstm)]\n cs = [states[2 * i + 1] for i in range(self.num_lstm)]\n ret = []\n for num_unroll in range(self.unroll_for):\n for i, (zoneout_lstm, h, c) in enumerate(zip(self.zoneout_lstms, hs, cs)):\n hs[i], cs[i] = zoneout_lstm(out, (h, c, num_unroll))\n out = hs[i]\n ret.append(out)\n\n ret = torch.stack(ret, dim=1)\n loss = self.loss(ret, labels[0])\n return ret, loss\n\nclass GRUCellStacked(torch.nn.Module):\n def __init__(self, unroll_for=2, num_grucell=1):\n super().__init__()\n self.input_size = self.hidden_size = 2\n self.grus = torch.nn.ModuleList(\n [\n torch.nn.GRUCell(self.input_size, self.hidden_size, bias=True)\n for _ in range(num_grucell)\n ]\n )\n self.unroll_for = unroll_for\n self.loss = torch.nn.MSELoss()\n\n def forward(self, inputs, labels):\n out = inputs[0]\n hs = inputs[1:]\n ret = []\n for _ in range(self.unroll_for):\n for i, (gru, h) in enumerate(zip(self.grus, hs)):\n hs[i] = gru(out, h)\n out = hs[i]\n ret.append(out)\n\n ret = torch.stack(ret, dim=1)\n loss = self.loss(ret, labels[0])\n return ret, loss\n\nif __name__ == \"__main__\":\n record_v2(\n FCUnroll(unroll_for=5),\n iteration=2,\n input_dims=[(1,)],\n label_dims=[(1,)],\n name=\"fc_unroll_single\",\n )\n\n record_v2(\n FCUnroll(unroll_for=2, num_fc=2),\n iteration=2,\n input_dims=[(1,)],\n label_dims=[(1,)],\n name=\"fc_unroll_stacked\",\n )\n\n record_v2(\n FCUnroll(unroll_for=2, num_fc=2),\n iteration=2,\n input_dims=[(1,)],\n label_dims=[(1,)],\n name=\"fc_unroll_stacked_clipped\",\n clip=True\n )\n\n record_v2(\n RNNCellStacked(unroll_for=2, num_rnn=1, input_size=2, hidden_size=2),\n iteration=2,\n input_dims=[(3, 2)],\n label_dims=[(3, 2, 2)],\n name=\"rnncell_single\",\n )\n\n record_v2(\n RNNCellStacked(unroll_for=2, num_rnn=2, input_size=2, hidden_size=2),\n iteration=2,\n input_dims=[(3, 2)],\n label_dims=[(3, 2, 2)],\n name=\"rnncell_stacked\",\n )\n\n unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, False]\n record_v2(\n LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),\n iteration=iteration,\n input_dims=[(batch_size, unroll_for, feature_size)],\n # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"lstm_single\",\n )\n\n unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, False]\n record_v2(\n LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),\n iteration=iteration,\n input_dims=[(batch_size, unroll_for, feature_size)],\n # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],\n label_dims=[(batch_size, 
unroll_for, unit)],\n name=\"lstm_stacked\",\n )\n\n unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, True]\n record_v2(\n LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),\n iteration=iteration,\n input_dims=[(batch_size, unroll_for, feature_size)],\n # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],\n label_dims=[(batch_size, unroll_for, 2 * unit)],\n name=\"bidirectional_lstm_single\",\n )\n\n unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, True]\n record_v2(\n LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),\n iteration=iteration,\n input_dims=[(batch_size, unroll_for, feature_size)],\n # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],\n label_dims=[(batch_size, unroll_for, 2 * unit)],\n name=\"bidirectional_lstm_stacked\",\n )\n\n unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 1, 2, 3, 2, 2, 2]\n record_v2(\n LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"lstmcell_single\",\n )\n\n unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 2, 2, 3, 2, 2, 2]\n record_v2(\n LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"lstmcell_stacked\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_000_000\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_000_000\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_050_000\",\n )\n\n unroll_for, 
num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_050_000\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_100_000\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_100_000\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_000_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_000_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_050_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, 
feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_050_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_100_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.5]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_100_050\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_000_100\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_000_100\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_050_100\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, 
hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_050_100\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_single_100_100\",\n )\n\n unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 1.0]\n record_v2(\n ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"zoneout_lstm_stacked_100_100\",\n )\n\n unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 1, 3, 2, 2, 2]\n record_v2(\n GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"grucell_single\",\n )\n\n unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 2, 3, 2, 2, 2]\n record_v2(\n GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),\n iteration=iteration,\n input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],\n label_dims=[(batch_size, unroll_for, unit)],\n name=\"grucell_stacked\",\n )\n\n # inspect_file(\"lstm_single.nnmodelgolden\")\n"
] | [
[
"torch.stack",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.LSTM",
"torch.zeros_like",
"torch.nn.GRUCell",
"torch.nn.LSTMCell",
"torch.nn.Identity",
"torch.nn.RNNCell",
"torch.zeros"
]
] |
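Note on the row above: each *Stacked class in this test-case generator follows the same manual-unroll recipe — run a torch cell module step by step, feed each new hidden state forward as the next input, and stack the per-step outputs on dim=1 so record_v2 can compare against nntrainer. A minimal standalone sketch of that recipe (the shapes are assumptions chosen to match the batch_size=3, unit=2 cases above):

import torch

cell = torch.nn.LSTMCell(2, 2)            # input_size == hidden_size == 2
out = torch.randn(3, 2)                   # (batch, feature)
h = torch.zeros(3, 2)
c = torch.zeros(3, 2)

steps = []
for _ in range(2):                        # unroll_for == 2
    h, c = cell(out, (h, c))              # one recurrent step
    out = h                               # new hidden state feeds the next step
    steps.append(out)

ret = torch.stack(steps, dim=1)           # (batch, unroll_for, hidden)
loss = torch.nn.MSELoss()(ret, torch.zeros_like(ret))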
wi11dey/pylabnet | [
"a6e3362f727c45aaa60e61496e858ae92e85574d"
] | [
"pylabnet/scripts/counter/monitor_counts.py"
] | [
"\"\"\" Generic script for monitoring counts from a counter \"\"\"\n\nimport numpy as np\nimport time\nimport pyqtgraph as pg\nfrom pylabnet.gui.pyqt.external_gui import Window\nfrom pylabnet.utils.logging.logger import LogClient\nfrom pylabnet.scripts.pause_script import PauseService\nfrom pylabnet.network.core.generic_server import GenericServer\nfrom pylabnet.network.client_server import si_tt\nfrom pylabnet.utils.helper_methods import load_script_config, get_ip, unpack_launcher, load_config, get_gui_widgets, get_legend_from_graphics_view, find_client, load_script_config\n\n\n# Static methods\n\n# def generate_widgets():\n# \"\"\"Static method to return systematically named gui widgets for 4ch wavemeter monitor\"\"\"\n\n# graphs, legends, numbers = [], [], []\n# for i in range(2):\n# graphs.append('graph_widget_' + str(i + 1))\n# legends.append('legend_widget_' + str(i + 1))\n# numbers.append('number_label_' + str(i + 1))\n# for i in range(2, 8):\n# numbers.append('number_label_' + str(i + 1))\n# return graphs, legends, numbers\n\n\nclass CountMonitor:\n\n # Generate all widget instances for the .ui to use\n # _plot_widgets, _legend_widgets, _number_widgets = generate_widgets()\n\n def __init__(self, ctr_client: si_tt.Client, ui='count_monitor', logger_client=None, server_port=None, combined_channel=False, config=None):\n \"\"\" Constructor for CountMonitor script\n\n :param ctr_client: instance of hardware client for counter\n :param gui_client: (optional) instance of client of desired output GUI\n :param logger_client: (obj) instance of logger client.\n :param server_port: (int) port number of script server\n :combined_channel: (bool) If true, show additional trace with summed counts.\n \"\"\"\n\n self._ctr = ctr_client\n self.log = logger_client\n self.combined_channel = combined_channel\n self._bin_width = None\n self._n_bins = None\n self._ch_list = None\n self._plot_list = None # List of channels to assign to each plot (e.g. [[1,2], [3,4]])\n self._plots_assigned = [] # List of plots on the GUI that have been assigned\n\n if self.combined_channel:\n ui = 'count_monitor_combined'\n else:\n ui = 'count_monitor'\n\n # Instantiate GUI window\n self.gui = Window(\n gui_template=ui,\n host=get_ip(),\n port=server_port,\n log=self.log\n )\n\n # Setup stylesheet.\n self.gui.apply_stylesheet()\n\n if self.combined_channel:\n num_plots = 3\n else:\n num_plots = 2\n\n # Get all GUI widgets\n self.widgets = get_gui_widgets(\n self.gui,\n graph_widget=num_plots,\n number_label=8,\n event_button=num_plots,\n legend_widget=num_plots\n )\n\n # Load config\n self.config = {}\n if config is not None:\n self.config = load_script_config(\n script='monitor_counts',\n config=config,\n logger=self.logger_client\n )\n\n if not 'name' in self.config:\n self.config.update({'name': f'monitor{np.random.randint(1000)}'})\n\n def set_hardware(self, ctr):\n \"\"\" Sets hardware client for this script\n\n :param ctr: instance of count monitor hardware client\n \"\"\"\n\n # Initialize counter instance\n self._ctr = ctr\n\n def set_params(self, bin_width=1e9, n_bins=1e4, ch_list=[1], plot_list=None):\n \"\"\" Sets counter parameters\n\n :param bin_width: bin width in ps\n :param n_bins: number of bins to display on graph\n :param ch_list: (list) channels to record\n :param plot_list: list of channels to assign to each plot (e.g. 
[[1,2], [3,4]])\n \"\"\"\n\n # Save params to internal variables\n self._bin_width = int(bin_width)\n self._n_bins = int(n_bins)\n self._ch_list = ch_list\n self._plot_list = plot_list\n\n def run(self):\n \"\"\" Runs the counter from scratch\"\"\"\n\n try:\n\n # Start the counter with desired parameters\n self._initialize_display()\n\n # Give time to initialize\n # time.sleep(0.05)\n self._is_running = True\n\n self._ctr.start_trace(\n name=self.config['name'],\n ch_list=self._ch_list,\n bin_width=self._bin_width,\n n_bins=self._n_bins\n )\n\n # Continuously update data until paused\n while self._is_running:\n self._update_output()\n self.gui.force_update()\n\n except Exception as exc_obj:\n self._is_running = False\n raise exc_obj\n\n def pause(self):\n \"\"\" Pauses the counter\"\"\"\n\n self._is_running = False\n\n def resume(self):\n \"\"\" Resumes the counter.\n\n To be used to resume after the counter has been paused.\n \"\"\"\n\n try:\n self._is_running = True\n\n # Clear counter and resume plotting\n self._ctr.clear_ctr(name=self.config['name'])\n while self._is_running:\n self._update_output()\n\n except Exception as exc_obj:\n self._is_running = False\n raise exc_obj\n\n # Technical methods\n\n def _initialize_display(self):\n \"\"\" Initializes the display (configures all plots) \"\"\"\n\n plot_index = 0\n for index in range(len(self.widgets['graph_widget'])):\n # Configure and return legend widgets\n self.widgets['legend_widget'][index] = get_legend_from_graphics_view(\n self.widgets['legend_widget'][index]\n )\n\n for color, channel in enumerate(self._ch_list):\n\n # Figure out which plot to assign to\n if self._plot_list is not None:\n for index, channel_set in enumerate(self._plot_list):\n if channel in channel_set:\n plot_index = index\n break\n\n # If we have not assigned this plot yet, assign it\n # if plot_index not in self._plots_assigned:\n # self.gui_handler.assign_plot(\n # plot_widget=self._plot_widgets[plot_index],\n # plot_label='Counter Monitor {}'.format(plot_index + 1),\n # legend_widget=self._legend_widgets[plot_index]\n # )\n # self._plots_assigned.append(plot_index)\n\n # Now assign this curve\n # self.gui_handler.assign_curve(\n # plot_label='Counter Monitor {}'.format(plot_index + 1),\n # curve_label='Channel {}'.format(channel),\n # error=True\n # )\n\n # Create a curve and store the widget in our dictionary\n self.widgets[f'curve_{channel}'] = self.widgets['graph_widget'][plot_index].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[color])\n )\n self.widgets['legend_widget'][plot_index].addItem(\n self.widgets[f'curve_{channel}'],\n ' - ' + f'Channel {channel}'\n )\n\n # Assign scalar\n # self.gui_handler.assign_label(\n # label_widget=self._number_widgets[channel - 1],\n # label_label='Channel {}'.format(channel)\n # )\n\n # Handle button pressing\n from functools import partial\n\n for plot_index, clear_button in enumerate(self.widgets['event_button']):\n clear_button.clicked.connect(partial(lambda plot_index: self._clear_plot(plot_index), plot_index=plot_index))\n\n if self.combined_channel:\n self.widgets['curve_combo'] = self.widgets['graph_widget'][index + 1].plot(\n pen=pg.mkPen(color=self.gui.COLOR_LIST[color + 1])\n )\n self.widgets['legend_widget'][index + 1].addItem(\n self.widgets['curve_combo'],\n ' - ' + 'Combined Counts'\n )\n\n def _clear_plot(self, plot_index):\n \"\"\" Clears the curves on a particular plot\n\n :param plot_index: (int) index of plot to clear\n \"\"\"\n\n # First, handle case where combined count channel is clears (very 
ugly).\n if self.combined_channel and plot_index == len(self._plot_list):\n channel = 'combo'\n # Set the curve to constant with last point for all entries\n self.widgets[f'curve_{channel}'].setData(\n np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]\n )\n else:\n # Find all curves in this plot\n for channel in self._plot_list[plot_index]:\n\n # Set the curve to constant with last point for all entries\n self.widgets[f'curve_{channel}'].setData(\n np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]\n )\n\n self._ctr.clear_ctr(name=self.config['name'])\n\n def _update_output(self):\n \"\"\" Updates the output to all current values\"\"\"\n\n # Update all active channels\n # x_axis = self._ctr.get_x_axis()/1e12\n\n counts = self._ctr.get_counts(name=self.config['name'])\n counts_per_sec = counts * (1e12 / self._bin_width)\n # noise = np.sqrt(counts)*(1e12/self._bin_width)\n # plot_index = 0\n\n summed_counts = np.sum(counts_per_sec, axis=0)\n\n for index, count_array in enumerate(counts_per_sec):\n\n # Figure out which plot to assign to\n channel = self._ch_list[index]\n # if self._plot_list is not None:\n # for index_plot, channel_set in enumerate(self._plot_list):\n # if channel in channel_set:\n # plot_index = index_plot\n # break\n\n # Update GUI data\n\n # self.gui_handler.set_curve_data(\n # data=count_array,\n # error=noise[index],\n # plot_label='Counter Monitor {}'.format(plot_index + 1),\n # curve_label='Channel {}'.format(channel)\n # )\n # self.gui_handler.set_label(\n # text='{:.4e}'.format(count_array[-1]),\n # label_label='Channel {}'.format(channel)\n # )\n\n self.widgets[f'curve_{channel}'].setData(count_array)\n self.widgets[f'number_label'][channel - 1].setText(str(count_array[-1]))\n\n if self.combined_channel:\n self.widgets['curve_combo'].setData(summed_counts)\n\n\ndef launch(**kwargs):\n \"\"\" Launches the count monitor script \"\"\"\n\n # logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n logger = kwargs['logger']\n clients = kwargs['clients']\n config = load_script_config(\n 'monitor_counts',\n kwargs['config'],\n logger\n )\n\n if config['combined_channel'] == 'True':\n combined_channel = True\n else:\n combined_channel = False\n # Instantiate CountMonitor\n try:\n monitor = CountMonitor(\n ctr_client=find_client(\n clients,\n config,\n client_type='si_tt',\n client_config='standard_ctr',\n logger=logger\n ),\n logger_client=logger,\n server_port=kwargs['server_port'],\n combined_channel=combined_channel\n )\n except KeyError:\n print('Please make sure the module names for required servers and GUIS are correct.')\n time.sleep(15)\n raise\n # except:\n # config = None\n # ch_list = [7, 8]\n # plot_list = [[7], [8]]\n\n # Instantiate Pause server\n # try:\n # pause_logger = LogClient(\n # host=loghost,\n # port=logport,\n # module_tag='count_monitor_pause_server'\n # )\n # except ConnectionRefusedError:\n # logger.warn('Could not connect Count Monitor Pause server to logger')\n\n # pause_service = PauseService()\n # pause_service.assign_module(module=monitor)\n # pause_service.assign_logger(logger=pause_logger)\n\n # timeout = 0\n # while timeout < 1000:\n # try:\n # port = np.random.randint(1, 9999)\n # pause_server = GenericServer(\n # host=get_ip(),\n # port=port,\n # service=pause_service)\n # pause_logger.update_data(data=dict(port=port))\n # timeout = 9999\n # except ConnectionRefusedError:\n # logger.warn(f'Failed to instantiate Count Monitor Pause server at port {port}')\n # timeout += 1\n # 
pause_server.start()\n\n # Set parameters\n monitor.set_params(**config['params'])\n\n # Run\n monitor.run()\n"
] | [
[
"numpy.sum",
"numpy.random.randint",
"numpy.ones"
]
] |
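Note on the row above: the only arithmetic in this count monitor is the rate conversion in _update_output — the counter reports raw counts per bin with the bin width given in picoseconds, so multiplying by 1e12 / bin_width yields counts per second, and summing over axis 0 gives the combined-channel trace. A small numpy sketch with dummy data (the array shape is an assumption):

import numpy as np

bin_width = 1e9                                    # bin width in ps (here 1 ms)
counts = np.random.randint(0, 50, size=(2, 10))    # dummy (channels, bins) data
counts_per_sec = counts * (1e12 / bin_width)       # per-channel count rate
summed_counts = np.sum(counts_per_sec, axis=0)     # combined trace across channels

# The GUI shows the latest value of each channel and of the combined trace
print(counts_per_sec[:, -1], summed_counts[-1])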
olmosUC3M/Inference-and-Learning-in-discrete-Bayesian-Networks | [
"12e08f2e3f34146638806212be54837cc22c0516"
] | [
"Notebooks/lib/Message_passing_BN.py"
] | [
"## Message passing over a discrete BN ##\n## Library created by Pablo Martínez Olmos, University Carlos III Madrid ##\n## [email protected] ##\n## Last modification 15/11/2016 ##\n\nimport numpy as np\n\n## Messages are stored in the logaritmic domain ##\n## Global constants (to control numerical issues)\n\ninf_log=100\t\t#To impose hard constraints (i.e. an observed variable)\nconstant_log=50\t\t#Used to improve stability in the Check Node (CN) operation\n\n\n## Function definitions\n\n\ndef create_var_node(ID,cardinality,neighbor_order,observed_value_index=-1):\n\n # Variable Nodes are defined by a dictionary with several fields\t\n var_node={}\n var_node['ID']=ID\n var_node['node_type']=0\t#type 0 refers to variable node, 1o to check nodes. \n var_node['cardinality']=cardinality\t\t#Num. of possible values the RV can take\n var_node['neighbor_order']=np.array(neighbor_order)\t#Ordered array of the neighbor's IDs (neighbors are CNs!)\n var_node['input_msgs']=[]\t#List to store input messages \n var_node['observed']=observed_value_index\t#-1 if the variable is not observed\n var_node['inner_factor']=np.zeros([cardinality,1])\t#Internal vector used to imposed hard messages when variable is observed\n \n #If variable is observed, then the inner_factor vector is log[0 0 ... 0 1 0 ...]\n if(observed_value_index!=-1):\n var_node['inner_factor']-=inf_log\n var_node['inner_factor'][observed_value_index]=inf_log\n \n #Initialize input msgs by filling with zeros\n \n for index,f in enumerate(var_node['neighbor_order']):\n var_node['input_msgs'].append(0)\n \n return var_node\n\ndef create_message(input_node,output_node,table):\n\n #Messages are defined by a dictionary with three keys: input node (sender node), output_node (receiver node), and table of values\n \n message={}\n message['input_node']=input_node\n message['output_node']=output_node\n message['table']=table\n \n return message\n \n\ndef create_factor_node(ID,neighbors,CPD):\n \n \n # Check Nodes are defined by a dictionary with several fields\t\n \n factor_node={}\n factor_node['ID']=ID\n factor_node['node_type']=1\n factor_node['input_msgs']=[]\n\n CPD=np.array(CPD)\n CPD=CPD.reshape(CPD.shape[0],)\t#Just to make sure that CPD is a np. array vector of dim. 
(n,)\n factor_node['CPD']=np.array(CPD)\t#CPD table associated to the factor\n\n factor_node['CPD_order']=np.zeros([len(neighbors),1]).astype(int) #Ordered array of the neighbor's IDs (neighbors are CNs!)\n factor_node['cardinalities']=np.zeros([len(neighbors),1]).astype(int) #Cardinalities of the neighbors\n \n\n #Initialize input msgs, CPD_order & cardinalities\n #Note that creating factor nodes requires variable nodes to be created first, as CN input messages \n #are initialized already to the inner_factor field of every neighbor variable node\n\n for index,node in enumerate(neighbors):\n card=node['cardinality']\n factor_node['input_msgs'].append(\n create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))\n factor_node['cardinalities'][index]=card\n factor_node['CPD_order'][index]=node['ID']\n \n return factor_node\n\n\ndef initialize_variable(var_node,observed_value_index=-1):\n\n #After running message passing, variable nodes store the incoming messages for future calculations\n #If we want to run again message passing in the same graph, we have to re-initialize both\n #variable nodes and check nodes.\n\n var_node['inner_factor']=np.zeros([var_node['cardinality'],1])\n var_node['observed']=observed_value_index\n\n if(observed_value_index!=-1):\n var_node['inner_factor']-=inf_log\n var_node['inner_factor'][observed_value_index]=inf_log\n \ndef initialize_factor_msgs(factor_node,neighbors):\n\n #After running message passing, variable nodes store the incoming messages for future calculations\n #If we want to run again message passing in the same graph, we have to re-initialize both\n #variable nodes and check nodes.\n\n factor_node['input_msgs']=[] \n \n for index,node in enumerate(neighbors):\n factor_node['input_msgs'].append(\n create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))\n \n\n #The next two routines are used to encode and decode positions to store CPD values in a\n #vector form. We use a tree-encoding determined by the order of variables and their cardinalities\n #See First Example Message Passing.ipynb for an illustration \n\ndef CPD_position_to_variable_index(position,v_card,CPD_size):\n\n #We use this function to find the encoding for each position of a CPD table\n #of CPD_size positions, where the cardinalities of the variables (in order) are given in v_card \n #This function returns the index value of each variable \n\n v_card=np.array(v_card) #To make sure we have a np.array\n\n var_index=np.zeros([v_card.shape[0],1]).astype(int)\n \n remaining=CPD_size\n for i,card in enumerate(v_card):\n remaining=remaining//card\n index_i=position//remaining\n position=position-index_i*(remaining)\n var_index[i]=index_i\n \n return var_index\n\ndef variable_index_to_CPD_position(var_index,v_card,CPD_size):\n\n #This function returns the encoded CPD position for a given configuration of the variables. 
\n #The CPD table is of size CPD_size, the cardinalities of the variables (in order) are given in v_card\n #and the value indexes (in order) of the variables are given in var_index\n\n var_index=np.array(var_index)\n v_card=np.array(v_card)\n \n position=0\n offset=CPD_size\n for i,card in enumerate(v_card):\n offset=offset//card\n position+=var_index[i]*offset\n return position\n\n\ndef update_var_to_factor(var_node):\n \n #Routine to update the output messages of a variable node (var_node)\n\n prod_table=np.zeros([var_node['cardinality'],1])\n\n #We first multiply all the input messages (sums in the log domain)\n for msg in var_node['input_msgs']:\n prod_table+=msg['table']\n\n #We also take into account the inner_factor of the variable_node. In\n #case it is observed, the output messages have to be consistent with the observation\n prod_table+=var_node['inner_factor']\n \n\n #For every output message, we have to substract from prod_table the message received \n #through the corresponding edge\n\n for msg in var_node['input_msgs']:\n\n if(var_node['observed']==-1):\n reply_table=prod_table-msg['table']\n else:\n reply_table=np.ones([var_node['cardinality'],1])*(-inf_log)\n reply_table[var_node['observed']]=inf_log\n\n #We limit the absolute value of the messages, to exp(inf_log)\n\n reply_table[reply_table>inf_log]=inf_log\n reply_table[reply_table<-inf_log]=-inf_log\n\n\t#The ouput message is stored in the corresponding neighbor\n factor_rx=msg['input_node']\n reply_msg=create_message(input_node=var_node,output_node=factor_rx,table=reply_table)\n \n #Short foor loop to save messages in factor_node in the corresponding order\n for index,v in enumerate(factor_rx['CPD_order']):\n if(v==var_node['ID']):\n factor_rx['input_msgs'][index]=reply_msg\n break\n\n\n\ndef compute_var_marginal(var_node):\n \n #Routine to compute the marginal pmf of a variable node (var_node)\n #Simply the product of all incoming msgs times the inner_factor\n\n marg_table=np.zeros([var_node['cardinality'],1])\n\n for msg in var_node['input_msgs']:\n marg_table+=msg['table']\n\n marg_table+=var_node['inner_factor']\n \n marg_table=np.exp(marg_table)\n marg_table/=sum(marg_table)\n\n \n return marg_table\n\n\ndef update_factor_to_var(factor_node):\n\n #Routine to update the output messages of a check node (var_node)\n #This is the most complicated in the library, as it involves marginalization\n #over each argument of the CPD function times the product of incoming messgaes\n\n \n output_tables=[]\n \n #Output message tables initialization \n for card in factor_node['cardinalities']:\n output_tables.append(np.zeros([card,1]))\n \n\n #With a single loop we go only once through every element of the CPD table\n #It is multiplied accordingly to input messages and the resulting terms are\n #added to the corresponding output tables\n \n for CPD_entry,CPD_val in enumerate(factor_node['CPD']):\n \n values=CPD_position_to_variable_index(\n position=CPD_entry,v_card=factor_node['cardinalities'],CPD_size=factor_node['CPD'].shape[0])\n \n\t#The CPD value is multiplied by all incoming input messages but one, \n\t#and the result is added to the ouput table\n\n\t#Since we have to marginalize, not all operations can be done in the log domain\n\t#To avoid numerical inestabilities when performing the operations, we substract a large exponent (constant log)\n\t#which is sum at the very end, when we move back to the log domain\n\n for index in range(factor_node['cardinalities'].shape[0]):\n \n aux=CPD_val\n for index2 in 
range(factor_node['cardinalities'].shape[0]):\n if(index2!=index):\n aux*=np.exp(factor_node['input_msgs'][index2]['table'][values[index2]]-constant_log)\n output_tables[index][values[index]]+=aux\n \n #Once the output tables have been computed, we create the output messages and store them in \n #the corresponding variable nodes\n \n for index,msg in enumerate(factor_node['input_msgs']):\n \n output=output_tables[index]\n output=np.log(output)+constant_log\n output[output>inf_log]=inf_log\n output[output<-inf_log]=-inf_log\n \n var_rx=msg['input_node']\n reply_msg=create_message(input_node=factor_node,output_node=var_rx,table=output)\n \n #Short foor loop to save messages in factor_node in the corresponding order\n for index2,f in enumerate(var_rx['neighbor_order']):\n if(f==factor_node['ID']):\n var_rx['input_msgs'][index2]=reply_msg\n break\n \n \n \ndef create_joint_node(ID,node_members,neighbor_order,observed_values_indexes=-1):\n\n #Routine to define a joint variable node. This is useful to eliminate cycles in\n #the factor graph and perform exact inference.\n\n #Note a routine to create a joint factor node that uses joint variable nodes\n #is not provided. The corresponding CPD of such factor nodes has to be computed\n #first and then create the joint node with the function create_factor_node\n\n #We do not consider the case that the joint variable node is partially observed \n #(e.g. one of the joined variable nodes is observed). We only consider the case\n #where the joint node is completely observed.\n\n #See Second Example Message Passing.ipynb for an example of how to define and \n #manage joint variable nodes.\n\n var_node={}\n var_node['ID']=ID\n var_node['node_type']=0\n var_node['input_msgs']=[]\n var_node['observed']=-1\n var_node['neighbor_order']=np.array(neighbor_order)\n \n card=1\n\n #Cardinality of joint node is the product of cardinalities\n for member in node_members:\n card*=member['cardinality']\n \n var_node['cardinality']=card\n \n var_node['inner_factor']=np.zeros([card,1])\n \n if(observed_values_indexes!=-1):\n var_node['observed']=variable_index_to_CPD_position(observed_values_indexes,var_node['values'],card)\n var_node['inner_factor']-=inf_log\n var_node['inner_factor'][var_node['observed']]=inf_log\n \n #Initialize input msgs\n \n for index,f in enumerate(var_node['neighbor_order']):\n var_node['input_msgs'].append(0) \n \n return var_node \n \n \n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.exp",
"numpy.log",
"numpy.array"
]
] |
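The two index-conversion helpers in `Message_passing_BN.py` implement a mixed-radix encoding between a flat CPD table position and one value index per variable. A self-contained restatement with a round-trip check (function names here are illustrative; the logic mirrors `CPD_position_to_variable_index` and `variable_index_to_CPD_position`):

```python
import numpy as np

def position_to_indices(position, cards):
    """Decode a flat CPD position into per-variable value indexes."""
    indices, remaining = [], int(np.prod(cards))
    for card in cards:
        remaining //= card
        indices.append(position // remaining)
        position %= remaining
    return indices

def indices_to_position(indices, cards):
    """Encode per-variable value indexes back into a flat CPD position."""
    position, offset = 0, int(np.prod(cards))
    for idx, card in zip(indices, cards):
        offset //= card
        position += idx * offset
    return position

cards = [2, 3, 2]  # variable cardinalities, in CPD order
for p in range(int(np.prod(cards))):
    assert indices_to_position(position_to_indices(p, cards), cards) == p
```

This tree-ordering is what lets `update_factor_to_var` walk the CPD vector once, decoding each entry's variable configuration on the fly.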
Alexsandruss/daal4py | [
"6e5a02d3fd46095585e618edba24fc258e8b0052"
] | [
"daal4py/sklearn/neighbors/_classification.py"
] | [
"#===============================================================================\n# Copyright 2020-2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n# daal4py KNN classification scikit-learn-compatible classes\n\nfrom ._base import NeighborsBase, KNeighborsMixin\nfrom ._base import parse_auto_method, prediction_algorithm\nfrom sklearn.base import ClassifierMixin as BaseClassifierMixin\nfrom .._utils import (\n getFPType,\n sklearn_check_version,\n get_patch_message,\n PatchingConditionsChain)\nfrom .._device_offload import support_usm_ndarray\nfrom sklearn.utils.validation import check_array\nimport numpy as np\nfrom scipy import sparse as sp\nimport logging\n\nif sklearn_check_version(\"0.22\"):\n from sklearn.neighbors._classification import KNeighborsClassifier as \\\n BaseKNeighborsClassifier\n from sklearn.neighbors._base import _check_weights\n from sklearn.utils.validation import _deprecate_positional_args\nelse:\n from sklearn.neighbors.classification import KNeighborsClassifier as \\\n BaseKNeighborsClassifier\n from sklearn.neighbors.base import _check_weights\n\n def _deprecate_positional_args(f):\n return f\n\n\ndef daal4py_classifier_predict(estimator, X, base_predict):\n if sklearn_check_version('1.0'):\n estimator._check_feature_names(X, reset=False)\n X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])\n daal_model = getattr(estimator, '_daal_model', None)\n n_features = getattr(estimator, 'n_features_in_', None)\n shape = getattr(X, 'shape', None)\n if n_features and shape and len(shape) > 1 and shape[1] != n_features:\n raise ValueError((f'X has {X.shape[1]} features, '\n f'but KNNClassifier is expecting '\n f'{n_features} features as input'))\n\n try:\n fptype = getFPType(X)\n except ValueError:\n fptype = None\n\n _patching_status = PatchingConditionsChain(\n \"sklearn.neighbors.KNeighborsClassifier.predict\")\n _dal_ready = _patching_status.and_conditions([\n (daal_model is not None, \"oneDAL model was not trained.\"),\n (fptype is not None, \"Unable to get dtype.\"),\n (not sp.issparse(X), \"X is sparse. 
Sparse input is not supported.\")])\n _patching_status.write_log()\n\n if _dal_ready:\n params = {\n 'method': 'defaultDense',\n 'k': estimator.n_neighbors,\n 'nClasses': len(estimator.classes_),\n 'voteWeights': 'voteUniform'\n if estimator.weights == 'uniform' else 'voteDistance',\n 'resultsToEvaluate': 'computeClassLabels',\n 'resultsToCompute': ''\n }\n\n method = parse_auto_method(\n estimator, estimator.algorithm, estimator.n_samples_fit_, n_features)\n predict_alg = prediction_algorithm(method, fptype, params)\n prediction_result = predict_alg.compute(X, daal_model)\n result = estimator.classes_.take(\n np.asarray(prediction_result.prediction.ravel(), dtype=np.intp))\n else:\n result = base_predict(estimator, X)\n\n return result\n\n\nif sklearn_check_version(\"0.24\"):\n class KNeighborsClassifier_(KNeighborsMixin, BaseClassifierMixin, NeighborsBase):\n @_deprecate_positional_args\n def __init__(self, n_neighbors=5, *,\n weights='uniform', algorithm='auto', leaf_size=30,\n p=2, metric='minkowski', metric_params=None, n_jobs=None,\n **kwargs):\n super().__init__(\n n_neighbors=n_neighbors,\n algorithm=algorithm,\n leaf_size=leaf_size, metric=metric, p=p,\n metric_params=metric_params,\n n_jobs=n_jobs, **kwargs)\n self.weights = \\\n weights if sklearn_check_version(\"1.0\") else _check_weights(weights)\nelif sklearn_check_version(\"0.22\"):\n from sklearn.neighbors._base import SupervisedIntegerMixin as \\\n BaseSupervisedIntegerMixin\n\n class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,\n BaseSupervisedIntegerMixin, BaseClassifierMixin):\n @_deprecate_positional_args\n def __init__(self, n_neighbors=5, *,\n weights='uniform', algorithm='auto', leaf_size=30,\n p=2, metric='minkowski', metric_params=None, n_jobs=None,\n **kwargs):\n super().__init__(\n n_neighbors=n_neighbors,\n algorithm=algorithm,\n leaf_size=leaf_size, metric=metric, p=p,\n metric_params=metric_params,\n n_jobs=n_jobs, **kwargs)\n self.weights = _check_weights(weights)\nelse:\n from sklearn.neighbors.base import SupervisedIntegerMixin as \\\n BaseSupervisedIntegerMixin\n\n class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,\n BaseSupervisedIntegerMixin, BaseClassifierMixin):\n @_deprecate_positional_args\n def __init__(self, n_neighbors=5, *,\n weights='uniform', algorithm='auto', leaf_size=30,\n p=2, metric='minkowski', metric_params=None, n_jobs=None,\n **kwargs):\n super().__init__(\n n_neighbors=n_neighbors,\n algorithm=algorithm,\n leaf_size=leaf_size, metric=metric, p=p,\n metric_params=metric_params,\n n_jobs=n_jobs, **kwargs)\n self.weights = _check_weights(weights)\n\n\nclass KNeighborsClassifier(KNeighborsClassifier_):\n @_deprecate_positional_args\n def __init__(self, n_neighbors=5, *,\n weights='uniform', algorithm='auto', leaf_size=30,\n p=2, metric='minkowski', metric_params=None, n_jobs=None,\n **kwargs):\n super().__init__(\n n_neighbors=n_neighbors,\n weights=weights,\n algorithm=algorithm,\n leaf_size=leaf_size, metric=metric, p=p,\n metric_params=metric_params,\n n_jobs=n_jobs, **kwargs)\n\n @support_usm_ndarray()\n def fit(self, X, y):\n return NeighborsBase._fit(self, X, y)\n\n @support_usm_ndarray()\n def predict(self, X):\n return daal4py_classifier_predict(self, X, BaseKNeighborsClassifier.predict)\n\n @support_usm_ndarray()\n def predict_proba(self, X):\n if sklearn_check_version('1.0'):\n self._check_feature_names(X, reset=False)\n return BaseKNeighborsClassifier.predict_proba(self, X)\n"
] | [
[
"scipy.sparse.issparse",
"sklearn.utils.validation.check_array",
"sklearn.neighbors.base._check_weights",
"sklearn.neighbors.classification.KNeighborsClassifier.predict_proba"
]
] |
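`daal4py_classifier_predict` follows a guard-then-fallback pattern: the accelerated path runs only when every precondition in the `PatchingConditionsChain` holds, otherwise the stock scikit-learn `predict` is called. A hedged sketch of that control flow (`fast_predict` is a hypothetical stand-in for the oneDAL prediction call, not a daal4py API):

```python
import numpy as np
from scipy import sparse as sp

def predict_with_fallback(estimator, X, base_predict, fast_predict):
    # Mirror the conditions checked before taking the accelerated path.
    dal_ready = (
        getattr(estimator, '_daal_model', None) is not None        # oneDAL model trained
        and getattr(X, 'dtype', None) in (np.float64, np.float32)  # supported dtype
        and not sp.issparse(X)                                     # dense input only
    )
    if dal_ready:
        return fast_predict(estimator, X)
    return base_predict(estimator, X)
```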
ZEROSNU/zer018 | [
"c469cf22fa1fdf731b02c79f296ee96d35dccb25"
] | [
"zer018_perception/lane_vision/src/homography.py"
] | [
"import cv2\nimport numpy as np\nimport time\n\n'''\nTEST FILE using 1000, 1000 output image.\nActual code will have an output image of 200,200, which also means a different homography\n'''\n\n#recalculated homography\n\n# homography_front = np.array([[3.12570133882145e-05, 0.000286172662353515, -0.680179732686621],\n# [0.000967963380750764,-0.00220708598330688,-0.733040431894039],\n# [9.31003590466217e-08,-7.28146482745869e-06,-0.00116847956395974]])\n\n# homography_left = np.array([[-0.000710128671370178, 6.65307627276203e-05, -0.0692689783742822],\n# [0.000516381003921171, -0.00181011134155597, -0.997595526929844],\n# [-2.51074118905076e-08, -6.83854860981181e-06, -0.000959883483255739]])\n\n# homography_right = np.array([[-0.000926831714971124,-7.57332958427531e-05,0.994215703860414],\n# [-0.000923137149283102,0.00327126641381199,0.107337667969103],\n# [-2.77833313194565e-07,1.03110471009649e-05,0.00115801865068319]])\n\n\n# Original\nhomography_front = np.array([[4.62227601649053e-05, 0.000243520884225642, -0.678748083960862],\n [0.000969465596108860, -0.00207033488113324, -0.734366621126640],\n [1.58512860546350e-07, -6.83048800828728e-06, -0.00119023476366804]])\n\nhomography_left = np.array([[-0.000759672412515488, 2.34075591542924e-05, -0.0699936817773495],\n [0.000483107853918350, -0.00189886717269873, -0.997544805245074],\n [-1.49265515027449e-07, -7.08702713960990e-06, -0.000910631508297557]])\n\nhomography_right = np.array([[-0.000908962187561903, -3.67579540055241e-05, 0.994837127281325],\n [-0.000886484342219692, 0.00317263543314027, 0.101420799019439],\n [-1.14460320494404e-07, 9.99234254412552e-06, 0.00111021419224332]])\n\n#LARGER RANGE OF VIEW\ntranslation = np.array([[1, 0, 0],[0,1,100],[0,0,1]])\n\ndef warp_image(image, homography):\n im_out = cv2.warpPerspective(image, np.matmul(translation,homography), (600, 800))\n # cv2.imshow('warped', im_out)\n # cv2.waitKey(0)\n #cv2.imshow('image', im_out)\n return im_out\n\ndef left_hom(image):\n im_out = cv2.warp\n\n# Create mask of front image. 
im_mask indicates black pixel area\ndef find_mask(image):\n black_range1 = np.array([0,0,0])\n im_mask = (cv2.inRange(image, black_range1, black_range1)).astype('bool')\n im_mask_inv = (1-im_mask).astype('bool')\n im_mask_inv = np.dstack((im_mask_inv, im_mask_inv, im_mask_inv))\n im_mask= np.dstack((im_mask, im_mask, im_mask))\n return im_mask_inv, im_mask\n\nif __name__ == \"__main__\":\n count = 0\n while True:\n img_front = cv2.imread('../collected_images/5/center/'+ str(count)+'.jpg')\n img_left = cv2.imread('../collected_images/5/left/'+ str(count)+'.jpg')\n img_right = cv2.imread('../collected_images/5/right/'+ str(count)+'.jpg')\n\n\n im_front = warp_image(img_front, homography_front).astype('uint8')\n im_left = warp_image(img_left, homography_left).astype('uint8')\n im_right = warp_image(img_right, homography_right).astype('uint8')\n\n init_time = time.time()\n im_side = im_left + im_right\n im_mask_inv, im_mask = find_mask(im_side)\n front_masked = np.multiply(im_front, im_mask).astype('uint8')\n side_masked = np.multiply(im_side, im_mask_inv).astype('uint8')\n print(\"Masking Time: \", time.time()-init_time)\n summed_image = front_masked + side_masked\n #Gaussian Blurring?\n #summed_image = cv2.GaussianBlur(summed_image, (5,5), 0)\n # cv2.imshow('front', front_masked)\n # cv2.imshow('left', im_left)\n # cv2.imshow('right', im_right)\n # cv2.imshow('front', im_front)\n cv2.imshow('summed', summed_image)\n\n cv2.imwrite('../collected_images/5/mosaic_full/'+str(count) + '.jpg', summed_image)\n #summed_image_cropped = summed_image[200:800, :500, :]\n print(\"Time elapsed: \", (time.time() - init_time))\n #cv2.imshow('summed cropped', summed_image_cropped)\n\n count +=1\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n\n\n"
] | [
[
"numpy.array",
"numpy.dstack",
"numpy.multiply",
"numpy.matmul"
]
] |
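`homography.py` composites the three warped views by masking: wherever the summed side image is black, pixels come from the warped front image. A compact sketch of that warp-and-paste step (matrix values and image shapes are placeholders; `np.where` stands in for the original's multiply-and-add of complementary masks but computes the same composite):

```python
import cv2
import numpy as np

def composite(front, side, H_front, out_size=(600, 800)):
    """Warp the front camera into the ground plane and fill side-image holes."""
    warped = cv2.warpPerspective(front, H_front, out_size)
    # inRange gives 255 exactly where the side mosaic is black (no data).
    hole = cv2.inRange(side, np.array([0, 0, 0]), np.array([0, 0, 0]))
    hole3 = np.dstack([hole.astype(bool)] * 3)
    return np.where(hole3, warped, side)
```

`warped` and `side` must share a shape, which holds here since all three views are warped onto the same 600x800 canvas.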
anonymous-iclr-2019/acai-iclr-2019 | [
"233058a8330e8162e199933ee22b8e5fcac22072"
] | [
"aae.py"
] | [
"# Copyright 2018\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#!/usr/bin/env python\n\"\"\"Adversarial autoencoder.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow as tf\nfrom lib import data, layers, train, utils, classifiers, eval\n\nFLAGS = flags.FLAGS\n\n\nclass AAE(train.AE):\n\n def model(self, latent, depth, scales, adversary_lr, disc_layer_sizes):\n x = tf.placeholder(tf.float32,\n [None, self.height, self.width, self.colors], 'x')\n l = tf.placeholder(tf.float32, [None, self.nclass], 'label')\n h = tf.placeholder(\n tf.float32,\n [None, self.height >> scales, self.width >> scales, latent], 'h')\n\n def encoder(x):\n return layers.encoder(x, scales, depth, latent, 'ae_enc')\n\n def decoder(h):\n return layers.decoder(h, scales, depth, self.colors, 'ae_dec')\n\n def discriminator(h):\n with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):\n h = tf.layers.flatten(h)\n for size in [int(s) for s in disc_layer_sizes.split(',')]:\n h = tf.layers.dense(h, size, tf.nn.leaky_relu)\n return tf.layers.dense(h, 1)\n\n encode = encoder(x)\n decode = decoder(h)\n ae = decoder(encode)\n loss_ae = tf.losses.mean_squared_error(x, ae)\n\n prior_samples = tf.random_normal(tf.shape(encode), dtype=encode.dtype)\n adversary_logit_latent = discriminator(encode)\n adversary_logit_prior = discriminator(prior_samples)\n adversary_loss_latents = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=adversary_logit_latent,\n labels=tf.zeros_like(adversary_logit_latent)))\n adversary_loss_prior = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=adversary_logit_prior,\n labels=tf.ones_like(adversary_logit_prior)))\n autoencoder_loss_latents = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=adversary_logit_latent,\n labels=tf.ones_like(adversary_logit_latent)))\n\n def _accuracy(logits, label):\n labels = tf.logical_and(label, tf.ones_like(logits, dtype=bool))\n correct = tf.equal(tf.greater(logits, 0), labels)\n return tf.reduce_mean(tf.to_float(correct))\n latent_accuracy = _accuracy(adversary_logit_latent, False)\n prior_accuracy = _accuracy(adversary_logit_prior, True)\n adversary_accuracy = (latent_accuracy + prior_accuracy)/2\n\n utils.HookReport.log_tensor(loss_ae, 'loss_ae')\n utils.HookReport.log_tensor(adversary_loss_latents, 'loss_adv_latent')\n utils.HookReport.log_tensor(adversary_loss_prior, 'loss_adv_prior')\n utils.HookReport.log_tensor(autoencoder_loss_latents, 'loss_ae_latent')\n utils.HookReport.log_tensor(adversary_accuracy, 'adversary_accuracy')\n\n xops = classifiers.single_layer_classifier(\n tf.stop_gradient(encode), l, self.nclass)\n xloss = tf.reduce_mean(xops.loss)\n utils.HookReport.log_tensor(xloss, 'classify_latent')\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n ae_vars = tf.global_variables('ae_')\n disc_vars = tf.global_variables('disc')\n xl_vars = 
tf.global_variables('single_layer_classifier')\n with tf.control_dependencies(update_ops):\n train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(\n loss_ae + autoencoder_loss_latents, var_list=ae_vars)\n train_disc = tf.train.AdamOptimizer(adversary_lr).minimize(\n adversary_loss_prior + adversary_loss_latents,\n var_list=disc_vars)\n train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(\n xloss, tf.train.get_global_step(), var_list=xl_vars)\n ops = train.AEOps(x, h, l, encode, decode, ae,\n tf.group(train_ae, train_disc, train_xl),\n classify_latent=xops.output)\n\n n_interpolations = 16\n n_images_per_interpolation = 16\n\n def gen_images():\n return self.make_sample_grid_and_save(\n ops, interpolation=n_interpolations,\n height=n_images_per_interpolation)\n\n recon, inter, slerp, samples = tf.py_func(\n gen_images, [], [tf.float32]*4)\n tf.summary.image('reconstruction', tf.expand_dims(recon, 0))\n tf.summary.image('interpolation', tf.expand_dims(inter, 0))\n tf.summary.image('slerp', tf.expand_dims(slerp, 0))\n tf.summary.image('samples', tf.expand_dims(samples, 0))\n\n if FLAGS.dataset == 'lines32':\n batched = (n_interpolations, 32, n_images_per_interpolation, 32, 1)\n batched_interp = tf.transpose(\n tf.reshape(inter, batched), [0, 2, 1, 3, 4])\n mean_distance, mean_smoothness = tf.py_func(\n eval.line_eval, [batched_interp], [tf.float32, tf.float32])\n tf.summary.scalar('mean_distance', mean_distance)\n tf.summary.scalar('mean_smoothness', mean_smoothness)\n\n return ops\n\n\ndef main(argv):\n del argv # Unused.\n batch = FLAGS.batch\n dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))\n scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))\n model = AAE(\n dataset,\n FLAGS.train_dir,\n latent=FLAGS.latent,\n depth=FLAGS.depth,\n scales=scales,\n adversary_lr=FLAGS.adversary_lr,\n disc_layer_sizes=FLAGS.disc_layer_sizes)\n model.train()\n\n\nif __name__ == '__main__':\n flags.DEFINE_integer('depth', 64, 'Depth of first for convolution.')\n flags.DEFINE_integer(\n 'latent', 16,\n 'Latent space depth, the total latent size is the depth multiplied by '\n 'latent_width ** 2.')\n flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')\n flags.DEFINE_float('adversary_lr', 1e-4,\n 'Learning rate for discriminator.')\n flags.DEFINE_string('disc_layer_sizes', '100,100',\n 'Comma-separated list of discriminator layer sizes.')\n app.run(main)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.train.get_global_step",
"tensorflow.greater",
"tensorflow.layers.flatten",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.get_collection",
"tensorflow.to_float",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.global_variables",
"tensorflow.losses.mean_squared_error",
"tensorflow.group",
"tensorflow.layers.dense",
"tensorflow.control_dependencies",
"tensorflow.py_func",
"tensorflow.placeholder",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
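All three adversarial terms in `AAE.model` are sigmoid cross-entropies on discriminator logits: the discriminator is trained toward 1 on prior samples and 0 on encodings, while the autoencoder is trained toward 1 on its own encodings. A numpy restatement of that objective using the numerically stable form of `sigmoid_cross_entropy_with_logits` (toy logits, not the TF graph):

```python
import numpy as np

def sigmoid_xent(logits, labels):
    # Stable form: max(x, 0) - x*z + log(1 + exp(-|x|))
    x = np.asarray(logits, dtype=float)
    return np.maximum(x, 0) - x * labels + np.log1p(np.exp(-np.abs(x)))

z_logits = np.array([0.3, -1.2])  # discriminator scores on encoder outputs
p_logits = np.array([0.8, 2.0])   # discriminator scores on prior samples

disc_loss = sigmoid_xent(z_logits, 0.0).mean() + sigmoid_xent(p_logits, 1.0).mean()
ae_adv_loss = sigmoid_xent(z_logits, 1.0).mean()  # encoder tries to fool the discriminator
```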
harris-2374/THEx | [
"04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a"
] | [
"src/thexb/STAGE_topobinner.py"
] | [
"\"\"\"\nAuthor: Andrew Harris\nPython 3.8\n\"\"\"\nimport logging\nimport os\n\nimport pandas as pd\nfrom ete3 import Tree\nfrom tqdm import tqdm\n############################### Set up logger #################################\ndef set_logger_level(WORKING_DIR, LOG_LEVEL):\n logger = logging.getLogger(__name__)\n # Remove existing log file if present\n if os.path.exists(WORKING_DIR / 'logs/topobin.log'):\n os.remove(WORKING_DIR / 'logs/topobin.log')\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler(WORKING_DIR / 'logs/topobin.log')\n file_handler.setFormatter(formatter)\n stream_handler = logging.StreamHandler()\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n logger.setLevel(LOG_LEVEL)\n return logger\n\n############################## Helper Functions ###############################\ndef remove_heterotachy_info(l):\n \"\"\"Remove any information in bracketsete3 \n does not support this format of newick\"\"\"\n if (\"[\" not in l) and (\"]\" not in l):\n return l\n open_brackets = [i for i, x in enumerate(l) if x == \"[\"]\n close_brackets = [i for i, x in enumerate(l) if x == \"]\"]\n final_string = f'{l[:open_brackets[0]]}'\n for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):\n final_string += l[cb+1:ob]\n final_string += l[close_brackets[-1]+1:]\n return final_string\n\ndef tv_header_validation(df):\n \"\"\"Return False if first four required column headers are not valid\"\"\"\n required_cols = list(df.columns[:4])\n try:\n assert required_cols == [\"Chromosome\", \"Window\", \"NewickTree\", \"TopologyID\"]\n return True\n except AssertionError:\n return False\n\n############################### Main Function ################################\ndef topobinner(TREEVIEWER_FN, UPDATED_TV_FILENAME, TOPOBIN_ROOTED, WORKING_DIR, MULTIPROCESS, LOG_LEVEL):\n logger = set_logger_level(WORKING_DIR, LOG_LEVEL) # Setup log file level\n \n # Load in Tree Viewer excel file\n df = pd.read_excel(TREEVIEWER_FN, engine='openpyxl')\n df = df.reset_index(drop=True)\n # Validate headers\n header_check = tv_header_validation(df)\n if not header_check:\n raise AssertionError(\"Input file headers are not valid, please ensure required headers are correct.\")\n df['TopologyID'] = ['NULL']*len(df)\n trees = df['NewickTree']\n topologies = dict()\n logger.info(f\"{len(trees):,} trees to run\")\n # Set root boolean value\n if TOPOBIN_ROOTED == \"Y\":\n TOPOBIN_ROOTED = False\n else:\n TOPOBIN_ROOTED = True\n # Bin Trees\n tqdm_text = \"#\" + \"{}\".format(\"run1\").zfill(3)\n with tqdm(total=len(trees), desc=tqdm_text, ascii=True) as pbar:\n for n, t in enumerate(trees):\n # Check to see if tree is NoTree\n if t == \"NoTree\":\n pbar.update(1)\n continue\n # Set first tree in collection dictionary +\n # move to next tree \n if len(topologies.keys()) == 0:\n topologies[n] = {'count': 1, 'idx': [n]}\n pbar.update(1)\n continue\n else:\n # Iterate through topology list\n # add new topology if no rf == 0\n # increase count if rf == 0 with topology \n new_topology = True\n for idx in topologies.keys():\n if df.at[idx, 'NewickTree'] == \"NoTree\":\n continue\n t1 = Tree(remove_heterotachy_info(t))\n t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))\n comparison = t1.compare(t2, unrooted=TOPOBIN_ROOTED)\n rf = comparison['rf']\n if rf == 0:\n topologies[idx]['count'] += 1\n topologies[idx]['idx'].append(n)\n new_topology = False\n break\n else:\n continue\n if new_topology:\n topologies[n] = {'count': 1, 'idx': [n]}\n 
pbar.update(1)\n continue\n else:\n pbar.update(1)\n continue\n # Sort topologies dictionary by 'count'\n topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}\n num_topologies = len(topologies.keys())\n # Set zfill number\n if num_topologies < 100:\n zfillnum = 3\n elif 100 < num_topologies < 1000:\n zfillnum = 4\n else:\n zfillnum = 5\n # Update DataFrame TopologyID column with results\n overview_df = pd.DataFrame(\n {\n \"TopologyID\": [(\"Tree\" + \"{}\".format(str(i)).zfill(zfillnum)) for i in range(1, len(topologies.keys())+1)],\n \"Count\": [topologies[i][\"count\"] for i in topologies.keys()],\n \"Rank\": [i for i in range(1, len(topologies.keys())+1)],\n }\n )\n topoCount = 1\n for topo in topologies.keys():\n idx = topologies[topo]['idx']\n topoName = \"Tree\" + \"{}\".format(topoCount).zfill(zfillnum)\n for i in idx:\n df.at[i, 'TopologyID'] = topoName\n continue\n topoCount += 1\n # Output updated Tree Viewer file\n df.to_excel(UPDATED_TV_FILENAME, index=False, engine='openpyxl')\n logger.info(f\"{overview_df}\")\n return\n\n\n"
] | [
[
"pandas.read_excel"
]
] |
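`topobinner` groups windows whose trees have Robinson-Foulds distance zero, using the first member of each group as its representative; note that the `TOPOBIN_ROOTED == "Y"` branch inverts the flag because `Tree.compare` takes `unrooted=`. A toy sketch of the binning rule with ete3:

```python
from ete3 import Tree

newicks = ["((A,B),(C,D));", "((A,C),(B,D));", "((B,A),(D,C));"]
bins = {}  # representative index -> member indices
for i, nwk in enumerate(newicks):
    t = Tree(nwk)
    for rep in bins:
        if t.compare(Tree(newicks[rep]), unrooted=True)['rf'] == 0:
            bins[rep].append(i)  # same topology as an existing bin
            break
    else:
        bins[i] = [i]            # first tree with a new topology

print(bins)  # {0: [0, 2], 1: [1]}
```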
ablifedev/ABLIRC | [
"875278b748a8e22ada2c76c3c76dbf970be4a6a4"
] | [
"ABLIRC/bin/Basic/Distance2XXX/reads_or_peaks_distribution_relative2xxx.py"
] | [
"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n####################################################################################\n### Copyright (C) 2015-2019 by ABLIFE\n####################################################################################\n\n\n\n\n\n####################################################################################\n\n####################################################################################\n# Date Version Author ChangeLog\n\n\n#\n#\n#\n#####################################################################################\n\n\"\"\"\n程序功能说明:\n1.统计reads or peaks 相对于TTS,TSS,STARTCODON,STOPCODON的分布\n程序设计思路:\n利用gffutils和HTSeq包进行统计\n\"\"\"\n\n\nimport re, os, sys, logging, time, datetime\nfrom optparse import OptionParser, OptionGroup\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport subprocess\nimport threading\nimport gffutils\nimport HTSeq\nimport numpy\nimport multiprocessing\nfrom matplotlib import pyplot\n\nsys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + \"/../../\")\nfrom ablib.utils.tools import *\nfrom ablib.utils.distribution import *\n\n\n\nif sys.version_info < (2, 7):\n print(\"Python Version error: please use phthon2.7\")\n sys.exit(-1)\n\n\n_version = 'v0.1'\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\ndef configOpt():\n \"\"\"Init for option\n \"\"\"\n usage = 'Usage: %prog [option] [-h]'\n p = OptionParser(usage)\n ##basic options\n p.add_option(\n '-g', '--gff', dest='gff', action='store',\n type='string', help='gff file,do not have to provide it if db is exited')\n p.add_option(\n '-d', '--db', dest='db', default='gffdb', action='store',\n type='string', help='the gff database file to create or use')\n p.add_option(\n '-b', '--bamorbed', dest='bamorbed', action='store',\n type='string', help='bam or bed file, Important: the bamfile\\'s suffix must be \".bam\"')\n p.add_option(\n '-w', '--halfwinwidth', dest='halfwinwidth', default=1000, action='store',\n type='int', help='halfwinwidth,default is 1000')\n p.add_option(\n '-p', '--postype', dest='postype', action='store',\n type='string', help='gene position type:tss,tts,startcodon,stopcodon,intronstart,intronend')\n p.add_option(\n '-o', '--outfile', dest='outfile', default=\"distance2xxx_reads_density.txt\", action='store',\n type='string', help='gene expression file')\n p.add_option(\n '-n', '--samplename', dest='samplename', default='', action='store',\n type='string', help='sample name,default is \"\"')\n\n group = OptionGroup(p, \"Preset options\")\n ##preset options\n group.add_option(\n '-O', '--outDir', dest='outDir', default='./', action='store',\n type='string', help='output directory', metavar=\"DIR\")\n group.add_option(\n '-L', '--logDir', dest='logDir', default='', action='store',\n type='string', help='log dir ,default is same as outDir')\n group.add_option(\n '-P', '--logPrefix', dest='logPrefix', default='', action='store',\n type='string', help='log file prefix')\n group.add_option(\n '-E', '--email', dest='email', default='none', action='store',\n type='string', help='email address, if you want get a email when this job is finished,default is no email',\n metavar=\"EMAIL\")\n group.add_option(\n '-Q', '--quiet', dest='quiet', default=True, action='store_true',\n help='do not print messages to stdout')\n group.add_option(\n '-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',\n 
help='keep temp dir')\n group.add_option(\n '-T', '--test', dest='isTest', default=False, action='store_true',\n help='run this program for test')\n p.add_option_group(group)\n if len(sys.argv) == 1:\n p.print_help()\n sys.exit(1)\n opt, args = p.parse_args()\n return (p, opt, args)\n\n\ndef listToString(x):\n \"\"\"获得完整的命令\n \"\"\"\n rVal = ''\n for a in x:\n rVal += a + ' '\n return rVal\n\n\n\nopt_parser, opt, args = configOpt()\n\nif not opt.postype:\n opt_parser.error('Option -p must be assigned.\\n')\n\nif opt.logDir == \"\":\n opt.logDir = opt.outDir + '/log/'\n\nsample = \"\"\nif opt.samplename != \"\":\n sample = opt.samplename + '_'\n\nif opt.outfile == 'distance2xxx_reads_density.txt':\n opt.outfile = sample + 'distance2' + opt.postype + '_reads_density.txt'\n\n\n\nintype = \"bam\"\nmatch = re.search(r'\\.bam$', opt.bamorbed)\nif not match:\n intype = \"bed\"\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\nscriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path\nbinPath = \"/\".join(scriptPath.split(\"/\")[0:-2]) # absolute bin path\n\noutPath = os.path.abspath(opt.outDir) # absolute output path\n#os.mkdir(outPath) if not os.path.isdir(outPath) else None\nos.system('mkdir -p ' + outPath)\nlogPath = os.path.abspath(opt.logDir)\n#os.mkdir(logPath) if not os.path.isdir(logPath) else None\nos.system('mkdir -p ' + logPath)\ntempPath = outPath + '/temp/' # absolute bin path\n# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None\nresultPath = outPath + '/result/'\n\n\n# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None\n\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\ndef initLogging(logFilename):\n \"\"\"Init for logging\n \"\"\"\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(asctime)s : %(levelname)s] %(message)s',\n datefmt='%y-%m-%d %H:%M',\n filename=logFilename,\n filemode='w')\n if not opt.quiet:\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')\n # tell the handler to use this format\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\ndt = datetime.datetime.now()\nlogFile = logPath + '/' + opt.logPrefix + 'log.' 
+ str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'\ninitLogging(logFile)\nlogging.debug(sys.modules[__name__].__doc__)\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\nlogging.debug('Program version: %s' % _version)\nlogging.debug('Start the program with [%s]\\n', listToString(sys.argv))\nstartTime = datetime.datetime.now()\nlogging.debug(\"计时器:Program start at %s\" % startTime)\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n### S\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n### E\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\ndef main():\n print(\"Main procedure start...\")\n\n\n\n if opt.gff:\n db = gffutils.create_db(opt.gff, opt.db, merge_strategy=\"create_unique\", verbose=False, force=True)\n\n db = gffutils.FeatureDB(opt.db)\n\n\n Watcher()\n pool = multiprocessing.Pool(processes=15)\n server = multiprocessing.Manager()\n dis = server.dict()\n\n for chr in db.seqids():\n # if chr != \"chr1\":\n # continue\n if intype == \"bam\":\n chr_dict = readBamHeader(opt.bamorbed)\n if not chr in chr_dict:\n continue\n # print(chr)\n dis[chr] = [0 for x in range(2 * opt.halfwinwidth)]\n pool.apply_async(distributionToOnePointByChr,\n args=(chr, opt.bamorbed, opt.db, opt.outfile, opt.postype, opt.halfwinwidth, dis))\n pool.close()\n pool.join()\n\n d = dict(dis).copy()\n server.shutdown()\n\n profile = numpy.zeros(2 * opt.halfwinwidth, dtype='i')\n for chr in sorted(d.keys()):\n wincvg = numpy.fromiter(d[chr], dtype='i', count=2 * opt.halfwinwidth)\n profile += wincvg\n # pyplot.plot( numpy.arange( -opt.halfwinwidth, opt.halfwinwidth ), profile )\n # pyplot.show()\n\n os.chdir(opt.outDir)\n fout = open(opt.outfile, 'w')\n fout.writelines(\n \"+distance\\tdensity\\n\")\n\n n = 0\n for i in range(-opt.halfwinwidth, opt.halfwinwidth):\n fout.writelines(str(i) + '\\t' + str(profile[n]) + '\\n')\n n += 1\n fout.close()\n\n #cmd = \"cd \" + outPath + \"&& R --slave < /users/ablife/ablife-R/Line_single_ggplot2.r --args \" + opt.outfile + \" \" + sample + 'distance2' + opt.postype + '_reads_density ./ \\n'\n cmd = \"cd \" + outPath + \"&& Rscript \" + binPath + \"/plot/Line_single_ggplot2.r -f \" + opt.outfile + \" -t \" + sample + 'distance2' + opt.postype + '_reads_density -n ' + sample + 'distance2' + opt.postype + '_reads_density -o ./'\n os.system(cmd)\n\n\n\n\n\n\n\n\nif 
__name__ == '__main__':\n\n main()\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\nif not opt.keepTemp:\n os.system('rm -rf ' + tempPath)\n logging.debug(\"Temp folder is deleted..\")\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\nlogging.debug(\"Program ended\")\ncurrentTime = datetime.datetime.now()\nrunningTime = (currentTime - startTime).seconds # in seconds\nlogging.debug(\"计时器:Program start at %s\" % startTime)\nlogging.debug(\"计时器:Program end at %s\" % currentTime)\nlogging.debug(\"计时器:Program ran %.2d:%.2d:%.2d\" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\nif opt.email != \"none\":\n run_cmd = listToString(sys.argv)\n sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)\n logging.info(\"发送邮件通知到 %s\" % opt.email)\n\n\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n\n\n\n"
] | [
[
"numpy.fromiter",
"numpy.zeros"
]
] |
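`main()` in this script parallelizes over chromosomes, then folds each chromosome's window-relative count vector into one genome-wide profile before writing the distance/density table. A toy numpy sketch of that reduction step (values are fabricated):

```python
import numpy as np

halfwinwidth = 3
per_chr = {
    'chr1': [0, 2, 5, 9, 4, 1],  # one count per offset in [-3, 3)
    'chr2': [1, 1, 3, 4, 2, 0],
}

profile = np.zeros(2 * halfwinwidth, dtype='i')
for chrom in sorted(per_chr):
    profile += np.fromiter(per_chr[chrom], dtype='i', count=2 * halfwinwidth)

for offset, density in zip(range(-halfwinwidth, halfwinwidth), profile):
    print(f"{offset}\t{density}")
```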
drothlis/tensorflow | [
"04c318b69c5b565436cfeeaab1cb7fd5419dde27"
] | [
"tensorflow/contrib/layers/python/layers/layers.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=g-short-docstring-punctuation\n\"\"\"Higher level ops for building layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport six\n\nfrom tensorflow.contrib.framework.python.ops import add_arg_scope\nfrom tensorflow.contrib.framework.python.ops import variables\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.layers import base\nfrom tensorflow.python.layers import convolutional as convolutional_layers\nfrom tensorflow.python.layers import core as core_layers\nfrom tensorflow.python.layers import normalization as normalization_layers\nfrom tensorflow.python.layers import pooling as pooling_layers\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import standard_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.layers.maxout import maxout\n\n# TODO(b/28426988): Replace legacy_* fns migrated from slim.\n# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.\n__all__ = ['avg_pool2d',\n 'avg_pool3d',\n 'batch_norm',\n 'bias_add',\n 'conv2d',\n 'conv3d',\n 'conv2d_in_plane',\n 'conv2d_transpose',\n 'conv3d_transpose',\n 'convolution',\n 'convolution2d',\n 'convolution2d_in_plane',\n 'convolution2d_transpose',\n 'convolution3d',\n 'convolution3d_transpose',\n 'dropout',\n 'elu',\n 'flatten',\n 'fully_connected',\n 'GDN',\n 'gdn',\n 'layer_norm',\n 'linear',\n 'pool',\n 'max_pool2d',\n 'max_pool3d',\n 'one_hot_encoding',\n 'relu',\n 'relu6',\n 'repeat',\n 'scale_gradient',\n 'separable_conv2d',\n 'separable_convolution2d',\n 'softmax',\n 'spatial_softmax',\n 'stack',\n 'unit_norm',\n 'legacy_fully_connected',\n 'legacy_linear',\n 'legacy_relu',\n 'maxout']\n\nDATA_FORMAT_NCHW = 'NCHW'\nDATA_FORMAT_NHWC = 'NHWC'\nDATA_FORMAT_NCDHW = 'NCDHW'\nDATA_FORMAT_NDHWC = 'NDHWC'\n_FUSED_DEFAULT = os.getenv('TF_DEFAULT_USES_FUSED_BATCH_NORM',\n '').lower() in ('true', 't', '1')\n\n\n@add_arg_scope\ndef avg_pool2d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n 
data_format=DATA_FORMAT_NHWC,\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 2D average pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if\n `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if\n `data_format` is `NCHW`.\n kernel_size: A list of length 2: [kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n \"\"\"\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = pooling_layers.AveragePooling2D(pool_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n _scope=sc)\n outputs = layer.apply(inputs)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef avg_pool3d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n data_format=DATA_FORMAT_NDHWC,\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 3D average pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]` if\n `data_format` is `NDHWC`, and `[batch_size, channels, depth, height, width]` if\n `data_format` is `NCDHW`.\n kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 3: [stride_depth, stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n data_format: A string. 
`NDHWC` (default) and `NCDHW` are supported.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.\n \"\"\"\n if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):\n raise ValueError('data_format has to be either NCDHW or NDHWC.')\n with ops.name_scope(scope, 'AvgPool3D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = pooling_layers.AveragePooling3D(pool_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n _scope=sc)\n outputs = layer.apply(inputs)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\ndef _fused_batch_norm(\n inputs,\n decay=0.999,\n center=True,\n scale=False,\n epsilon=0.001,\n activation_fn=None,\n param_initializers=None,\n updates_collections=ops.GraphKeys.UPDATE_OPS,\n is_training=True,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n data_format=DATA_FORMAT_NHWC,\n zero_debias_moving_mean=False,\n scope=None):\n \"\"\"Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.\n\n \"Batch Normalization: Accelerating Deep Network Training by Reducing\n Internal Covariate Shift\"\n\n Sergey Ioffe, Christian Szegedy\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Note: when training, the moving_mean and moving_variance need to be updated.\n By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they\n need to be added as a dependency to the `train_op`. For example:\n\n ```python\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss)\n ```\n\n One can set updates_collections=None to force the updates in place, but that\n can have a speed penalty, especially in distributed settings.\n\n Args:\n inputs: A tensor with 2 or more dimensions, where the first dimension has\n `batch_size`. The normalization is over all but the last dimension if\n `data_format` is `NHWC` and the second dimension if `data_format` is\n `NCHW`.\n decay: Decay for the moving average. Reasonable values for `decay` are close\n to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.\n Lower `decay` value (recommend trying `decay`=0.9) if model experiences\n reasonably good training performance but poor validation and/or test\n performance.\n center: If True, add offset of `beta` to normalized tensor. If False,\n `beta` is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling can be done by the next layer.\n epsilon: Small float added to variance to avoid dividing by zero.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n param_initializers: Optional initializers for beta, gamma, moving mean and\n moving variance.\n updates_collections: Collections to collect the update ops for computation.\n The updates_ops need to be executed with the train_op.\n If None, a control dependency would be added to make sure the updates are\n computed in place.\n is_training: Whether or not the layer is in training mode. 
In training mode\n it would accumulate the statistics of the moments into `moving_mean` and\n `moving_variance` using an exponential moving average with the given\n `decay`. When it is not in training mode then it would use the values of\n the `moving_mean` and the `moving_variance`.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional collections for the variables.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n zero_debias_moving_mean: Use zero_debias for moving_mean.\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If the rank of `inputs` is undefined.\n ValueError: If the rank of `inputs` is neither 2 or 4.\n ValueError: If rank or `C` dimension of `inputs` is undefined.\n \"\"\"\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n with variable_scope.variable_scope(\n scope, 'BatchNorm', [inputs], reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n original_shape = inputs.get_shape()\n original_inputs = inputs\n original_rank = original_shape.ndims\n if original_rank is None:\n raise ValueError('Inputs %s has undefined rank' % inputs.name)\n elif original_rank not in [2, 4]:\n raise ValueError('Inputs %s has unsupported rank.'\n ' Expected 2 or 4 but got %d' % (\n inputs.name, original_rank))\n if original_rank == 2:\n channels = inputs.get_shape()[-1].value\n if channels is None:\n raise ValueError('`C` dimension must be known but is None')\n new_shape = [-1, 1, 1, channels]\n if data_format == DATA_FORMAT_NCHW:\n new_shape = [-1, channels, 1, 1]\n inputs = array_ops.reshape(inputs, new_shape)\n inputs_shape = inputs.get_shape()\n dtype = inputs.dtype.base_dtype\n if data_format == DATA_FORMAT_NHWC:\n params_shape = inputs_shape[-1:]\n else:\n params_shape = inputs_shape[1:2]\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined `C` dimension %s.' 
%\n (inputs.name, params_shape))\n\n # Allocate parameters for the beta and gamma of the normalization.\n trainable_beta = trainable and center\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n if not param_initializers:\n param_initializers = {}\n if center:\n beta_initializer = param_initializers.get('beta',\n init_ops.zeros_initializer())\n beta = variables.model_variable(\n 'beta',\n shape=params_shape,\n dtype=dtype,\n initializer=beta_initializer,\n collections=beta_collections,\n trainable=trainable_beta)\n else:\n beta = array_ops.constant(0.0, shape=params_shape)\n\n if scale:\n gamma_collections = utils.get_variable_collections(\n variables_collections, 'gamma')\n gamma_initializer = param_initializers.get('gamma',\n init_ops.ones_initializer())\n gamma = variables.model_variable(\n 'gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=gamma_initializer,\n collections=gamma_collections,\n trainable=trainable)\n else:\n gamma = array_ops.constant(1.0, shape=params_shape)\n\n # Create moving_mean and moving_variance variables and add them to the\n # appropriate collections.\n moving_mean_collections = utils.get_variable_collections(\n variables_collections, 'moving_mean')\n moving_mean_initializer = param_initializers.get(\n 'moving_mean', init_ops.zeros_initializer())\n moving_mean = variables.model_variable(\n 'moving_mean',\n shape=params_shape,\n dtype=dtype,\n initializer=moving_mean_initializer,\n trainable=False,\n collections=moving_mean_collections)\n moving_variance_collections = utils.get_variable_collections(\n variables_collections, 'moving_variance')\n moving_variance_initializer = param_initializers.get(\n 'moving_variance', init_ops.ones_initializer())\n moving_variance = variables.model_variable(\n 'moving_variance',\n shape=params_shape,\n dtype=dtype,\n initializer=moving_variance_initializer,\n trainable=False,\n collections=moving_variance_collections)\n\n def _fused_batch_norm_training():\n return nn.fused_batch_norm(\n inputs, gamma, beta, epsilon=epsilon, data_format=data_format)\n def _fused_batch_norm_inference():\n return nn.fused_batch_norm(\n inputs,\n gamma,\n beta,\n mean=moving_mean,\n variance=moving_variance,\n epsilon=epsilon,\n is_training=False,\n data_format=data_format)\n outputs, mean, variance = utils.smart_cond(is_training,\n _fused_batch_norm_training,\n _fused_batch_norm_inference)\n\n # If `is_training` doesn't have a constant value, because it is a `Tensor`,\n # a `Variable` or `Placeholder` then is_training_value will be None and\n # `need_updates` will be true.\n is_training_value = utils.constant_value(is_training)\n need_updates = is_training_value is None or is_training_value\n if need_updates:\n if updates_collections is None:\n no_updates = lambda: outputs\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n update_moving_mean = moving_averages.assign_moving_average(\n moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay, zero_debias=False)\n with ops.control_dependencies(\n [update_moving_mean, update_moving_variance]):\n return array_ops.identity(outputs)\n outputs = utils.smart_cond(is_training, _force_updates, no_updates)\n else:\n moving_vars_fn = lambda: (moving_mean, moving_variance)\n def _delay_updates():\n \"\"\"Internal function that delay updates moving_vars if is_training.\"\"\"\n update_moving_mean = 
moving_averages.assign_moving_average(\n moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay, zero_debias=False)\n return update_moving_mean, update_moving_variance\n update_mean, update_variance = utils.smart_cond(is_training,\n _delay_updates,\n moving_vars_fn)\n ops.add_to_collections(updates_collections, update_mean)\n ops.add_to_collections(updates_collections, update_variance)\n\n outputs.set_shape(inputs_shape)\n if original_shape.ndims == 2:\n outputs = array_ops.reshape(outputs, array_ops.shape(original_inputs))\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef batch_norm(inputs,\n decay=0.999,\n center=True,\n scale=False,\n epsilon=0.001,\n activation_fn=None,\n param_initializers=None,\n param_regularizers=None,\n updates_collections=ops.GraphKeys.UPDATE_OPS,\n is_training=True,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n batch_weights=None,\n fused=None,\n data_format=DATA_FORMAT_NHWC,\n zero_debias_moving_mean=False,\n scope=None,\n renorm=False,\n renorm_clipping=None,\n renorm_decay=0.99):\n \"\"\"Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.\n\n \"Batch Normalization: Accelerating Deep Network Training by Reducing\n Internal Covariate Shift\"\n\n Sergey Ioffe, Christian Szegedy\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Note: when training, the moving_mean and moving_variance need to be updated.\n By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they\n need to be added as a dependency to the `train_op`. For example:\n\n ```python\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss)\n ```\n\n One can set updates_collections=None to force the updates in place, but that\n can have a speed penalty, especially in distributed settings.\n\n Args:\n inputs: A tensor with 2 or more dimensions, where the first dimension has\n `batch_size`. The normalization is over all but the last dimension if\n `data_format` is `NHWC` and the second dimension if `data_format` is\n `NCHW`.\n decay: Decay for the moving average. Reasonable values for `decay` are close\n to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.\n Lower `decay` value (recommend trying `decay`=0.9) if model experiences\n reasonably good training performance but poor validation and/or test\n performance. Try zero_debias_moving_mean=True for improved stability.\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. 
`nn.relu`), this can be\n disabled since the scaling can be done by the next layer.\n epsilon: Small float added to variance to avoid dividing by zero.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n param_initializers: Optional initializers for beta, gamma, moving mean and\n moving variance.\n param_regularizers: Optional regularizer for beta and gamma.\n updates_collections: Collections to collect the update ops for computation.\n The updates_ops need to be executed with the train_op.\n If None, a control dependency would be added to make sure the updates are\n computed in place.\n is_training: Whether or not the layer is in training mode. In training mode\n it would accumulate the statistics of the moments into `moving_mean` and\n `moving_variance` using an exponential moving average with the given\n `decay`. When it is not in training mode then it would use the values of\n the `moving_mean` and the `moving_variance`.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional collections for the variables.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n batch_weights: An optional tensor of shape `[batch_size]`,\n containing a frequency weight for each batch item. If present,\n then the batch normalization uses weighted mean and\n variance. (This can be used to correct for bias in training\n example selection.)\n fused: if `True`, use a faster, fused implementation if possible.\n If `None`, use the system recommended implementation.\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new\n pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.\n scope: Optional scope for `variable_scope`.\n renorm: Whether to use Batch Renormalization\n (https://arxiv.org/abs/1702.03275). This adds extra variables during\n training. The inference is the same for either value of this parameter.\n renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to\n scalar `Tensors` used to clip the renorm correction. The correction\n `(r, d)` is used as `corrected_value = normalized_value * r + d`, with\n `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,\n dmax are set to inf, 0, inf, respectively.\n renorm_decay: Momentum used to update the moving means and standard\n deviations with renorm. Unlike `momentum`, this affects training\n and should be neither too small (which would add noise) nor too large\n (which would give stale estimates). 
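A hedged usage sketch of the renorm options documented here, calling the `batch_norm` being defined (TF 1.x; the clipping values are illustrative, not recommendations):

```python
x = tf.placeholder(tf.float32, [None, 32, 32, 16])
y = batch_norm(x,
               decay=0.99,
               renorm=True,
               renorm_clipping={'rmax': 3.0, 'rmin': 1.0 / 3.0, 'dmax': 5.0},
               renorm_decay=0.99,
               is_training=True)
```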
Note that `decay` is still applied\n to get the means and variances for inference.\n\n Returns:\n A `Tensor` representing the output of the operation.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If the rank of `inputs` is undefined.\n ValueError: If rank or channels dimension of `inputs` is undefined.\n \"\"\"\n # This environment variable is only used during the testing period of fused\n # batch norm and will be removed after that.\n if fused is None:\n fused = _FUSED_DEFAULT\n\n # Only use _fused_batch_norm if all of the following three\n # conditions are true:\n # (1) fused is set True;\n # (2) it is possible to use (currently it doesn't support batch weights,\n # renorm, and the case when rank is neither 2 nor 4);\n # (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,\n # or non-default updates_collections (not implemented in\n # normalization_layers.BatchNormalization yet); otherwise use the fused\n # implementation in normalization_layers.BatchNormalization.\n inputs = ops.convert_to_tensor(inputs)\n rank = inputs.get_shape().ndims\n possible_to_fuse = batch_weights is None and not renorm and rank in [2, 4]\n if fused and possible_to_fuse and (\n zero_debias_moving_mean or rank == 2 or\n updates_collections is not ops.GraphKeys.UPDATE_OPS):\n return _fused_batch_norm(\n inputs,\n decay=decay,\n center=center,\n scale=scale,\n epsilon=epsilon,\n activation_fn=activation_fn,\n param_initializers=param_initializers,\n updates_collections=updates_collections,\n is_training=is_training,\n reuse=reuse,\n variables_collections=variables_collections,\n outputs_collections=outputs_collections,\n trainable=trainable,\n data_format=data_format,\n zero_debias_moving_mean=zero_debias_moving_mean,\n scope=scope)\n\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n\n layer_variable_getter = _build_variable_getter()\n with variable_scope.variable_scope(\n scope, 'BatchNorm', [inputs], reuse=reuse,\n custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n\n # Determine whether we can use the core layer class.\n if (batch_weights is None and\n updates_collections is ops.GraphKeys.UPDATE_OPS and\n not zero_debias_moving_mean):\n # Use the core layer class.\n axis = 1 if data_format == DATA_FORMAT_NCHW else -1\n if not param_initializers:\n param_initializers = {}\n beta_initializer = param_initializers.get('beta',\n init_ops.zeros_initializer())\n gamma_initializer = param_initializers.get('gamma',\n init_ops.ones_initializer())\n moving_mean_initializer = param_initializers.get(\n 'moving_mean', init_ops.zeros_initializer())\n moving_variance_initializer = param_initializers.get(\n 'moving_variance', init_ops.ones_initializer())\n if not param_regularizers:\n param_regularizers = {}\n beta_regularizer = param_regularizers.get('beta')\n gamma_regularizer = param_regularizers.get('gamma')\n layer = normalization_layers.BatchNormalization(\n axis=axis,\n momentum=decay,\n epsilon=epsilon,\n center=center,\n scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n moving_mean_initializer=moving_mean_initializer,\n moving_variance_initializer=moving_variance_initializer,\n beta_regularizer=beta_regularizer,\n gamma_regularizer=gamma_regularizer,\n trainable=trainable,\n renorm=renorm,\n renorm_clipping=renorm_clipping,\n renorm_momentum=renorm_decay,\n name=sc.name,\n _scope=sc,\n _reuse=reuse,\n 
fused=fused)\n outputs = layer.apply(inputs, training=is_training)\n\n # Add variables to collections.\n _add_variable_to_collections(\n layer.moving_mean, variables_collections, 'moving_mean')\n _add_variable_to_collections(\n layer.moving_variance, variables_collections, 'moving_variance')\n if layer.beta is not None:\n _add_variable_to_collections(layer.beta, variables_collections, 'beta')\n if layer.gamma is not None:\n _add_variable_to_collections(\n layer.gamma, variables_collections, 'gamma')\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n # Not supported by layer class: batch_weights argument,\n # and custom updates_collections. In that case, use the legacy BN\n # implementation.\n # Custom updates collections are not supported because the update logic\n # is different in this case, in particular w.r.t. \"forced updates\" and\n # update op reuse.\n if renorm:\n raise ValueError('renorm is not supported with batch_weights, '\n 'updates_collections or zero_debias_moving_mean')\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n dtype = inputs.dtype.base_dtype\n if batch_weights is not None:\n batch_weights = ops.convert_to_tensor(batch_weights)\n inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())\n # Reshape batch weight values so they broadcast across inputs.\n nshape = [-1] + [1 for _ in range(inputs_rank - 1)]\n batch_weights = array_ops.reshape(batch_weights, nshape)\n\n if data_format == DATA_FORMAT_NCHW:\n moments_axes = [0] + list(range(2, inputs_rank))\n params_shape = inputs_shape[1:2]\n # For NCHW format, rather than relying on implicit broadcasting, we\n # explicitly reshape the params to params_shape_broadcast when computing\n # the moments and the batch normalization.\n params_shape_broadcast = list(\n [1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])\n else:\n moments_axes = list(range(inputs_rank - 1))\n params_shape = inputs_shape[-1:]\n params_shape_broadcast = None\n if not params_shape.is_fully_defined():\n raise ValueError('Inputs %s has undefined channels dimension %s.' % (\n inputs.name, params_shape))\n\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n if not param_initializers:\n param_initializers = {}\n if center:\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n beta_initializer = param_initializers.get('beta',\n init_ops.zeros_initializer())\n beta = variables.model_variable('beta',\n shape=params_shape,\n dtype=dtype,\n initializer=beta_initializer,\n collections=beta_collections,\n trainable=trainable)\n if scale:\n gamma_collections = utils.get_variable_collections(variables_collections,\n 'gamma')\n gamma_initializer = param_initializers.get('gamma',\n init_ops.ones_initializer())\n gamma = variables.model_variable('gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=gamma_initializer,\n collections=gamma_collections,\n trainable=trainable)\n\n # Create moving_mean and moving_variance variables and add them to the\n # appropriate collections. 
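In this non-fused path the per-batch statistics come from `nn.moments` over every axis except channels. A minimal sketch for NHWC (TF 1.x; shapes illustrative):

```python
x = tf.placeholder(tf.float32, [None, 28, 28, 16])   # NHWC
mean, variance = tf.nn.moments(x, axes=[0, 1, 2])    # each has shape [16]
# For NCHW the moments axes are [0, 2, 3], and mean/variance are later
# reshaped to params_shape_broadcast so they broadcast against the inputs.
```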
We disable variable partitioning while creating\n # them, because assign_moving_average is not yet supported for partitioned\n # variables.\n partitioner = variable_scope.get_variable_scope().partitioner\n try:\n variable_scope.get_variable_scope().set_partitioner(None)\n moving_mean_collections = utils.get_variable_collections(\n variables_collections, 'moving_mean')\n moving_mean_initializer = param_initializers.get(\n 'moving_mean', init_ops.zeros_initializer())\n moving_mean = variables.model_variable(\n 'moving_mean',\n shape=params_shape,\n dtype=dtype,\n initializer=moving_mean_initializer,\n trainable=False,\n collections=moving_mean_collections)\n moving_variance_collections = utils.get_variable_collections(\n variables_collections, 'moving_variance')\n moving_variance_initializer = param_initializers.get(\n 'moving_variance', init_ops.ones_initializer())\n moving_variance = variables.model_variable(\n 'moving_variance',\n shape=params_shape,\n dtype=dtype,\n initializer=moving_variance_initializer,\n trainable=False,\n collections=moving_variance_collections)\n finally:\n variable_scope.get_variable_scope().set_partitioner(partitioner)\n\n # If `is_training` doesn't have a constant value, because it is a `Tensor`,\n # a `Variable` or `Placeholder` then is_training_value will be None and\n # `needs_moments` will be true.\n is_training_value = utils.constant_value(is_training)\n need_moments = is_training_value is None or is_training_value\n if need_moments:\n # Calculate the moments based on the individual batch.\n if batch_weights is None:\n if data_format == DATA_FORMAT_NCHW:\n mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)\n mean = array_ops.reshape(mean, [-1])\n variance = array_ops.reshape(variance, [-1])\n else:\n mean, variance = nn.moments(inputs, moments_axes)\n else:\n if data_format == DATA_FORMAT_NCHW:\n mean, variance = nn.weighted_moments(inputs, moments_axes,\n batch_weights, keep_dims=True)\n mean = array_ops.reshape(mean, [-1])\n variance = array_ops.reshape(variance, [-1])\n else:\n mean, variance = nn.weighted_moments(inputs, moments_axes,\n batch_weights)\n\n moving_vars_fn = lambda: (moving_mean, moving_variance)\n if updates_collections is None:\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n update_moving_mean = moving_averages.assign_moving_average(\n moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay, zero_debias=False)\n with ops.control_dependencies([update_moving_mean,\n update_moving_variance]):\n return array_ops.identity(mean), array_ops.identity(variance)\n mean, variance = utils.smart_cond(is_training,\n _force_updates,\n moving_vars_fn)\n else:\n def _delay_updates():\n \"\"\"Internal function that delay updates moving_vars if is_training.\"\"\"\n update_moving_mean = moving_averages.assign_moving_average(\n moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)\n update_moving_variance = moving_averages.assign_moving_average(\n moving_variance, variance, decay, zero_debias=False)\n return update_moving_mean, update_moving_variance\n\n update_mean, update_variance = utils.smart_cond(is_training,\n _delay_updates,\n moving_vars_fn)\n ops.add_to_collections(updates_collections, update_mean)\n ops.add_to_collections(updates_collections, update_variance)\n # Use computed moments during training and moving_vars otherwise.\n vars_fn = lambda: (mean, variance)\n 
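The two branches above trade update placement against speed. A hedged sketch of both call patterns (TF 1.x; `optimizer`, `loss`, and `x` stand in for the caller's graph):

```python
# Default: update ops land in tf.GraphKeys.UPDATE_OPS and the caller
# attaches them to the train step.
y = batch_norm(x, is_training=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)

# updates_collections=None: batch_norm adds the control dependency itself
# ("forced updates"), which is simpler but can be slower.
y = batch_norm(x, updates_collections=None, is_training=True)
```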
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)\n else:\n mean, variance = moving_mean, moving_variance\n if data_format == DATA_FORMAT_NCHW:\n mean = array_ops.reshape(mean, params_shape_broadcast)\n variance = array_ops.reshape(variance, params_shape_broadcast)\n if beta is not None:\n beta = array_ops.reshape(beta, params_shape_broadcast)\n if gamma is not None:\n gamma = array_ops.reshape(gamma, params_shape_broadcast)\n\n # Compute batch_normalization.\n outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,\n epsilon)\n outputs.set_shape(inputs_shape)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef bias_add(inputs,\n activation_fn=None,\n initializer=init_ops.zeros_initializer(),\n regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n data_format=DATA_FORMAT_NHWC,\n scope=None):\n \"\"\"Adds a bias to the inputs.\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Args:\n inputs: A tensor of with at least rank 2 and value for the last dimension,\n e.g. `[batch_size, depth]`, `[None, None, None, depth]`.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n initializer: An initializer for the bias, defaults to 0.\n regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional collections for the variables.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n data_format: A string. 
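A minimal usage sketch of the `bias_add` documented here (TF 1.x; shape illustrative):

```python
x = tf.placeholder(tf.float32, [None, 7, 7, 32])   # NHWC, C = 32
y = bias_add(x)   # creates a zero-initialized [32] 'biases' variable and adds it
```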
'NHWC' and 'NCHW' are supported.\n scope: Optional scope for variable_scope.\n\n Returns:\n A tensor representing the result of adding biases to the inputs.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.\n ValueError: If the rank of `inputs` is undefined.\n ValueError: If rank or `C` dimension of `inputs` is undefined.\n \"\"\"\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n dtype = inputs.dtype.base_dtype\n inputs_shape = inputs.get_shape()\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Dims of shape must be known but is None')\n elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:\n raise ValueError('Data format NCHW only supports 4D Tensor')\n axis = 1 if data_format == DATA_FORMAT_NCHW else -1\n num_features = inputs_shape[axis].value\n if num_features is None:\n raise ValueError('`C` dimension must be known but is None')\n biases_collections = utils.get_variable_collections(variables_collections,\n 'biases')\n biases = variables.model_variable('biases',\n shape=[num_features,],\n dtype=dtype,\n initializer=initializer,\n regularizer=regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(inputs, biases, data_format=data_format)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with\n# underlying op.\n@add_arg_scope\ndef convolution(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=None,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds an N-D convolution followed by an optional batch_norm layer.\n\n It is required that 1 <= N <= 3.\n\n `convolution` creates a variable called `weights`, representing the\n convolutional kernel, that is convolved (actually cross-correlated) with the\n `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is\n provided (such as `batch_norm`), it is then applied. Otherwise, if\n `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`\n variable would be created and added the activations. Finally, if\n `activation_fn` is not `None`, it is applied to the activations as well.\n\n Performs atrous convolution with input stride/dilation rate equal to `rate`\n if a value > 1 for any dimension of `rate` is specified. In this case\n `stride` values != 1 are not supported.\n\n Args:\n inputs: A Tensor of rank N+2 of shape\n `[batch_size] + input_spatial_shape + [in_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, in_channels] + input_spatial_shape` if data_format starts\n with \"NC\".\n num_outputs: Integer, the number of output filters.\n kernel_size: A sequence of N positive integers specifying the spatial\n dimensions of the filters. 
Can be a single integer to specify the same\n value for all spatial dimensions.\n stride: A sequence of N positive integers specifying the stride at which to\n compute output. Can be a single integer to specify the same value for all\n spatial dimensions. Specifying any `stride` value != 1 is incompatible\n with specifying any `rate` value != 1.\n padding: One of `\"VALID\"` or `\"SAME\"`.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n rate: A sequence of N positive integers specifying the dilation rate to use\n for atrous convolution. Can be a single integer to specify the same\n value for all spatial dimensions. Specifying any `rate` value != 1 is\n incompatible with specifying any `stride` value != 1.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. 
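A short sketch of the atrous/dilated case described above, using this module's `convolution` (TF 1.x; shapes illustrative). Note the documented constraint: any `rate != 1` requires `stride == 1`:

```python
x = tf.placeholder(tf.float32, [None, 64, 64, 3])
y = convolution(x, num_outputs=16, kernel_size=3, stride=1, rate=2)  # dilated 3x3
```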
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A tensor representing the output of the operation.\n\n Raises:\n ValueError: If `data_format` is invalid.\n ValueError: Both 'rate' and `stride` are not uniformly 1.\n \"\"\"\n if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:\n raise ValueError('Invalid data_format: %r' % (data_format,))\n\n layer_variable_getter = _build_variable_getter(\n {'bias': 'biases', 'kernel': 'weights'})\n\n with variable_scope.variable_scope(\n scope, 'Conv', [inputs], reuse=reuse,\n custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n input_rank = inputs.get_shape().ndims\n\n if input_rank == 3:\n layer_class = convolutional_layers.Convolution1D\n elif input_rank == 4:\n layer_class = convolutional_layers.Convolution2D\n elif input_rank == 5:\n layer_class = convolutional_layers.Convolution3D\n else:\n raise ValueError('Convolution not supported for input with rank',\n input_rank)\n\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = layer_class(filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n dilation_rate=rate,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.use_bias:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\nconvolution2d = convolution\nconvolution3d = convolution\n\n\n@add_arg_scope\ndef convolution2d_in_plane(\n inputs,\n kernel_size,\n stride=1,\n padding='SAME',\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Performs the same in-plane convolution to each channel independently.\n\n This is useful for performing various simple channel-independent convolution\n operations such as image gradients:\n\n image = tf.constant(..., shape=(16, 240, 320, 3))\n vert_gradients = layers.conv2d_in_plane(image,\n kernel=[1, -1],\n kernel_size=[2, 1])\n horz_gradients = layers.conv2d_in_plane(image,\n kernel=[1, -1],\n kernel_size=[1, 2])\n\n Args:\n inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].\n 
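The in-plane convolution below implements channel-independent filtering by tiling a single `[kh, kw, 1, 1]` kernel across channels and running a depthwise convolution. A standalone sketch of that trick (TF 1.x; values illustrative):

```python
x = tf.placeholder(tf.float32, [None, 240, 320, 3])
kernel = tf.reshape(tf.constant([1.0, -1.0]), [2, 1, 1, 1])  # vertical gradient
depthwise = tf.tile(kernel, [1, 1, 3, 1])                    # [kh, kw, C, 1]
grads = tf.nn.depthwise_conv2d(x, depthwise, [1, 1, 1, 1], 'SAME')
```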
kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of\n of the pooling. Can be an int if both values are the same.\n stride: A list of length 2 `[stride_height, stride_width]`.\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding type to use, either 'SAME' or 'VALID'.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation.\n \"\"\"\n with variable_scope.variable_scope(\n scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)\n weights_shape = [kernel_h, kernel_w, 1, 1]\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n weights = variables.model_variable('weights',\n shape=weights_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n collections=weights_collections,\n trainable=trainable)\n depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])\n outputs = nn.depthwise_conv2d(inputs, depthwise_weights,\n [1, stride_h, stride_w, 1], padding)\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 'biases')\n biases = variables.model_variable('biases',\n shape=[num_filters_in,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n collections=biases_collections,\n trainable=trainable)\n outputs = nn.bias_add(outputs, biases)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef convolution2d_transpose(\n inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=DATA_FORMAT_NHWC,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n 
biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a convolution2d_transpose with an optional batch normalization layer.\n\n The function creates a variable called `weights`, representing the\n kernel, that is convolved with the input. If `normalizer_fn` is `None`, a\n second variable called 'biases' is added to the result of the operation.\n\n Args:\n inputs: A 4-D `Tensor` of type `float` and shape\n `[batch, height, width, in_channels]` for `NHWC` data format or\n `[batch, in_channels, height, width]` for `NCHW` data format.\n num_outputs: Integer, the number of output filters.\n kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of\n of the filters. Can be an int if both values are the same.\n stride: A list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: One of 'VALID' or 'SAME'.\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: Whether or not the variables should be trainable or not.\n scope: Optional scope for variable_scope.\n\n Returns:\n A tensor representing the output of the operation.\n\n Raises:\n ValueError: If 'kernel_size' is not a list of length 2.\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If `C` dimension of `inputs` is None.\n \"\"\"\n layer_variable_getter = _build_variable_getter(\n {'bias': 'biases', 'kernel': 'weights'})\n\n with variable_scope.variable_scope(\n scope, 'Conv2d_transpose', [inputs], reuse=reuse,\n custom_getter=layer_variable_getter) as sc:\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n\n inputs = ops.convert_to_tensor(inputs)\n\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = convolutional_layers.Convolution2DTranspose(\n filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.bias:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef convolution3d_transpose(\n inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=DATA_FORMAT_NDHWC,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a convolution3d_transpose with an optional batch normalization layer.\n\n The function creates a variable called `weights`, representing the\n kernel, that is convolved with the input. If `batch_norm_params` is `None`, a\n second variable called 'biases' is added to the result of the operation.\n Args:\n inputs: A 5-D `Tensor` of type `float` and shape\n `[batch, depth, height, width, in_channels]` for `NDHWC` data format or\n `[batch, in_channels, depth, height, width]` for `NCDHW` data format.\n num_outputs: Integer, the number of output filters.\n kernel_size: A list of length 3 holding the [kernel_depth, kernel_height, kernel_width] of\n of the filters. Can be an int if both values are the same.\n stride: A list of length 3: [stride_depth, stride_height, stride_width].\n Can be an int if both strides are the same. 
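A usage sketch of `convolution2d_transpose` for learned 2x upsampling (TF 1.x; shapes illustrative):

```python
x = tf.placeholder(tf.float32, [None, 16, 16, 64])
y = convolution2d_transpose(x, num_outputs=32, kernel_size=4, stride=2)
# With 'SAME' padding and stride 2, y has shape [None, 32, 32, 32].
```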
Note that presently\n both strides must have the same value.\n padding: One of 'VALID' or 'SAME'.\n data_format: A string. `NDHWC` (default) and `NCDHW` are supported.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: Whether or not the variables should be trainable or not.\n scope: Optional scope for variable_scope.\n Returns:\n A tensor representing the output of the operation.\n Raises:\n ValueError: If 'kernel_size' is not a list of length 3.\n ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.\n ValueError: If `C` dimension of `inputs` is None.\n \"\"\"\n layer_variable_getter = _build_variable_getter(\n {'bias': 'biases', 'kernel': 'weights'})\n\n with variable_scope.variable_scope(\n scope, 'Conv3d_transpose', [inputs], reuse=reuse,\n custom_getter=layer_variable_getter) as sc:\n if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):\n raise ValueError('data_format has to be either NCDHW or NDHWC.')\n\n inputs = ops.convert_to_tensor(inputs)\n\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = convolutional_layers.Convolution3DTranspose(\n filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.bias:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef dropout(inputs,\n keep_prob=0.5,\n noise_shape=None,\n is_training=True,\n outputs_collections=None,\n scope=None):\n \"\"\"Returns a dropout op applied to the input.\n\n With probability `keep_prob`, outputs the input element scaled up by\n `1 / keep_prob`, otherwise outputs `0`. 
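The 3-D variant mirrors the 2-D one. A brief sketch (TF 1.x; shapes illustrative):

```python
v = tf.placeholder(tf.float32, [None, 8, 16, 16, 32])   # NDHWC
u = convolution3d_transpose(v, num_outputs=16, kernel_size=4, stride=2)
# With 'SAME' padding and stride 2, u has shape [None, 16, 32, 32, 16].
```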
The scaling is so that the expected\n sum is unchanged.\n\n Args:\n inputs: The tensor to pass to the nn.dropout op.\n keep_prob: A scalar `Tensor` with the same type as x. The probability\n that each element is kept.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n is_training: A bool `Tensor` indicating whether or not the model\n is in training mode. If so, dropout is applied and values scaled.\n Otherwise, inputs is returned.\n outputs_collections: Collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n A tensor representing the output of the operation.\n \"\"\"\n with variable_scope.variable_scope(\n scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n layer = core_layers.Dropout(rate=1 - keep_prob,\n noise_shape=noise_shape,\n name=sc.name,\n _scope=sc)\n outputs = layer.apply(inputs, training=is_training)\n return utils.collect_named_outputs(\n outputs_collections, sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef flatten(inputs,\n outputs_collections=None,\n scope=None):\n \"\"\"Flattens the input while maintaining the batch_size.\n\n Assumes that the first dimension represents the batch.\n\n Args:\n inputs: A tensor of size [batch_size, ...].\n outputs_collections: Collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n A flattened tensor with shape [batch_size, k].\n Raises:\n ValueError: If inputs rank is unknown or less than 2.\n \"\"\"\n with ops.name_scope(scope, 'Flatten', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n inputs_rank = inputs.get_shape().ndims\n if (inputs_rank is None) or (inputs_rank < 2):\n raise ValueError('Inputs must have a least 2 dimensions.')\n\n inputs_shape = array_ops.shape(inputs)\n\n batch_dim = array_ops.slice(inputs_shape, [0], [1])\n spatial_dims = array_ops.slice(inputs_shape, [1], [inputs_rank - 1])\n\n flat_spatial_dim = math_ops.reduce_prod(spatial_dims)\n flat_spatial_dim = array_ops.expand_dims(flat_spatial_dim, 0)\n flat_shape = array_ops.concat([batch_dim, flat_spatial_dim], 0)\n\n outputs = array_ops.reshape(inputs, flat_shape)\n\n # Attempt to propagate shape information, if it is defined.\n input_shape = inputs.get_shape().as_list()\n batch_dim, spatial_dims = input_shape[0], input_shape[1:]\n if all(spatial_dims):\n outputs.set_shape([batch_dim,\n functools.reduce(lambda x, y: x * y, spatial_dims)])\n else:\n outputs.set_shape([batch_dim, None])\n\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\ndef _sparse_inner_flatten(inputs, new_rank):\n \"\"\"Helper function for `inner_flatten`.\"\"\"\n inputs_rank = inputs.dense_shape.get_shape().as_list()[0]\n if inputs_rank < new_rank:\n raise ValueError(\n 'Inputs has rank less than new_rank. {} must have rank at least'\n ' {}. 
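Two quick usage sketches for the layers above (TF 1.x; shapes illustrative). Note that `dropout` converts `keep_prob` into the core layer's `rate = 1 - keep_prob`, and `flatten` keeps the batch dimension while collapsing the rest:

```python
x = tf.placeholder(tf.float32, [None, 7, 7, 64])
training = tf.placeholder(tf.bool)

h = flatten(x)                                    # -> [None, 7 * 7 * 64] = [None, 3136]
h = dropout(h, keep_prob=0.8, is_training=training)
# While training, kept elements are scaled by 1 / 0.8; at inference h passes through.
```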
Received rank {}, shape {}'.format(inputs, new_rank, inputs_rank,\n inputs.get_shape()))\n\n outer_dimensions = inputs.dense_shape[:new_rank - 1]\n inner_dimensions = inputs.dense_shape[new_rank - 1:]\n new_shape = array_ops.concat((outer_dimensions,\n [math_ops.reduce_prod(inner_dimensions)]), 0)\n flattened = sparse_ops.sparse_reshape(inputs, new_shape)\n return flattened\n\n\ndef _dense_inner_flatten(inputs, new_rank):\n \"\"\"Helper function for `inner_flatten`.\"\"\"\n rank_assertion = check_ops.assert_rank_at_least(\n inputs, new_rank, message='inputs has rank less than new_rank')\n with ops.control_dependencies([rank_assertion]):\n outer_dimensions = array_ops.strided_slice(\n array_ops.shape(inputs), [0], [new_rank - 1])\n new_shape = array_ops.concat((outer_dimensions, [-1]), 0)\n reshaped = array_ops.reshape(inputs, new_shape)\n\n # if `new_rank` is an integer, try to calculate new shape.\n if isinstance(new_rank, six.integer_types):\n static_shape = inputs.get_shape()\n if static_shape is not None and static_shape.dims is not None:\n static_shape = static_shape.as_list()\n static_outer_dims = static_shape[:new_rank - 1]\n static_inner_dims = static_shape[new_rank - 1:]\n flattened_dimension = 1\n for inner_dim in static_inner_dims:\n if inner_dim is None:\n flattened_dimension = None\n break\n flattened_dimension *= inner_dim\n reshaped.set_shape(static_outer_dims + [flattened_dimension])\n return reshaped\n\n\n@add_arg_scope\ndef _inner_flatten(inputs, new_rank, output_collections=None, scope=None):\n \"\"\"Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.\n\n For example:\n '''\n x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])\n y = _inner_flatten(x, 4)\n assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]\n '''\n This layer will fail at run time if `new_rank` is greater than the current\n rank of `inputs`.\n\n Args:\n inputs: A `Tensor` or `SparseTensor`.\n new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.\n output_collections: Collection to which the outputs will be added.\n scope: Optional scope for `name_scope`.\n Returns:\n A `Tensor` or `SparseTensor` conataining the same values as `inputs`, but\n with innermost dimensions flattened to obtain rank `new_rank`.\n\n Raises:\n TypeError: `inputs` is not a `Tensor` or `SparseTensor`.\n \"\"\"\n with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:\n if isinstance(inputs, sparse_tensor.SparseTensor):\n flattened = _sparse_inner_flatten(inputs, new_rank)\n else:\n inputs = ops.convert_to_tensor(inputs)\n flattened = _dense_inner_flatten(inputs, new_rank)\n return utils.collect_named_outputs(output_collections, sc, flattened)\n\n\ndef _model_variable_getter(getter, name, shape=None, dtype=None,\n initializer=None, regularizer=None, trainable=True,\n collections=None, caching_device=None,\n partitioner=None, rename=None, use_resource=None,\n **_):\n \"\"\"Getter that uses model_variable for compatibility with core layers.\"\"\"\n short_name = name.split('/')[-1]\n if rename and short_name in rename:\n name_components = name.split('/')\n name_components[-1] = rename[short_name]\n name = '/'.join(name_components)\n return variables.model_variable(\n name, shape=shape, dtype=dtype, initializer=initializer,\n regularizer=regularizer, collections=collections, trainable=trainable,\n caching_device=caching_device, partitioner=partitioner,\n custom_getter=getter, use_resource=use_resource)\n\n\ndef _build_variable_getter(rename=None):\n \"\"\"Build a model variable getter 
that respects scope getter and renames.\"\"\"\n # VariableScope will nest the getters\n def layer_variable_getter(getter, *args, **kwargs):\n kwargs['rename'] = rename\n return _model_variable_getter(getter, *args, **kwargs)\n return layer_variable_getter\n\n\ndef _add_variable_to_collections(variable, collections_set, collections_name):\n \"\"\"Adds variable (or all its parts) to all collections with that name.\"\"\"\n collections = utils.get_variable_collections(\n collections_set, collections_name) or []\n variables_list = [variable]\n if isinstance(variable, tf_variables.PartitionedVariable):\n variables_list = [v for v in variable]\n for collection in collections:\n for var in variables_list:\n if var not in ops.get_collection(collection):\n ops.add_to_collection(collection, var)\n\n\n@add_arg_scope\ndef fully_connected(inputs,\n num_outputs,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a fully connected layer.\n\n `fully_connected` creates a variable called `weights`, representing a fully\n connected weight matrix, which is multiplied by the `inputs` to produce a\n `Tensor` of hidden units. If a `normalizer_fn` is provided (such as\n `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is\n None and a `biases_initializer` is provided then a `biases` variable would be\n created and added the hidden units. Finally, if `activation_fn` is not `None`,\n it is applied to the hidden units as well.\n\n Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened\n prior to the initial matrix multiply by `weights`.\n\n Args:\n inputs: A tensor of at least rank 2 and static value for the last dimension;\n i.e. `[batch_size, depth]`, `[None, None, None, channels]`.\n num_outputs: Integer or long, the number of output units in the layer.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. 
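The rename machinery above is what lets the core layers create variables named `kernel`/`bias` while this module exposes them under the contrib names. A sketch using the private helpers defined above:

```python
getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
# Used as custom_getter on a variable_scope: when a core layer asks for
# 'fully_connected/kernel', the variable is created as
# 'fully_connected/weights' via variables.model_variable.
```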
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collections per variable.\n outputs_collections: Collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for variable_scope.\n\n Returns:\n The tensor variable representing the result of the series of operations.\n\n Raises:\n ValueError: If x has rank less than 2 or if its last dimension is not set.\n \"\"\"\n if not isinstance(num_outputs, six.integer_types):\n raise ValueError(\n 'num_outputs should be int or long, got %s.' % (num_outputs,))\n\n layer_variable_getter = _build_variable_getter({'bias': 'biases',\n 'kernel': 'weights'})\n\n with variable_scope.variable_scope(\n scope, 'fully_connected', [inputs],\n reuse=reuse, custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n layer = core_layers.Dense(\n units=num_outputs,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.bias is not None:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n # Apply normalizer function / layer.\n if normalizer_fn is not None:\n if not normalizer_params:\n normalizer_params = {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n return utils.collect_named_outputs(\n outputs_collections, sc.original_name_scope, outputs)\n\n\nclass GDN(base.Layer):\n \"\"\"Generalized divisive normalization layer.\n\n Based on the papers:\n\n \"Density Modeling of Images using a Generalized Normalization\n Transformation\"\n\n Johannes Ballé, Valero Laparra, Eero P. Simoncelli\n\n https://arxiv.org/abs/1511.06281\n\n \"End-to-end Optimized Image Compression\"\n\n Johannes Ballé, Valero Laparra, Eero P. Simoncelli\n\n https://arxiv.org/abs/1611.01704\n\n Implements an activation function that is essentially a multivariate\n generalization of a particular sigmoid-type function:\n\n ```\n y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))\n ```\n\n where `i` and `j` run over channels. This implementation never sums across\n spatial dimensions. It is similar to local response normalization, but much\n more flexible, as `beta` and `gamma` are trainable parameters.\n\n Arguments:\n inverse: If `False` (default), compute GDN response. If `True`, compute IGDN\n response (one step of fixed point iteration to invert GDN; the division\n is replaced by multiplication).\n beta_min: Lower bound for beta, to prevent numerical error from causing\n square root of zero or negative values.\n gamma_init: The gamma matrix will be initialized as the identity matrix\n multiplied with this value. 
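A minimal usage sketch of `fully_connected` (TF 1.x; sizes illustrative):

```python
x = tf.placeholder(tf.float32, [None, 784])
h = fully_connected(x, 256)                          # ReLU is the default activation
logits = fully_connected(h, 10, activation_fn=None)  # linear output layer
```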
If set to zero, the layer is effectively\n initialized to the identity operation, since beta is initialized as one.\n A good default setting is somewhere between 0 and 0.5.\n reparam_offset: Offset added to the reparameterization of beta and gamma.\n The reparameterization of beta and gamma as their square roots lets the\n training slow down when their values are close to zero, which is desirable\n as small values in the denominator can lead to a situation where gradient\n noise on beta/gamma leads to extreme amounts of noise in the GDN\n activations. However, without the offset, we would get zero gradients if\n any elements of beta or gamma were exactly zero, and thus the training\n could get stuck. To prevent this, we add this small constant. The default\n value was empirically determined as a good starting point. Making it\n bigger potentially leads to more gradient noise on the activations, making\n it too small may lead to numerical precision issues.\n data_format: Format of input tensor. Currently supports `'channels_first'`\n and `'channels_last'`.\n activity_regularizer: Regularizer function for the output.\n trainable: Boolean, if `True`, also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require `reuse=True` in such\n cases.\n\n Properties:\n inverse: Boolean, whether GDN is computed (`True`) or IGDN (`False`).\n data_format: Format of input tensor. Currently supports `'channels_first'`\n and `'channels_last'`.\n beta: The beta parameter as defined above (1D `Tensor`).\n gamma: The gamma parameter as defined above (2D `Tensor`).\n \"\"\"\n\n def __init__(self,\n inverse=False,\n beta_min=1e-6,\n gamma_init=.1,\n reparam_offset=2 ** -18,\n data_format='channels_last',\n activity_regularizer=None,\n trainable=True,\n name=None,\n **kwargs):\n super(GDN, self).__init__(trainable=trainable, name=name, **kwargs)\n self.inverse = inverse\n self._beta_min = beta_min\n self._gamma_init = gamma_init\n self._reparam_offset = reparam_offset\n self.data_format = data_format\n self.activity_regularizer = activity_regularizer\n self._channel_axis() # trigger ValueError early\n self.input_spec = base.InputSpec(min_ndim=3, max_ndim=5)\n\n def _channel_axis(self):\n try:\n return {'channels_first': 1, 'channels_last': -1}[self.data_format]\n except KeyError:\n raise ValueError('Unsupported `data_format` for GDN layer: {}.'.format(\n self.data_format))\n\n @staticmethod\n def _lower_bound(inputs, bound, name=None):\n \"\"\"Same as tf.maximum, but with helpful gradient for inputs < bound.\n\n The gradient is overwritten so that it is passed through if the input is not\n hitting the bound. If it is, only gradients that push `inputs` higher than\n the bound are passed through. 
No gradients are passed through to the bound.\n\n Args:\n inputs: input tensor\n bound: lower bound for the input tensor\n name: name for this op\n\n Returns:\n tf.maximum(inputs, bound)\n \"\"\"\n with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:\n inputs = ops.convert_to_tensor(inputs, name='inputs')\n bound = ops.convert_to_tensor(bound, name='bound')\n with ops.get_default_graph().gradient_override_map(\n {'Maximum': 'GDNLowerBound'}):\n return math_ops.maximum(inputs, bound, name=scope)\n\n @staticmethod\n def _lower_bound_grad(op, grad):\n \"\"\"Gradient for `_lower_bound`.\n\n Args:\n op: the tensorflow op for which to calculate a gradient\n grad: gradient with respect to the output of the op\n\n Returns:\n gradients with respect to the inputs of the op\n \"\"\"\n inputs = op.inputs[0]\n bound = op.inputs[1]\n pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)\n return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]\n\n def build(self, input_shape):\n channel_axis = self._channel_axis()\n input_shape = tensor_shape.TensorShape(input_shape)\n num_channels = input_shape[channel_axis].value\n if num_channels is None:\n raise ValueError('The channel dimension of the inputs to `GDN` '\n 'must be defined.')\n self._input_rank = input_shape.ndims\n self.input_spec = base.InputSpec(ndim=input_shape.ndims,\n axes={channel_axis: num_channels})\n\n pedestal = array_ops.constant(self._reparam_offset ** 2, dtype=self.dtype)\n beta_bound = array_ops.constant(\n (self._beta_min + self._reparam_offset ** 2) ** .5, dtype=self.dtype)\n gamma_bound = array_ops.constant(self._reparam_offset, dtype=self.dtype)\n\n def beta_initializer(shape, dtype=None, partition_info=None):\n del partition_info # unused\n return math_ops.sqrt(array_ops.ones(shape, dtype=dtype) + pedestal)\n\n def gamma_initializer(shape, dtype=None, partition_info=None):\n del partition_info # unused\n assert len(shape) == 2\n assert shape[0] == shape[1]\n eye = linalg_ops.eye(shape[0], dtype=dtype)\n return math_ops.sqrt(self._gamma_init * eye + pedestal)\n\n beta = self.add_variable('reparam_beta',\n shape=[num_channels],\n initializer=beta_initializer,\n dtype=self.dtype,\n trainable=True)\n beta = self._lower_bound(beta, beta_bound)\n self.beta = math_ops.square(beta) - pedestal\n\n gamma = self.add_variable('reparam_gamma',\n shape=[num_channels, num_channels],\n initializer=gamma_initializer,\n dtype=self.dtype,\n trainable=True)\n gamma = self._lower_bound(gamma, gamma_bound)\n self.gamma = math_ops.square(gamma) - pedestal\n\n self.built = True\n\n def call(self, inputs):\n inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)\n ndim = self._input_rank\n\n shape = self.gamma.get_shape().as_list()\n gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)\n\n # Compute normalization pool.\n if self.data_format == 'channels_first':\n norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID',\n data_format='NC' + 'DHW'[-(ndim - 2):])\n if ndim == 3:\n norm_pool = array_ops.expand_dims(norm_pool, 2)\n norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')\n norm_pool = array_ops.squeeze(norm_pool, [2])\n elif ndim == 5:\n shape = array_ops.shape(norm_pool)\n norm_pool = array_ops.reshape(norm_pool, shape[:3] + [-1])\n norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')\n norm_pool = array_ops.reshape(norm_pool, shape)\n else: # ndim == 4\n norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')\n else: # channels_last\n norm_pool = 
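A NumPy sketch of the response computed in `call` above for `channels_last`; note the implementation convolves the squared inputs with `gamma`, so the normalization pool is `beta[i] + sum_j gamma[j, i] * x[j]**2` (values illustrative, using the effective initial parameters):

```python
import numpy as np

x = np.random.randn(4).astype(np.float32)      # one spatial position, 4 channels
beta = np.ones(4, np.float32)                  # effective beta after reparam init
gamma = 0.1 * np.eye(4, dtype=np.float32)      # gamma_init * identity

norm_pool = np.sqrt(beta + (x ** 2) @ gamma)   # sum_j gamma[j, i] * x[j]**2, then sqrt
y = x / norm_pool                              # GDN; inverse=True multiplies instead
```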
nn.convolution(math_ops.square(inputs), gamma, 'VALID')\n norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')\n norm_pool = math_ops.sqrt(norm_pool)\n\n if self.inverse:\n outputs = inputs * norm_pool\n else:\n outputs = inputs / norm_pool\n outputs.set_shape(inputs.get_shape())\n return outputs\n\n def _compute_output_shape(self, input_shape):\n channel_axis = self._channel_axis()\n input_shape = tensor_shape.TensorShape(input_shape)\n if not 3 <= input_shape.ndim <= 5:\n raise ValueError('`input_shape` must be of rank 3 to 5, inclusive.')\n if input_shape[channel_axis].value is None:\n raise ValueError(\n 'The channel dimension of `input_shape` must be defined.')\n return input_shape\n\n\nops.RegisterGradient('GDNLowerBound')(GDN._lower_bound_grad) # pylint:disable=protected-access\n\n\ndef gdn(inputs,\n inverse=False,\n beta_min=1e-6,\n gamma_init=.1,\n reparam_offset=2 ** -18,\n data_format='channels_last',\n activity_regularizer=None,\n trainable=True,\n name=None,\n reuse=None):\n \"\"\"Functional interface for GDN layer.\n\n Based on the papers:\n\n \"Density Modeling of Images using a Generalized Normalization\n Transformation\"\n Johannes Ballé, Valero Laparra, Eero P. Simoncelli\n https://arxiv.org/abs/1511.06281\n\n \"End-to-end Optimized Image Compression\"\n Johannes Ballé, Valero Laparra, Eero P. Simoncelli\n https://arxiv.org/abs/1611.01704\n\n Implements an activation function that is essentially a multivariate\n generalization of a particular sigmoid-type function:\n\n ```\n y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))\n ```\n\n where `i` and `j` run over channels. This implementation never sums across\n spatial dimensions. It is similar to local response normalization, but much\n more flexible, as `beta` and `gamma` are trainable parameters.\n\n Args:\n inputs: Tensor input.\n inverse: If `False` (default), compute GDN response. If `True`, compute IGDN\n response (one step of fixed point iteration to invert GDN; the division\n is replaced by multiplication).\n beta_min: Lower bound for beta, to prevent numerical error from causing\n square root of zero or negative values.\n gamma_init: The gamma matrix will be initialized as the identity matrix\n multiplied with this value. If set to zero, the layer is effectively\n initialized to the identity operation, since beta is initialized as one.\n A good default setting is somewhere between 0 and 0.5.\n reparam_offset: Offset added to the reparameterization of beta and gamma.\n The reparameterization of beta and gamma as their square roots lets the\n training slow down when their values are close to zero, which is desirable\n as small values in the denominator can lead to a situation where gradient\n noise on beta/gamma leads to extreme amounts of noise in the GDN\n activations. However, without the offset, we would get zero gradients if\n any elements of beta or gamma were exactly zero, and thus the training\n could get stuck. To prevent this, we add this small constant. The default\n value was empirically determined as a good starting point. Making it\n bigger potentially leads to more gradient noise on the activations, making\n it too small may lead to numerical precision issues.\n data_format: Format of input tensor. 
Currently supports `'channels_first'`\n and `'channels_last'`.\n activity_regularizer: Regularizer function for the output.\n trainable: Boolean, if `True`, also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require `reuse=True` in such\n cases.\n reuse: Boolean, whether to reuse the weights of a previous layer by the same\n name.\n\n Returns:\n Output tensor.\n \"\"\"\n layer = GDN(inverse=inverse,\n beta_min=beta_min,\n gamma_init=gamma_init,\n reparam_offset=reparam_offset,\n data_format=data_format,\n activity_regularizer=activity_regularizer,\n trainable=trainable,\n name=name,\n dtype=inputs.dtype.base_dtype,\n _scope=name,\n _reuse=reuse)\n return layer.apply(inputs)\n\n\n@add_arg_scope\ndef layer_norm(inputs,\n center=True,\n scale=True,\n activation_fn=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n begin_norm_axis=1,\n begin_params_axis=-1,\n scope=None):\n \"\"\"Adds a Layer Normalization layer.\n\n Based on the paper:\n\n \"Layer Normalization\"\n\n Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton\n\n https://arxiv.org/abs/1607.06450.\n\n Can be used as a normalizer function for conv2d and fully_connected.\n\n Given a tensor `inputs` of rank `R`, moments are calculated and normalization\n is performed over axes `begin_norm_axis ... R - 1`. Scaling and centering,\n if requested, is performed over axes `begin_shift_axis .. R - 1`.\n\n By default, `begin_norm_axis = 1` and `begin_params_axis = -1`,\n meaning that normalization is performed over all but the first axis\n (the `HWC` if `inputs` is `NHWC`), while the `beta` and `gamma` trainable\n parameters are calculated for the rightmost axis (the `C` if `inputs` is\n `NHWC`). Scaling and recentering is performed via broadcast of the\n `beta` and `gamma` parameters with the normalized tensor.\n\n The shapes of `beta` and `gamma` are `inputs.shape[begin_params_axis:]`,\n and this part of the inputs' shape must be fully defined.\n\n Args:\n inputs: A tensor having rank `R`. The normalization is performed over\n axes `begin_norm_axis ... R - 1` and centering and scaling parameters\n are calculated over `begin_params_axis ... R - 1`.\n center: If True, add offset of `beta` to normalized tensor. If False, `beta`\n is ignored.\n scale: If True, multiply by `gamma`. If False, `gamma` is\n not used. When the next layer is linear (also e.g. `nn.relu`), this can be\n disabled since the scaling can be done by the next layer.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n reuse: Whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional collections for the variables.\n outputs_collections: Collections to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n begin_norm_axis: The first normalization dimension: normalization will be\n performed along dimensions `begin_norm_axis : rank(inputs)`\n begin_params_axis: The first parameter (beta, gamma) dimension: scale\n and centering parameters will have dimensions\n `begin_params_axis : rank(inputs)` and will be broadcast with the\n normalized inputs accordingly.\n scope: Optional scope for `variable_scope`.\n\n Returns:\n A `Tensor` representing the output of the operation, having the same\n shape and dtype as `inputs`.\n\n Raises:\n ValueError: If the rank of `inputs` is not known at graph build time,\n or if `inputs.shape[begin_params_axis:]` is not fully defined at\n graph build time.\n \"\"\"\n with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],\n reuse=reuse) as sc:\n inputs = ops.convert_to_tensor(inputs)\n inputs_shape = inputs.shape\n inputs_rank = inputs_shape.ndims\n if inputs_rank is None:\n raise ValueError('Inputs %s has undefined rank.' % inputs.name)\n dtype = inputs.dtype.base_dtype\n if begin_norm_axis < 0:\n begin_norm_axis = inputs_rank + begin_norm_axis\n if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:\n raise ValueError(\n 'begin_params_axis (%d) and begin_norm_axis (%d) '\n 'must be < rank(inputs) (%d)'\n % (begin_params_axis, begin_norm_axis, inputs_rank))\n params_shape = inputs_shape[begin_params_axis:]\n if not params_shape.is_fully_defined():\n raise ValueError(\n 'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % (\n inputs.name, begin_params_axis, inputs_shape))\n # Allocate parameters for the beta and gamma of the normalization.\n beta, gamma = None, None\n if center:\n beta_collections = utils.get_variable_collections(variables_collections,\n 'beta')\n beta = variables.model_variable(\n 'beta',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.zeros_initializer(),\n collections=beta_collections,\n trainable=trainable)\n if scale:\n gamma_collections = utils.get_variable_collections(variables_collections,\n 'gamma')\n gamma = variables.model_variable(\n 'gamma',\n shape=params_shape,\n dtype=dtype,\n initializer=init_ops.ones_initializer(),\n collections=gamma_collections,\n trainable=trainable)\n # Calculate the moments on the last axis (layer activations).\n norm_axes = list(range(begin_norm_axis, inputs_rank))\n mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)\n # Compute layer normalization using the batch_normalization function.\n variance_epsilon = 1e-12\n outputs = nn.batch_normalization(\n inputs, mean, variance, offset=beta, scale=gamma,\n variance_epsilon=variance_epsilon)\n outputs.set_shape(inputs_shape)\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope,\n outputs)\n\n\n@add_arg_scope\ndef max_pool2d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n data_format=DATA_FORMAT_NHWC,\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 2D Max Pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if\n `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if\n 
`data_format` is `NCHW`.\n kernel_size: A list of length 2: [kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 2: [stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If `data_format` is neither `NHWC` nor `NCHW`.\n ValueError: If 'kernel_size' is not a 2-D list\n \"\"\"\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = pooling_layers.MaxPooling2D(pool_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n _scope=sc)\n outputs = layer.apply(inputs)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef max_pool3d(inputs,\n kernel_size,\n stride=2,\n padding='VALID',\n data_format=DATA_FORMAT_NDHWC,\n outputs_collections=None,\n scope=None):\n \"\"\"Adds a 3D Max Pooling op.\n\n It is assumed that the pooling is done per image but not in batch or channels.\n\n Args:\n inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]` if\n `data_format` is `NDHWC`, and `[batch_size, channels, depth, height, width]` if\n `data_format` is `NCDHW`.\n kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width] of the\n pooling kernel over which the op is computed. Can be an int if both\n values are the same.\n stride: A list of length 3: [stride_depth, stride_height, stride_width].\n Can be an int if both strides are the same. Note that presently\n both strides must have the same value.\n padding: The padding method, either 'VALID' or 'SAME'.\n data_format: A string. 
`NDHWC` (default) and `NCDHW` are supported.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.\n ValueError: If 'kernel_size' is not a 3-D list\n \"\"\"\n if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):\n raise ValueError('data_format has to be either NCDHW or NDHWC.')\n with ops.name_scope(scope, 'MaxPool3D', [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n layer = pooling_layers.MaxPooling3D(pool_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n _scope=sc)\n outputs = layer.apply(inputs)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\n@add_arg_scope\ndef pool(inputs,\n kernel_size,\n pooling_type,\n padding='VALID',\n data_format=None,\n dilation_rate=1,\n stride=1,\n outputs_collections=None,\n scope=None):\n # pylint: disable=line-too-long\n \"\"\"Adds a pooling op.\n\n\n Args:\n inputs: Tensor of rank N+2, of shape\n `[batch_size] + input_spatial_shape + [num_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, num_channels] + input_spatial_shape` if data_format starts\n with \"NC\". Pooling happens over the spatial dimensions only.\n kernel_size: Sequence of N ints >= 1. Can also be a single integer to\n specify the same value for all spatial dimensions.\n pooling_type: Specifies pooling operation, must be \"AVG\" or \"MAX\".\n padding: The padding algorithm, must be \"SAME\" or \"VALID\".\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults\n to [1]*N. Can also be a single integer to specify the same value for all\n spatial dimensions. If any value of dilation_rate is > 1, then all values\n of stride must be 1.\n stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be\n a single integer to specify the same value for all spatial dimensions. 
If\n any value of stride is > 1, then all values of dilation_rate must be 1.\n outputs_collections: The collections to which the outputs are added.\n scope: Optional scope for name_scope.\n\n Returns:\n A `Tensor` representing the results of the pooling operation.\n\n Raises:\n ValueError: If arguments are invalid.\n\n \"\"\"\n # pylint: enable=line-too-long\n with ops.name_scope(scope, '%s_pool' %\n (pooling_type.lower()), [inputs]) as sc:\n inputs = ops.convert_to_tensor(inputs)\n input_rank = inputs.get_shape().ndims\n if input_rank is None:\n raise ValueError('Rank of inputs must be known')\n if input_rank < 3:\n raise ValueError('Rank of inputs must be >= 3')\n num_spatial_dims = input_rank - 2\n output = nn.pool(\n input=inputs,\n window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),\n pooling_type=pooling_type,\n padding=padding,\n data_format=data_format,\n dilation_rate=utils.n_positive_integers(num_spatial_dims,\n dilation_rate),\n strides=utils.n_positive_integers(num_spatial_dims, stride),\n name=sc)\n return utils.collect_named_outputs(outputs_collections, sc, output)\n\n\n@add_arg_scope\ndef one_hot_encoding(labels,\n num_classes,\n on_value=1.0,\n off_value=0.0,\n outputs_collections=None,\n scope=None):\n \"\"\"Transform numeric labels into onehot_labels using `tf.one_hot`.\n\n Args:\n labels: [batch_size] target labels.\n num_classes: Total number of classes.\n on_value: A scalar defining the on-value.\n off_value: A scalar defining the off-value.\n outputs_collections: Collection to add the outputs.\n scope: Optional scope for name_scope.\n\n Returns:\n One-hot encoding of the labels.\n \"\"\"\n with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:\n labels = ops.convert_to_tensor(labels)\n if labels.dtype == dtypes.int32:\n labels = standard_ops.to_int64(labels)\n outputs = standard_ops.one_hot(labels,\n num_classes,\n on_value=on_value,\n off_value=off_value)\n return utils.collect_named_outputs(outputs_collections, sc, outputs)\n\n\ndef _apply_activation(y, activation_fn, output_collections):\n if activation_fn is not None:\n y = activation_fn(y)\n ops.add_to_collections(list(output_collections or []) +\n [ops.GraphKeys.ACTIVATIONS], y)\n return y\n\n\ndef repeat(inputs, repetitions, layer, *args, **kwargs):\n \"\"\"Applies the same layer with the same arguments repeatedly.\n\n ```python\n y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')\n # It is equivalent to:\n\n x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')\n x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')\n y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')\n ```\n\n If the `scope` argument is not given in `kwargs`, it is set to\n `layer.__name__`, or `layer.func.__name__` (for `functools.partial`\n objects). 
If neither `__name__` nor `func.__name__` is available, the\n layers are called with `scope='stack'`.\n\n Args:\n inputs: A `Tensor` suitable for layer.\n repetitions: Int, number of repetitions.\n layer: A layer with arguments `(inputs, *args, **kwargs)`\n *args: Extra args for the layer.\n **kwargs: Extra kwargs for the layer.\n\n Returns:\n A tensor result of applying the layer, repetitions times.\n Raises:\n ValueError: If the op is unknown or wrong.\n \"\"\"\n scope = kwargs.pop('scope', None)\n with variable_scope.variable_scope(scope, 'Repeat', [inputs]):\n inputs = ops.convert_to_tensor(inputs)\n if scope is None:\n if hasattr(layer, '__name__'):\n scope = layer.__name__\n elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):\n scope = layer.func.__name__ # In case layer is a functools.partial.\n else:\n scope = 'repeat'\n outputs = inputs\n for i in range(repetitions):\n kwargs['scope'] = scope + '_' + str(i+1)\n outputs = layer(outputs, *args, **kwargs)\n return outputs\n\n\ndef _scale_gradient_shape(op):\n \"\"\"Shape helper function for scale_gradient function below.\"\"\"\n return [op.inputs[0].shape]\n\n\ndef _scale_gradient_grad(op, grad):\n \"\"\"Python gradient helper function for scale_gradient function below.\"\"\"\n return [grad * op.inputs[1], None]\n\n\[email protected](python_grad_func=_scale_gradient_grad,\n shape_func=_scale_gradient_shape)\ndef scale_gradient(inputs, gradient_multiplier):\n \"\"\"Identity operation, but with the gradient multiplied by a tensor.\n\n The TensorFlow gradient system will compute the gradient with respect to\n `inputs` as the product of the gradient with respect to the `output`\n multiplied by a specified `gradient_multiplier` tensor. If\n `gradient_multiplier` is equal to 1, then this results in the true gradient.\n Otherwise, it results in a scaled gradient.\n\n This can be useful for adjusting the relative learning rate of different\n parameter tensors when performing gradient descent, and because this rescaling\n can be inserted at arbitrary locations within a graph, is often more\n convenient to apply than simply rescaling the final computed gradients.\n\n Args:\n inputs: Tensor to be output.\n gradient_multiplier: Tensor by which to multiply the gradient with respect\n to `output` to compute the gradient with respect to `inputs`. Its shape\n must be broadcastable to the shape of `inputs`.\n\n Returns:\n output Tensor, equal to `inputs`.\n \"\"\"\n # gradient_multiplier is implicitly saved by decorator, and only used for\n # gradient computation.\n del gradient_multiplier\n\n return inputs\n\n\n@add_arg_scope\ndef separable_convolution2d(\n inputs,\n num_outputs,\n kernel_size,\n depth_multiplier,\n stride=1,\n padding='SAME',\n data_format=DATA_FORMAT_NHWC,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n scope=None):\n \"\"\"Adds a depth-separable 2D convolution with optional batch_norm layer.\n\n This op first performs a depthwise convolution that acts separately on\n channels, creating a variable called `depthwise_weights`. If `num_outputs`\n is not None, it adds a pointwise convolution that mixes channels, creating a\n variable called `pointwise_weights`. 
Then, if `normalizer_fn` is None,\n it adds bias to the result, creating a variable called 'biases', otherwise,\n the `normalizer_fn` is applied. It finally applies an activation function\n to produce the end result.\n\n Args:\n inputs: A tensor of size [batch_size, height, width, channels].\n num_outputs: The number of pointwise convolution output filters. If is\n None, then we skip the pointwise convolution stage.\n kernel_size: A list of length 2: [kernel_height, kernel_width] of\n of the filters. Can be an int if both values are the same.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n stride: A list of length 2: [stride_height, stride_width], specifying the\n depthwise convolution stride. Can be an int if both strides are the same.\n padding: One of 'VALID' or 'SAME'.\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n rate: A list of length 2: [rate_height, rate_width], specifying the dilation\n rates for atrous convolution. Can be an int if both rates are the same.\n If any value is larger than one, then both stride values need to be one.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. 
To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: Whether or not the variables should be trainable or not.\n scope: Optional scope for variable_scope.\n\n Returns:\n A `Tensor` representing the output of the operation.\n Raises:\n ValueError: If `data_format` is invalid.\n \"\"\"\n if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):\n raise ValueError('data_format has to be either NCHW or NHWC.')\n layer_variable_getter = _build_variable_getter(\n {'bias': 'biases',\n 'depthwise_kernel': 'depthwise_weights',\n 'pointwise_kernel': 'pointwise_weights'})\n\n with variable_scope.variable_scope(\n scope, 'SeparableConv2d', [inputs], reuse=reuse,\n custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n\n df = ('channels_first' if data_format and data_format.startswith('NC')\n else 'channels_last')\n if num_outputs is not None:\n # Apply separable conv using the SeparableConvolution2D layer.\n layer = convolutional_layers.SeparableConvolution2D(\n filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n dilation_rate=utils.two_element_tuple(rate),\n activation=None,\n depth_multiplier=depth_multiplier,\n use_bias=not normalizer_fn and biases_initializer,\n depthwise_initializer=weights_initializer,\n pointwise_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n depthwise_regularizer=weights_regularizer,\n pointwise_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.depthwise_kernel,\n variables_collections, 'weights')\n _add_variable_to_collections(layer.pointwise_kernel,\n variables_collections, 'weights')\n if layer.bias:\n _add_variable_to_collections(layer.bias,\n variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n # Actually apply depthwise conv instead of separable conv.\n dtype = inputs.dtype.base_dtype\n kernel_h, kernel_w = utils.two_element_tuple(kernel_size)\n stride_h, stride_w = utils.two_element_tuple(stride)\n num_filters_in = utils.channel_dimension(\n inputs.get_shape(), df, min_rank=4)\n weights_collections = utils.get_variable_collections(\n variables_collections, 'weights')\n\n depthwise_shape = [kernel_h, kernel_w,\n num_filters_in, depth_multiplier]\n depthwise_weights = variables.model_variable(\n 'depthwise_weights',\n shape=depthwise_shape,\n dtype=dtype,\n initializer=weights_initializer,\n regularizer=weights_regularizer,\n trainable=trainable,\n collections=weights_collections)\n strides = [1, stride_h, stride_w, 1]\n\n outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding,\n rate=utils.two_element_tuple(rate),\n data_format=data_format)\n num_outputs = depth_multiplier * num_filters_in\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n else:\n if biases_initializer is not None:\n biases_collections = utils.get_variable_collections(\n variables_collections, 
'biases')\n biases = variables.model_variable('biases',\n shape=[num_outputs,],\n dtype=dtype,\n initializer=biases_initializer,\n regularizer=biases_regularizer,\n trainable=trainable,\n collections=biases_collections)\n outputs = nn.bias_add(outputs, biases, data_format=data_format)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections,\n sc.original_name_scope, outputs)\n\n\n@add_arg_scope\ndef softmax(logits, scope=None):\n \"\"\"Performs softmax on Nth dimension of N-dimensional logit tensor.\n\n For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension\n needs to have a specified number of elements (number of classes).\n\n Args:\n logits: N-dimensional `Tensor` with logits, where N > 1.\n scope: Optional scope for variable_scope.\n\n Returns:\n A `Tensor` with same shape and type as logits.\n \"\"\"\n # TODO(jrru): Add axis argument which defaults to last dimension.\n with variable_scope.variable_scope(scope, 'softmax', [logits]):\n num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)\n logits_2d = array_ops.reshape(logits, [-1, num_logits])\n predictions = nn.softmax(logits_2d)\n predictions = array_ops.reshape(predictions, array_ops.shape(logits))\n predictions.set_shape(logits.get_shape())\n return predictions\n\n\n@add_arg_scope\ndef spatial_softmax(features,\n temperature=None,\n name=None,\n variables_collections=None,\n trainable=True,\n data_format='NHWC'):\n \"\"\"Computes the spatial softmax of a convolutional feature map.\n\n First computes the softmax over the spatial extent of each channel of a\n convolutional feature map. Then computes the expected 2D position of the\n points of maximal activation for each channel, resulting in a set of\n feature keypoints [x1, y1, ... xN, yN] for all N channels.\n\n Read more here:\n \"Learning visual feature spaces for robotic manipulation with\n deep spatial autoencoders.\" Finn et. al, http://arxiv.org/abs/1509.06113.\n\n Args:\n features: A `Tensor` of size [batch_size, W, H, num_channels]; the\n convolutional feature map.\n temperature: Softmax temperature (optional). If None, a learnable\n temperature is created.\n name: A name for this operation (optional).\n variables_collections: Collections for the temperature variable.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n data_format: A string. `NHWC` (default) and `NCHW` are supported.\n Returns:\n feature_keypoints: A `Tensor` with size [batch_size, num_channels * 2];\n the expected 2D locations of each channel's feature keypoint (normalized\n to the range (-1,1)). The inner dimension is arranged as\n [x1, y1, ... xN, yN].\n Raises:\n ValueError: If unexpected data_format specified.\n ValueError: If num_channels dimension is unspecified.\n \"\"\"\n shape = array_ops.shape(features)\n static_shape = features.shape\n if data_format == DATA_FORMAT_NHWC:\n height, width, num_channels = shape[1], shape[2], static_shape[3]\n elif data_format == DATA_FORMAT_NCHW:\n num_channels, height, width = static_shape[1], shape[2], shape[3]\n else:\n raise ValueError('data_format has to be either NCHW or NHWC.')\n if num_channels.value is None:\n raise ValueError('The num_channels dimension of the inputs to '\n '`spatial_softmax` should be defined. 
Found `None`.')\n\n with ops.name_scope(name, 'spatial_softmax', [features]) as name:\n # Create tensors for x and y coordinate values, scaled to range [-1, 1].\n pos_x, pos_y = array_ops.meshgrid(math_ops.lin_space(-1., 1., num=height),\n math_ops.lin_space(-1., 1., num=width),\n indexing='ij')\n pos_x = array_ops.reshape(pos_x, [height * width])\n pos_y = array_ops.reshape(pos_y, [height * width])\n if temperature is None:\n temperature_collections = utils.get_variable_collections(\n variables_collections, 'temperature')\n temperature = variables.model_variable(\n 'temperature',\n shape=(),\n dtype=dtypes.float32,\n initializer=init_ops.ones_initializer(),\n collections=temperature_collections,\n trainable=trainable)\n if data_format == 'NCHW':\n features = array_ops.reshape(features, [-1, height * width])\n else:\n features = array_ops.reshape(\n array_ops.transpose(features, [0, 3, 1, 2]), [-1, height * width])\n\n softmax_attention = nn.softmax(features/temperature)\n expected_x = math_ops.reduce_sum(\n pos_x * softmax_attention, [1], keep_dims=True)\n expected_y = math_ops.reduce_sum(\n pos_y * softmax_attention, [1], keep_dims=True)\n expected_xy = array_ops.concat([expected_x, expected_y], 1)\n feature_keypoints = array_ops.reshape(\n expected_xy, [-1, num_channels.value * 2])\n feature_keypoints.set_shape([None, num_channels.value * 2])\n return feature_keypoints\n\n\ndef stack(inputs, layer, stack_args, **kwargs):\n \"\"\"Builds a stack of layers by applying layer repeatedly using stack_args.\n\n `stack` allows you to repeatedly apply the same operation with different\n arguments `stack_args[i]`. For each application of the layer, `stack` creates\n a new scope appended with an increasing number. For example:\n\n ```python\n y = stack(x, fully_connected, [32, 64, 128], scope='fc')\n # It is equivalent to:\n\n x = fully_connected(x, 32, scope='fc/fc_1')\n x = fully_connected(x, 64, scope='fc/fc_2')\n y = fully_connected(x, 128, scope='fc/fc_3')\n ```\n\n If the `scope` argument is not given in `kwargs`, it is set to\n `layer.__name__`, or `layer.func.__name__` (for `functools.partial`\n objects). 
If neither `__name__` nor `func.__name__` is available, the\n layers are called with `scope='stack'`.\n\n Args:\n inputs: A `Tensor` suitable for layer.\n layer: A layer with arguments `(inputs, *args, **kwargs)`\n stack_args: A list/tuple of parameters for each call of layer.\n **kwargs: Extra kwargs for the layer.\n\n Returns:\n A `Tensor` result of applying the stacked layers.\n\n Raises:\n ValueError: If the op is unknown or wrong.\n \"\"\"\n scope = kwargs.pop('scope', None)\n if not isinstance(stack_args, (list, tuple)):\n raise ValueError('stack_args need to be a list or tuple')\n with variable_scope.variable_scope(scope, 'Stack', [inputs]):\n inputs = ops.convert_to_tensor(inputs)\n if scope is None:\n if hasattr(layer, '__name__'):\n scope = layer.__name__\n elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):\n scope = layer.func.__name__ # In case layer is a functools.partial.\n else:\n scope = 'stack'\n outputs = inputs\n for i in range(len(stack_args)):\n kwargs['scope'] = scope + '_' + str(i+1)\n layer_args = stack_args[i]\n if not isinstance(layer_args, (list, tuple)):\n layer_args = [layer_args]\n outputs = layer(outputs, *layer_args, **kwargs)\n return outputs\n\n\n@add_arg_scope\ndef unit_norm(inputs, dim, epsilon=1e-7, scope=None):\n \"\"\"Normalizes the given input across the specified dimension to unit length.\n\n Note that the rank of `input` must be known.\n\n Args:\n inputs: A `Tensor` of arbitrary size.\n dim: The dimension along which the input is normalized.\n epsilon: A small value to add to the inputs to avoid dividing by zero.\n scope: Optional scope for variable_scope.\n\n Returns:\n The normalized `Tensor`.\n\n Raises:\n ValueError: If dim is smaller than the number of dimensions in 'inputs'.\n \"\"\"\n with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):\n if not inputs.get_shape():\n raise ValueError('The input rank must be known.')\n input_rank = len(inputs.get_shape().as_list())\n if dim < 0 or dim >= input_rank:\n raise ValueError(\n 'dim must be positive but smaller than the input rank.')\n\n lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(\n math_ops.square(inputs), dim, True))\n multiples = []\n if dim > 0:\n multiples.append(array_ops.ones([dim], dtypes.int32))\n multiples.append(\n array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))\n if dim < (input_rank - 1):\n multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))\n multiples = array_ops.concat(multiples, 0)\n return math_ops.div(inputs, array_ops.tile(lengths, multiples))\n\n\ndef poincare_normalize(x, axis=1, epsilon=1e-5, name=None):\n \"\"\"Project into the Poincare ball with norm <= 1.0 - epsilon.\n\n https://en.wikipedia.org/wiki/Poincare_ball_model\n\n Used in\n Poincare Embeddings for Learning Hierarchical Representations\n Maximilian Nickel, Douwe Kiela\n https://arxiv.org/pdf/1705.08039.pdf\n\n For a 1-D tensor with `axis = 0`, computes\n\n (x * (1 - epsilon)) / ||x|| if ||x|| > 1 - epsilon\n output =\n x otherwise\n\n For `x` with more dimensions, independently normalizes each 1-D slice along\n dimension `axis`.\n\n Args:\n x: A `Tensor`.\n axis: Axis along which to normalize. 
A scalar or a vector of\n integers.\n epsilon: A small deviation from the edge of the unit sphere for numerical\n stability.\n name: A name for this operation (optional).\n\n Returns:\n A `Tensor` with the same shape as `x`.\n \"\"\"\n with ops.name_scope(name, 'poincare_normalize', [x]) as name:\n x = ops.convert_to_tensor(x, name='x')\n square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keep_dims=True)\n x_inv_norm = math_ops.rsqrt(square_sum)\n x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)\n return math_ops.multiply(x, x_inv_norm, name=name)\n\n\ndef legacy_fully_connected(x,\n num_output_units,\n activation_fn=None,\n weight_init=initializers.xavier_initializer(),\n bias_init=init_ops.zeros_initializer(),\n name=None,\n weight_collections=(ops.GraphKeys.WEIGHTS,),\n bias_collections=(ops.GraphKeys.BIASES,),\n output_collections=(ops.GraphKeys.ACTIVATIONS,),\n trainable=True,\n weight_regularizer=None,\n bias_regularizer=None):\n # pylint: disable=anomalous-backslash-in-string\n r\"\"\"Adds the parameters for a fully connected layer and returns the output.\n\n A fully connected layer is generally defined as a matrix multiply:\n `y = f(w * x + b)` where `f` is given by `activation_fn`. If\n `activation_fn` is `None`, the result of `y = w * x + b` is\n returned.\n\n If `x` has shape [\\\\\\(\\\\text{dim}_0, \\\\text{dim}_1, ..., \\\\text{dim}_n\\\\\\)]\n with more than 2 dimensions (\\\\\\(n > 1\\\\\\)), then we repeat the matrix\n multiply along the first dimensions. The result r is a tensor of shape\n [\\\\\\(\\\\text{dim}_0, ..., \\\\text{dim}_{n-1},\\\\\\) `num_output_units`],\n where \\\\\\( r_{i_0, ..., i_{n-1}, k} =\n \\\\sum_{0 \\\\leq j < \\\\text{dim}_n} x_{i_0, ... i_{n-1}, j} \\cdot w_{j, k}\\\\\\).\n This is accomplished by reshaping `x` to 2-D\n [\\\\\\(\\\\text{dim}_0 \\\\cdot ... \\\\cdot \\\\text{dim}_{n-1}, \\\\text{dim}_n\\\\\\)]\n before the matrix multiply and afterwards reshaping it to\n [\\\\\\(\\\\text{dim}_0, ..., \\\\text{dim}_{n-1},\\\\\\) `num_output_units`].\n\n This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting\n `bias_init` to `None`.\n\n The variable creation is compatible with `tf.variable_scope` and so can be\n reused with `tf.variable_scope` or `tf.make_template`.\n\n Most of the details of variable creation can be controlled by specifying the\n initializers (`weight_init` and `bias_init`) and in which collections to place\n the created variables (`weight_collections` and `bias_collections`; note that\n the variables are always added to the `VARIABLES` collection). The output of\n the layer can be placed in custom collections using `output_collections`.\n The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,\n respectively.\n\n A per layer regularization can be specified by setting `weight_regularizer`\n and `bias_regularizer`, which are applied to the weights and biases\n respectively, and whose output is added to the `REGULARIZATION_LOSSES`\n collection.\n\n Args:\n x: The input `Tensor`.\n num_output_units: The size of the output.\n activation_fn: Activation function, default set to None to skip it and\n maintain a linear activation.\n weight_init: An optional weight initialization, defaults to\n `xavier_initializer`.\n bias_init: An initializer for the bias, defaults to 0. Set to `None` in\n order to disable bias.\n name: The name for this operation is used to name operations and to find\n variables. 
If specified it must be unique for this scope, otherwise a\n unique name starting with \"fully_connected\" will be created. See\n `tf.variable_scope` for details.\n weight_collections: List of graph collections to which weights are added.\n bias_collections: List of graph collections to which biases are added.\n output_collections: List of graph collections to which outputs are added.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n weight_regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`. Used for weights.\n bias_regularizer: A regularizer like the result of\n `l1_regularizer` or `l2_regularizer`. Used for biases.\n\n Returns:\n The output of the fully connected layer.\n\n Raises:\n ValueError: If x has rank less than 2 or if its last dimension is not set.\n \"\"\"\n with variable_scope.variable_scope(name, 'fully_connected', [x]):\n x = ops.convert_to_tensor(x)\n dims = x.get_shape().dims\n if dims is None:\n raise ValueError('dims of x must be known but is None')\n if len(dims) < 2:\n raise ValueError('rank of x must be at least 2 not: %d' % len(dims))\n num_input_units = dims[-1].value\n if num_input_units is None:\n raise ValueError('last dimension of x must be known but is None')\n dtype = x.dtype.base_dtype\n\n weight_collections = set(list(weight_collections or []) +\n [ops.GraphKeys.GLOBAL_VARIABLES])\n w = variable_scope.get_variable('weights',\n shape=[num_input_units, num_output_units],\n dtype=dtype,\n initializer=weight_init,\n collections=weight_collections,\n regularizer=weight_regularizer,\n trainable=trainable)\n x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,\n [-1, num_input_units])\n y = standard_ops.matmul(x_2_dim, w)\n\n if bias_init is not None:\n bias_collections = set(list(bias_collections or []) +\n [ops.GraphKeys.GLOBAL_VARIABLES])\n b = variable_scope.get_variable('bias',\n shape=[num_output_units],\n dtype=dtype,\n initializer=bias_init,\n collections=bias_collections,\n regularizer=bias_regularizer,\n trainable=trainable)\n\n y = nn.bias_add(y, b)\n\n if len(dims) > 2:\n out_shape = array_ops.unstack(array_ops.shape(x))\n out_shape[-1] = num_output_units\n\n y = array_ops.reshape(y, array_ops.stack(out_shape))\n\n static_shape = x.get_shape().as_list()\n static_shape[-1] = num_output_units\n y.set_shape(static_shape)\n\n return _apply_activation(y, activation_fn, output_collections)\n\n\n# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).\n# Simple aliases which remove the activation_fn parameter.\nelu = functools.partial(fully_connected, activation_fn=nn.elu)\nlegacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)\nlegacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)\nrelu = functools.partial(fully_connected, activation_fn=nn.relu)\nrelu6 = functools.partial(fully_connected, activation_fn=nn.relu6)\nlinear = functools.partial(fully_connected, activation_fn=None)\n\n# Simple alias.\nconv2d = convolution2d\nconv3d = convolution3d\nconv2d_transpose = convolution2d_transpose\nconv3d_transpose = convolution3d_transpose\nconv2d_in_plane = convolution2d_in_plane\nseparable_conv2d = separable_convolution2d\n"
] | [
[
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.layers.pooling.MaxPooling2D",
"tensorflow.python.ops.math_ops.rsqrt",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.sparse_ops.sparse_reshape",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.layers.pooling.AveragePooling2D",
"tensorflow.contrib.layers.python.layers.initializers.xavier_initializer",
"tensorflow.python.ops.nn.depthwise_conv2d",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.contrib.layers.python.layers.utils.get_variable_collections",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.layers.convolutional.Convolution2DTranspose",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.layers.convolutional.Convolution3DTranspose",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.standard_ops.to_int64",
"tensorflow.python.ops.nn.softmax",
"tensorflow.python.layers.pooling.AveragePooling3D",
"tensorflow.contrib.layers.python.layers.utils.two_element_tuple",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.nn.weighted_moments",
"tensorflow.python.ops.nn.moments",
"tensorflow.python.ops.nn.batch_normalization",
"tensorflow.contrib.framework.python.ops.variables.model_variable",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.layers.pooling.MaxPooling3D",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.check_ops.assert_rank_at_least",
"tensorflow.python.ops.standard_ops.matmul",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.nn.fused_batch_norm",
"tensorflow.python.ops.linalg_ops.eye",
"tensorflow.python.ops.math_ops.lin_space",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.layers.base.InputSpec",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.contrib.layers.python.layers.utils.smart_cond",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.ops.standard_ops.one_hot",
"tensorflow.contrib.layers.python.layers.utils.n_positive_integers",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.contrib.layers.python.layers.utils.constant_value",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.layers.core.Dropout",
"tensorflow.contrib.layers.python.layers.utils.collect_named_outputs",
"tensorflow.python.layers.normalization.BatchNormalization",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.init_ops.ones_initializer",
"tensorflow.python.ops.array_ops.transpose"
]
] |
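The `GDN._lower_bound` / `_lower_bound_grad` pair in the record above clamps the reparameterized `beta` and `gamma` like `tf.maximum`, but blocks the gradient only when it would push a parameter further below the bound. A minimal sketch of the same rule, assuming `tf.custom_gradient` as the eager-mode stand-in for the snippet's graph-mode `gradient_override_map`:

```python
import tensorflow as tf

@tf.custom_gradient
def lower_bound(inputs, bound):
    """Clamp like tf.maximum, but keep a useful gradient below the bound."""
    def grad(dy):
        # Pass the gradient through if the input already sits above the
        # bound, or if the gradient would push it back up (dy < 0 means
        # the loss decreases when the input grows). No gradient w.r.t.
        # the bound itself.
        pass_through = tf.logical_or(inputs >= bound, dy < 0)
        return tf.cast(pass_through, dy.dtype) * dy, None
    return tf.maximum(inputs, bound), grad

x = tf.Variable([0.5, 2.0])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(lower_bound(tf.convert_to_tensor(x), 1.0))
print(tape.gradient(y, x).numpy())  # [0. 1.]: the sub-bound element is frozen
```

The pass-through condition is copied verbatim from `_lower_bound_grad`; it is what keeps training from stalling when an element of `beta` or `gamma` hits its bound exactly.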
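One wording pitfall in the same record: the `gdn` docstring writes the pool as `sum_j(gamma[j, i] * x[j])`, but `call` convolves the *squared* inputs with `gamma` (`nn.convolution(math_ops.square(inputs), gamma, 'VALID')`). A NumPy reference for the channels-last case, matching what the layer actually computes:

```python
import numpy as np

def gdn_reference(x, beta, gamma, inverse=False):
    """GDN over the channel axis of an NHWC array:

        y[..., i] = x[..., i] / sqrt(beta[i] + sum_j gamma[j, i] * x[..., j]**2)

    IGDN multiplies instead of dividing (one fixed-point step toward the
    inverse, not an exact inverse).
    """
    norm_pool = np.sqrt(beta + np.tensordot(x ** 2, gamma, axes=([-1], [0])))
    return x * norm_pool if inverse else x / norm_pool

x = np.random.randn(2, 4, 4, 3)
beta = np.ones(3)                 # beta is initialized to one
gamma = 0.1 * np.eye(3)           # the layer's gamma_init default
y = gdn_reference(x, beta, gamma)
print(y.shape)                    # (2, 4, 4, 3)
```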
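`layer_norm`'s two axis arguments are easy to conflate: moments are taken over axes `begin_norm_axis ... R-1`, while `beta` and `gamma` have shape `inputs.shape[begin_params_axis:]` and are broadcast against the normalized tensor. A NumPy sketch of those semantics, using the function's `variance_epsilon = 1e-12`:

```python
import numpy as np

def layer_norm_reference(x, begin_norm_axis=1, begin_params_axis=-1,
                         beta=None, gamma=None, eps=1e-12):
    """Normalize over [begin_norm_axis, rank); scale/shift with params of
    shape x.shape[begin_params_axis:], broadcast against the result."""
    rank = x.ndim
    norm_axes = tuple(range(begin_norm_axis % rank, rank))
    mean = x.mean(axis=norm_axes, keepdims=True)
    var = x.var(axis=norm_axes, keepdims=True)
    y = (x - mean) / np.sqrt(var + eps)
    params_shape = x.shape[begin_params_axis % rank:]
    gamma = np.ones(params_shape) if gamma is None else gamma
    beta = np.zeros(params_shape) if beta is None else beta
    return y * gamma + beta

x = np.random.randn(2, 3, 3, 8)        # NHWC
y = layer_norm_reference(x)            # moments over H, W, C; params over C
print(np.allclose(y.mean(axis=(1, 2, 3)), 0, atol=1e-7))  # True
```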
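`scale_gradient` is the identity in the forward pass and multiplies the incoming gradient by `gradient_multiplier` on the way back, registered in the record above through `function.Defun` with `_scale_gradient_grad`. The same behavior, again assuming `tf.custom_gradient` as the eager-mode equivalent:

```python
import tensorflow as tf

@tf.custom_gradient
def scale_gradient(inputs, gradient_multiplier):
    def grad(dy):
        # Only the backward pass is rescaled; the multiplier itself
        # receives no gradient, matching _scale_gradient_grad.
        return dy * gradient_multiplier, None
    return tf.identity(inputs), grad

x = tf.constant([1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.reduce_sum(scale_gradient(x, 0.5))
print(tape.gradient(y, x).numpy())  # [0.5 0.5]
```

As the docstring notes, this is handy for lowering the effective learning rate of one branch of a graph without touching the optimizer or rescaling final gradients.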
rchen2123/ryu | [
"99400f8c421c1a84b4c7e80dd26224b36ee6d779"
] | [
"venv/lib/python3.7/site-packages/colormath/color_diff_matrix.py"
] | [
"\"\"\"\nThis module contains the formulas for comparing Lab values with matrices\nand vectors. The benefit of using NumPy's matrix capabilities is speed. These\ncalls can be used to efficiently compare large volumes of Lab colors.\n\"\"\"\n\nimport numpy\n\n\ndef delta_e_cie1976(lab_color_vector, lab_color_matrix):\n \"\"\"\n Calculates the Delta E (CIE1976) between `lab_color_vector` and all\n colors in `lab_color_matrix`.\n \"\"\"\n return numpy.sqrt(\n numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))\n\n\n# noinspection PyPep8Naming\ndef delta_e_cie1994(lab_color_vector, lab_color_matrix,\n K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):\n \"\"\"\n Calculates the Delta E (CIE1994) of two colors.\n\n K_l:\n 0.045 graphic arts\n 0.048 textiles\n K_2:\n 0.015 graphic arts\n 0.014 textiles\n K_L:\n 1 default\n 2 textiles\n \"\"\"\n C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))\n C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))\n\n delta_lab = lab_color_vector - lab_color_matrix\n\n delta_L = delta_lab[:, 0].copy()\n delta_C = C_1 - C_2\n delta_lab[:, 0] = delta_C\n\n delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)\n # noinspection PyArgumentList\n delta_H = numpy.sqrt(delta_H_sq.clip(min=0))\n\n S_L = 1\n S_C = 1 + K_1 * C_1\n S_H = 1 + K_2 * C_1\n\n LCH = numpy.vstack([delta_L, delta_C, delta_H])\n params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])\n\n return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0))\n\n\n# noinspection PyPep8Naming\ndef delta_e_cmc(lab_color_vector, lab_color_matrix, pl=2, pc=1):\n \"\"\"\n Calculates the Delta E (CIE1994) of two colors.\n\n CMC values\n Acceptability: pl=2, pc=1\n Perceptability: pl=1, pc=1\n \"\"\"\n L, a, b = lab_color_vector\n\n C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))\n C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))\n\n delta_lab = lab_color_vector - lab_color_matrix\n\n delta_L = delta_lab[:, 0].copy()\n delta_C = C_1 - C_2\n delta_lab[:, 0] = delta_C\n\n H_1 = numpy.degrees(numpy.arctan2(b, a))\n\n if H_1 < 0:\n H_1 += 360\n\n F = numpy.sqrt(numpy.power(C_1, 4) / (numpy.power(C_1, 4) + 1900.0))\n\n # noinspection PyChainedComparisons\n if 164 <= H_1 and H_1 <= 345:\n T = 0.56 + abs(0.2 * numpy.cos(numpy.radians(H_1 + 168)))\n else:\n T = 0.36 + abs(0.4 * numpy.cos(numpy.radians(H_1 + 35)))\n\n if L < 16:\n S_L = 0.511\n else:\n S_L = (0.040975 * L) / (1 + 0.01765 * L)\n\n S_C = ((0.0638 * C_1) / (1 + 0.0131 * C_1)) + 0.638\n S_H = S_C * (F * T + 1 - F)\n\n delta_C = C_1 - C_2\n\n delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)\n # noinspection PyArgumentList\n delta_H = numpy.sqrt(delta_H_sq.clip(min=0))\n\n LCH = numpy.vstack([delta_L, delta_C, delta_H])\n params = numpy.array([[pl * S_L], [pc * S_C], [S_H]])\n\n return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0))\n\n\n# noinspection PyPep8Naming\ndef delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):\n \"\"\"\n Calculates the Delta E (CIE2000) of two colors.\n \"\"\"\n L, a, b = lab_color_vector\n\n avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0\n\n C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))\n C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))\n\n avg_C1_C2 = (C1 + C2) / 2.0\n\n G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))\n\n a1p = 
(1.0 + G) * a\n a2p = (1.0 + G) * lab_color_matrix[:, 1]\n\n C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))\n C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))\n\n avg_C1p_C2p = (C1p + C2p) / 2.0\n\n h1p = numpy.degrees(numpy.arctan2(b, a1p))\n h1p += (h1p < 0) * 360\n\n h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))\n h2p += (h2p < 0) * 360\n\n avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0\n\n T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \\\n 0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \\\n 0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \\\n 0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))\n\n diff_h2p_h1p = h2p - h1p\n delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360\n delta_hp -= (h2p > h1p) * 720\n\n delta_Lp = lab_color_matrix[:, 0] - L\n delta_Cp = C2p - C1p\n delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)\n\n S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))\n S_C = 1 + 0.045 * avg_C1p_C2p\n S_H = 1 + 0.015 * avg_C1p_C2p * T\n\n delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))\n R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))\n R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))\n\n return numpy.sqrt(\n numpy.power(delta_Lp / (S_L * Kl), 2) +\n numpy.power(delta_Cp / (S_C * Kc), 2) +\n numpy.power(delta_Hp / (S_H * Kh), 2) +\n R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh)))\n"
] | [
[
"numpy.vstack",
"numpy.arctan2",
"numpy.sqrt",
"numpy.fabs",
"numpy.power",
"numpy.array",
"numpy.radians"
]
] |
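All four distance functions in this record compare one Lab color (a 3-vector) against a whole matrix of candidates at once, which is where the promised NumPy speedup comes from; note that `delta_e_cmc` computes the CMC l:c distance even though its docstring reuses the CIE1994 header. A short usage sketch with made-up colors:

```python
import numpy as np
from colormath.color_diff_matrix import delta_e_cie1976, delta_e_cie2000

lab_ref = np.array([50.0, 2.5, -8.0])        # one reference color (L, a, b)
lab_batch = np.array([[50.0, 2.5, -8.0],     # identical -> delta E of 0
                      [55.0, 0.0, 0.0],
                      [40.0, 10.0, 5.0]])

print(delta_e_cie1976(lab_ref, lab_batch))   # row-wise Euclidean distance in Lab
print(delta_e_cie2000(lab_ref, lab_batch))   # perceptually weighted variant
```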
keurfonluu/toughio | [
"1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f"
] | [
"test/test_output.py"
] | [
"import os\n\nimport helpers\nimport numpy\nimport pytest\n\nimport toughio\n\nwrite_read = lambda output, writer_kws, reader_kws: helpers.write_read(\n \"output\",\n output,\n toughio.write_output,\n toughio.read_output,\n writer_kws=writer_kws,\n reader_kws=reader_kws,\n)\n\n\[email protected](\n \"filename, data_ref\",\n [\n (\n \"FOFT_A1912.csv\",\n {\n \"TIME\": 4.393722000e9,\n \"PRES\": 1.8740899675005e8,\n \"TEMP\": 720.0,\n \"SAT_G\": 0.0,\n \"SAT_L\": 24.0,\n },\n ),\n (\n \"FOFT_A1912_T2.csv\",\n {\n \"TIME\": 3.06639400e9,\n \"PRES\": 1.83000721e8,\n \"TEMP\": 660.0,\n \"SAT_G\": 0.0,\n \"SAT_L\": 22.0,\n },\n ),\n (\n \"GOFT_A1162.csv\",\n {\"TIME\": 4.393722000e9, \"GEN\": -30.0, \"ENTG\": 1.528048035348e7, \"PWB\": 0.0},\n ),\n (\n \"GOFT_A1162_T2.csv\",\n {\"TIME\": 3.06639400e9, \"GEN\": -27.5, \"ENTG\": 1.40141971e7, \"PWB\": 0.0},\n ),\n ],\n)\ndef test_history(filename, data_ref):\n this_dir = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(this_dir, \"support_files\", \"outputs\", filename)\n data = toughio.read_history(filename)\n\n for k, v in data_ref.items():\n assert numpy.allclose(v, data[k].sum())\n\n\[email protected](\n \"filename, filename_ref\",\n [\n (\"OUTPUT_ELEME.csv\", \"SAVE.out\"),\n (\"OUTPUT_ELEME.tec\", \"SAVE.out\"),\n (\"OUTPUT_ELEME_PETRASIM.csv\", \"SAVE.out\"),\n (\"OUTPUT.out\", \"SAVE.out\"),\n (\"OUTPUT_6.out\", \"SAVE_6.out\"),\n ],\n)\ndef test_output_eleme(filename, filename_ref):\n this_dir = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(this_dir, \"support_files\", \"outputs\", filename)\n outputs = toughio.read_output(filename)\n\n filename = os.path.join(this_dir, \"support_files\", \"outputs\", filename_ref)\n save = toughio.read_output(filename)\n\n assert len(outputs) == 5\n\n times_ref = [\n 0.2592000e08,\n 0.3155800e08,\n 0.1577900e09,\n 0.3155800e09,\n 0.7889400e09,\n ]\n keys_ref = [\"POR\", \"PRES\", \"SAT_G\", \"TEMP\", \"X\", \"Y\", \"Z\"]\n for output, time_ref in zip(outputs, times_ref):\n assert time_ref == output.time\n assert (\n save.labels.tolist() == output.labels.tolist()\n if output.format in {\"csv\", \"petrasim\", \"tough\"}\n else output.labels == None\n )\n if output.format != \"tough\":\n assert keys_ref == sorted(list(output.data.keys()))\n\n assert numpy.allclose(save.data[\"X1\"], outputs[-1].data[\"PRES\"])\n assert numpy.allclose(save.data[\"X2\"], outputs[-1].data[\"TEMP\"], atol=0.1)\n\n\[email protected](\n \"filename\",\n [\"OUTPUT_CONNE.csv\", \"OUTPUT.out\", \"OUTPUT_6.out\"],\n)\ndef test_output_conne(filename):\n this_dir = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(this_dir, \"support_files\", \"outputs\", filename)\n outputs = toughio.read_output(filename, connection=True)\n\n times_ref = [\n 0.2592000e08,\n 0.3155800e08,\n 0.1577900e09,\n 0.3155800e09,\n 0.7889400e09,\n ]\n data_ref = [\n 52542.0,\n 52475.0,\n 51146.0,\n 49600.0,\n 45623.0,\n ]\n for output, time_ref, data in zip(outputs, times_ref, data_ref):\n assert time_ref == output.time\n assert (\n len(set(\"\".join(labels) for labels in output.labels))\n == output.data[\"HEAT\"].size\n )\n assert numpy.allclose(data, numpy.abs(output.data[\"HEAT\"]).mean(), atol=1.0)\n\n\[email protected](\n \"output_ref, file_format\",\n [\n (helpers.output_eleme, \"csv\"),\n (helpers.output_eleme[0], \"csv\"),\n (helpers.output_eleme, \"petrasim\"),\n (helpers.output_eleme[0], \"petrasim\"),\n (helpers.output_eleme, \"tecplot\"),\n (helpers.output_eleme[0], \"tecplot\"),\n 
(helpers.output_conne, \"csv\"),\n (helpers.output_conne[0], \"csv\"),\n ],\n)\ndef test_output(output_ref, file_format):\n output = write_read(\n output=output_ref,\n writer_kws={\"file_format\": file_format},\n reader_kws={},\n )\n\n output_ref = output_ref if isinstance(output_ref, list) else [output_ref]\n for out_ref, out in zip(output_ref, output):\n helpers.allclose_output(out_ref, out)\n\n\ndef test_save():\n this_dir = os.path.dirname(os.path.abspath(__file__))\n filename = os.path.join(this_dir, \"support_files\", \"outputs\", \"SAVE.out\")\n save = toughio.read_output(filename)\n\n x_ref = [6.35804123e05, 1.42894499e02, 9.91868799e-01]\n assert numpy.allclose(\n x_ref, numpy.mean([save.data[\"X1\"], save.data[\"X2\"], save.data[\"X3\"]], axis=1)\n )\n\n assert numpy.allclose(0.01, save.data[\"porosity\"].mean())\n\n assert \"userx\" not in save.data.keys()\n"
] | [
[
"numpy.allclose",
"numpy.abs",
"numpy.mean"
]
] |
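The tests in this record outline the `toughio` reading API: `read_output` returns one object per printed time step, each exposing `.time`, `.labels`, and a `.data` dict, and `connection=True` switches to the connection table. A hedged usage sketch; the file names are placeholders mirroring the test fixtures:

```python
import numpy
import toughio

# Element-based output: one entry per TOUGH print-out time.
outputs = toughio.read_output("OUTPUT_ELEME.csv")
last = outputs[-1]
print(last.time, sorted(last.data))              # e.g. 788940000.0 ['PRES', 'TEMP', ...]

# Connection-based output keeps label pairs and flow quantities.
conne = toughio.read_output("OUTPUT.out", connection=True)
print(numpy.abs(conne[0].data["HEAT"]).mean())   # mean absolute heat flow
```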
vincentchoqueuse/python-control-plotly | [
"5f4f7d354d4de2628ea52a5e544ebeb138d106bc"
] | [
"control_plotly/utils.py"
] | [
"import control as ctl\nimport numpy as np\n\ndef damp(sys,display=False):\n pole_list = []\n m_list = []\n wn_list = []\n\n for pole in sys.pole():\n pole = pole.astype(complex) # WTF: the python control \"damp\" function is buggy due to this missing cast !\n\n if ctl.isctime(sys):\n pole_continuous = pole\n else:\n pole_continuous = np.log(pole)/sys.dt\n \n wn = np.abs(pole_continuous)\n m = -np.real(pole_continuous)/wn\n\n pole_list.append(pole)\n wn_list.append(wn)\n m_list.append(m)\n\n if display:\n print(\"pole {:.3f} : wn={:.3f} rad/s, m= {:.3f}\".format(pole, wn, m))\n\n return wn_list, m_list, pole_list\n \n"
] | [
[
"numpy.log",
"numpy.abs",
"numpy.real"
]
] |
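`damp` maps each discrete-time pole to its continuous-time equivalent with `s = log(z) / dt`, then reads the natural frequency off `|s|` and the damping ratio off `-Re(s) / |s|`. A worked example of that mapping for a single made-up pole:

```python
import numpy as np

z, dt = 0.9 * np.exp(1j * 0.2), 0.01    # discrete pole and sample time
s = np.log(z) / dt                      # continuous equivalent: s = ln(z) / dt
wn = np.abs(s)                          # natural frequency, rad/s
m = -np.real(s) / wn                    # damping ratio
print(f"wn={wn:.1f} rad/s, m={m:.2f}")  # wn=22.6 rad/s, m=0.47
```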
ai-nikolai/Retrograph-1 | [
"54bd534d47218ca437c422a1abe5b1e995f55d71"
] | [
"training_utility/run_pretraining_adapter.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run masked LM/next sentence masked_lm pre-training for BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom retrograph.modeling import modeling_adapter as modeling\nfrom retrograph.modeling import optimization_adapter as optimization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\n \"input_file\", None,\n \"Input TF example files (can be a glob or comma separated).\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded. Must match data generation.\")\n\nflags.DEFINE_integer(\n \"max_predictions_per_seq\", 20,\n \"Maximum number of masked LM predictions per sequence. \"\n \"Must match data generation.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_integer(\"num_train_steps\", 100000, \"Number of training steps.\")\n\nflags.DEFINE_integer(\"num_warmup_steps\", 10000, \"Number of warmup steps.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\"max_eval_steps\", 100, \"Maximum number of eval steps.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. 
If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), model.get_embedding_table(),\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = get_next_sentence_output(\n bert_config, model.get_pooled_output(), next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n 
masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and EVAL modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator / denominator\n\n return (loss, per_example_loss, log_probs)\n\n\ndef get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". 
This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)\n\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor\n\n\ndef input_fn_builder(input_files,\n max_seq_length,\n max_predictions_per_seq,\n is_training,\n num_cpu_threads=4):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n name_to_features = {\n \"input_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"input_mask\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"segment_ids\":\n tf.FixedLenFeature([max_seq_length], tf.int64),\n \"masked_lm_positions\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_ids\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.int64),\n \"masked_lm_weights\":\n tf.FixedLenFeature([max_predictions_per_seq], tf.float32),\n \"next_sentence_labels\":\n tf.FixedLenFeature([1], tf.int64),\n }\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n if is_training:\n d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))\n d = d.repeat()\n d = d.shuffle(buffer_size=len(input_files))\n\n # `cycle_length` is the number of parallel files that get read.\n cycle_length = min(num_cpu_threads, len(input_files))\n\n # `sloppy` mode means that the interleaving is not exact. This adds\n # even more randomness to the training pipeline.\n d = d.apply(\n tf.contrib.data.parallel_interleave(\n tf.data.TFRecordDataset,\n sloppy=is_training,\n cycle_length=cycle_length))\n d = d.shuffle(buffer_size=100)\n else:\n d = tf.data.TFRecordDataset(input_files)\n # Since we evaluate for a fixed number of steps we don't want to encounter\n # out-of-range exceptions.\n d = d.repeat()\n\n # We must `drop_remainder` on training because the TPU requires fixed\n # size dimensions. 
For eval, we assume we are evaluating on the CPU or GPU\n # and we *don't* want to drop the remainder, otherwise we wont cover\n # every sample.\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n num_parallel_batches=num_cpu_threads,\n drop_remainder=True))\n return d\n\n return input_fn\n\n\ndef _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n if not FLAGS.do_train and not FLAGS.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n input_files = []\n for input_pattern in FLAGS.input_file.split(\",\"):\n input_files.extend(tf.gfile.Glob(input_pattern))\n\n tf.logging.info(\"*** Input Files ***\")\n for input_file in input_files:\n tf.logging.info(\" %s\" % input_file)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n keep_checkpoint_max=20,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.num_train_steps,\n num_warmup_steps=FLAGS.num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size)\n\n if FLAGS.do_train:\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n train_input_fn = input_fn_builder(\n input_files=input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=True)\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)\n\n if FLAGS.do_eval:\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n eval_input_fn = input_fn_builder(\n input_files=input_files,\n max_seq_length=FLAGS.max_seq_length,\n max_predictions_per_seq=FLAGS.max_predictions_per_seq,\n is_training=False)\n\n result = estimator.evaluate(\n input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for 
key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] | [
[
"tensorflow.data.TFRecordDataset",
"tensorflow.reshape",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.logging.set_verbosity",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.one_hot",
"tensorflow.reduce_sum",
"tensorflow.metrics.mean",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.gfile.GFile",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.Glob",
"tensorflow.FixedLenFeature",
"tensorflow.train.init_from_checkpoint",
"tensorflow.train.Scaffold",
"tensorflow.gfile.MakeDirs",
"tensorflow.constant",
"tensorflow.app.run",
"tensorflow.parse_single_example",
"tensorflow.zeros_initializer",
"tensorflow.flags.DEFINE_string",
"tensorflow.metrics.accuracy",
"tensorflow.logging.info",
"tensorflow.nn.bias_add",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.trainable_variables",
"tensorflow.to_int32",
"tensorflow.argmax",
"tensorflow.gather",
"tensorflow.contrib.data.parallel_interleave"
]
] |
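A minimal sketch of the TFRecord-decoding pattern that the script above builds its `input_fn` around, assuming TensorFlow 1.x; the file name "pretrain.tfrecord" and the reduced feature set are illustrative assumptions, not taken from the row.

import tensorflow as tf

def decode_record(record, max_seq_length=128):
    # tf.Example stores integers as int64; TPUs want int32, hence the cast below.
    name_to_features = {
        "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
        "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
        "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
    }
    example = tf.parse_single_example(record, name_to_features)
    return {name: tf.to_int32(t) if t.dtype == tf.int64 else t
            for name, t in example.items()}

# Hypothetical input file; fixed batch shapes mirror the TPU constraint above.
dataset = tf.data.TFRecordDataset(["pretrain.tfrecord"])
dataset = dataset.map(decode_record).batch(8, drop_remainder=True)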
TimS-ml/Scratch-ML | [
"0ea010e2b7ead5f98ba9a0db621cc7d7471e97af"
] | [
"scratchML/examples/linear_discriminant_analysis.py"
] | [
"from __future__ import print_function\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scratchML.supervised_learning import LDA\nfrom scratchML.utils import calculate_covariance_matrix, accuracy_score\nfrom scratchML.utils import normalize, standardize, train_test_split, Plot\nfrom scratchML.unsupervised_learning import PCA\n\n\ndef main():\n # Load the dataset\n data = datasets.load_iris()\n X = data.data\n y = data.target\n\n # Three -> two classes\n X = X[y != 2]\n y = y[y != 2]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\n\n # Fit and predict using LDA\n lda = LDA()\n lda.fit(X_train, y_train)\n y_pred = lda.predict(X_test)\n\n accuracy = accuracy_score(y_test, y_pred)\n\n print(\"Accuracy:\", accuracy)\n\n Plot().plot_in_2d(X_test, y_pred, title=\"LDA\", accuracy=accuracy)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.datasets.load_iris"
]
] |
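The scratchML LDA class itself is not shown in this row; as a reference point, here is a plain-NumPy sketch of the classic two-class Fisher discriminant, assuming scratchML follows this standard formulation (its actual implementation may differ).

import numpy as np

def fisher_lda_fit(X, y):
    # Two-class LDA: project onto w = Sw^-1 (mu1 - mu0), threshold at the midpoint.
    X0, X1 = X[y == 0], X[y == 1]
    mu0, mu1 = X0.mean(axis=0), X1.mean(axis=0)
    # Pooled within-class covariance, lightly regularized for numerical stability.
    Sw = np.cov(X0, rowvar=False) + np.cov(X1, rowvar=False)
    w = np.linalg.solve(Sw + 1e-6 * np.eye(Sw.shape[0]), mu1 - mu0)
    threshold = w @ (mu0 + mu1) / 2
    return w, threshold

def fisher_lda_predict(X, w, threshold):
    # Points projecting past the midpoint are assigned to class 1.
    return (X @ w > threshold).astype(int)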
nclarey/pyg-base | [
"a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862"
] | [
"src/pyg_base/_pandas.py"
] | [
"\"\"\"\nWe want to simplify the operations for pandas dataframes assuming we are using timeseries as the main objects.\n\nWhen we have multiple timeseries, we will:\n \n 1) calculate joint index using df_index()\n 2) reindex each timeseries to the joint index\n \nWe then need to worry about multiple columns if there are. If none, each timeseries will be considered as pd.Series\n\nIf there are multiple columns, we will perform the calculations columns by columns. \n\n\"\"\"\nfrom pyg_base._types import is_df, is_str, is_num, is_tss, is_int, is_arr, is_ts, is_arrs, is_tuples, is_pd\nfrom pyg_base._dictable import dictable\nfrom pyg_base._as_list import as_list\nfrom pyg_base._zip import zipper\nfrom pyg_base._reducer import reducing, reducer\nfrom pyg_base._decorators import wrapper\nfrom pyg_base._loop import loop\nfrom pyg_base._dates import dt\nimport pandas as pd\nimport numpy as np\nfrom copy import copy\nimport inspect\nimport datetime\nfrom operator import add, mul\n\n\n__all__ = ['df_fillna', 'df_index', 'df_reindex', 'df_columns', 'presync', 'np_reindex', 'nona', 'df_slice', 'df_unslice', 'min_', 'max_', 'add_', 'mul_', 'sub_', 'div_', 'pow_']\n\ndef _list(values):\n \"\"\"\n >>> assert _list([1,2,[3,4,5,[6,7]],dict(a =[8,9], b=[10,[11,12]])]) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] \n >>> assert _list(1) == [1] \n >>> assert _list(dict(a=1, b=2)) == [1,2] \n\n \"\"\"\n if isinstance(values, list):\n return sum([_list(df) for df in values], [])\n elif isinstance(values, dict):\n return _list(list(values.values()))\n else:\n return [values]\n\n\n@loop(list, tuple, dict)\ndef _index(ts):\n if isinstance(ts, pd.Index):\n return ts\n elif is_pd(ts):\n return ts.index\n elif is_arr(ts):\n return len(ts)\n else:\n raise ValueError('did not provide an index')\n \n\ndef _df_index(indexes, index):\n if len(indexes) > 0:\n if is_str(index):\n if index[0].lower() == 'i':#nner\n return reducing('intersection')(indexes) \n elif index[0].lower() == 'o':#uter\n return reducing('union')(indexes) \n elif index[0].lower() == 'l':#uter\n return indexes[0]\n elif index[0].lower() == 'r':#uter\n return indexes[-1]\n else:\n return _index(index)\n else:\n return None\n\n\ndef _np_index(indexes, index):\n if len(indexes) > 0:\n if index[0].lower() == 'i':#nner\n return min(indexes) \n elif index[0].lower() == 'o':#uter\n return max(indexes) \n elif index[0].lower() == 'l':#uter\n return indexes[0]\n elif index[0].lower() == 'r':#uter\n return indexes[-1]\n else:\n return None\n\n\ndef df_index(seq, index = 'inner'):\n \"\"\"\n Determines a joint index of multiple timeseries objects.\n\n :Parameters:\n ----------------\n seq : sequence whose index needs to be determined\n a (possible nested) sequence of timeseries/non-timeseries object within lists/dicts\n index : str, optional\n method to determine the index. 
The default is 'inner'.\n\n :Returns:\n -------\n pd.Index\n The joint index.\n \n :Example:\n ---------\n >>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]\n >>> more_tss_as_dict = dict(zip('abcde',[pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]))\n >>> res = df_index(tss + [more_tss_as_dict], 'inner')\n >>> assert len(res) == 6\n >>> res = df_index(more_tss_as_dict, 'outer')\n >>> assert len(res) == 14\n \"\"\"\n listed = _list(seq)\n indexes = [ts.index for ts in listed if is_pd(ts)]\n if len(indexes):\n return _df_index(indexes, index)\n arrs = [len(ts) for ts in listed if is_arr(ts)]\n if len(arrs):\n return _np_index(arrs, index)\n else:\n return None\n \n\ndef df_columns(seq, index = 'inner'):\n \"\"\"\n returns the columns of the joint object\n \n :Example:\n ---------\n >>> a = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('abcde'))\n >>> b = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('bcdef'))\n >>> assert list(df_columns([a,b])) == list('bcde')\n >>> assert list(df_columns([a,b], 'oj')) == list('abcdef')\n >>> assert list(df_columns([a,b], 'lj')) == list('abcde')\n >>> assert list(df_columns([a,b], 'rj')) == list('bcdef')\n\n :Parameters:\n ----------\n seq : sequence of dataframes \n DESCRIPTION.\n index : str, optional\n how to inner-join. The default is 'inner'.\n\n :Returns:\n -------\n pd.Index\n list of columns.\n \"\"\"\n \n listed = _list(seq)\n indexes= [ts.columns for ts in listed if is_df(ts) and ts.shape[1]>1 and len(set(ts.columns)) == ts.shape[1]] #dataframe with non-unique columns are treated like arrays\n if len(indexes):\n return _df_index(indexes, index)\n arrs = [ts.shape[1] for ts in listed if (is_arr(ts) or is_df(ts)) and len(ts.shape)>1 and ts.shape[1]>1]\n if len(arrs):\n return _np_index(arrs, index)\n return None\n\n@loop(list, tuple, dict)\ndef _df_fillna(df, method = None, axis = 0, limit = None):\n methods = as_list(method)\n if len(methods) == 0:\n return df\n if is_arr(df):\n return df_fillna(pd.DataFrame(df) if len(df.shape)==2 else pd.Series(df), method, axis, limit).values\n res = df\n for m in methods:\n if is_num(m):\n res = res.fillna(value = m, axis = axis, limit = limit)\n elif m in ['backfill', 'bfill', 'pad', 'ffill']:\n res = res.fillna(method = m, axis = axis, limit = limit)\n elif m in ('fnna', 'nona'):\n nonan = ~np.isnan(res)\n if len(res.shape)==2:\n nonan = nonan.max(axis=1)\n if m == 'fnna':\n nonan = nonan[nonan.values]\n if len(nonan):\n res = res[nonan.index[0]:]\n else:\n res = res.iloc[:0]\n elif m == 'nona':\n res = res[nonan.values]\n else:\n if is_num(limit) and limit<0:\n res = res.interpolate(method = m, axis = axis, limit = abs(limit), \n limit_direction = 'backward')\n else:\n res = res.interpolate(method = m, axis = axis, limit = limit)\n return res\n\ndef df_fillna(df, method = None, axis = 0, limit = None):\n \"\"\"\n Equivelent to df.fillna() except:\n\n - support np.ndarray as well as dataframes\n - support multiple methods of filling/interpolation\n - supports removal of nan from the start/all of the timeseries\n - supports action on multiple timeseries\n \n :Parameters:\n ----------------\n df : dataframe/numpy array\n \n method : string, list of strings or None, optional\n Either a fill method (bfill, ffill, pad)\n Or an interplation method: 'linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'spline', 'polynomial', 'from_derivatives', 'piecewise_polynomial', 
'pchip', 'akima', 'cubicspline'\n Or 'fnna': removes all to the first non nan\n Or 'nona': removes all nans\n axis : int, optional\n axis. The default is 0.\n limit : TYPE, optional\n when filling, how many nan get filled. The default is None (indefinite)\n \n :Example: method ffill or bfill\n -----------------------------------------------\n >>> from pyg import *; import numpy as np\n >>> df = np.array([np.nan, 1., np.nan, 9, np.nan, 25]) \n >>> assert eq(df_fillna(df, 'ffill'), np.array([ np.nan, 1., 1., 9., 9., 25.]))\n >>> assert eq(df_fillna(df, ['ffill','bfill']), np.array([ 1., 1., 1., 9., 9., 25.]))\n >>> assert eq(df_fillna(df, ['ffill','bfill']), np.array([ 1., 1., 1., 9., 9., 25.]))\n\n >>> df = np.array([np.nan, 1., np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 9, np.nan, 25]) \n >>> assert eq(df_fillna(df, 'ffill', limit = 2), np.array([np.nan, 1., 1., 1., np.nan, np.nan, np.nan, np.nan, 9., 9., 25.]))\n\n df_fillna does not maintain state of latest 'prev' value: use ffill_ for that.\n\n :Example: interpolation methods\n -----------------------------------------------\n >>> from pyg import *; import numpy as np\n >>> df = np.array([np.nan, 1., np.nan, 9, np.nan, 25]) \n >>> assert eq(df_fillna(df, 'linear'), np.array([ np.nan, 1., 5., 9., 17., 25.]))\n >>> assert eq(df_fillna(df, 'quadratic'), np.array([ np.nan, 1., 4., 9., 16., 25.]))\n\n\n :Example: method = fnna and nona\n ---------------------------------------------\n >>> from pyg import *; import numpy as np\n >>> ts = np.array([np.nan] * 10 + [1.] * 10 + [np.nan])\n >>> assert eq(df_fillna(ts, 'fnna'), np.array([1.]*10 + [np.nan]))\n >>> assert eq(df_fillna(ts, 'nona'), np.array([1.]*10))\n\n >>> assert len(df_fillna(np.array([np.nan]), 'nona')) == 0\n >>> assert len(df_fillna(np.array([np.nan]), 'fnna')) == 0\n\n :Returns:\n -------\n array/dataframe with nans removed/filled\n\n \"\"\"\n return _df_fillna(df, method = method, axis = axis, limit = limit)\n\n@loop(dict, list, tuple)\ndef _nona(df, value = np.nan):\n if np.isnan(value):\n mask = np.isnan(df)\n elif np.isinf(value):\n mask = np.isinf(df)\n else:\n mask = df == value\n if len(mask.shape) == 2:\n mask = mask.min(axis=1) == 1\n return df[~mask]\n\ndef nona(a, value = np.nan):\n \"\"\"\n removes rows that are entirely nan (or a specific other value)\n\n :Parameters:\n ----------------\n a : dataframe/ndarray\n \n value : float, optional\n value to be removed. 
The default is np.nan.\n \n :Example:\n ----------\n >>> from pyg import *\n >>> a = np.array([1,np.nan,2,3])\n >>> assert eq(nona(a), np.array([1,2,3]))\n\n :Example: multiple columns\n ---------------------------\n >>> a = np.array([[1,np.nan,2,np.nan], [np.nan, np.nan, np.nan, 3]]).T \n >>> b = np.array([[1,2,np.nan], [np.nan, np.nan, 3]]).T ## 2nd row has nans across\n >>> assert eq(nona(a), b)\n\n\n \"\"\"\n return _nona(a)\n\n\n@loop(list, tuple, dict)\ndef _df_reindex(ts, index, method = None, limit = None):\n methods = as_list(method)\n if is_pd(ts):\n if is_int(index):\n raise ValueError('trying to reindex dataframe %s using numpy interval length %i'%(ts, index))\n if len(methods) and methods[0] in ['backfill', 'bfill', 'pad', 'ffill']:\n res = _nona(ts).reindex(index, method = methods[0], limit = limit)\n res = _df_fillna(res, method = methods[1:], limit = limit)\n else:\n res = ts.reindex(index)\n res = _df_fillna(res, method = method, limit = limit)\n return res\n elif is_arr(ts):\n if isinstance(index, pd.Index):\n if len(index) == len(ts):\n return ts\n else:\n raise ValueError('trying to reindex numpy array %s using pandas index %s'%(ts, index))\n elif is_int(index):\n if index<len(ts):\n res = ts[-index:]\n elif index>len(ts):\n shape = (index - len(ts),) + ts.shape[1:]\n res = np.concatenate([np.full(shape, np.nan),ts])\n else:\n res = ts\n return df_fillna(res, method = methods, limit = limit)\n else:\n return ts\n else:\n return ts\n \n\n@loop(list, tuple, dict)\ndef _df_recolumn(ts, columns):\n if columns is not None and is_df(ts) and ts.shape[1] > 1 and len(set(ts.columns)) == ts.shape[1]:\n return pd.DataFrame({col: ts[col].values if col in ts.columns else np.nan for col in columns}, index = ts.index)\n else:\n return ts\n\ndef df_recolumn(ts, columns = None):\n return _df_recolumn(ts, columns)\n\ndef np_reindex(ts, index, columns = None):\n \"\"\"\n pyg assumes that when working with numpy arrays representing timeseries, you:\n - determine a global timestamp\n - resample all timeseries to that one, and then covert to numpy.array, possibly truncating leading nan's.\n - do the maths you need to do\n - having worked with numpy arrays, if we want to reindex them back into dataframe, use np_reindex\n \n :Example:\n -------\n >>> from pyg import *\n >>> ts = np.array(np.random.normal(0,1,1000))\n >>> index = pd.Index(drange(-1999))\n >>> np_reindex(ts, index)\n\n :Parameters:\n ----------------\n ts : numpy array\n\n index : pandas.Index\n\n columns: list/array of columns names\n\n :Returns:\n ----------\n pd.DataFrame/pd.Series\n\n \"\"\"\n if is_pd(index):\n index = index.index\n if len(index)>len(ts):\n index = index[-len(ts):]\n elif len(index)<len(ts):\n ts = ts[-len(index):]\n res = pd.Series(ts, index) if len(ts.shape)<2 else pd.DataFrame(ts, index)\n if columns is not None:\n if is_df(columns):\n columns = columns.columns\n res.columns = columns\n return res\n\ndef df_reindex(ts, index = None, method = None, limit = None):\n \"\"\"\n A slightly more general version of df.reindex(index)\n\n :Parameters:\n ----------------\n ts : dataframe or numpy array (or list/dict of theses)\n timeseries to be reindexed\n index : str, timeseries, pd.Index.\n The new index\n method : str, list of str, float, optional\n various methods of handling nans are available. 
The default is None.\n See df_fillna for a full list.\n\n :Returns:\n -------\n timeseries/np.ndarray (or list/dict of theses)\n timeseries reindex.\n \n :Example: index = inner/outer\n -----------------------------\n >>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]\n >>> res = df_reindex(tss, 'inner')\n >>> assert len(res[0]) == 6\n >>> res = df_reindex(tss, 'outer')\n >>> assert len(res[0]) == 14\n\n :Example: index provided\n -----------------------------\n >>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]\n >>> res = df_reindex(tss, tss[0])\n >>> assert eq(res[0], tss[0])\n >>> res = df_reindex(tss, tss[0].index)\n >>> assert eq(res[0], tss[0])\n\n \"\"\"\n if index is None:\n return ts\n elif is_str(index):\n index = df_index(ts, index)\n elif is_ts(index):\n index = index.index\n elif is_arr(index):\n index = pd.Index(index)\n return _df_reindex(ts, index = index, method = method, limit = limit)\n\n\ndef df_concat(objs, columns = None, axis=1, join = 'outer'):\n \"\"\"\n simple concatenator, \n - defaults to to concatenating by date (for timeseries)\n - supports columns renaming\n\n :Parameters:\n ----------\n objs : list/dict\n collection of timeseries\n columns : str/list\n Names of new columns. The default is None.\n axis : int, optional\n axis to merge. The default is 1.\n join : str, optional\n join method inner/outer, see pd.concat. The default is 'outer'.\n\n :Returns:\n -------\n res : pd.DataFrame\n joined dataframe\n \n :Example:\n ---------\n >>> objs = [pd.Series([1,2,3], [4,5,6]), pd.Series([3,4,5], [1,2,4])]\n >>> columns = ['a', 'b']; \n >>> axis = 1; join = 'outer'\n >>> res = df_concat(objs, columns)\n\n >>> res\n >>> a b\n >>> 1 NaN 3.0\n >>> 2 NaN 4.0\n >>> 4 1.0 5.0\n >>> 5 2.0 NaN\n >>> 6 3.0 NaN \n\n >>> df_concat(res, dict(a = 'x', b = 'y'))\n >>> res\n >>> x y\n >>> 1 NaN 3.0\n >>> 2 NaN 4.0\n >>> 4 1.0 5.0\n >>> 5 2.0 NaN\n >>> 6 3.0 NaN \n\n \"\"\"\n if isinstance(objs, dict):\n columns = list(objs.keys())\n objs = list(objs.values())\n if isinstance(objs, list):\n df_objs = [o for o in objs if is_pd(o)]\n res = pd.concat(df_objs, axis = axis, join = join)\n if len(df_objs) < len(objs):\n df_objs = [o if is_pd(o) else pd.Series(o, res.index) for o in objs]\n res = pd.concat(df_objs, axis = axis, join = join) \n elif isinstance(objs, pd.DataFrame):\n res = objs.copy() if columns is not None else objs\n if columns is not None:\n if isinstance(columns, list):\n res.columns = columns \n else:\n res = res.rename(columns = columns)\n return res\n\n\n@loop(list, dict, tuple)\ndef _df_column(ts, column, i = None, n = None):\n \"\"\"\n This is mostly a helper function to help us loop through multiple columns.\n Function grabs a column from a dataframe/2d array\n\n :Parameters:\n ----------\n ts : datafrane\n the original dataframe or 2-d numpy array\n column : str\n name of the column to grab.\n i : int, optional\n Can grab the column using its index. The default is None.\n n : int, optional\n asserting the number of columns, ts.shape[1]. 
The default is None.\n\n :Returns:\n -------\n a series or a 1-d numpy array\n \"\"\"\n \n if is_df(ts):\n if ts.shape[1] == 1:\n return ts[ts.columns[0]]\n elif column in ts.columns:\n return ts[column]\n elif column is None and i is not None:\n if len(set(ts.columns)) == ts.shape[1]: #unique columns, don't call me using i\n raise ValueError('trying to grab %ith column from a dataframe with proper columns: %s'%(i, ts.columns))\n elif n is not None and ts.shape[1]!=n:\n raise ValueError('trying to grab %ith column and asserting must have %i columns but have %i'%(i, n, ts.shape[1]))\n else:\n if i<ts.shape[1]:\n return ts.iloc[:,i]\n else:\n return np.nan\n else:\n return np.nan\n elif is_arr(ts) and len(ts.shape) == 2:\n if ts.shape[1] == 1:\n return ts.T[0]\n elif i is not None:\n if n is not None and ts.shape[1]!=n:\n raise ValueError('trying to grab %ith column and asserting must have %i columns but have %i'%(i, n, ts.shape[1]))\n elif i<ts.shape[1]:\n return ts.T[i]\n else:\n return np.nan\n else:\n return ts\n else:\n return ts\n\n\ndef df_column(ts, column, i = None, n = None):\n \"\"\"\n This is mostly a helper function to help us loop through multiple columns.\n Function grabs a column from a dataframe/2d array\n\n :Parameters:\n ----------\n ts : datafrane\n the original dataframe or 2-d numpy array\n column : str\n name of the column to grab.\n i : int, optional\n Can grab the column using its index. The default is None.\n n : int, optional\n asserting the number of columns, ts.shape[1]. The default is None.\n\n :Returns:\n -------\n a series or a 1-d numpy array\n \"\"\"\n return _df_column(ts = ts, column = column, i = i, n = n)\n\ndef _convert(res, columns):\n \"\"\"\n We run a result per each column, now we want to convert it back to objects\n ----------\n res : dict\n results run per each column.\n \"\"\"\n values = list(res.values())\n if is_tss(values):\n return pd.DataFrame(res)\n elif is_arrs(values) and is_int(columns):\n return np.array(values).T\n elif is_tuples(values):\n return tuple([_convert(dict(zip(res.keys(), row)), columns) for row in zipper(*values)])\n else: \n return np.array(values) if is_int(columns) else pd.Series(res)\n\ndef df_sync(dfs, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n df_sync performs a sync of multiple dataframes\n \n :Parameters:\n ----------\n dfs : list or dict of timeseries\n dataframes to be synched\n join : str, optional\n index join method. The default is 'ij'.\n method : str/float, optional\n how the nan's are to be filled once reindexing occurs. The default is None.\n columns : str, optional\n how to sync multi-column timeseries. 
The default is 'ij'.\n\n :Example:\n -------\n >>> a = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-100,-1), list('abcde'))\n >>> b = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('bcdef'))\n >>> c = 'not a timeseries'\n >>> d = pd.DataFrame(np.random.normal(0,1,(100,1)), drange(-98,1), ['single_column_df'])\n >>> s = pd.Series(np.random.normal(0,1,105), drange(-104))\n \n :Example: inner join on index and columns\n --------------------------------\n >>> dfs = [a,b,c,d,s]\n >>> join = 'ij'; method = None; columns = 'ij'\n >>> res = df_sync(dfs, 'ij')\n >>> assert len(res[0]) == len(res[1]) == len(res[-1]) == 98\n >>> assert res[2] == 'not a timeseries'\n >>> assert list(res[0].columns) == list('bcde')\n\n :Example: outer join on index and inner join on columns\n --------------------------------\n >>> res = df_sync(dfs, join = 'oj')\n >>> assert len(res[0]) == len(res[1]) == len(res[-1]) == 106; assert res[2] == 'not a timeseries'\n >>> assert list(res[0].columns) == list('bcde')\n\n >>> res = df_sync(dfs, join = 'oj', method = 1)\n >>> assert res[0].iloc[0].sum() == 4\n\n :Example: outer join on index and columns\n -------------------------------------------\n >>> res = df_sync(dfs, join = 'oj', method = 1, columns = 'oj')\n >>> assert res[0].iloc[0].sum() == 5\n >>> assert list(res[0].columns) == list('abcdef')\n >>> assert list(res[-2].columns) == ['single_column_df'] # single column unaffected\n\n :Example: synching of dict rather than a list\n -------------------------------------------\n >>> dfs = Dict(a = a, b = b, c = c, d = d, s = s)\n >>> res = df_sync(dfs, join = 'oj', method = 1, columns = 'oj')\n >>> assert res.c == 'not a timeseries'\n >>> assert res.a.shape == (106,6)\n \"\"\"\n if isinstance(dfs, dict):\n values = list(dfs.values())\n elif isinstance(dfs, (list, tuple)):\n values = list(dfs)\n else:\n return dfs\n listed = _list(values)\n tss = [ts for ts in listed if is_ts(ts)]\n index = df_index(listed, join)\n dfs = df_reindex(dfs, index, method = method)\n\n ### now we do the columns\n if columns is False or columns is None:\n return dfs\n else:\n cols = df_columns(tss, columns)\n dfs = df_recolumn(dfs, cols)\n return dfs\n \n\nclass presync(wrapper):\n \"\"\"\n Much of timeseries analysis in Pandas is spent aligning multiple timeseries before feeding them into a function.\n presync allows easy presynching of all paramters of a function.\n \n :Parameters:\n ----------\n function : callable, optional\n function to be presynched. The default is None.\n index : str, optional\n index join policy. The default is 'inner'.\n method : str/int/list of these, optional\n method of nan handling. The default is None.\n columns : str, optional\n columns join policy. The default is 'inner'.\n default : float, optional\n value when no data is available. 
The default is np.nan.\n\n :Returns:\n -------\n presync-decorated function\n\n \n :Example:\n ------- \n >>> from pyg import *\n >>> x = pd.Series([1,2,3,4], drange(-3))\n >>> y = pd.Series([1,2,3,4], drange(-4,-1)) \n >>> z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])\n >>> addition = lambda a, b: a+b \n\n #We get some nonsensical results:\n\n >>> assert list(addition(x,z).columns) == list(x.index) + ['a', 'b']\n \n #But:\n \n >>> assert list(presync(addition)(x,z).columns) == ['a', 'b']\n >>> res = presync(addition, index='outer', method = 'ffill')(x,z)\n >>> assert eq(res.a.values, np.array([2,5,6,7]))\n \n \n :Example 2: alignment works for parameters 'buried' within...\n -------------------------------------------------------\n >>> function = lambda a, b: a['x'] + a['y'] + b \n >>> f = presync(function, 'outer', method = 'ffill')\n >>> res = f(dict(x = x, y = y), b = z)\n >>> assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))\n \n \n :Example 3: alignment of numpy arrays\n -------------------------------------\n >>> addition = lambda a, b: a+b\n >>> a = presync(addition)\n >>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4]]).T), pd.Series([2,4,6,8], drange(-3)))\n >>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([1,2,3,4])), pd.Series([2,4,6,8], drange(-3)))\n >>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4],[5,6,7,8]]).T), pd.DataFrame({0:[2,4,6,8], 1:[6,8,10,12]}, drange(-3)))\n >>> assert eq(a(np.array([1,2,3,4]), np.array([[1,2,3,4]]).T), np.array([2,4,6,8]))\n\n\n :Example 4: inner join alignment of columns in dataframes by default\n ---------------------------------------------------------------------\n >>> x = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))\n >>> y = pd.DataFrame({'wrong':[2,4,6,8], 'columns':[6,8,10,12]}, drange(-3))\n >>> assert len(a(x,y)) == 0 \n >>> y = pd.DataFrame({'a':[2,4,6,8], 'other':[6,8,10,12.]}, drange(-3))\n >>> assert eq(a(x,y),x[['a']]*2)\n >>> y = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))\n >>> assert eq(a(x,y),x*2)\n >>> y = pd.DataFrame({'column name for a single column dataframe is ignored':[1,1,1,1]}, drange(-3)) \n >>> assert eq(a(x,y),x+1)\n \n >>> a = presync(addition, columns = 'outer')\n >>> y = pd.DataFrame({'other':[2,4,6,8], 'a':[6,8,10,12]}, drange(-3))\n >>> assert sorted(a(x,y).columns) == ['a','b','other'] \n\n :Example 5: ffilling, bfilling\n ------------------------------\n >>> x = pd.Series([1.,np.nan,3.,4.], drange(-3)) \n >>> y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1)) \n >>> assert eq(a(x,y), pd.Series([np.nan, np.nan,7], drange(-3,-1)))\n\n but, we provide easy conversion of internal parameters of presync:\n\n >>> assert eq(a.ffill(x,y), pd.Series([2,4,7], drange(-3,-1)))\n >>> assert eq(a.bfill(x,y), pd.Series([4,6,7], drange(-3,-1)))\n >>> assert eq(a.oj(x,y), pd.Series([np.nan, np.nan, np.nan, 7, np.nan], drange(-4)))\n >>> assert eq(a.oj.ffill(x,y), pd.Series([np.nan, 2, 4, 7, 8], drange(-4)))\n \n :Example 6: indexing to a specific index\n ----------------------------------------\n >>> index = pd.Index([dt(-3), dt(-1)])\n >>> a = presync(addition, index = index)\n >>> x = pd.Series([1.,np.nan,3.,4.], drange(-3)) \n >>> y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1)) \n >>> assert eq(a(x,y), pd.Series([np.nan, 7], index))\n \n \n :Example 7: returning complicated stuff\n ----------------------------------------\n >>> from pyg import * \n >>> a = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))\n >>> b = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))\n\n >>> def f(a, b):\n >>> return (a*b, ts_sum(a), ts_sum(b))\n\n >>> old = f(a,b) \n >>> self = presync(f)\n >>> args = (); kwargs = dict(a = a, b = b)\n >>> new = self(*args, **kwargs)\n >>> assert eq(new, old)\n \"\"\"\n \n def __init__(self, function = None, index = 'inner', method = None, columns = 'inner', default = np.nan):\n super(presync, self).__init__(function = function, index = index, method = method, columns = columns , default = default)\n \n @property\n def ij(self):\n return copy(self) + dict(index = 'inner')\n\n @property\n def oj(self):\n return self + dict(index = 'outer')\n\n @property\n def lj(self):\n return self + dict(index = 'left')\n\n @property\n def rj(self):\n return self + dict(index = 'right')\n\n @property\n def ffill(self):\n return copy(self) + dict(method = 'ffill')\n\n @property\n def bfill(self):\n return self + dict(method = 'bfill')\n\n\n def wrapped(self, *args, **kwargs):\n _idx = kwargs.pop('join', self.index)\n _method = kwargs.pop('method', self.method)\n _columns = kwargs.pop('columns', self.columns)\n \n values = list(args) + list(kwargs.values())\n listed = _list(values)\n tss = [ts for ts in listed if is_ts(ts)]\n callargs = inspect.getcallargs(self.function, *args, **kwargs)\n if is_str(_idx) and _idx in callargs:\n index = _index(callargs[_idx])\n else:\n index = df_index(listed, _idx)\n args_= df_reindex(args, index, method = _method)\n kwargs_= df_reindex(kwargs, index, method = _method)\n ### now we do the columns\n if _columns is False:\n return self.function(*args_, **kwargs_)\n else:\n cols = [tuple(ts.columns) for ts in tss if is_df(ts) and ts.shape[1]>1]\n if len(set(cols))==1: # special case where all 2-d dataframes have same column headers\n columns = cols[0]\n n = len(columns)\n res = {column: self.function(*df_column(args_,column = column, i = i, n = n), **df_column(kwargs_, column=column, i = i, n = n)) for i, column in enumerate(columns)}\n else:\n columns = df_columns(listed, _columns)\n if is_int(columns):\n res = {i: self.function(*df_column(args_, column = None, i = i), **df_column(kwargs_, column=None, i = i)) for i in range(columns)}\n elif columns is None:\n return self.function(*df_column(args_, column = None), **df_column(kwargs_, column = None))\n else:\n columns = list(columns) if isinstance(columns, pd.Index) else as_list(columns)\n columns = sorted(columns)\n res = {column: self.function(*df_column(args_,column = column), **df_column(kwargs_, column=column)) for column in columns} \n converted = _convert(res, columns)\n return converted \n\n\n@presync\ndef _div_(a, b):\n \"\"\"\n division of a by b supporting presynching (inner join) of timeseries\n \"\"\"\n return a/b\n\n@presync\ndef _sub_(a, b):\n \"\"\"\n subtraction of b from a supporting presynching (inner join) of timeseries\n \"\"\"\n return a-b\n\n@presync\ndef _add_(a, b):\n \"\"\"\n addition of a and b supporting presynching (inner join) of timeseries\n \"\"\"\n return a + b\n\n@presync\ndef _mul_(a, b):\n \"\"\"\n multiplication of b and a supporting presynching (inner join) of timeseries\n \"\"\"\n return a * b\n\n@presync\ndef _pow_(a, b):\n \"\"\"\n equivalent to a**b supporting presynching (inner join) of timeseries\n \"\"\"\n return a**b\n\n\ndef add_(a, b = None, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n a = pd.Series([1,2,3], drange(-2))\n b = pd.Series([1,2,3], drange(-3,-1))\n add_(a,b, 'oj', method = 0)\n \n addition of a and b supporting presynching (inner join) of timeseries\n \"\"\"\n dfs = as_list(a) + as_list(b)\n f = lambda a, b: _add_(a, b, join = join, method = method, columns = columns)\n return reducer(f, dfs)\n \n\ndef mul_(a, b = None, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n multiplication of a and b supporting presynching (inner join) of timeseries\n mul_(a,b,join = 'oj', method = 'ffill')\n cell(mul_, a = a, b = b, join = 'oj')()\n \"\"\"\n dfs = as_list(a) + as_list(b)\n f = lambda a, b: _mul_(a, b, join = join, method = method, columns = columns)\n return reducer(f, dfs)\n\ndef div_(a, b, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n division of a by b supporting presynching (inner join) of timeseries\n \"\"\"\n if isinstance(a, list):\n a = mul_(a, join = join, method = method, columns = columns)\n if isinstance(b, list):\n b = mul_(b, join = join, method = method, columns = columns)\n return _div_(a, b, join = join, method = method, columns = columns)\n\ndef sub_(a, b, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n subtraction of b from a supporting presynching (inner join) of timeseries\n \"\"\"\n if isinstance(a, list):\n a = add_(a, join = join, method = method, columns = columns)\n if isinstance(b, list):\n b = add_(b, join = join, method = method, columns = columns)\n return _sub_(a, b, join = join, method = method, columns = columns)\n\ndef pow_(a, b, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n equivalent to a**b supporting presynching (inner join) of timeseries\n \"\"\"\n return _pow_(a,b, join = join, method = method, columns = columns)\n\ndef min_(a, b = None, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n equivalent to a reduced np.minimum operation supporting presynching of timeseries\n \"\"\"\n dfs = as_list(a) + as_list(b)\n dfs = df_sync(dfs, join = join, method = method, columns = columns) \n return reducer(np.minimum, dfs)\n\ndef max_(a, b = None, join = 'ij', method = None, columns = 'ij'):\n \"\"\"\n equivalent to a reduced np.maximum operation supporting presynching of timeseries\n \"\"\"\n dfs = as_list(a) + as_list(b)\n dfs = df_sync(dfs, join = join, method = method, columns = columns)\n return reducer(np.maximum, dfs)\n\n\ndef _closed(oc):\n if oc in '()oO':\n return False\n elif oc in '[]cC':\n return True\n else:\n raise ValueError('not sure how to parse boundary %s'%oc)\n \n\ndef _df_slice(df, lb = None, ub = None, openclose = '[)'):\n \"\"\" \n Performs a one-time slice of the dataframe. Does not stitch slices together\n \n pandas slicing has two issues:\n 1) it fails for timeseries quite a bit\n 2) for timeseries df[dt1:dt2] is close-close while for a normal dataframe df[lb:ub] is close-open\n \n \"\"\"\n if isinstance(df, (pd.Index, pd.Series, pd.DataFrame)) and len(df)>0 and (ub is not None or lb is not None):\n l,u = openclose if openclose else '[)'\n l = _closed(l); u = _closed(u)\n if is_ts(df):\n lb = lb if lb is None or isinstance(lb, datetime.time) else dt(lb)\n ub = ub if ub is None or isinstance(ub, datetime.time) else dt(ub)\n if (l or lb is None) and (u or ub is None):\n try:\n return df[lb:ub]\n except Exception:\n pass\n elif (l or lb is None) and (ub is None or not u):\n try:\n return df[lb:ub]\n except Exception:\n pass\n if lb is not None:\n index = df if isinstance(df, pd.Index) else df.index\n if isinstance(lb, datetime.time):\n index = index.time\n df = df[index>=lb] if l else df[index>lb]\n if ub is not None:\n index = df if isinstance(df, pd.Index) else df.index\n if isinstance(ub, datetime.time):\n index = index.time \n df = df[index<=ub] if u else df[index<ub]\n return df\n\n\ndef df_slice(df, lb = None, ub = None, openclose = '(]', n = 1):\n \"\"\"\n slices a dataframe/series/index based on lower/upper bounds.\n If multiple timeseries are sliced at different times, will then stitch them together.\n \n :Parameters:\n ----------\n df : dataframe\n Either a single dataframe or a list of dataframes.\n lb : single or multiple lower bounds\n lower bounds to cut the data.\n ub : single or multiple upper bounds\n upper bounds to cut the data\n openclose : 2-character string\n defines how left/right boundary behave.\n [,] or c : close\n (,) or o : open\n ' ' : do not cut\n \n :Returns:\n -------\n filtered (and possibly stitched) timeseries\n \n\n :Example: single timeseries filtering\n ---------\n >>> df = pd.Series(np.random.normal(0,1,1000), drange(-999))\n >>> df_slice(df, None, '-1m')\n >>> df_slice(df, '-1m', None)\n\n :Example: single timeseries, multiple filtering\n ---------\n >>> df = pd.Series(np.random.normal(0,1,1000), drange(-999))\n >>> lb = jan1 = drange(2018, None, '1y')\n >>> ub = feb1 = drange(dt(2018,2,1), None, '1y')\n >>> assert set(df_slice(df, jan1, feb1).index.month) == {1}\n\n\n :Example: single timeseries time of day filtering\n ---------\n >>> dates = drange(-5, 0, '5n')\n >>> df = pd.Series(np.random.normal(0,1,12*24*5+1), dates)\n >>> assert len(df_slice(df, None, datetime.time(hour = 10))) == 606\n >>> assert len(df_slice(df, datetime.time(hour = 5), datetime.time(hour = 10))) == 300\n >>> assert len(df_slice(df, lb = datetime.time(hour = 10), ub = datetime.time(hour = 5))) == len(dates) - 300\n\n\n :Example: stitching together multiple future contracts for a continuous price\n ---------\n >>> ub = drange(1980, 2000, '3m')\n >>> df = [pd.Series(np.random.normal(0,1,1000), drange(-999, date)) for date in ub]\n >>> df_slice(df, ub = ub)\n\n :Example: stitching together multiple future contracts for a continuous price in front 5 contracts\n ---------\n >>> ub = drange(1980, 2000, '3m')\n >>> df = [pd.Series(np.random.normal(0,1,1000), drange(-999, date)) for date in ub]\n >>> df_slice(df, ub = ub, n = 5).iloc[500:]\n\n :Example: stitching together symbols\n ---------\n >>> from pyg import * \n >>> ub = drange(1980, 2000, '3m')\n >>> df = loop(list)(dt2str)(ub)\n >>> df_slice(df, ub = ub, n = 3)\n\n \n \"\"\"\n if isinstance(lb, tuple) and len(lb) == 2 and ub is None:\n lb, ub = lb\n if isinstance(ub, datetime.time) and isinstance(lb, datetime.time) and lb>ub:\n pre = df_slice(df, None, ub)\n post = df_slice(df, lb, None)\n return pd.concat([pre, post]).sort_index() \n if isinstance(df, list): \n if isinstance(lb, list) and ub is None:\n ub = lb[1:] + [None]\n elif isinstance(ub, list) and lb is None:\n lb = [None] + ub[:-1]\n boundaries = sorted(set([date for date in lb + ub if date is not None]))\n df = [d if is_pd(d) else pd.Series(d, boundaries) for d in df]\n if n > 1:\n df = [pd.concat(df[i: i+n], axis = 1) for i in range(len(df))]\n for d in df:\n d.columns = range(d.shape[1])\n dfs = as_list(df)\n dlu = zipper(dfs, lb, ub)\n res = [_df_slice(d, lb = l, ub = u, openclose = openclose) for d, l, u in dlu]\n if len(res) == 0:\n return None\n elif len(res) == 1:\n return res[0]\n elif isinstance(lb, list) and isinstance(ub, list):\n res = pd.concat(res)\n return res\n\ndef df_unslice(df, ub):\n \"\"\"\n If we have a rolled multi-column timeseries and we want to know which original timeseries each column segment came from.\n As long as you provide the stitching points, forming the upper bound of each original timeseries, \n df_unslice will return a dict from each upper bound to a single-column timeseries\n\n :Example:\n ---------\n >>> ub = drange(1980, 2000, '3m')\n >>> dfs = [pd.Series(date.year * 100 + date.month, drange(-999, date)) for date in ub]\n >>> df = df_slice(dfs, ub = ub, n = 10)\n\n >>> df.iloc[700:-700:] \n \n >>> 0 1 2 3 4 5 6 7 8 9\n >>> 1979-03-08 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN\n >>> 1979-03-09 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN\n >>> 1979-03-10 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN\n >>> 1979-03-11 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN\n >>> 1979-03-12 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN\n >>> ... ... ... ... ... ... ... ... .. ..\n >>> 1998-01-27 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN\n >>> 1998-01-28 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN\n >>> 1998-01-29 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN\n >>> 1998-01-30 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN\n >>> 1998-01-31 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN\n\n >>> res = df_unslice(df, ub)\n >>> res[ub[0]]\n >>> 1977-04-07 198001.0\n >>> 1977-04-08 198001.0\n >>> 1977-04-09 198001.0\n >>> 1977-04-10 198001.0\n >>> 1977-04-11 198001.0\n >>> ...\n >>> 1979-12-28 198001.0\n >>> 1979-12-29 198001.0\n >>> 1979-12-30 198001.0\n >>> 1979-12-31 198001.0\n >>> 1980-01-01 198001.0\n >>> Name: 0, Length: 1000, dtype: float64\n \n We can then even slice the data again:\n \n >>> assert eq(df_slice(list(res.values()), ub = ub, n = 10), df)\n\n \"\"\"\n n = df.shape[1] if is_df(df) else 1\n res = dictable(ub = ub, lb = [None] + ub[:-1], i = range(len(ub)))\n res = res(ts = lambda lb, ub: df_slice(df, lb, ub, '(]'))\n res = res(rs = lambda i, ts: dictable(u = ub[i: i+n], j = range(len(ub[i: i+n])))(ts = lambda j: ts[j]))\n rs = dictable.concat(res.rs).listby('u').do([pd.concat, nona], 'ts')\n return dict(rs['u', 'ts'])"
] | [
[
"pandas.Series",
"numpy.isinf",
"pandas.DataFrame",
"pandas.concat",
"numpy.isnan",
"numpy.array",
"pandas.Index",
"numpy.full"
]
] |
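The module above is organized around the join-then-reindex idiom its docstring describes: compute a joint index (as df_index does), reindex every timeseries to it (as df_reindex does), then operate elementwise. A plain-pandas sketch of that idiom, with illustrative data and no pyg dependency, is:

import numpy as np
import pandas as pd

a = pd.Series(np.arange(4.0), pd.date_range("2024-01-01", periods=4))
b = pd.Series(np.arange(4.0), pd.date_range("2024-01-02", periods=4))

# 1) joint index: an 'inner' join, as df_index(..., 'inner') would compute
joint = a.index.intersection(b.index)

# 2) reindex both series to the joint index, then operate elementwise;
#    this is roughly what a presync-wrapped addition does on an inner join
result = a.reindex(joint) + b.reindex(joint)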
czbiohub/opencell-portal-pub | [
"2b056924e4f55490b16349ff0dcf3e719ab516c7"
] | [
"opencell/imaging/images.py"
] | [
"import datetime\nimport hashlib\nimport json\nimport numpy as np\nimport pandas as pd\nimport tifffile\n\n\ndef timestamp():\n return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\nclass MicroManagerTIFF:\n\n def __init__(self, src_filepath, verbose=True):\n '''\n\n '''\n\n self.verbose = verbose\n self.src_filepath = src_filepath\n\n self.events = []\n self.global_metadata = {'processing_timestamp': timestamp()}\n\n self.open_tiff()\n\n\n def event_logger(self, message):\n '''\n '''\n if self.verbose:\n print('EVENT: %s' % message)\n self.events.append({'message': message, 'timestamp': timestamp()})\n\n\n def save_events(self, dst_filepath):\n if not self.events:\n return\n pd.DataFrame(data=self.events).to_csv(dst_filepath, index=False)\n\n\n def save_global_metadata(self, dst_filepath):\n with open(dst_filepath, 'w') as file:\n json.dump(self.global_metadata, file)\n\n\n def save_mm_metadata(self, dst_filepath):\n self.mm_metadata.to_csv(dst_filepath, index=False)\n\n\n def calc_hash(self):\n '''\n Calculate the sha1 hash from the file contents\n '''\n sha1 = hashlib.sha1()\n with open(self.src_filepath, 'rb') as file:\n sha1.update(file.read())\n\n hash_value = sha1.hexdigest()\n self.global_metadata['sha1_hash'] = hash_value\n return hash_value\n\n\n def open_tiff(self):\n '''\n Open the stack using tifffile.TiffFile\n '''\n self.tiff = tifffile.TiffFile(self.src_filepath)\n\n\n @staticmethod\n def _parse_mm_tag_schema_v1(mm_tag):\n '''\n Parse a MicroManagerMetadata tag in the 'old' schema\n (KC: I believe this schema corresponds to MicroManager 1.x)\n '''\n metadata = {\n 'slice_ind': mm_tag['SliceIndex'],\n 'frame_ind': mm_tag['FrameIndex'],\n 'channel_ind': mm_tag['ChannelIndex'],\n 'position_ind': mm_tag['PositionIndex'],\n 'exposure_time': mm_tag['AndorEMCCD-Exposure'],\n 'laser_status_405': mm_tag['AndorILE-A-Laser 405-Power Enable'],\n 'laser_power_405': mm_tag['AndorILE-A-Laser 405-Power Setpoint'],\n 'laser_status_488': mm_tag['AndorILE-A-Laser 488-Power Enable'],\n 'laser_power_488': mm_tag['AndorILE-A-Laser 488-Power Setpoint'],\n }\n return metadata\n\n\n @staticmethod\n def _parse_mm_tag_schema_v2(mm_tag):\n '''\n Parse a MicroManagerMetadata tag in the 'new' schema\n (KC: I believe this schema corresponds to MicroManager 2.x)\n '''\n metadata = {\n 'slice_ind': mm_tag['SliceIndex'],\n 'frame_ind': mm_tag['FrameIndex'],\n 'channel_ind': mm_tag['ChannelIndex'],\n 'position_ind': mm_tag['PositionIndex'],\n 'exposure_time': mm_tag.get('Andor EMCCD-Exposure')['PropVal'],\n 'laser_status_405': mm_tag.get('Andor ILE-A-Laser 405-Power Enable')['PropVal'],\n 'laser_power_405': mm_tag.get('Andor ILE-A-Laser 405-Power Setpoint')['PropVal'],\n 'laser_status_488': mm_tag.get('Andor ILE-A-Laser 488-Power Enable')['PropVal'],\n 'laser_power_488': mm_tag.get('Andor ILE-A-Laser 488-Power Setpoint')['PropVal'],\n }\n return metadata\n\n\n def parse_micromanager_metadata(self):\n '''\n Parse the MicroManager metadata for each page in the TIFF file\n '''\n\n # the IJMetadata appears only in the first page\n ij_metadata = None\n try:\n ij_metadata = self.tiff.pages[0].tags['IJMetadata'].value['Info']\n except Exception:\n self.event_logger('There was no IJMetadata tag found on the first page')\n\n if ij_metadata is not None:\n try:\n ij_metadata = json.loads(ij_metadata)\n except Exception:\n self.event_logger('IJMetadata could not be parsed by json.loads')\n\n mm_metadata_rows = []\n for ind, page in enumerate(self.tiff.pages):\n mm_metadata_row = {\n 'page_ind': ind,\n 
'error': False\n }\n\n mm_tag = page.tags.get('MicroManagerMetadata')\n if not isinstance(mm_tag, tifffile.tifffile.TiffTag):\n self.event_logger('There was no MicroManagerMetadata tag found on page %s' % ind)\n mm_metadata_row['error'] = True\n mm_metadata_rows.append(mm_metadata_row)\n continue\n\n try:\n page_metadata_v1 = self._parse_mm_tag_schema_v1(mm_tag.value)\n except Exception:\n page_metadata_v1 = None\n try:\n page_metadata_v2 = self._parse_mm_tag_schema_v2(mm_tag.value)\n except Exception:\n page_metadata_v2 = None\n\n page_metadata = {}\n mm_metadata_version = None\n if page_metadata_v1 is not None:\n mm_metadata_version = 'v1'\n page_metadata = page_metadata_v1\n elif page_metadata_v2 is not None:\n mm_metadata_version = 'v2'\n page_metadata = page_metadata_v2\n else:\n mm_metadata_row['error'] = True\n self.event_logger('Unable to parse MicroManagerMetadata tag from page %s' % ind)\n\n mm_metadata_rows.append({**mm_metadata_row, **page_metadata})\n\n self.mm_metadata = pd.DataFrame(data=mm_metadata_rows)\n self.global_metadata['mm_metadata_version'] = mm_metadata_version\n\n\n\nclass RawPipelineTIFF(MicroManagerTIFF):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # the channels we expect to find in a Pipeline-like TIFF\n self.laser_405 = '405'\n self.laser_488 = '488'\n\n\n def validate_micromanager_metadata(self):\n '''\n Validate the parsed MicroManager metadata tags for a raw Pipeline-like TIFF file\n (these are TIFFs found in the 'PlateMicroscopy' directory)\n\n Generates validated_mm_metadata and sets various flags\n that determine whether and how to split the pages into the 405 and 488 channels\n\n Steps\n ------\n - drop rows with any NAs\n - check that the dropped rows had a parsing error\n - check for two channel_inds and an equal number of pages from each\n - if there are no channel_inds, check for an even number of pages\n - if there are two channel_inds, check that slice_inds\n and exposure settings are consistent within each channel\n\n '''\n\n # whether the MM metadata has two channel inds with an equal number of slices\n self.has_valid_channel_inds = False\n\n # whether the MM metadata for each channel has slice_inds that increment by one\n self.has_valid_slice_inds = False\n\n # whether it is safe to split the TIFF stack into channels by splitting the pages in half,\n # when there are not valid channel inds\n self.safe_to_split_in_half = False\n\n md = self.mm_metadata.copy()\n\n # remove the error flag column\n errors = md['error']\n md = md.drop(labels='error', axis=1)\n\n # drop rows with NAs in any of the columns parsed from the MicroManagerMetadata tag\n parsed_columns = set(md.columns).difference(['page_ind'])\n md = md.dropna(how='any', subset=parsed_columns, axis=0)\n\n # check that the dropped rows had an error\n # (note that 'error' means either there was no MM tag or it could not be parsed)\n num_error_rows = errors.sum()\n num_dropped_rows = self.mm_metadata.shape[0] - md.shape[0]\n if num_dropped_rows != num_error_rows:\n self.event_logger(\n '%s rows with NAs were dropped but %s rows had errors'\n % (num_dropped_rows, num_error_rows)\n )\n\n # check that we can coerce the parsed columns as expected\n int_columns = ['slice_ind', 'channel_ind']\n for column in int_columns:\n md[column] = md[column].apply(int)\n\n float_columns = ['laser_power_405', 'laser_power_488', 'exposure_time']\n for column in float_columns:\n md[column] = md[column].apply(float)\n\n # if there are two distinct channels, we assign the 
first to 405 and the second to 488\n self.channel_inds = None\n unique_channel_inds = sorted(md.channel_ind.unique())\n if len(unique_channel_inds) == 2:\n self.channel_inds = {\n self.laser_405: min(unique_channel_inds),\n self.laser_488: max(unique_channel_inds),\n }\n\n # if there are three channel_inds, we assume the third channel is brightfield\n elif set(unique_channel_inds) == set([0, 1, 2]):\n self.event_logger('There were three channel inds')\n self.channel_inds = {\n self.laser_405: 0,\n self.laser_488: 1,\n }\n\n # if there's one channel index, check for an even number of pages\n elif len(unique_channel_inds) == 1:\n if np.mod(md.shape[0], 2) == 0:\n self.safe_to_split_in_half = True\n else:\n self.event_logger('There is one channel_ind and an odd number of pages')\n else:\n self.event_logger('Unexpected number of channel_inds (%s)' % unique_channel_inds)\n\n # if there were valid channel_inds, check for an equal number of pages from each channel\n if self.channel_inds is not None:\n num_405 = (md.channel_ind == self.channel_inds[self.laser_405]).sum()\n num_488 = (md.channel_ind == self.channel_inds[self.laser_488]).sum()\n if num_405 == num_488:\n self.has_valid_channel_inds = True\n else:\n self.event_logger(\n 'Channels have unequal number of slices: %s and %s' % (num_405, num_488)\n )\n\n # in each channel, check that slice_ind increments by 1.0\n # and that exposure time and laser power are consistent\n for channel_ind in unique_channel_inds:\n md_channel = md.loc[md.channel_ind == channel_ind]\n steps = np.unique(np.diff(md_channel.slice_ind))\n\n # check that slice inds are contiguous\n if len(steps) == 1 and steps[0] == 1:\n self.has_valid_slice_inds = True\n elif len(steps) == 1:\n self.event_logger(\n 'Unexpected slice_ind increment %s for channel_ind %s'\n % (steps[0], channel_ind)\n )\n elif len(steps) > 1:\n self.event_logger(\n 'The slice_inds are not contiguous for channel_ind %s' % channel_ind\n )\n\n for column in float_columns:\n steps = np.unique(np.diff(md_channel[column]))\n if len(steps) > 1 or steps[0] != 0:\n self.event_logger(\n 'Inconsistent values found in column %s for channel_ind %s'\n % (column, channel_ind)\n )\n\n self.validated_mm_metadata = md\n\n\n @staticmethod\n def tag_and_coerce_metadata(row, tag):\n '''\n Transform `row` to a dict, prepend the keys with `tag`,\n and do some hackish type coercion\n '''\n d = {}\n for key, val in dict(row).items():\n key = '%s_%s' % (key, tag)\n try:\n val = float(val)\n except Exception:\n pass\n d[key] = val\n return d\n\n\n def split_channels(self):\n '''\n Split the pages of the pipeline-like TIFF into 405 and 488 channels\n to construct the z-stack for each channel and, if possible,\n extract the channel-specific MM metadata (i.e., exposure time and laser power)\n\n Overview\n --------\n In a perfect world, this would be easy: we would simple use the two unique channel_inds\n to split the pages by channel (and verify the page order using the slice_inds).\n\n Unfortunately, due to a bug, the MM metadata tag in some TIFFs is the same on every page\n (this is notably true for 'disentangled' TIFFs from Plates 16,17,18).\n In these cases, we split the tiff into channels simply by splitting the pages in half.\n\n Note that we use the flags set in self.validate_mm_metadata to determine\n which of these methods to use.\n\n Assignment of channels\n ----------------------\n When there are two valid channel_inds, the 405 laser is assigned\n to the lower channel_ind (which is either 0 or -1).\n When there are no 
channel_inds, the 405 laser is assigned\n to the first half of the pages.\n\n '''\n\n self.did_split_channels = True\n\n self.stacks = {}\n md = self.validated_mm_metadata.copy()\n\n if self.has_valid_channel_inds:\n for channel_name in (self.laser_405, self.laser_488):\n channel_md = md.loc[md.channel_ind == self.channel_inds[channel_name]]\n self.global_metadata.update(\n self.tag_and_coerce_metadata(channel_md.iloc[0], tag=channel_name)\n )\n self.stacks[channel_name] = self.concat_pages(channel_md.page_ind.values)\n\n elif self.safe_to_split_in_half:\n n = int(md.shape[0]/2)\n self.stacks[self.laser_405] = self.concat_pages(md.iloc[:n].page_ind.values)\n self.stacks[self.laser_488] = self.concat_pages(md.iloc[n:].page_ind.values)\n\n else:\n self.event_logger('Unable to safely split pages by channel')\n self.did_split_channels = False\n\n\n def concat_pages(self, page_inds):\n '''\n '''\n stack = np.array([self.tiff.pages[ind].asarray() for ind in page_inds])\n return stack\n\n\n def project_stack(self, channel_name, axis, dst_filepath=None):\n '''\n Generate x-, y-, or z-projections and log the max and min intensities\n '''\n\n axis_inds = {'x': 1, 'y': 2, 'z': 0}\n if axis not in axis_inds.keys():\n raise ValueError(\"Axis must be one of 'x', 'y', or 'z'\")\n axis_ind = axis_inds[axis]\n\n try:\n proj = self.stacks[channel_name].max(axis=axis_ind)\n minmax = {\n 'min_intensity': int(proj.min()),\n 'max_intensity': int(proj.max()),\n }\n self.global_metadata.update(self.tag_and_coerce_metadata(minmax, tag=channel_name))\n\n if dst_filepath is not None:\n tifffile.imsave(dst_filepath, proj)\n\n except Exception:\n self.event_logger(\n 'An error occured while %s-projecting the %s channel' % (axis, channel_name)\n )\n\n\n def calculate_z_profiles(self, channel):\n '''\n Calculate various statistics of the intensities for each z-slice\n '''\n stack = self.stacks[channel]\n return {\n 'min': np.array([zslice.min() for zslice in stack]).astype(int),\n 'max': np.array([zslice.max() for zslice in stack]).astype(int),\n 'mean': np.array([zslice.mean() for zslice in stack]).astype(int),\n 'p9999': np.array([np.percentile(zslice, 99.99) for zslice in stack]).astype(int),\n }\n\n\n @staticmethod\n def find_cell_layer(stack):\n '''\n Estimate the center of the cell layer using the center of mass\n of the z-profile of the mean intensity of the Hoechst staining\n '''\n\n # z-profile of the mean intensity in the Hoechst channel\n raw_profile = np.array([zslice.mean() for zslice in stack]).astype(float)\n profile = raw_profile - raw_profile.mean()\n profile[profile < 0] = 0\n\n x = np.arange(len(profile))\n center_of_mass = (profile * x).sum()/profile.sum()\n return center_of_mass, raw_profile\n\n\n def align_cell_layer(\n self, cell_layer_bottom, cell_layer_top, step_size, bottom_wiggle_room=0\n ):\n '''\n Approximately align the 405 and 488 stacks to correct for chromatic aberration,\n and crop around the cell layer so that it is in the center of the stack\n\n cell_layer_bottom : the position of the bottom of the cell layer, in microns,\n relative to the center of the cell layer (should be negative)\n cell_layer_top : the position of the top of cell layer, in microns,\n relative to the center (should be positive)\n step_size : the z-step size of the stack (in microns)\n (note that the step size is not included in the MicroManager metadata,\n so it must be provided by the user)\n bottom_wiggle_room : optional 'wiggle room', in microns, for the cell_layer_bottom;\n if the actual bottom of the stack is 
within this distance of cell_layer_bottom,\n the stack is still cropped, and the bottom of the cropped stack padded with zeros.\n For example, if cell_layer_bottom is -5um but the actual bottom is at -4.5um,\n setting bottom_wiggle_room to 1um would allow the stack to be cropped\n (because -4.5 + 5 < 1)\n '''\n\n stacks = {}\n result = {}\n\n stack_405 = self.stacks[self.laser_405].copy()\n stack_488 = self.stacks[self.laser_488].copy()\n\n # hard-coded chromatic aberration offset in microns\n # this is an empirically estimated median offset,\n # obtained by inspecting z-stacks from nucleus-localized targets\n chromatic_aberration_offset = 1.0\n offset_ind = int(chromatic_aberration_offset/step_size)\n\n stack_405 = stack_405[:-offset_ind, :, :]\n stack_488 = stack_488[offset_ind:, :, :]\n\n # estimate the cell layer center and round it to the nearest z-slice\n cell_layer_center, _ = self.find_cell_layer(stack_405)\n cell_layer_center = np.round(cell_layer_center)\n\n # absolute position, in number of z-slices, of the top and bottom of the cell layer\n bottom_ind = int(np.floor(cell_layer_center + cell_layer_bottom/step_size))\n top_ind = int(np.ceil(cell_layer_center + cell_layer_top/step_size))\n\n # log some parameters (for debugging, mostly)\n result['padded'] = False\n result['stack_shape'] = stack_405.shape\n result['crop_window'] = [bottom_ind, top_ind]\n result['cell_layer_center'] = cell_layer_center\n result['chromatic_aberration_offset'] = offset_ind\n\n pad_depth = None\n if bottom_ind < 0:\n if abs(bottom_ind) <= np.round(bottom_wiggle_room/step_size):\n pad_depth = abs(bottom_ind)\n bottom_ind = 0\n else:\n result['error'] = 'The cell layer center was too close to the bottom of the stack'\n return stacks, result\n\n if top_ind >= stack_405.shape[0]:\n result['error'] = 'The cell layer center was too close to the top of the stack'\n return stacks, result\n\n stack_405 = stack_405[bottom_ind:top_ind, :, :]\n stack_488 = stack_488[bottom_ind:top_ind, :, :]\n\n # pad the bottom of the stack if necessary\n if pad_depth:\n result['padded'] = True\n result['pad_depth'] = pad_depth\n padding = np.zeros((pad_depth, *stack_405.shape[1:]), dtype=stack_405.dtype)\n stack_405 = np.concatenate((padding, stack_405), axis=0)\n stack_488 = np.concatenate((padding, stack_488), axis=0)\n\n stacks = {'405': stack_405, '488': stack_488}\n return stacks, result\n"
] | [
[
"numpy.ceil",
"numpy.zeros",
"numpy.diff",
"pandas.DataFrame",
"numpy.floor",
"numpy.mod",
"numpy.round",
"numpy.concatenate",
"numpy.percentile"
]
] |
XDong18/bdd-mtl | [
"c89703006a2a5250f4d1c71e0aad958d72526885"
] | [
"bdd_mtl/mmdet/models/detectors/two_stage.py"
] | [
"import torch\nimport torch.nn as nn\n\nfrom .base import BaseDetector\nfrom .test_mixins import RPNTestMixin, BBoxTestMixin, MaskTestMixin\nfrom .. import builder\nfrom ..registry import DETECTORS\nfrom mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler\n\n\[email protected]_module\nclass TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,\n MaskTestMixin):\n\n def __init__(self,\n backbone,\n neck=None,\n shared_head=None,\n rpn_head=None,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(TwoStageDetector, self).__init__()\n self.backbone = builder.build_backbone(backbone)\n\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if rpn_head is not None:\n self.rpn_head = builder.build_head(rpn_head)\n\n if bbox_head is not None:\n self.bbox_roi_extractor = builder.build_roi_extractor(\n bbox_roi_extractor)\n self.bbox_head = builder.build_head(bbox_head)\n\n if mask_head is not None:\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = builder.build_roi_extractor(\n mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = builder.build_head(mask_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n def init_weights(self, pretrained=None):\n super(TwoStageDetector, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_bbox:\n self.bbox_roi_extractor.init_weights()\n self.bbox_head.init_weights()\n if self.with_mask:\n self.mask_head.init_weights()\n if not self.share_roi_extractor:\n self.mask_roi_extractor.init_weights()\n\n def extract_feat(self, img):\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_train(self,\n img,\n img_meta,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None):\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,\n self.train_cfg.rpn)\n rpn_losses = self.rpn_head.loss(\n *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n losses.update(rpn_losses)\n\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n proposal_inputs = rpn_outs + (img_meta, proposal_cfg)\n proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)\n else:\n proposal_list = proposals\n\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)\n bbox_sampler = build_sampler(\n self.train_cfg.rcnn.sampler, context=self)\n num_imgs = img.size(0)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = bbox_assigner.assign(proposal_list[i],\n gt_bboxes[i],\n gt_bboxes_ignore[i],\n 
gt_labels[i])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n if self.with_bbox:\n rois = bbox2roi([res.bboxes for res in sampling_results])\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_targets = self.bbox_head.get_target(sampling_results,\n gt_bboxes, gt_labels,\n self.train_cfg.rcnn)\n loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,\n *bbox_targets)\n losses.update(loss_bbox)\n\n # mask head forward and loss\n if self.with_mask:\n if not self.share_roi_extractor:\n pos_rois = bbox2roi(\n [res.pos_bboxes for res in sampling_results])\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], pos_rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n mask_feats = bbox_feats[pos_inds]\n mask_pred = self.mask_head(mask_feats)\n\n mask_targets = self.mask_head.get_target(sampling_results,\n gt_masks,\n self.train_cfg.rcnn)\n pos_labels = torch.cat(\n [res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_pred, mask_targets,\n pos_labels)\n losses.update(loss_mask)\n\n return losses\n\n def simple_test(self, img, img_meta, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, \"Bbox head must be implemented.\"\n\n x = self.extract_feat(img)\n\n proposal_list = self.simple_test_rpn(\n x, img_meta, self.test_cfg.rpn) if proposals is None else proposals\n\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_meta, det_bboxes, det_labels, rescale=rescale)\n return bbox_results, segm_results\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n proposal_list = self.aug_test_rpn(\n self.extract_feats(imgs), img_metas, self.test_cfg.rpn)\n det_bboxes, det_labels = self.aug_test_bboxes(\n self.extract_feats(imgs), img_metas, proposal_list,\n self.test_cfg.rcnn)\n\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(\n self.extract_feats(imgs), img_metas, det_bboxes, det_labels)\n return bbox_results, segm_results\n else:\n return bbox_results\n"
] | [
[
"torch.zeros",
"torch.ones",
"torch.cat"
]
] |
nasa/giant | [
"1e939272d9a0ca533b4da400d132f854520f3adc"
] | [
"unittests/ray_tracer/test_kdtree.py"
] | [
"from unittest import TestCase, skip\nimport copy\n\nimport numpy as np\n\nfrom giant import rotations as at\nfrom giant.ray_tracer import kdtree, shapes, rays\n\n\nclass TestKDTree(TestCase):\n\n def setUp(self):\n\n self.max_depth = 4\n\n tri1 = np.array([[-5, -4, -4.5],\n [0, 0, 1],\n [0, 0, 0]])\n\n tri2 = tri1+np.array([[2.5, 0, 0]]).T\n\n tri3 = tri2+np.array([[2.5, 0, 0]]).T\n\n tri4 = tri3+np.array([[2.5, 0, 0]]).T\n\n self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1,\n np.arange(12).reshape(-1, 3))\n\n self.shapes = self.triangles\n\n self.stacked_tries = shapes.Triangle64(np.hstack([tri1, tri2,\n tri1+[[0], [0], [2.5]],\n tri2 + [[0], [0], [2.5]]]).T, 1,\n np.arange(12).reshape(-1, 3))\n\n\n def test_creation(self):\n\n tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)\n\n self.assertEqual(tree.max_depth, self.max_depth)\n self.assertEqual(tree.surface, self.shapes)\n\n def test_build(self):\n\n tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)\n\n tree.build(force=True, print_progress=False)\n\n facets = np.arange(12).reshape(-1, 3)\n tris = [shapes.Triangle64(self.triangles.vertices, self.triangles.albedos, face)\n for face in facets]\n\n for tri in tris:\n tri.bounding_box = None\n\n node20 = kdtree.KDNode(tris[0])\n\n node21 = kdtree.KDNode(tris[1])\n\n node22 = kdtree.KDNode(tris[2])\n\n node23 = kdtree.KDNode(tris[3])\n\n node10 = kdtree.KDNode()\n node10.bounding_box = shapes.AxisAlignedBoundingBox([-5, 0, 0], [-1.5, 1, 0])\n node10.left = node20\n node10.right = node21\n\n node11 = kdtree.KDNode()\n node11.bounding_box = shapes.AxisAlignedBoundingBox([0., 0, 0], [3.5, 1, 0])\n node11.left = node22\n node11.right = node23\n\n node00 = kdtree.KDNode()\n node00.bounding_box = self.triangles.bounding_box\n node00.left = node10\n node00.right = node11\n node00.order = 0\n\n self.assertEqual(node00, tree.root)\n\n def test_trace(self):\n\n with self.subTest(stacked=False):\n tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)\n\n tree.build(force=True, print_progress=False)\n\n starts = np.array([[-4.5, -2, 0.5, 3],\n [0.5, 0.5, 0.5, 0.5],\n [1, 1, 1, 1]])\n directions = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-1, -1, -1, -1]], dtype=np.float64)\n\n rays_test = rays.Rays(starts, directions)\n\n ints = tree.trace(rays_test)\n\n nodes = [tree.root.left.left, tree.root.left.right, tree.root.right.left, tree.root.right.right]\n\n with self.subTest(rotation=None, translation=None):\n\n for ind, int_check in enumerate(ints):\n\n with self.subTest(ignore=False, ind=ind):\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1])\n\n np.testing.assert_array_equal(int_check[\"normal\"], self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tree.root.order+1)))\n\n ignore_ind = 2\n\n rays_test.ignore = [nodes[ignore_ind].id*(10**(tree.root.order+1))]*rays_test.num_rays\n\n ints = tree.trace(rays_test)\n\n for ind, int_check in enumerate(ints):\n\n with self.subTest(ignore=True, ind=ind):\n\n if ind != ignore_ind:\n # int_check = int_check[0]\n\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1])\n\n np.testing.assert_array_equal(int_check[\"normal\"], self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 
0+nodes[ind].id*(10**(tree.root.order+1)))\n\n else:\n\n self.assertFalse(int_check[\"check\"])\n\n self.assertTrue(np.isnan(int_check[\"intersect\"]).all())\n self.assertTrue(np.isnan(int_check[\"normal\"]).all())\n self.assertTrue(np.isnan(int_check[\"albedo\"]))\n self.assertEqual(int_check[\"facet\"], -1)\n\n rotation = at.Rotation([0, 0, -np.pi / 2])\n rays_test.ignore = None\n\n with self.subTest(rotation=rotation, translation=None):\n\n tc = copy.deepcopy(tree)\n\n tc.rotate(rotation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"].any())\n\n starts2 = np.array([[0.5, 0.5, 0.5, 0.5],\n [4.5, 2, -0.5, -3],\n [1, 1, 1, 1]])\n directions2 = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-1, -1, -1, -1]], dtype=np.float64)\n\n rays_test2 = rays.Rays(starts2, directions2)\n\n ints = tc.trace(rays_test2)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts2[:, ind]-[0, 0, 1])\n\n np.testing.assert_array_equal(int_check[\"normal\"], [email protected][ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n translation = [0, 0, -0.5]\n\n with self.subTest(rotation=None, translation=translation):\n\n tc = copy.deepcopy(tree)\n\n tc.translate(translation)\n\n ints = tc.trace(rays_test)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1.5])\n\n np.testing.assert_array_almost_equal(int_check[\"normal\"], self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n with self.subTest(rotation=rotation, translation=translation):\n\n tc = copy.deepcopy(tree)\n\n tc.rotate(rotation)\n tc.translate(translation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"].any())\n\n starts2 = np.array([[0.5, 0.5, 0.5, 0.5],\n [4.5, 2, -0.5, -3],\n [1, 1, 1, 1]])\n directions2 = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-1, -1, -1, -1]], dtype=np.float64)\n\n rays_test2 = rays.Rays(starts2, directions2)\n\n ints = tc.trace(rays_test2)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts2[:, ind]-[0, 0, 1.5])\n\n np.testing.assert_array_equal(int_check[\"normal\"], [email protected][ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n rotation = at.Rotation([np.pi / 2, 0, 0])\n\n with self.subTest(rotation=rotation, translation=None):\n\n tc = copy.deepcopy(tree)\n\n tc.rotate(rotation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"].any())\n\n starts2 = np.array([[-4.5, -2, 0.5, 3],\n [1, 1, 1, 1],\n [0.5, 0.5, 0.5, 0.5]])\n directions2 = np.array([[0, 0, 0, 0],\n [-1, -1, -1, -1],\n [0, 0, 0, 0]], dtype=np.float64)\n\n rays_test2 = rays.Rays(starts2, directions2)\n\n ints = tc.trace(rays_test2)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts2[:, ind]-[0, 1, 0])\n\n np.testing.assert_array_equal(int_check[\"normal\"], [email 
protected][ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n translation = [2.5, 0, 0]\n\n with self.subTest(rotation=None, translation=translation):\n\n tc = copy.deepcopy(tree)\n\n tc.translate(translation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"][0])\n\n for ind, int_check in enumerate(ints[1:]):\n ind += 1\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1])\n\n np.testing.assert_array_almost_equal(int_check[\"normal\"], self.triangles.normals[ind-1])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind-1].id*(10**(tc.root.order+1)))\n\n translation = [0, -0.5, 0]\n\n with self.subTest(rotation=rotation, translation=translation):\n\n with self.subTest(order='rt'):\n tc = copy.deepcopy(tree)\n\n tc.rotate(rotation)\n tc.translate(translation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"].any())\n\n starts2 = np.array([[-4.5, -2, 0.5, 3],\n [1, 1, 1, 1],\n [0.5, 0.5, 0.5, 0.5]])\n directions2 = np.array([[0, 0, 0, 0],\n [-1, -1, -1, -1],\n [0, 0, 0, 0]], dtype=np.float64)\n\n rays_test2 = rays.Rays(starts2, directions2)\n\n ints = tc.trace(rays_test2)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts2[:, ind]-[0, 1.5, 0])\n\n np.testing.assert_array_equal(int_check[\"normal\"], [email protected][ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n with self.subTest(order='tr'):\n tc = copy.deepcopy(tree)\n\n tc.translate(translation)\n tc.rotate(rotation)\n\n ints = tc.trace(rays_test)\n\n self.assertFalse(ints[\"check\"].any())\n\n starts2 = np.array([[-4.5, -2, 0.5, 3],\n [1, 1, 1, 1],\n [0, 0, 0, 0]])\n directions2 = np.array([[0, 0, 0, 0],\n [-1, -1, -1, -1],\n [0, 0, 0, 0]], dtype=np.float64)\n\n rays_test2 = rays.Rays(starts2, directions2)\n\n ints = tc.trace(rays_test2)\n\n for ind, int_check in enumerate(ints):\n # int_check = int_check[0]\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_almost_equal(int_check[\"intersect\"], starts2[:, ind]-[0, 1, 0])\n\n np.testing.assert_array_equal(int_check[\"normal\"], [email protected][ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tc.root.order+1)))\n\n with self.subTest(stacked=True):\n tree = kdtree.KDTree(self.stacked_tries, max_depth=self.max_depth)\n\n tree.build(force=True, print_progress=False)\n\n starts = np.array([[-4.5, -2, -4.5, -2],\n [0.5, 0.5, 0.5, 0.5],\n [1, 1, 5, 5]])\n directions = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [-1, -1, -1, -1]], dtype=np.float64)\n\n rays_test = rays.Rays(starts, directions)\n\n ints = tree.trace(rays_test)\n\n nodes = [tree.root.left.left, tree.root.right.left, tree.root.left.right, tree.root.right.right]\n\n for ind, int_check in enumerate(ints):\n\n with self.subTest(ignore=False, ind=ind):\n self.assertTrue(int_check[\"check\"])\n\n if ind < 2:\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1])\n else:\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 2.5])\n\n np.testing.assert_array_equal(int_check[\"normal\"], 
self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tree.root.order+1)))\n\n ignore_ind = 2\n\n rays_test.ignore = [nodes[ignore_ind].id*(10**(tree.root.order+1))]*rays_test.num_rays\n\n ints = tree.trace(rays_test)\n\n for ind, int_check in enumerate(ints):\n\n with self.subTest(ignore=True, ind=ind):\n\n if ind != ignore_ind:\n # int_check = int_check[0]\n\n self.assertTrue(int_check[\"check\"])\n\n if ind < 2:\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 1])\n else:\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 2.5])\n\n np.testing.assert_array_equal(int_check[\"normal\"], self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[ind].id*(10**(tree.root.order+1)))\n\n else:\n\n self.assertTrue(int_check[\"check\"])\n\n np.testing.assert_array_equal(int_check[\"intersect\"], starts[:, ind]-[0, 0, 5])\n\n np.testing.assert_array_equal(int_check[\"normal\"], self.triangles.normals[ind])\n\n self.assertEqual(int_check[\"albedo\"], 1.0)\n\n self.assertEqual(int_check[\"facet\"], 0+nodes[0].id*(10**(tree.root.order+1)))\n\n\nclass TestKDNode(TestCase):\n\n def setUp(self):\n\n tri1 = np.array([[-5, -4, -4.5],\n [0, 0, 1],\n [0, 0, 0]])\n\n tri2 = tri1+np.array([[2.5, 0, 0]]).T\n\n tri3 = tri2+np.array([[2.5, 0, 0]]).T\n\n tri4 = tri3+np.array([[2.5, 0, 0]]).T\n\n self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1, np.arange(12).reshape(-1, 3))\n\n def test_creation(self):\n\n node = kdtree.KDNode(surface=self.triangles)\n\n self.assertEqual(node.surface, self.triangles)\n\n self.assertEqual(node.bounding_box, self.triangles.bounding_box)\n\n self.assertIsNone(node.left)\n self.assertIsNone(node.right)\n\n def test_compute_bounding_box(self):\n\n node = kdtree.KDNode()\n\n node.surface = self.triangles\n node.has_surface = True\n\n node.compute_bounding_box()\n\n self.assertEqual(node.bounding_box, self.triangles.bounding_box)\n\n def test_split(self):\n\n node = kdtree.KDNode(surface=self.triangles)\n\n node.split(force=True, print_progress=False)\n\n left_tris = kdtree.KDNode(shapes.Triangle64(self.triangles.vertices, 1, np.arange(6).reshape(3, -1), compute_bounding_box=False))\n right_tris = kdtree.KDNode(shapes.Triangle64(self.triangles.vertices, 1, np.arange(6, 12).reshape(3, -1), compute_bounding_box=False))\n\n self.assertEqual(node.left, left_tris)\n self.assertEqual(node.right, right_tris)\n\n def test_trace(self):\n\n # TODO: figure out how to implement this\n pass\n\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.hstack",
"numpy.testing.assert_array_almost_equal",
"numpy.isnan",
"numpy.array"
]
] |
sebwolf-de/Examples | [
"329db390d540e6f5fe1dff35372528f723882271"
] | [
"tpv29/generate_mytopo_tpv29.py"
] | [
"import numpy as np\n\n# Read scec input file\nfid = open(\"tpv29_tpv30_geometry_25m_data.txt\")\nline = fid.readline()\nline = fid.readline()\nheader = [float(a) for a in line.split()]\nnx, ny, lx, ly = header\nroughness = np.loadtxt(fid)\nroughness = roughness[:, 4]\nfid.close()\n\n# create x and y vectors\nx = np.linspace(-lx / 2, lx / 2, int(nx) + 1)\ny = np.linspace(0, ly, int(ny) + 1)\n\n# write mytopo_tpv29\nfout = open(\"mytopo_tpv29\", \"w\")\nfout.write(\"%d %d\\n\" % (nx + 1, ny + 1))\nnp.savetxt(fout, x, fmt=\"%f\")\nnp.savetxt(fout, y, fmt=\"%f\")\nnp.savetxt(fout, roughness, fmt=\"%f\")\nfout.close()\n"
] | [
[
"numpy.savetxt",
"numpy.loadtxt"
]
] |
srujan71/CubeSat-Mission-Planner | [
"62d1ad33c2dcb1a2f8fb3ff615cc5cc0e6716969"
] | [
"example1.py"
] | [
"\"\"\"\nexample1.py\n\n\"A simple example how to use the CubeSat-Power-Estimation tool.\"\n\n@author: Johan Monster (https://github.com/Hans-Bananendans/)\n\"\"\"\n\n# Import packages\nimport numpy as np\nimport pandas as pd\n\nfrom mission import Mission\n\n# Defining the config\nconfig = {\n \"years_passed\" : 0, # How many [years] the satellite has been in space for\n \n \"battery_capacity\" : 81000, # Battery capacity in [W.s] (or: Joule)\n \"battery_degradation_factor\" : 0.04,\n \"battery_init\" : 0.5, # 0.5 = Battery begins at 50% charge\n \n \"panel_degradation_factor\" : 0.02,\n \n \"blip_period\" : 30, # Currently unused, telemetry blip period\n \"blip_duration\" : 1, # Currently unused, telemetry blip duration\n \"no_blips\" : [\"downlink\"], # Currently unused\n \n \"orbital_altitude\" : 550 # Orbital altitude in [km]\n }\n\n# List of the names of all used EPS channels.\nchannels = [\"None\", \"5V_1\", \"5V_2\", \"5V_3\", \"5V_4\", \"3.3V_1\", \\\n \"3.3V_2\", \"3.3V_3\", \"3.3V_4\", \"Var_rail\"]\n\n# Dict of typical voltage supplied to each channel.\nchannel_voltages = {\n \"5V_1\" : 5, \n \"5V_2\" : 5, \n \"5V_3\" : 5, \n \"5V_4\" : 5, \n \"3.3V_1\" : 3.3,\n \"3.3V_2\" : 3.3,\n \"3.3V_3\" : 3.3,\n \"3.3V_4\" : 3.3,\n \"Var_rail\" : 6.5 # Can between 6.5-8 VDC, highest current is at 6.5V\n }\n \n# Dict specifiying which device is on which EPS channel\ndevice_channels = {\n \"adcs\" : \"5V_4\",\n \"payload_dice\" : \"5V_3\",\n \"payload_bitflip\" : \"3.3V_3\",\n \"antenna\" : \"3.3V_4\",\n \"obc\" : \"5V_2\",\n \"obc_board\" : \"5V_2\",\n \"rx\" : \"Var_rail\",\n \"tx\" : \"Var_rail\",\n \"eps\" : \"None\",\n \"sensors_1\" : \"3.3V_2\",\n \"sensors_2\" : \"3.3V_4\",\n }\n\n# List of all possible OpStates the satellite can be in.\n# This list must be consistent with the specified power.xlsx\nstate_list = [\"idle\",\"recharge\",\"dice_payload\",\"wheel_unloading\", \\\n \"transponder\",\"downlink\",\"safe_mode\",\"recovery_mode\", \\\n \"detumbling_mode\"]\n\n# Dict of which colour will be used for each OpState whilst plotting\nstate_colours = {\n \"idle\" : \"#ffffff\",\n \"recharge\" : \"#2ca02c\",\n \"dice_payload\" : \"#8000ff\",\n \"wheel_unloading\" : \"#0080ff\",\n \"transponder\" : \"#ff8000\",\n \"downlink\" : \"#ff0000\",\n \"safe_mode\" : \"#4000ff\",\n \"recovery_mode\" : \"#777777\", \n \"detumbling_mode\" : \"#ff00ff\"\n }\n\n# Baby's first satellite schedule\nschedule1 = {\n 0 : \"idle\",\n 50 : \"downlink\",\n 100 : \"recharge\"\n }\n\n# Loading the power frame, or the device/OpState table\npower_frame = pd.read_excel('power.xlsx',index_col=0)\n\n# Loading the two power input vectors, generated by CubeSat-Solar-Estimator\np_sun = np.load(\"P_sun.npy\")\np_alb = np.load(\"P_alb.npy\")\n\n# Assembling the mission object\nm1 = Mission(config, device_channels, state_list, channels, \\\n power_frame, p_sun, p_alb)\n\n# Calling the Mission.propagate() method to start the simulation\nresults = m1.propagate(schedule1, tsim=200, dt=1)\n\n# Plotting\nm1.plot_timeline_power(state_colours)\n"
] | [
[
"numpy.load",
"pandas.read_excel"
]
] |
leaderj1001/Action-Localization | [
"04d972e6dc3c07d347c70893723d91487c1c8cbd"
] | [
"action-baseline/evaluation/get_ava_performance.py"
] | [
"r\"\"\"Compute action detection performance for the AVA dataset.\n\nPlease send any questions about this code to the Google Group ava-dataset-users:\nhttps://groups.google.com/forum/#!forum/ava-dataset-users\n\nExample usage:\npython -O get_ava_performance.py \\\n -l ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt \\\n -g ava_val_v2.1.csv \\\n -e ava_val_excluded_timestamps_v2.1.csv \\\n -d your_results.csv\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom collections import defaultdict\nimport csv\nimport heapq\nimport logging\nimport pprint\nimport sys\nimport time\nimport numpy as np\n\nfrom evaluation.ava import object_detection_evaluation\nfrom evaluation.ava import standard_fields\n\n\ndef print_time(message, start):\n logging.info(\"==> %g seconds to %s\", time.time() - start, message)\n\n\ndef make_image_key(video_id, timestamp):\n \"\"\"Returns a unique identifier for a video id & timestamp.\"\"\"\n return \"%s,%04d\" % (video_id, int(timestamp))\n\n\ndef read_csv(csv_file, class_whitelist=None, capacity=0):\n \"\"\"Loads boxes and class labels from a CSV file in the AVA format.\n\n CSV file format described at https://research.google.com/ava/download.html.\n\n Args:\n csv_file: A file object.\n class_whitelist: If provided, boxes corresponding to (integer) class labels\n not in this set are skipped.\n capacity: Maximum number of labeled boxes allowed for each example.\n Default is 0 where there is no limit.\n\n Returns:\n boxes: A dictionary mapping each unique image key (string) to a list of\n boxes, given as coordinates [y1, x1, y2, x2].\n labels: A dictionary mapping each unique image key (string) to a list of\n integer class lables, matching the corresponding box in `boxes`.\n scores: A dictionary mapping each unique image key (string) to a list of\n score values lables, matching the corresponding label in `labels`. If\n scores are not provided in the csv, then they will default to 1.0.\n \"\"\"\n start = time.time()\n entries = defaultdict(list)\n boxes = defaultdict(list)\n labels = defaultdict(list)\n scores = defaultdict(list)\n reader = csv.reader(csv_file)\n for row in reader:\n assert len(row) in [7, 8], \"Wrong number of columns: \" + row\n image_key = make_image_key(row[0], row[1])\n x1, y1, x2, y2 = [float(n) for n in row[2:6]]\n action_id = int(row[6])\n if class_whitelist and action_id not in class_whitelist:\n continue\n score = 1.0\n if len(row) == 8:\n score = float(row[7])\n if capacity < 1 or len(entries[image_key]) < capacity:\n heapq.heappush(entries[image_key],\n (score, action_id, y1, x1, y2, x2))\n elif score > entries[image_key][0][0]:\n heapq.heapreplace(entries[image_key],\n (score, action_id, y1, x1, y2, x2))\n for image_key in entries:\n # Evaluation API assumes boxes with descending scores\n entry = sorted(entries[image_key], key=lambda tup: -tup[0])\n for item in entry:\n score, action_id, y1, x1, y2, x2 = item\n boxes[image_key].append([y1, x1, y2, x2])\n labels[image_key].append(action_id)\n scores[image_key].append(score)\n print_time(\"read file \" + csv_file.name, start)\n return boxes, labels, scores\n\n\ndef read_exclusions(exclusions_file):\n \"\"\"Reads a CSV file of excluded timestamps.\n\n Args:\n exclusions_file: A file object containing a csv of video-id,timestamp.\n\n Returns:\n A set of strings containing excluded image keys, e.g. 
\"aaaaaaaaaaa,0904\",\n or an empty set if exclusions file is None.\n \"\"\"\n excluded = set()\n if exclusions_file:\n reader = csv.reader(exclusions_file)\n for row in reader:\n assert len(row) == 2, \"Expected only 2 columns, got: \" + row\n excluded.add(make_image_key(row[0], row[1]))\n return excluded\n\n\ndef read_labelmap(labelmap_file):\n \"\"\"Reads a labelmap without the dependency on protocol buffers.\n\n Args:\n labelmap_file: A file object containing a label map protocol buffer.\n\n Returns:\n labelmap: The label map in the form used by the object_detection_evaluation\n module - a list of {\"id\": integer, \"name\": classname } dicts.\n class_ids: A set containing all of the valid class id integers.\n \"\"\"\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n for line in labelmap_file:\n if line.startswith(\" name:\"):\n name = line.split('\"')[1]\n elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n class_id = int(line.strip().split(\" \")[-1])\n labelmap.append({\"id\": class_id, \"name\": name})\n class_ids.add(class_id)\n return labelmap, class_ids\n\n\ndef run_evaluation(labelmap, groundtruth, detections, exclusions):\n \"\"\"Runs evaluations given input files.\n\n Args:\n labelmap: file object containing map of labels to consider, in pbtxt format\n groundtruth: file object\n detections: file object\n exclusions: file object or None.\n \"\"\"\n categories, class_whitelist = read_labelmap(labelmap)\n logging.info(\"CATEGORIES (%d):\\n%s\", len(categories),\n pprint.pformat(categories, indent=2))\n excluded_keys = read_exclusions(exclusions)\n\n pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(\n categories)\n\n # Reads the ground truth data.\n boxes, labels, _ = read_csv(groundtruth, class_whitelist, 0)\n start = time.time()\n for image_key in boxes:\n if image_key in excluded_keys:\n logging.info((\"Found excluded timestamp in ground truth: %s. \"\n \"It will be ignored.\"), image_key)\n continue\n pascal_evaluator.add_single_ground_truth_image_info(\n image_key, {\n standard_fields.InputDataFields.groundtruth_boxes:\n np.array(boxes[image_key], dtype=float),\n standard_fields.InputDataFields.groundtruth_classes:\n np.array(labels[image_key], dtype=int),\n standard_fields.InputDataFields.groundtruth_difficult:\n np.zeros(len(boxes[image_key]), dtype=bool)\n })\n print_time(\"convert groundtruth\", start)\n\n # Reads detections data.\n boxes, labels, scores = read_csv(detections, class_whitelist, 50)\n start = time.time()\n for image_key in boxes:\n if image_key in excluded_keys:\n logging.info((\"Found excluded timestamp in detections: %s. 
\"\n \"It will be ignored.\"), image_key)\n continue\n pascal_evaluator.add_single_detected_image_info(\n image_key, {\n standard_fields.DetectionResultFields.detection_boxes:\n np.array(boxes[image_key], dtype=float),\n standard_fields.DetectionResultFields.detection_classes:\n np.array(labels[image_key], dtype=int),\n standard_fields.DetectionResultFields.detection_scores:\n np.array(scores[image_key], dtype=float)\n })\n print_time(\"convert detections\", start)\n\n start = time.time()\n metrics = pascal_evaluator.evaluate()\n print_time(\"run_evaluator\", start)\n pprint.pprint(metrics, indent=2)\n\n\ndef parse_arguments():\n \"\"\"Parses command-line flags.\n\n Returns:\n args: a named tuple containing three file objects args.labelmap,\n args.groundtruth, and args.detections.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-l\",\n \"--labelmap\",\n help=\"Filename of label map\",\n type=argparse.FileType(\"r\"),\n default=\"./ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt\")\n parser.add_argument(\n \"-g\",\n \"--groundtruth\",\n default='./ava_val_v2.2.csv',\n help=\"CSV file containing ground truth.\",\n type=argparse.FileType(\"r\"),\n # required=True\n )\n parser.add_argument(\n \"-d\",\n \"--detections\",\n default='results.csv',\n help=\"CSV file containing inferred action detections.\",\n type=argparse.FileType(\"r\"),\n # required=True\n )\n parser.add_argument(\n \"-e\",\n \"--exclusions\",\n help=(\"Optional CSV file containing videoid,timestamp pairs to exclude from evaluation.\"),\n type=argparse.FileType(\"r\"),\n required=False)\n return parser.parse_args()\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n args = parse_arguments()\n run_evaluation(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] |
lxtGH/cvpods-1 | [
"614a975e5425bbaeb66bbd1ffca552d633ba89ca"
] | [
"tools/train_net.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Modified by BaseDetection, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nDetection Training Script.\n\nThis scripts reads a given config file and runs the training or evaluation.\nIt is an entry point that is made to train standard models in cvpods.\n\nIn order to let one script support training of many models,\nthis script contains logic that are specific to these built-in models and therefore\nmay not be suitable for your own project.\nFor example, your research project perhaps only needs a single \"evaluator\".\n\nTherefore, we recommend you to use cvpods as an library and take\nthis file as an example of how to use the library.\nYou may want to write your own script with your datasets and other customizations.\n\"\"\"\nimport logging\nimport os\nimport pickle as pkl\nimport sys\nfrom collections import OrderedDict\nfrom colorama import Fore, Style\n\nimport torch\n\nfrom cvpods.checkpoint import DetectionCheckpointer\nfrom cvpods.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch\nfrom cvpods.evaluation import build_evaluator, verify_results\nfrom cvpods.modeling import GeneralizedRCNNWithTTA\nfrom cvpods.utils import comm\n\nsys.path.insert(0, '.')\nfrom config import config # noqa: E402\nfrom net import build_model # noqa: E402\n\n\nclass Trainer(DefaultTrainer):\n \"\"\"\n We use the \"DefaultTrainer\" which contains pre-defined default logic for\n standard training workflow. They may not work for you, especially if you\n are working on a new research project. In that case you can use the cleaner\n \"SimpleTrainer\", or write your own training loop. You can use\n \"tools/plain_train_net.py\" as an example.\n \"\"\"\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = config.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n @classmethod\n def test_with_TTA(cls, cfg, model):\n logger = logging.getLogger(\"cvpods.trainer\")\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n\ndef stage_main(args, cfg, build):\n cfg.merge_from_list(args.opts)\n cfg, logger = default_setup(cfg, args)\n model_build_func = build\n\n \"\"\"\n If you'd like to do anything fancier than the standard training logic,\n consider writing your own training loop or subclassing the trainer.\n \"\"\"\n trainer = Trainer(cfg, model_build_func)\n trainer.resume_or_load(resume=args.resume)\n\n if args.eval_only:\n DetectionCheckpointer(\n trainer.model, save_dir=cfg.OUTPUT_DIR, resume=args.resume).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume)\n res = Trainer.test(cfg, trainer.model)\n if comm.is_main_process():\n verify_results(cfg, res)\n if cfg.TEST.AUG.ENABLED:\n res.update(Trainer.test_with_TTA(cfg, trainer.model))\n return res\n\n # check wheather worksapce has enough 
storage space\n # assume that a single dumped model is 700Mb\n file_sys = os.statvfs(cfg.OUTPUT_DIR)\n free_space_Gb = (file_sys.f_bfree * file_sys.f_frsize) / 2**30\n eval_space_Gb = (cfg.SOLVER.LR_SCHEDULER.MAX_ITER // cfg.SOLVER.CHECKPOINT_PERIOD) * 700 / 2**10\n if eval_space_Gb > free_space_Gb:\n logger.warning(f\"{Fore.RED}Remaining space({free_space_Gb}GB) \"\n f\"is less than ({eval_space_Gb}GB){Style.RESET_ALL}\")\n\n if cfg.TEST.AUG.ENABLED:\n trainer.register_hooks(\n [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]\n )\n\n trainer.train()\n\n if comm.is_main_process() and cfg.MODEL.AS_PRETRAIN:\n # convert last ckpt to pretrain format\n convert_to_pretrained_model(\n input=os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\"),\n save_path=os.path.join(cfg.OUTPUT_DIR, \"model_final_pretrain_weight.pkl\")\n )\n\n\ndef convert_to_pretrained_model(input, save_path):\n obj = torch.load(input, map_location=\"cpu\")\n obj = obj[\"model\"]\n\n newmodel = {}\n for k, v in obj.items():\n if not k.startswith(\"encoder_q.\") and not k.startswith(\"network\"):\n continue\n old_k = k\n if k.startswith(\"encoder_q.\"):\n k = k.replace(\"encoder_q.\", \"\")\n elif k.startswith(\"network\"):\n k = k.replace(\"network.\", \"\")\n print(old_k, \"->\", k)\n newmodel[k] = v.numpy()\n\n res = {\n \"model\": newmodel,\n \"__author__\": \"MOCO\" if k.startswith(\"encoder_q.\") else \"CLS\",\n \"matching_heuristics\": True\n }\n\n with open(save_path, \"wb\") as f:\n pkl.dump(res, f)\n\n\ndef main(args):\n if isinstance(config, list):\n assert isinstance(build_model, list) and len(config) == len(build_model)\n for cfg, build in zip(config, build_model):\n stage_main(args, cfg, build)\n else:\n stage_main(args, config, build_model)\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n if isinstance(config, list):\n assert len(config) > 0\n print(\"soft link first config in list to {}\".format(config[0].OUTPUT_DIR))\n config[0].link_log()\n else:\n print(\"soft link to {}\".format(config.OUTPUT_DIR))\n config.link_log()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n"
] | [
[
"torch.load"
]
] |
RafaelSouza94/Python_Machine_Learning | [
"5b150613410ddc64a61690f232ec61751744fa41"
] | [
"Ch_03/LogisticRegression.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLogistic Regression Gradient Descent\n\"\"\"\n\nimport numpy as np\n\nclass LogisticRegressionGD(object):\n \"\"\"Logistic Regression Classifier using gradient descent.\n \n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n random_state : int\n Random number generator seed for random weight\n initialization.\n \n \n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n cost_ : list\n Logistic cost function value in each epoch.\n \n \"\"\"\n def __init__(self, eta=0.05, n_iter=100, random_state=1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n \n def fit(self, X, y):\n \"\"\" Fit training data.\n \n Parameters\n ----------\n X : {array-like}, shape = [n_examples, n_features]\n Training vectors, where n_examples is the number of\n examples and n_features is the number of features.\n y : array-like, shape = [n_examples]\n Target values.\n \n Returns\n -------\n self : object\n \n \"\"\"\n rgen = np.random.RandomState(self.random_state)\n self.w_ = rgen.normal(loc=0.0, scale=0.01,\n size=1 + X.shape[1])\n self.cost_ = []\n \n for i in range(self.n_iter):\n net_input = self.net_input(X)\n output = self.activation(net_input)\n errors = (y - output)\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n \n # note that we compute the logistic `cost` now\n # instead of the sum of squared errors cost\n cost = (-y.dot(np.log(output)) -\n ((1 - y).dot(np.log(1 - output))))\n self.cost_.append(cost)\n return self\n \n def net_input(self, X):\n \"\"\"Calculate net input\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n \n def activation(self, z):\n \"\"\"Compute logistic sigmoid activation\"\"\"\n return 1. / (1. + np.exp(-np.clip(z, -250, 250)))\n \n def predict(self, X):\n \"\"\"Return class label after unit step\"\"\"\n return np.where(self.net_input(X) >= 0.0, 1, 0)\n # equivalent to:\n # return np.where(self.activation(self.net_input(X))\n # >= 0.5, 1, 0)"
] | [
[
"numpy.random.RandomState",
"numpy.dot",
"numpy.log",
"numpy.clip"
]
] |
jilljenn/vfm | [
"4cb2f5157ee7301321bb4babedb62223a720d231"
] | [
"vfm.py"
] | [
"import chainer\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.datasets import TupleDataset\n\nfrom chainer import Chain\nfrom chainer import links as L\nfrom chainer import functions as F\nfrom chainer import reporter\nfrom chainer import cuda\nimport numpy as np\n\n\ndef dot(a, b):\n \"\"\" Simple dot product\"\"\"\n return F.sum(a * b, axis=-1)\n\n\ndef batch_interactions(x):\n xp = cuda.get_array_module(x.data)\n batchsize = x.shape[0]\n shape = (batchsize, x.shape[1] ** 2)\n left = xp.tile(x.data, (1, x.shape[1]))\n right = xp.repeat(x.data, x.shape[1]).reshape(shape)\n return left, right\n\n\nclass VFM(Chain):\n lv_floor = -100.0\n\n def __init__(self, n_features=None, n_dim=1 , lossfun=F.mean_squared_error,\n lambda0=1, lambda1=1, lambda2=1, init_bias_mu=0.0,\n init_bias_lv=0.0, intx_term=True, total_nobs=1):\n self.n_dim = n_dim\n self.n_features = n_features\n self.lossfun = lossfun\n self.lambda0 = lambda0\n self.lambda1 = lambda1\n self.lambda2 = lambda2\n self.intx_term = intx_term\n self.total_nobs = total_nobs\n\n # In contrast to the FM model, the slopes and latent vectors\n # will have means (mu) and log variances (lv) for each component.\n super(VFM, self).__init__(bias_mu=L.Bias(shape=(1,)),\n bias_lv=L.Bias(shape=(1,)),\n slop_mu=L.Bias(shape=(1, 1)),\n slop_lv=L.Bias(shape=(1, 1)),\n slop_delta_mu=L.EmbedID(n_features, 1,\n ignore_label=-1),\n slop_delta_lv=L.EmbedID(n_features, 1,\n ignore_label=-1),\n feat_mu_vec=L.Bias(shape=(1, 1, n_dim)),\n feat_lv_vec=L.Bias(shape=(1, 1, n_dim)),\n feat_delta_mu=L.EmbedID(n_features, n_dim,\n ignore_label=-1),\n feat_delta_lv=L.EmbedID(n_features, n_dim,\n ignore_label=-1))\n\n # Xavier initialize weights\n c = np.sqrt(n_features * n_dim) * 1e3\n d = np.sqrt(n_features) * 1e3\n self.feat_delta_mu.W.data[...] = np.random.randn(n_features, n_dim) / c\n self.feat_delta_lv.W.data[...] = np.random.randn(n_features, n_dim) / c\n self.slop_delta_mu.W.data[...] = np.random.randn(n_features, 1) / d\n self.slop_delta_lv.W.data[...] = np.random.randn(n_features, 1) / d\n self.bias_mu.b.data[...] *= 0.0\n self.bias_mu.b.data[...] += init_bias_mu\n self.bias_lv.b.data[...] *= 0.0\n self.bias_lv.b.data[...] += init_bias_lv\n\n def term_bias(self, bs, train=True):\n \"\"\" Compute overall bias and broadcast to shape of batchsize\n \"\"\"\n\n shape = (bs, 1,)\n # Bias is drawn from a Gaussian with given mu and log variance\n bs_mu = F.broadcast_to(self.bias_mu.b, shape)\n bs_lv = F.broadcast_to(self.bias_lv.b, shape)\n bias = F.flatten(F.gaussian(bs_mu, bs_lv))\n\n # Add a very negative log variance so we're sampling\n # from a very narrow distribution about the mean.\n # Useful for validation dataset when we want to only guess\n # the mean.\n if not train:\n bs_lv += self.lv_floor\n\n # Compute prior on the bias, so compute the KL div\n # from the KL(N(mu_bias, var_bias) | N(0, 1))\n kld = F.gaussian_kl_divergence(self.bias_mu.b, self.bias_lv.b)\n return bias, kld\n\n def term_slop(self, loc, val, bs, nf, train=True):\n \"\"\" Compute the slope for each active feature.\n \"\"\"\n shape = (bs, nf)\n\n # Reshape all of our constants\n pr_mu = F.broadcast_to(self.slop_mu.b, shape)\n pr_lv = F.broadcast_to(self.slop_lv.b, shape)\n # This is either zero or a very negative number\n # indicating to sample N(mean, logvar) or just draw\n # the mean preicsely\n if not train:\n pr_lv += self.lv_floor\n\n # The feature slopes are grouped together so that they\n # all share a common mean. 
Then individual features slop_delta_lv\n # are shrunk towards zero, which effectively sets features to fall\n # back on the group mean.\n sl_mu = F.reshape(self.slop_delta_mu(loc), shape) + pr_mu\n sl_lv = F.reshape(self.slop_delta_lv(loc), shape) + pr_lv\n coef = F.gaussian(sl_mu, sl_lv)\n slop = F.sum(coef * val, axis=1)\n\n # Calculate divergence between group mean and N(0, 1)\n kld1 = F.gaussian_kl_divergence(self.slop_mu.b, self.slop_lv.b)\n # Calculate divergence of individual delta means and delta vars\n args = (self.slop_delta_mu.W, self.slop_delta_lv.W)\n kld2 = F.gaussian_kl_divergence(*args)\n\n return slop, kld1 + kld2\n\n def term_feat(self, iloc, jloc, ival, jval, bs, nf, train=True):\n # Change all of the shapes to form interaction vectors\n shape = (bs, nf * 2, self.n_dim)\n feat_mu_vec = F.broadcast_to(self.feat_mu_vec.b, shape)\n feat_lv_vec = F.broadcast_to(self.feat_lv_vec.b, shape)\n if not train:\n feat_lv_vec += self.lv_floor\n\n # Construct the interaction mean and variance\n # iloc is (bs, nf), feat(iloc) is (bs, nf, ndim) and\n # dot(feat, feat) is (bs, nf)\n ivec = F.gaussian(feat_mu_vec + self.feat_delta_mu(iloc),\n feat_lv_vec + self.feat_delta_lv(iloc))\n jvec = F.gaussian(feat_mu_vec + self.feat_delta_mu(jloc),\n feat_lv_vec + self.feat_delta_lv(jloc))\n # feat is (bs, )\n feat = dot(F.sum(ivec * jvec, axis=2), ival * jval)\n\n # Compute the KLD for the group mean vector and variance vector\n kld1 = F.gaussian_kl_divergence(self.feat_mu_vec.b, self.feat_lv_vec.b)\n # Compute the KLD for vector deviations from the group mean and var\n kld2 = F.gaussian_kl_divergence(self.feat_delta_mu.W,\n self.feat_delta_lv.W)\n return feat, kld1 + kld2\n\n def forward(self, loc, val, y, train=True):\n \"\"\" Given the sparse feature vector defined by location\n integers for the column index and the value at that index.\n y ~ c + sum(w_i x_i) + sum_ij( <v_i, v_j> * x_i * x_j)\n\n Parameters\n ----------\n val : array of float\n Values in the feature array. Should of shape (batchsize, n_feat_max)\n\n loc : array of int\n Location of the non-zero columns in the sparse vector. 
Should be of\n shape (batchsize, n_feat_max)\n\n y : array of float\n Array of expected outcome.\n\n train: bool\n If True uses the reparameterization trick to estimate variables.\n If False, this sets the variance to nearly zero such that\n parameters are always set to the mean with no noise, which is useful\n at test time.\n\n \"\"\"\n bs = val.data.shape[0]\n nf = val.data.shape[1]\n\n iloc, jloc = batch_interactions(loc)\n ival, jval = batch_interactions(val)\n\n # Compute scalar bias term\n bias, kld0 = self.term_bias(bs, train=train)\n # Compute the feature weights\n slop, kld1 = self.term_slop(loc, val, bs, nf, train=train)\n # Compute factorized weights on interaction features\n feat, kld2 = self.term_feat(iloc, jloc, ival, jval,\n bs, nf, train=train)\n\n # Optionally choose to include the interaction term\n # without this is linear regression\n pred = bias + slop\n if self.intx_term:\n pred += feat\n\n return pred, kld0, kld1, kld2\n\n def __call__(self, loc, val, y, train=True):\n bs = val.data.shape[0]\n pred, kld0, kld1, kld2 = self.forward(loc, val, y, train=train)\n\n # Compute MSE loss\n mse = F.mean_squared_error(pred, y)\n rmse = F.sqrt(mse) # Only used for reporting\n\n # Now compute the total KLD loss\n kldt = kld0 * self.lambda0 + kld1 * self.lambda1 + kld2 * self.lambda2\n\n # Total loss is MSE plus regularization losses\n loss = mse + kldt * (1.0 / self.total_nobs)\n\n # Log the errors\n logs = {'loss': loss, 'rmse': rmse, 'kld0': kld0, 'kld1': kld1,\n 'kld2': kld2, 'kldt': kldt, 'bias': F.sum(self.bias_mu.b)}\n reporter.report(logs, self)\n return loss\n\n\nclass TestModeEvaluator(extensions.Evaluator):\n def evaluate(self):\n model = self.get_target('main')\n model.train = False\n ret = super(TestModeEvaluator, self).evaluate()\n model.train = True\n return ret\n\n\ndef fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,\n resume=None, alpha=1e-3):\n if device >= 0:\n chainer.cuda.get_device(device).use()\n model.to_gpu(device)\n optimizer = chainer.optimizers.Adam(alpha)\n optimizer.setup(model)\n\n # Setup iterators\n train_iter = chainer.iterators.SerialIterator(train, batchsize)\n valid_iter = chainer.iterators.SerialIterator(valid, batchsize,\n repeat=False, shuffle=False)\n updater = training.StandardUpdater(train_iter, optimizer, device=device)\n trainer = training.Trainer(updater, (n_epoch, 'epoch'),\n out='out_' + str(device))\n\n # Setup logging, printing & saving\n keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1']\n keys += ['kldg', 'kldi', 'hypg', 'hypi']\n keys += ['hypglv', 'hypilv']\n reports = ['epoch']\n reports += ['main/' + key for key in keys]\n reports += ['validation/main/rmse']\n trainer.extend(TestModeEvaluator(valid_iter, model, device=device))\n trainer.extend(extensions.Evaluator(valid_iter, model, device=device))\n trainer.extend(extensions.dump_graph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))\n trainer.extend(extensions.PrintReport(reports))\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n # If previous model detected, resume\n if resume:\n print(\"Loading from {}\".format(resume))\n chainer.serializers.load_npz(resume, trainer)\n\n # Run the model\n trainer.run()\n"
] | [
[
"numpy.sqrt",
"numpy.random.randn"
]
] |
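The vfm.py record above implements a variational factorization machine in Chainer, trained through its own `fit` helper. A minimal usage sketch follows; it assumes the file is importable as `vfm` and that Chainer is installed, and the synthetic data names and shapes are illustrative only, taken from the forward() docstring (`loc` holds column indices, `val` the values at those columns, `y` the regression target).

# Hedged sketch: drives the VFM model from the record above on synthetic
# sparse features. The module name `vfm`, the sizes, and the train/valid
# split are all assumptions, not part of the original record.
import numpy as np
from chainer.datasets import TupleDataset
from vfm import VFM, fit

rng = np.random.RandomState(0)
n_obs, n_feat_max, n_features = 1000, 4, 50
loc = rng.randint(0, n_features, size=(n_obs, n_feat_max)).astype(np.int32)
val = rng.rand(n_obs, n_feat_max).astype(np.float32)
y = rng.rand(n_obs).astype(np.float32)

model = VFM(n_features=n_features, n_dim=2, total_nobs=n_obs)
train = TupleDataset(loc[:800], val[:800], y[:800])
valid = TupleDataset(loc[800:], val[800:], y[800:])
fit(model, train, valid, device=-1, batchsize=128, n_epoch=5)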
degiere/zipline | [
"bc0b117dc94b8e93081818964e3b1bdbf9b33abb"
] | [
"zipline/pipeline/factors/factor.py"
] | [
"\"\"\"\nfactor.py\n\"\"\"\nfrom functools import wraps\nfrom operator import attrgetter\nfrom numbers import Number\n\nfrom numpy import inf, where\nfrom toolz import curry\n\nfrom zipline.errors import UnknownRankMethod\nfrom zipline.lib.normalize import naive_grouped_rowwise_apply\nfrom zipline.lib.rank import masked_rankdata_2d\nfrom zipline.pipeline.classifiers import Classifier, Everything, Quantiles\nfrom zipline.pipeline.mixins import (\n CustomTermMixin,\n LatestMixin,\n PositiveWindowLengthMixin,\n RestrictedDTypeMixin,\n SingleInputMixin,\n)\nfrom zipline.pipeline.term import (\n ComputableTerm,\n NotSpecified,\n NotSpecifiedType,\n Term,\n)\nfrom zipline.pipeline.expression import (\n BadBinaryOperator,\n COMPARISONS,\n is_comparison,\n MATH_BINOPS,\n method_name_for_op,\n NumericalExpression,\n NUMEXPR_MATH_FUNCS,\n UNARY_OPS,\n unary_op_name,\n)\nfrom zipline.pipeline.filters import (\n Filter,\n NumExprFilter,\n PercentileFilter,\n NullFilter,\n)\nfrom zipline.utils.input_validation import expect_types\nfrom zipline.utils.math_utils import nanmean, nanstd\nfrom zipline.utils.numpy_utils import (\n bool_dtype,\n coerce_to_dtype,\n datetime64ns_dtype,\n float64_dtype,\n int64_dtype,\n)\nfrom zipline.utils.preprocess import preprocess\n\n\n_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])\n\n\ndef coerce_numbers_to_my_dtype(f):\n \"\"\"\n A decorator for methods whose signature is f(self, other) that coerces\n ``other`` to ``self.dtype``.\n\n This is used to make comparison operations between numbers and `Factor`\n instances work independently of whether the user supplies a float or\n integer literal.\n\n For example, if I write::\n\n my_filter = my_factor > 3\n\n my_factor probably has dtype float64, but 3 is an int, so we want to coerce\n to float64 before doing the comparison.\n \"\"\"\n @wraps(f)\n def method(self, other):\n if isinstance(other, Number):\n other = coerce_to_dtype(self.dtype, other)\n return f(self, other)\n return method\n\n\n@curry\ndef set_attribute(name, value):\n \"\"\"\n Decorator factory for setting attributes on a function.\n\n Doesn't change the behavior of the wrapped function.\n\n Usage\n -----\n >>> @set_attribute('__name__', 'foo')\n ... def bar():\n ... return 3\n ...\n >>> bar()\n 3\n >>> bar.__name__\n 'foo'\n \"\"\"\n def decorator(f):\n setattr(f, name, value)\n return f\n return decorator\n\n\n# Decorators for setting the __name__ and __doc__ properties of a decorated\n# function.\n# Example:\nwith_name = set_attribute('__name__')\nwith_doc = set_attribute('__doc__')\n\n\ndef binop_return_type(op):\n if is_comparison(op):\n return NumExprFilter\n else:\n return NumExprFactor\n\n\ndef binop_return_dtype(op, left, right):\n \"\"\"\n Compute the expected return dtype for the given binary operator.\n\n Parameters\n ----------\n op : str\n Operator symbol, (e.g. 
'+', '-', ...).\n left : numpy.dtype\n Dtype of left hand side.\n right : numpy.dtype\n Dtype of right hand side.\n\n Returns\n -------\n outdtype : numpy.dtype\n The dtype of the result of `left <op> right`.\n \"\"\"\n if is_comparison(op):\n if left != right:\n raise TypeError(\n \"Don't know how to compute {left} {op} {right}.\\n\"\n \"Comparisons are only supported between Factors of equal \"\n \"dtypes.\".format(left=left, op=op, right=right)\n )\n return bool_dtype\n\n elif left != float64_dtype or right != float64_dtype:\n raise TypeError(\n \"Don't know how to compute {left} {op} {right}.\\n\"\n \"Arithmetic operators are only supported between Factors of \"\n \"dtype 'float64'.\".format(\n left=left.name,\n op=op,\n right=right.name,\n )\n )\n return float64_dtype\n\n\ndef binary_operator(op):\n \"\"\"\n Factory function for making binary operator methods on a Factor subclass.\n\n Returns a function, \"binary_operator\" suitable for implementing functions\n like __add__.\n \"\"\"\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n @with_doc(\"Binary Operator: '%s'\" % op)\n @with_name(method_name_for_op(op))\n @coerce_numbers_to_my_dtype\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n # .dtype access is safe here because coerce_numbers_to_my_dtype\n # will convert any input numbers to numpy equivalents.\n dtype=binop_return_dtype(op, self.dtype, other.dtype)\n )\n raise BadBinaryOperator(op, self, other)\n\n return binary_operator\n\n\ndef reflected_binary_operator(op):\n \"\"\"\n Factory function for making binary operator methods on a Factor.\n\n Returns a function, \"reflected_binary_operator\" suitable for implementing\n functions like __radd__.\n \"\"\"\n assert not is_comparison(op)\n\n @with_name(method_name_for_op(op, commute=True))\n @coerce_numbers_to_my_dtype\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n dtype=binop_return_dtype(op, other.dtype, self.dtype)\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n dtype=binop_return_dtype(op, other.dtype, self.dtype),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator\n\n\ndef unary_operator(op):\n \"\"\"\n Factory function for making unary operator methods for Factors.\n \"\"\"\n # Only negate is currently supported.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n @with_doc(\"Unary Operator: '%s'\" % op)\n @with_name(unary_op_name(op))\n def unary_operator(self):\n if self.dtype != float64_dtype:\n raise TypeError(\n \"Can't apply unary operator {op!r} to instance of \"\n \"{typename!r} with dtype {dtypename!r}.\\n\"\n \"{op!r} is only supported for Factors of dtype \"\n \"'float64'.\".format(\n op=op,\n typename=type(self).__name__,\n dtypename=self.dtype.name,\n )\n )\n\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n dtype=float64_dtype,\n )\n else:\n return NumExprFactor(\n \"{op}x_0\".format(op=op),\n (self,),\n dtype=float64_dtype,\n )\n return unary_operator\n\n\ndef function_application(func):\n \"\"\"\n Factory function for producing function application methods for Factor\n subclasses.\n \"\"\"\n if func not in NUMEXPR_MATH_FUNCS:\n raise ValueError(\"Unsupported mathematical function '%s'\" % func)\n\n @with_name(func)\n def mathfunc(self):\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{func}({expr})\".format(func=func, expr=self._expr),\n self.inputs,\n dtype=float64_dtype,\n )\n else:\n return NumExprFactor(\n 
\"{func}(x_0)\".format(func=func),\n (self,),\n dtype=float64_dtype,\n )\n return mathfunc\n\n\ndef restrict_to_dtype(dtype, message_template):\n \"\"\"\n A factory for decorators that restricting Factor methods to only be\n callable on Factors with a specific dtype.\n\n This is conceptually similar to\n zipline.utils.input_validation.expect_dtypes, but provides more flexibility\n for providing error messages that are specifically targeting Factor\n methods.\n\n Parameters\n ----------\n dtype : numpy.dtype\n The dtype on which the decorated method may be called.\n message_template : str\n A template for the error message to be raised.\n `message_template.format` will be called with keyword arguments\n `method_name`, `expected_dtype`, and `received_dtype`.\n\n Usage\n -----\n @restrict_to_dtype(\n dtype=float64_dtype,\n message_template=(\n \"{method_name}() was called on a factor of dtype {received_dtype}.\"\n \"{method_name}() requires factors of dtype{expected_dtype}.\"\n\n ),\n )\n def some_factor_method(self, ...):\n self.stuff_that_requires_being_float64(...)\n \"\"\"\n def processor(factor_method, _, factor_instance):\n factor_dtype = factor_instance.dtype\n if factor_dtype != dtype:\n raise TypeError(\n message_template.format(\n method_name=factor_method.__name__,\n expected_dtype=dtype.name,\n received_dtype=factor_dtype,\n )\n )\n return factor_instance\n return preprocess(self=processor)\n\n# Decorators for Factor methods.\nif_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(\n dtype=float64_dtype,\n message_template=(\n \"{method_name}() was called on a factor of dtype {received_dtype}.\\n\"\n \"{method_name}() is only defined for dtype {expected_dtype}.\"\n \"To filter missing data, use isnull() or notnull().\"\n )\n)\n\nfloat64_only = restrict_to_dtype(\n dtype=float64_dtype,\n message_template=(\n \"{method_name}() is only defined on Factors of dtype {expected_dtype},\"\n \" but it was called on a Factor of dtype {received_dtype}.\"\n )\n)\n\n\nFACTOR_DTYPES = frozenset([datetime64ns_dtype, float64_dtype, int64_dtype])\n\n\nclass Factor(RestrictedDTypeMixin, ComputableTerm):\n \"\"\"\n Pipeline API expression producing a numerical or date-valued output.\n\n Factors are the most commonly-used Pipeline term, representing the result\n of any computation producing a numerical result.\n\n Factors can be combined, both with other Factors and with scalar values,\n via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).\n This makes it easy to write complex expressions that combine multiple\n Factors. For example, constructing a Factor that computes the average of\n two other Factors is simply::\n\n >>> f1 = SomeFactor(...)\n >>> f2 = SomeOtherFactor(...)\n >>> average = (f1 + f2) / 2.0\n\n Factors can also be converted into :class:`zipline.pipeline.Filter` objects\n via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).\n\n There are many natural operators defined on Factors besides the basic\n numerical operators. 
These include methods identifying missing or\n extreme-valued outputs (isnull, notnull, isnan, notnan), methods for\n normalizing outputs (rank, demean, zscore), and methods for constructing\n Filters based on rank-order properties of results (top, bottom,\n percentile_between).\n \"\"\"\n ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin\n\n # Dynamically add functions for creating NumExprFactor/NumExprFilter\n # instances.\n clsdict = locals()\n clsdict.update(\n {\n method_name_for_op(op): binary_operator(op)\n # Don't override __eq__ because it breaks comparisons on tuples of\n # Factors.\n for op in MATH_BINOPS.union(COMPARISONS - {'=='})\n }\n )\n clsdict.update(\n {\n method_name_for_op(op, commute=True): reflected_binary_operator(op)\n for op in MATH_BINOPS\n }\n )\n clsdict.update(\n {\n unary_op_name(op): unary_operator(op)\n for op in UNARY_OPS\n }\n )\n\n clsdict.update(\n {\n funcname: function_application(funcname)\n for funcname in NUMEXPR_MATH_FUNCS\n }\n )\n\n __truediv__ = clsdict['__div__']\n __rtruediv__ = clsdict['__rdiv__']\n\n eq = binary_operator('==')\n\n @expect_types(\n mask=(Filter, NotSpecifiedType),\n groupby=(Classifier, NotSpecifiedType),\n )\n @float64_only\n def demean(self, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Factor that computes ``self`` and subtracts the mean from\n row of the result.\n\n If ``mask`` is supplied, ignore values where ``mask`` returns False\n when computing row means, and output NaN anywhere the mask is False.\n\n If ``groupby`` is supplied, compute by partitioning each row based on\n the values produced by ``groupby``, de-meaning the partitioned arrays,\n and stitching the sub-results back together.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n A Filter defining values to ignore when computing means.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to compute means.\n\n Example\n -------\n Let ``f`` be a Factor which would produce the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 1.0 2.0 3.0 4.0\n 2017-03-14 1.5 2.5 3.5 1.0\n 2017-03-15 2.0 3.0 4.0 1.5\n 2017-03-16 2.5 3.5 1.0 2.0\n\n Let ``c`` be a Classifier producing the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 1 1 2 2\n 2017-03-14 1 1 2 2\n 2017-03-15 1 1 2 2\n 2017-03-16 1 1 2 2\n\n Let ``m`` be a Filter producing the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 False True True True\n 2017-03-14 True False True True\n 2017-03-15 True True False True\n 2017-03-16 True True True False\n\n Then ``f.demean()`` will subtract the mean from each row produced by\n ``f``.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 -1.500 -0.500 0.500 1.500\n 2017-03-14 -0.625 0.375 1.375 -1.125\n 2017-03-15 -0.625 0.375 1.375 -1.125\n 2017-03-16 0.250 1.250 -1.250 -0.250\n\n ``f.demean(mask=m)`` will subtract the mean from each row, but means\n will be calculated ignoring values on the diagonal, and NaNs will\n written to the diagonal in the output. Diagonal values are ignored\n because they are the locations where the mask ``m`` produced False.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 NaN -1.000 0.000 1.000\n 2017-03-14 -0.500 NaN 1.500 -1.000\n 2017-03-15 -0.166 0.833 NaN -0.666\n 2017-03-16 0.166 1.166 -1.333 NaN\n\n ``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and\n MCD/BK from their respective entries. The AAPL/MSFT are grouped\n together because both assets always produce 1 in the output of the\n classifier ``c``. 
Similarly, MCD/BK are grouped together because they\n always produce 2.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 -0.500 0.500 -0.500 0.500\n 2017-03-14 -0.500 0.500 1.250 -1.250\n 2017-03-15 -0.500 0.500 1.250 -1.250\n 2017-03-16 -0.500 0.500 -0.500 0.500\n\n ``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of\n AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on\n the diagonal , and NaNs will be written to the diagonal in the output.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 NaN 0.000 -0.500 0.500\n 2017-03-14 0.000 NaN 1.250 -1.250\n 2017-03-15 -0.500 0.500 NaN 0.000\n 2017-03-16 -0.500 0.500 0.000 NaN\n\n Notes\n -----\n Mean is sensitive to the magnitudes of outliers. When working with\n factor that can potentially produce large outliers, it is often useful\n to use the ``mask`` parameter to discard values at the extremes of the\n distribution::\n\n >>> base = MyFactor(...)\n >>> normalized = base.demean(mask=base.percentile_between(1, 99))\n\n ``demean()`` is only supported on Factors of dtype float64.\n\n See Also\n --------\n :meth:`pandas.DataFrame.groupby`\n \"\"\"\n # This is a named function so that it has a __name__ for use in the\n # graph repr of GroupedRowTransform.\n def demean(row):\n return row - nanmean(row)\n\n return GroupedRowTransform(\n transform=demean,\n factor=self,\n mask=mask,\n groupby=groupby,\n )\n\n @expect_types(\n mask=(Filter, NotSpecifiedType),\n groupby=(Classifier, NotSpecifiedType),\n )\n @float64_only\n def zscore(self, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Factor that Z-Scores each day's results.\n\n The Z-Score of a row is defined as::\n\n (row - row.mean()) / row.stddev()\n\n If ``mask`` is supplied, ignore values where ``mask`` returns False\n when computing row means and standard deviations, and output NaN\n anywhere the mask is False.\n\n If ``groupby`` is supplied, compute by partitioning each row based on\n the values produced by ``groupby``, z-scoring the partitioned arrays,\n and stitching the sub-results back together.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n A Filter defining values to ignore when Z-Scoring.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to compute Z-Scores.\n\n Returns\n -------\n zscored : zipline.pipeline.Factor\n A Factor producing that z-scores the output of self.\n\n Notes\n -----\n Mean and standard deviation are sensitive to the magnitudes of\n outliers. 
When working with factor that can potentially produce large\n outliers, it is often useful to use the ``mask`` parameter to discard\n values at the extremes of the distribution::\n\n >>> base = MyFactor(...)\n >>> normalized = base.zscore(mask=base.percentile_between(1, 99))\n\n ``zscore()`` is only supported on Factors of dtype float64.\n\n Example\n -------\n See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth\n example of the semantics for ``mask`` and ``groupby``.\n\n See Also\n --------\n :meth:`pandas.DataFrame.groupby`\n \"\"\"\n # This is a named function so that it has a __name__ for use in the\n # graph repr of GroupedRowTransform.\n def zscore(row):\n return (row - nanmean(row)) / nanstd(row)\n\n return GroupedRowTransform(\n transform=zscore,\n factor=self,\n mask=mask,\n groupby=groupby,\n )\n\n def rank(self, method='ordinal', ascending=True, mask=NotSpecified):\n \"\"\"\n Construct a new Factor representing the sorted rank of each column\n within each row.\n\n Parameters\n ----------\n method : str, {'ordinal', 'min', 'max', 'dense', 'average'}\n The method used to assign ranks to tied elements. See\n `scipy.stats.rankdata` for a full description of the semantics for\n each ranking method. Default is 'ordinal'.\n ascending : bool, optional\n Whether to return sorted rank in ascending or descending order.\n Default is True.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, ranks are computed ignoring any asset/date\n pairs for which `mask` produces a value of False.\n\n Returns\n -------\n ranks : zipline.pipeline.factors.Rank\n A new factor that will compute the ranking of the data produced by\n `self`.\n\n Notes\n -----\n The default value for `method` is different from the default for\n `scipy.stats.rankdata`. See that function's documentation for a full\n description of the valid inputs to `method`.\n\n Missing or non-existent data on a given day will cause an asset to be\n given a rank of NaN for that day.\n\n See Also\n --------\n :func:`scipy.stats.rankdata`\n :class:`zipline.pipeline.factors.factor.Rank`\n \"\"\"\n return Rank(self, method=method, ascending=ascending, mask=mask)\n\n @expect_types(bins=int, mask=(Filter, NotSpecifiedType))\n def quantiles(self, bins, mask=NotSpecified):\n \"\"\"\n Construct a Classifier computing quantiles of the output of ``self``.\n\n Every non-NaN data point the output is labelled with an integer value\n from 0 to (bins - 1). NaNs are labelled with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n bins : int\n Number of bins labels to compute.\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing quantiles.\n\n Returns\n -------\n quantiles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to (bins - 1).\n \"\"\"\n if mask is NotSpecified:\n mask = self.mask\n return Quantiles(inputs=(self,), bins=bins, mask=mask)\n\n @expect_types(mask=(Filter, NotSpecifiedType))\n def quartiles(self, mask=NotSpecified):\n \"\"\"\n Construct a Classifier computing quartiles over the output of ``self``.\n\n Every non-NaN data point the output is labelled with a value of either\n 0, 1, 2, or 3, corresponding to the first, second, third, or fourth\n quartile over each row. 
NaN data points are labelled with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing quartiles.\n\n Returns\n -------\n quartiles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to 3.\n \"\"\"\n return self.quantiles(bins=4, mask=mask)\n\n @expect_types(mask=(Filter, NotSpecifiedType))\n def quintiles(self, mask=NotSpecified):\n \"\"\"\n Construct a Classifier computing quintile labels on ``self``.\n\n Every non-NaN data point the output is labelled with a value of either\n 0, 1, 2, or 3, 4, corresonding to quintiles over each row. NaN data\n points are labelled with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing quintiles.\n\n Returns\n -------\n quintiles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to 4.\n \"\"\"\n return self.quantiles(bins=5, mask=mask)\n\n @expect_types(mask=(Filter, NotSpecifiedType))\n def deciles(self, mask=NotSpecified):\n \"\"\"\n Construct a Classifier computing decile labels on ``self``.\n\n Every non-NaN data point the output is labelled with a value from 0 to\n 9 corresonding to deciles over each row. NaN data points are labelled\n with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing deciles.\n\n Returns\n -------\n deciles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to 9.\n \"\"\"\n return self.quantiles(bins=10, mask=mask)\n\n def top(self, N, mask=NotSpecified):\n \"\"\"\n Construct a Filter matching the top N asset values of self each day.\n\n Parameters\n ----------\n N : int\n Number of assets passing the returned filter each day.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, top values are computed ignoring any\n asset/date pairs for which `mask` produces a value of False.\n\n Returns\n -------\n filter : zipline.pipeline.filters.Filter\n \"\"\"\n return self.rank(ascending=False, mask=mask) <= N\n\n def bottom(self, N, mask=NotSpecified):\n \"\"\"\n Construct a Filter matching the bottom N asset values of self each day.\n\n Parameters\n ----------\n N : int\n Number of assets passing the returned filter each day.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, bottom values are computed ignoring any\n asset/date pairs for which `mask` produces a value of False.\n\n Returns\n -------\n filter : zipline.pipeline.Filter\n \"\"\"\n return self.rank(ascending=True, mask=mask) <= N\n\n def percentile_between(self,\n min_percentile,\n max_percentile,\n mask=NotSpecified):\n \"\"\"\n Construct a new Filter representing entries from the output of this\n Factor that fall within the percentile range defined by min_percentile\n and max_percentile.\n\n Parameters\n ----------\n min_percentile : float [0.0, 100.0]\n Return True for 
assets falling above this percentile in the data.\n max_percentile : float [0.0, 100.0]\n Return True for assets falling below this percentile in the data.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when percentile\n calculating thresholds. If mask is supplied, percentile cutoffs\n are computed each day using only assets for which ``mask`` returns\n True. Assets for which ``mask`` produces False will produce False\n in the output of this Factor as well.\n\n Returns\n -------\n out : zipline.pipeline.filters.PercentileFilter\n A new filter that will compute the specified percentile-range mask.\n\n See Also\n --------\n zipline.pipeline.filters.filter.PercentileFilter\n \"\"\"\n return PercentileFilter(\n self,\n min_percentile=min_percentile,\n max_percentile=max_percentile,\n mask=mask,\n )\n\n def isnull(self):\n \"\"\"\n A Filter producing True for values where this Factor has missing data.\n\n Equivalent to self.isnan() when ``self.dtype`` is float64.\n Otherwise equivalent to ``self.eq(self.missing_value)``.\n\n Returns\n -------\n filter : zipline.pipeline.filters.Filter\n \"\"\"\n if self.dtype == float64_dtype:\n # Using isnan is more efficient when possible because we can fold\n # the isnan computation with other NumExpr expressions.\n return self.isnan()\n else:\n return NullFilter(self)\n\n def notnull(self):\n \"\"\"\n A Filter producing True for values where this Factor has complete data.\n\n Equivalent to ``~self.isnan()` when ``self.dtype`` is float64.\n Otherwise equivalent to ``(self != self.missing_value)``.\n \"\"\"\n return ~self.isnull()\n\n @if_not_float64_tell_caller_to_use_isnull\n def isnan(self):\n \"\"\"\n A Filter producing True for all values where this Factor is NaN.\n\n Returns\n -------\n nanfilter : zipline.pipeline.filters.Filter\n \"\"\"\n return self != self\n\n @if_not_float64_tell_caller_to_use_isnull\n def notnan(self):\n \"\"\"\n A Filter producing True for values where this Factor is not NaN.\n\n Returns\n -------\n nanfilter : zipline.pipeline.filters.Filter\n \"\"\"\n return ~self.isnan()\n\n @if_not_float64_tell_caller_to_use_isnull\n def isfinite(self):\n \"\"\"\n A Filter producing True for values where this Factor is anything but\n NaN, inf, or -inf.\n \"\"\"\n return (-inf < self) & (self < inf)\n\n\nclass NumExprFactor(NumericalExpression, Factor):\n \"\"\"\n Factor computed from a numexpr expression.\n\n Parameters\n ----------\n expr : string\n A string suitable for passing to numexpr. 
All variables in 'expr'\n should be of the form \"x_i\", where i is the index of the corresponding\n factor input in 'binds'.\n binds : tuple\n A tuple of factors to use as inputs.\n\n Notes\n -----\n NumExprFactors are constructed by numerical operators like `+` and `-`.\n Users should rarely need to construct a NumExprFactor directly.\n \"\"\"\n pass\n\n\nclass GroupedRowTransform(Factor):\n \"\"\"\n A Factor that transforms an input factor by applying a row-wise\n shape-preserving transformation on classifier-defined groups of that\n Factor.\n\n This is most often useful for normalization operators like ``zscore`` or\n ``demean``.\n\n Parameters\n ----------\n transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]\n Function to apply over each row group.\n factor : zipline.pipeline.Factor\n The factor providing baseline data to transform.\n mask : zipline.pipeline.Filter\n Mask of entries to ignore when calculating transforms.\n groupby : zipline.pipeline.Classifier\n Classifier partitioning ``factor`` into groups to use when calculating\n means.\n\n Notes\n -----\n Users should rarely construct instances of this factor directly. Instead,\n they should construct instances via factor normalization methods like\n ``zscore`` and ``demean``.\n\n See Also\n --------\n zipline.pipeline.factors.Factor.zscore\n zipline.pipeline.factors.Factor.demean\n \"\"\"\n window_length = 0\n\n def __new__(cls, transform, factor, mask, groupby):\n\n if mask is NotSpecified:\n mask = factor.mask\n else:\n mask = mask & factor.mask\n\n if groupby is NotSpecified:\n groupby = Everything(mask=mask)\n\n return super(GroupedRowTransform, cls).__new__(\n GroupedRowTransform,\n transform=transform,\n inputs=(factor, groupby),\n missing_value=factor.missing_value,\n mask=mask,\n dtype=factor.dtype,\n )\n\n def _init(self, transform, *args, **kwargs):\n self._transform = transform\n return super(GroupedRowTransform, self)._init(*args, **kwargs)\n\n @classmethod\n def static_identity(cls, transform, *args, **kwargs):\n return (\n super(GroupedRowTransform, cls).static_identity(*args, **kwargs),\n transform,\n )\n\n def _compute(self, arrays, dates, assets, mask):\n data = arrays[0]\n null_group_value = self.inputs[1].missing_value\n group_labels = where(\n mask,\n arrays[1],\n null_group_value,\n )\n\n return where(\n group_labels != null_group_value,\n naive_grouped_rowwise_apply(\n data=data,\n group_labels=group_labels,\n func=self._transform,\n ),\n self.missing_value,\n )\n\n @property\n def transform_name(self):\n return self._transform.__name__\n\n def short_repr(self):\n return type(self).__name__ + '(%r)' % self.transform_name\n\n\nclass Rank(SingleInputMixin, Factor):\n \"\"\"\n A Factor representing the row-wise rank data of another Factor.\n\n Parameters\n ----------\n factor : zipline.pipeline.factors.Factor\n The factor on which to compute ranks.\n method : str, {'average', 'min', 'max', 'dense', 'ordinal'}\n The method used to assign ranks to tied elements. 
See\n `scipy.stats.rankdata` for a full description of the semantics for each\n ranking method.\n\n See Also\n --------\n :func:`scipy.stats.rankdata`\n :class:`Factor.rank`\n\n Notes\n -----\n Most users should call Factor.rank rather than directly construct an\n instance of this class.\n \"\"\"\n window_length = 0\n dtype = float64_dtype\n\n def __new__(cls, factor, method, ascending, mask):\n return super(Rank, cls).__new__(\n cls,\n inputs=(factor,),\n method=method,\n ascending=ascending,\n mask=mask,\n )\n\n def _init(self, method, ascending, *args, **kwargs):\n self._method = method\n self._ascending = ascending\n return super(Rank, self)._init(*args, **kwargs)\n\n @classmethod\n def static_identity(cls, method, ascending, *args, **kwargs):\n return (\n super(Rank, cls).static_identity(*args, **kwargs),\n method,\n ascending,\n )\n\n def _validate(self):\n \"\"\"\n Verify that the stored rank method is valid.\n \"\"\"\n if self._method not in _RANK_METHODS:\n raise UnknownRankMethod(\n method=self._method,\n choices=set(_RANK_METHODS),\n )\n return super(Rank, self)._validate()\n\n def _compute(self, arrays, dates, assets, mask):\n \"\"\"\n For each row in the input, compute a like-shaped array of per-row\n ranks.\n \"\"\"\n return masked_rankdata_2d(\n arrays[0],\n mask,\n self.inputs[0].missing_value,\n self._method,\n self._ascending,\n )\n\n def __repr__(self):\n return \"{type}({input_}, method='{method}', mask={mask})\".format(\n type=type(self).__name__,\n input_=self.inputs[0],\n method=self._method,\n mask=self.mask,\n )\n\n\nclass CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):\n '''\n Base class for user-defined Factors.\n\n Parameters\n ----------\n inputs : iterable, optional\n An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),\n describing the data to load and pass to `self.compute`. If this\n argument is passed to the CustomFactor constructor, we look for a\n class-level attribute named `inputs`.\n window_length : int, optional\n Number of rows to pass for each input. If this argument is not passed\n to the CustomFactor constructor, we look for a class-level attribute\n named `window_length`.\n mask : zipline.pipeline.Filter, optional\n A Filter describing the assets on which we should compute each day.\n Each call to ``CustomFactor.compute`` will only receive assets for\n which ``mask`` produced True on the day for which compute is being\n called.\n\n Notes\n -----\n Users implementing their own Factors should subclass CustomFactor and\n implement a method named `compute` with the following signature:\n\n .. code-block:: python\n\n def compute(self, today, assets, out, *inputs):\n ...\n\n On each simulation date, ``compute`` will be called with the current date,\n an array of sids, an output array, and an input array for each expression\n passed as inputs to the CustomFactor constructor.\n\n The specific types of the values passed to `compute` are as follows::\n\n today : np.datetime64[ns]\n Row label for the last row of all arrays passed as `inputs`.\n assets : np.array[int64, ndim=1]\n Column labels for `out` and`inputs`.\n out : np.array[self.dtype, ndim=1]\n Output array of the same shape as `assets`. `compute` should write\n its desired return values into `out`.\n *inputs : tuple of np.array\n Raw data arrays corresponding to the values of `self.inputs`.\n\n ``compute`` functions should expect to be passed NaN values for dates on\n which no data was available for an asset. 
This may include dates on which\n an asset did not yet exist.\n\n For example, if a CustomFactor requires 10 rows of close price data, and\n asset A started trading on Monday June 2nd, 2014, then on Tuesday, June\n 3rd, 2014, the column of input data for asset A will have 9 leading NaNs\n for the preceding days on which data was not yet available.\n\n Examples\n --------\n\n A CustomFactor with pre-declared defaults:\n\n .. code-block:: python\n\n class TenDayRange(CustomFactor):\n \"\"\"\n Computes the difference between the highest high in the last 10\n days and the lowest low.\n\n Pre-declares high and low as default inputs and `window_length` as\n 10.\n \"\"\"\n\n inputs = [USEquityPricing.high, USEquityPricing.low]\n window_length = 10\n\n def compute(self, today, assets, out, highs, lows):\n from numpy import nanmin, nanmax\n\n highest_highs = nanmax(highs, axis=0)\n lowest_lows = nanmin(lows, axis=0)\n out[:] = highest_highs - lowest_lows\n\n\n # Doesn't require passing inputs or window_length because they're\n # pre-declared as defaults for the TenDayRange class.\n ten_day_range = TenDayRange()\n\n A CustomFactor without defaults:\n\n .. code-block:: python\n\n class MedianValue(CustomFactor):\n \"\"\"\n Computes the median value of an arbitrary single input over an\n arbitrary window..\n\n Does not declare any defaults, so values for `window_length` and\n `inputs` must be passed explicitly on every construction.\n \"\"\"\n\n def compute(self, today, assets, out, data):\n from numpy import nanmedian\n out[:] = data.nanmedian(data, axis=0)\n\n # Values for `inputs` and `window_length` must be passed explicitly to\n # MedianValue.\n median_close10 = MedianValue([USEquityPricing.close], window_length=10)\n median_low15 = MedianValue([USEquityPricing.low], window_length=15)\n '''\n dtype = float64_dtype\n\n\nclass Latest(LatestMixin, CustomFactor):\n \"\"\"\n Factor producing the most recently-known value of `inputs[0]` on each day.\n\n The `.latest` attribute of DataSet columns returns an instance of this\n Factor.\n \"\"\"\n window_length = 1\n\n def compute(self, today, assets, out, data):\n out[:] = data[-1]\n"
] | [
[
"numpy.where"
]
] |
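The factor.py record above defines zipline's Factor operator surface and the CustomFactor base class. A short sketch of the subclassing pattern its own docstring documents follows; it assumes zipline is installed, and the factor name `TrailingMeanClose` and its window length are hypothetical.

# Hedged sketch of the CustomFactor pattern documented in the record above.
# `TrailingMeanClose` is an illustrative example, not from the source repo.
import numpy as np
from zipline.pipeline import CustomFactor, Pipeline
from zipline.pipeline.data import USEquityPricing

class TrailingMeanClose(CustomFactor):
    """Mean close price over a trailing 10-day window (illustrative)."""
    inputs = [USEquityPricing.close]
    window_length = 10

    def compute(self, today, assets, out, closes):
        # `closes` is (window_length, n_assets); reduce over the window axis.
        out[:] = np.nanmean(closes, axis=0)

# Factors compose via the operators and normalizers factor.py defines:
mean_close = TrailingMeanClose()
pipe = Pipeline(columns={
    "zscored": mean_close.zscore(),
    "in_top_100": mean_close.top(100),
})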
DwaraknathT/sparsity | [
"705f2cba074e6ab4f7655c6af98882773cd826bf",
"705f2cba074e6ab4f7655c6af98882773cd826bf"
] | [
"src/layers/transformers/sublayers.py",
"src/attacks/base.py"
] | [
"\"\"\" Define the sublayers in encoder/decoder layer \"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ScaledDotProductAttention(nn.Module):\n \"\"\" Scaled Dot-Product Attention \"\"\"\n\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n\n def forward(self, q, k, v, mask=None):\n # Scale based on the current shape\n attn = torch.matmul(q / (q.shape[-1] ** 0.5), k.transpose(2, 3))\n if mask is not None:\n attn = attn.masked_fill(mask == 0, -1e9)\n attn = F.softmax(attn, dim=-1)\n output = torch.matmul(attn, v)\n\n return output, attn\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\" Multi-Head Attention module \"\"\"\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n self.fc = nn.Linear(n_head * d_v, d_model, bias=False)\n\n self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)\n\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n residual = q\n\n # Pass through the pre-attention projection: b x lq x (n*dv)\n # Separate different heads: b x lq x n x dv\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n # Transpose for attention dot product: b x n x lq x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # For head axis broadcasting.\n\n q, attn = self.attention(q, k, v, mask=mask)\n\n # Transpose to move the head dimension back: b x lq x n x dv\n # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n q = self.dropout(self.fc(q))\n q += residual\n\n q = self.layer_norm(q)\n\n return q, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"\"\" A two-feed-forward-layer module \"\"\"\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Linear(d_in, d_hid) # position-wise\n self.w_2 = nn.Linear(d_hid, d_in) # position-wise\n self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n\n residual = x\n\n x = self.w_2(F.relu(self.w_1(x)))\n x = self.dropout(x)\n x += residual\n\n x = self.layer_norm(x)\n\n return x\n",
"import torch\n\n\nclass Attack(object):\n r\"\"\"\n Base class for all attacks.\n\n .. note::\n It automatically set device to the device where given model is.\n It temporarily changes the original model's training mode to `test`\n by `.eval()` only during an attack process.\n \"\"\"\n\n def __init__(self, name, model):\n r\"\"\"\n Initializes internal attack state.\n\n Arguments:\n name (str) : name of an attack.\n model (torch.nn.Module): model to attack.\n \"\"\"\n\n self.attack = name\n self.model = model\n self.model_name = str(model).split(\"(\")[0]\n\n self.training = model.training\n self.device = next(model.parameters()).device\n\n self._targeted = 1\n self._attack_mode = \"original\"\n self._return_type = \"float\"\n\n def forward(self, *input):\n r\"\"\"\n It defines the computation performed at every call.\n Should be overridden by all subclasses.\n \"\"\"\n raise NotImplementedError\n\n def set_attack_mode(self, mode):\n r\"\"\"\n Set the attack mode.\n\n Arguments:\n mode (str) : 'original' (DEFAULT)\n 'targeted' - Use input labels as targeted labels.\n 'least_likely' - Use least likely labels as targeted labels.\n\n \"\"\"\n if self._attack_mode is \"only_original\":\n raise ValueError(\n \"Changing attack mode is not supported in this attack method.\"\n )\n\n if mode == \"original\":\n self._attack_mode = \"original\"\n self._targeted = 1\n self._transform_label = self._get_label\n elif mode == \"targeted\":\n self._attack_mode = \"targeted\"\n self._targeted = -1\n self._transform_label = self._get_label\n elif mode == \"least_likely\":\n self._attack_mode = \"least_likely\"\n self._targeted = -1\n self._transform_label = self._get_least_likely_label\n else:\n raise ValueError(\n mode\n + \" is not a valid mode. [Options : original, targeted, least_likely]\"\n )\n\n def set_return_type(self, type):\n r\"\"\"\n Set the return type of adversarial images: `int` or `float`.\n\n Arguments:\n type (str) : 'float' or 'int'. (DEFAULT : 'float')\n\n \"\"\"\n if type == \"float\":\n self._return_type = \"float\"\n elif type == \"int\":\n self._return_type = \"int\"\n else:\n raise ValueError(type + \" is not a valid type. [Options : float, int]\")\n\n def save(self, save_path, data_loader, verbose=True):\n r\"\"\"\n Save adversarial images as torch.tensor from given torch.utils.data.DataLoader.\n\n Arguments:\n save_path (str) : save_path.\n data_loader (torch.utils.data.DataLoader) : data loader.\n verbose (bool) : True for displaying detailed information. 
(DEFAULT : True)\n\n \"\"\"\n self.model.eval()\n\n image_list = []\n label_list = []\n\n correct = 0\n total = 0\n\n total_batch = len(data_loader)\n\n for step, (images, labels) in enumerate(data_loader):\n adv_images = self.__call__(images, labels)\n\n image_list.append(adv_images.cpu())\n label_list.append(labels.cpu())\n\n if self._return_type == \"int\":\n adv_images = adv_images.float() / 255\n\n if verbose:\n outputs = self.model(adv_images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.to(self.device)).sum()\n\n acc = 100 * float(correct) / total\n print(\n \"- Save Progress : %2.2f %% / Accuracy : %2.2f %%\"\n % ((step + 1) / total_batch * 100, acc),\n end=\"\\r\",\n )\n\n x = torch.cat(image_list, 0)\n y = torch.cat(label_list, 0)\n torch.save((x, y), save_path)\n print(\"\\n- Save Complete!\")\n\n self._switch_model()\n\n def _transform_label(self, images, labels):\n r\"\"\"\n Function for changing the attack mode.\n \"\"\"\n return labels\n\n def _get_label(self, images, labels):\n r\"\"\"\n Function for changing the attack mode.\n Return input labels.\n \"\"\"\n return labels\n\n def _get_least_likely_label(self, images, labels):\n r\"\"\"\n Function for changing the attack mode.\n Return least likely labels.\n \"\"\"\n outputs = self.model(images)\n _, labels = torch.min(outputs.data, 1)\n labels = labels.detach_()\n return labels\n\n def _to_uint(self, images):\n r\"\"\"\n Function for changing the return type.\n Return images as int.\n \"\"\"\n return (images * 255).type(torch.uint8)\n\n def _switch_model(self):\n r\"\"\"\n Function for changing the training mode of the model.\n \"\"\"\n if self.training:\n self.model.train()\n else:\n self.model.eval()\n\n def __str__(self):\n info = self.__dict__.copy()\n\n del_keys = [\"model\", \"attack\"]\n\n for key in info.keys():\n if key[0] == \"_\":\n del_keys.append(key)\n\n for key in del_keys:\n del info[key]\n\n info[\"attack_mode\"] = self._attack_mode\n if info[\"attack_mode\"] == \"only_original\":\n info[\"attack_mode\"] = \"original\"\n\n info[\"return_type\"] = self._return_type\n\n return (\n self.attack\n + \"(\"\n + \", \".join(\"{}={}\".format(key, val) for key, val in info.items())\n + \")\"\n )\n\n def __call__(self, *input, **kwargs):\n self.model.eval()\n images = self.forward(*input, **kwargs)\n self._switch_model()\n\n if self._return_type == \"int\":\n images = self._to_uint(images)\n\n return images\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.nn.LayerNorm",
"torch.nn.Dropout",
"torch.matmul"
],
[
"torch.min",
"torch.save",
"torch.cat",
"torch.max"
]
] |
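The sublayers.py record above is a standard Transformer attention stack. A minimal shape check follows; it assumes the file is importable as `sublayers`, and the batch size, sequence length, and model width are arbitrary.

# Hedged sketch: exercises MultiHeadAttention from the record above.
# The module path `sublayers` and all tensor sizes are assumptions.
import torch
from sublayers import MultiHeadAttention

mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64)
x = torch.randn(2, 10, 512)                    # b x lq x d_model
mask = torch.ones(2, 1, 10, dtype=torch.bool)  # broadcasts over heads/queries
out, attn = mha(x, x, x, mask=mask)
print(out.shape, attn.shape)                   # (2, 10, 512), (2, 8, 10, 10)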
maldil/CPATMiner2.0 | [
"88b96a5af438a9c2ea2dab351cb8b210119132a2"
] | [
"AtomicASTChangeMining/src/test/resources/ASTConversion/sklearn/utils/tests/test_seq_dataset.py"
] | [
"# Author: Tom Dupre la Tour\n# Joan Massich <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\nimport pytest\nimport scipy.sparse as sp\nfrom numpy.testing import assert_array_equal\nfrom sklearn.utils._seq_dataset import (\n ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64)\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.utils._testing import assert_allclose\n\niris = load_iris()\nX64 = iris.data.astype(np.float64)\ny64 = iris.target.astype(np.float64)\nX_csr64 = sp.csr_matrix(X64)\nsample_weight64 = np.arange(y64.size, dtype=np.float64)\n\nX32 = iris.data.astype(np.float32)\ny32 = iris.target.astype(np.float32)\nX_csr32 = sp.csr_matrix(X32)\nsample_weight32 = np.arange(y32.size, dtype=np.float32)\n\n\ndef assert_csr_equal_values(current, expected):\n current.eliminate_zeros()\n expected.eliminate_zeros()\n expected = expected.astype(current.dtype)\n assert current.shape[0] == expected.shape[0]\n assert current.shape[1] == expected.shape[1]\n assert_array_equal(current.data, expected.data)\n assert_array_equal(current.indices, expected.indices)\n assert_array_equal(current.indptr, expected.indptr)\n\n\ndef make_dense_dataset_32():\n return ArrayDataset32(X32, y32, sample_weight32, seed=42)\n\n\ndef make_dense_dataset_64():\n return ArrayDataset64(X64, y64, sample_weight64, seed=42)\n\n\ndef make_sparse_dataset_32():\n return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,\n sample_weight32, seed=42)\n\n\ndef make_sparse_dataset_64():\n return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,\n sample_weight64, seed=42)\n\n\[email protected]('dataset_constructor', [\n make_dense_dataset_32,\n make_dense_dataset_64,\n make_sparse_dataset_32,\n make_sparse_dataset_64,\n])\ndef test_seq_dataset_basic_iteration(dataset_constructor):\n NUMBER_OF_RUNS = 5\n dataset = dataset_constructor()\n for _ in range(NUMBER_OF_RUNS):\n # next sample\n xi_, yi, swi, idx = dataset._next_py()\n xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))\n\n assert_csr_equal_values(xi, X_csr64[idx])\n assert yi == y64[idx]\n assert swi == sample_weight64[idx]\n\n # random sample\n xi_, yi, swi, idx = dataset._random_py()\n xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))\n\n assert_csr_equal_values(xi, X_csr64[idx])\n assert yi == y64[idx]\n assert swi == sample_weight64[idx]\n\n\[email protected]('make_dense_dataset,make_sparse_dataset', [\n (make_dense_dataset_32, make_sparse_dataset_32),\n (make_dense_dataset_64, make_sparse_dataset_64),\n])\ndef test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset):\n dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset()\n # not shuffled\n for i in range(5):\n _, _, _, idx1 = dense_dataset._next_py()\n _, _, _, idx2 = sparse_dataset._next_py()\n assert idx1 == i\n assert idx2 == i\n\n for i in [132, 50, 9, 18, 58]:\n _, _, _, idx1 = dense_dataset._random_py()\n _, _, _, idx2 = sparse_dataset._random_py()\n assert idx1 == i\n assert idx2 == i\n\n seed = 77\n dense_dataset._shuffle_py(seed)\n sparse_dataset._shuffle_py(seed)\n\n idx_next = [63, 91, 148, 87, 29]\n idx_shuffle = [137, 125, 56, 121, 127]\n for i, j in zip(idx_next, idx_shuffle):\n _, _, _, idx1 = dense_dataset._next_py()\n _, _, _, idx2 = sparse_dataset._next_py()\n assert idx1 == i\n assert idx2 == i\n\n _, _, _, idx1 = dense_dataset._random_py()\n _, _, _, idx2 = sparse_dataset._random_py()\n assert idx1 == j\n assert idx2 == j\n\n\[email protected]('make_dataset_32,make_dataset_64', [\n (make_dense_dataset_32, 
make_dense_dataset_64),\n (make_sparse_dataset_32, make_sparse_dataset_64),\n])\ndef test_fused_types_consistency(make_dataset_32, make_dataset_64):\n dataset_32, dataset_64 = make_dataset_32(), make_dataset_64()\n NUMBER_OF_RUNS = 5\n for _ in range(NUMBER_OF_RUNS):\n # next sample\n (xi_data32, _, _), yi32, _, _ = dataset_32._next_py()\n (xi_data64, _, _), yi64, _, _ = dataset_64._next_py()\n\n assert xi_data32.dtype == np.float32\n assert xi_data64.dtype == np.float64\n\n assert_allclose(xi_data64, xi_data32, rtol=1e-5)\n assert_allclose(yi64, yi32, rtol=1e-5)\n\n\n\n"
] | [
[
"sklearn.utils._seq_dataset.CSRDataset32",
"sklearn.utils._testing.assert_allclose",
"scipy.sparse.csr_matrix",
"numpy.testing.assert_array_equal",
"numpy.arange",
"sklearn.utils._seq_dataset.ArrayDataset64",
"sklearn.utils._seq_dataset.CSRDataset64",
"sklearn.utils._seq_dataset.ArrayDataset32",
"sklearn.datasets.load_iris"
]
] |
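The test above targets sklearn's private SequentialDataset wrappers. The construction it exercises looks roughly like the sketch below; these classes live in a private module, so the signatures may change between sklearn releases.

# Hedged sketch mirroring the dataset construction in the record above
# (private sklearn API; `w` here is a uniform sample-weight assumption).
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import load_iris
from sklearn.utils._seq_dataset import ArrayDataset64, CSRDataset64

iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
w = np.ones_like(y)

dense = ArrayDataset64(X, y, w, seed=42)
X_csr = sp.csr_matrix(X)
sparse = CSRDataset64(X_csr.data, X_csr.indptr, X_csr.indices, y, w, seed=42)
xi, yi, wi, idx = dense._next_py()  # one (sample, target, weight, index) draw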
veritas9872/fastMRI-kspace | [
"4c484b3183e9f06838b5ee108af283611c2e1e77"
] | [
"train/new_model_trainers/img_only.py"
] | [
"import torch\nfrom torch import nn, optim, multiprocessing\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard.writer import SummaryWriter\n\nfrom tqdm import tqdm\n\nfrom time import time\nfrom collections import defaultdict\n\nfrom utils.run_utils import get_logger\nfrom utils.train_utils import CheckpointManager, make_k_grid, make_img_grid, make_rss_slice, standardize_image\nfrom data.data_transforms import complex_abs\nfrom metrics.new_1d_ssim import SSIM\nfrom metrics.custom_losses import psnr, nmse\n\n\n# Send this somewhere else soon...\ndef get_class_name(obj):\n return 'None' if obj is None else str(obj.__class__).split(\"'\")[1]\n\n\nclass ModelTrainerIMG:\n \"\"\"\n Model trainer for real-valued image domain losses.\n This model trainer can accept k-space an semi-k-space, regardless of weighting.\n Both complex and real-valued image domain losses can be calculated.\n \"\"\"\n\n def __init__(self, args, model, optimizer, train_loader, val_loader, input_train_transform, input_val_transform,\n output_train_transform, output_val_transform, losses, scheduler=None):\n\n # Allow multiple processes to access tensors on GPU. Add checking for multiple continuous runs.\n if multiprocessing.get_start_method(allow_none=True) is None:\n multiprocessing.set_start_method(method='spawn')\n\n self.logger = get_logger(name=__name__, save_file=args.log_path / args.run_name)\n\n # Checking whether inputs are correct.\n assert isinstance(model, nn.Module), '`model` must be a Pytorch Module.'\n assert isinstance(optimizer, optim.Optimizer), '`optimizer` must be a Pytorch Optimizer.'\n assert isinstance(train_loader, DataLoader) and isinstance(val_loader, DataLoader), \\\n '`train_loader` and `val_loader` must be Pytorch DataLoader objects.'\n\n assert callable(input_train_transform) and callable(input_val_transform), \\\n 'input_transforms must be callable functions.'\n # I think this would be best practice.\n assert isinstance(output_train_transform, nn.Module) and isinstance(output_val_transform, nn.Module), \\\n '`output_train_transform` and `output_val_transform` must be Pytorch Modules.'\n\n # 'losses' is expected to be a dictionary.\n # Even composite losses should be a single loss module with a tuple as its output.\n losses = nn.ModuleDict(losses)\n\n if scheduler is not None:\n if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):\n self.metric_scheduler = True\n elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):\n self.metric_scheduler = False\n else:\n raise TypeError('`scheduler` must be a Pytorch Learning Rate Scheduler.')\n\n # Display interval of 0 means no display of validation images on TensorBoard.\n if args.max_images <= 0:\n self.display_interval = 0\n else:\n self.display_interval = int(len(val_loader.dataset) // (args.max_images * args.batch_size))\n\n self.manager = CheckpointManager(model, optimizer, mode='min', save_best_only=args.save_best_only,\n ckpt_dir=args.ckpt_path, max_to_keep=args.max_to_keep)\n\n # loading from checkpoint if specified.\n if vars(args).get('prev_model_ckpt'):\n self.manager.load(load_dir=args.prev_model_ckpt, load_optimizer=False)\n\n self.model = model\n self.optimizer = optimizer\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.input_train_transform = input_train_transform\n self.input_val_transform = input_val_transform\n self.output_train_transform = output_train_transform\n self.output_val_transform = output_val_transform\n self.losses = losses\n self.scheduler = scheduler\n 
self.writer = SummaryWriter(str(args.log_path))\n\n self.verbose = args.verbose\n self.num_epochs = args.num_epochs\n self.smoothing_factor = args.smoothing_factor\n self.shrink_scale = args.shrink_scale\n self.use_slice_metrics = args.use_slice_metrics\n\n # This part should get SSIM, not 1 - SSIM.\n self.ssim = SSIM(filter_size=7).to(device=args.device) # Needed to cache the kernel.\n\n # Logging all components of the Model Trainer.\n # Train and Val input and output transforms are assumed to use the same input transform class.\n self.logger.info(f'''\n Summary of Model Trainer Components:\n Model: {get_class_name(model)}.\n Optimizer: {get_class_name(optimizer)}.\n Input Transforms: {get_class_name(input_val_transform)}.\n Output Transform: {get_class_name(output_val_transform)}.\n Image Domain Loss: {get_class_name(losses['img_loss'])}.\n Learning-Rate Scheduler: {get_class_name(scheduler)}.\n ''') # This part has parts different for IMG and CMG losses!!\n\n def train_model(self):\n tic_tic = time()\n self.logger.info('Beginning Training Loop.')\n for epoch in range(1, self.num_epochs + 1): # 1 based indexing of epochs.\n tic = time() # Training\n train_epoch_loss, train_epoch_metrics = self._train_epoch(epoch=epoch)\n toc = int(time() - tic)\n self._log_epoch_outputs(epoch, train_epoch_loss, train_epoch_metrics, elapsed_secs=toc, training=True)\n\n tic = time() # Validation\n val_epoch_loss, val_epoch_metrics = self._val_epoch(epoch=epoch)\n toc = int(time() - tic)\n self._log_epoch_outputs(epoch, val_epoch_loss, val_epoch_metrics, elapsed_secs=toc, training=False)\n\n self.manager.save(metric=val_epoch_loss, verbose=True)\n\n if self.scheduler is not None:\n if self.metric_scheduler: # If the scheduler is a metric based scheduler, include metrics.\n self.scheduler.step(metrics=val_epoch_loss)\n else:\n self.scheduler.step()\n\n self.writer.close() # Flushes remaining data to TensorBoard.\n toc_toc = int(time() - tic_tic)\n self.logger.info(f'Finishing Training Loop. 
Total elapsed time: '\n f'{toc_toc // 3600} hr {(toc_toc // 60) % 60} min {toc_toc % 60} sec.')\n\n def _train_epoch(self, epoch):\n self.model.train()\n torch.autograd.set_grad_enabled(True)\n\n epoch_loss = list() # Appending values to list due to numerical underflow and NaN values.\n epoch_metrics = defaultdict(list)\n\n data_loader = enumerate(self.train_loader, start=1)\n if not self.verbose: # tqdm has to be on the outermost iterator to function properly.\n data_loader = tqdm(data_loader, total=len(self.train_loader.dataset)) # Should divide by batch size.\n\n for step, data in data_loader:\n # Data pre-processing is expected to have gradient calculations removed inside already.\n inputs, targets, extra_params = self.input_train_transform(*data)\n\n # 'recons' is a dictionary containing k-space, complex image, and real image reconstructions.\n recons, step_loss, step_metrics = self._train_step(inputs, targets, extra_params)\n epoch_loss.append(step_loss.detach()) # Perhaps not elegant, but underflow makes this necessary.\n\n # Gradients are not calculated so as to boost speed and remove weird errors.\n with torch.no_grad(): # Update epoch loss and metrics\n if self.use_slice_metrics:\n slice_metrics = self._get_slice_metrics(recons, targets, extra_params)\n step_metrics.update(slice_metrics)\n\n [epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]\n\n if self.verbose:\n self._log_step_outputs(epoch, step, step_loss, step_metrics, training=True)\n\n # Converted to scalar and dict with scalar values respectively.\n return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=True)\n\n def _train_step(self, inputs, targets, extra_params):\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n recons = self.output_train_transform(outputs, targets, extra_params)\n step_loss, step_metrics = self._step(recons, targets, extra_params)\n step_loss.backward()\n self.optimizer.step()\n return recons, step_loss, step_metrics\n\n def _val_epoch(self, epoch):\n self.model.eval()\n torch.autograd.set_grad_enabled(False)\n\n epoch_loss = list()\n epoch_metrics = defaultdict(list)\n\n # 1 based indexing for steps.\n data_loader = enumerate(self.val_loader, start=1)\n if not self.verbose:\n data_loader = tqdm(data_loader, total=len(self.val_loader.dataset))\n\n for step, data in data_loader:\n inputs, targets, extra_params = self.input_val_transform(*data)\n recons, step_loss, step_metrics = self._val_step(inputs, targets, extra_params)\n epoch_loss.append(step_loss.detach())\n\n if self.use_slice_metrics:\n slice_metrics = self._get_slice_metrics(recons, targets, extra_params)\n step_metrics.update(slice_metrics)\n\n [epoch_metrics[key].append(value.detach()) for key, value in step_metrics.items()]\n\n if self.verbose:\n self._log_step_outputs(epoch, step, step_loss, step_metrics, training=False)\n\n # Visualize images on TensorBoard.\n self._visualize_images(recons, targets, extra_params, epoch, step, training=False)\n\n # Converted to scalar and dict with scalar values respectively.\n return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=False)\n\n def _val_step(self, inputs, targets, extra_params):\n outputs = self.model(inputs)\n recons = self.output_val_transform(outputs, targets, extra_params)\n step_loss, step_metrics = self._step(recons, targets, extra_params)\n return recons, step_loss, step_metrics\n\n def _step(self, recons, targets, extra_params):\n step_loss = self.losses['img_loss'](recons['img_recons'], 
targets['img_targets'])\n\n # If img_loss is a tuple, it is expected to contain all its component losses as a dict in its second element.\n step_metrics = dict()\n if isinstance(step_loss, tuple):\n step_loss, step_metrics = step_loss\n\n acc = extra_params[\"acceleration\"]\n if step_metrics: # This has to be checked before anything is added to step_metrics.\n for key, value in step_metrics.items():\n step_metrics[f'acc_{acc}_{key}'] = value\n step_metrics[f'acc_{acc}_loss'] = step_loss\n return step_loss, step_metrics\n\n def _visualize_images(self, recons, targets, extra_params, epoch, step, training=False):\n mode = 'Training' if training else 'Validation'\n\n # This numbering scheme seems to have issues for certain numbers.\n # Please check cases when there is no remainder.\n if self.display_interval and (step % self.display_interval == 0):\n img_recon_grid = make_img_grid(recons['img_recons'], self.shrink_scale)\n\n # The delta image is obtained by subtracting at the complex image, not the real valued image.\n delta_image = complex_abs(targets['cmg_targets'] - recons['cmg_recons'])\n delta_img_grid = make_img_grid(delta_image, self.shrink_scale)\n\n acc = extra_params['acceleration']\n kwargs = dict(global_step=epoch, dataformats='HW')\n\n self.writer.add_image(f'{mode} Image Recons/{acc}/{step}', img_recon_grid, **kwargs)\n self.writer.add_image(f'{mode} Delta Image/{acc}/{step}', delta_img_grid, **kwargs)\n\n if 'kspace_recons' in recons:\n kspace_recon_grid = make_k_grid(recons['kspace_recons'], self.smoothing_factor, self.shrink_scale)\n self.writer.add_image(f'{mode} k-space Recons/{acc}/{step}', kspace_recon_grid, **kwargs)\n\n # Adding RSS images of reconstructions and targets.\n if 'rss_recons' in recons:\n recon_rss = standardize_image(recons['rss_recons'])\n delta_rss = standardize_image(make_rss_slice(delta_image))\n self.writer.add_image(f'{mode} RSS Recons/{acc}/{step}', recon_rss, **kwargs)\n self.writer.add_image(f'{mode} RSS Delta/{acc}/{step}', delta_rss, **kwargs)\n\n if 'semi_kspace_recons' in recons:\n semi_kspace_recon_grid = make_k_grid(\n recons['semi_kspace_recons'], self.smoothing_factor, self.shrink_scale)\n\n self.writer.add_image(f'{mode} semi-k-space Recons/{acc}/{step}', semi_kspace_recon_grid, **kwargs)\n\n if epoch == 1: # Maybe add input images too later on.\n img_target_grid = make_img_grid(targets['img_targets'], self.shrink_scale)\n self.writer.add_image(f'{mode} Image Targets/{acc}/{step}', img_target_grid, **kwargs)\n\n if 'kspace_targets' in targets:\n kspace_target_grid = \\\n make_k_grid(targets['kspace_targets'], self.smoothing_factor, self.shrink_scale)\n self.writer.add_image(f'{mode} k-space Targets/{acc}/{step}', kspace_target_grid, **kwargs)\n\n if 'img_inputs' in targets:\n # Not actually the input but what the input looks like as an image.\n img_grid = make_img_grid(targets['img_inputs'], self.shrink_scale)\n self.writer.add_image(f'{mode} Inputs as Images/{acc}/{step}', img_grid, **kwargs)\n\n if 'rss_targets' in targets:\n target_rss = standardize_image(targets['rss_targets'])\n self.writer.add_image(f'{mode} RSS Targets/{acc}/{step}', target_rss, **kwargs)\n\n if 'semi_kspace_targets' in targets:\n semi_kspace_target_grid = make_k_grid(\n targets['semi_kspace_targets'], self.smoothing_factor, self.shrink_scale)\n\n self.writer.add_image(f'{mode} semi-k-space Targets/{acc}/{step}',\n semi_kspace_target_grid, **kwargs)\n\n def _get_slice_metrics(self, recons, targets, extra_params):\n img_recons = recons['img_recons'].detach() # Just 
in case.\n img_targets = targets['img_targets'].detach()\n max_range = img_targets.max() - img_targets.min()\n\n slice_ssim = self.ssim(img_recons, img_targets)\n slice_psnr = psnr(img_recons, img_targets, data_range=max_range)\n slice_nmse = nmse(img_recons, img_targets)\n\n slice_metrics = {'slice/ssim': slice_ssim, 'slice/nmse': slice_nmse, 'slice/psnr': slice_psnr}\n\n if 'rss_recons' in recons:\n rss_recons = recons['rss_recons'].detach()\n rss_targets = targets['rss_targets'].detach()\n max_range = rss_targets.max() - rss_targets.min()\n\n rss_ssim = self.ssim(rss_recons, rss_targets)\n rss_psnr = psnr(rss_recons, rss_targets, data_range=max_range)\n rss_nmse = nmse(rss_recons, rss_targets)\n\n slice_metrics['rss/ssim'] = rss_ssim\n slice_metrics['rss/psnr'] = rss_psnr\n slice_metrics['rss/nmse'] = rss_nmse\n else:\n rss_ssim = rss_psnr = rss_nmse = 0\n\n # Additional metrics for separating between acceleration factors.\n if 'acceleration' in extra_params:\n acc = extra_params[\"acceleration\"]\n slice_metrics[f'slice_acc_{acc}/ssim'] = slice_ssim\n slice_metrics[f'slice_acc_{acc}/psnr'] = slice_psnr\n slice_metrics[f'slice_acc_{acc}/nmse'] = slice_nmse\n\n if 'rss_recons' in recons:\n slice_metrics[f'rss_acc_{acc}/ssim'] = rss_ssim\n slice_metrics[f'rss_acc_{acc}/psnr'] = rss_psnr\n slice_metrics[f'rss_acc_{acc}/nmse'] = rss_nmse\n\n return slice_metrics\n\n def _get_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, training=True):\n mode = 'Training' if training else 'Validation'\n num_slices = len(self.train_loader.dataset) if training else len(self.val_loader.dataset)\n\n # Checking for nan values.\n epoch_loss = torch.stack(epoch_loss)\n is_finite = torch.isfinite(epoch_loss)\n num_nans = (is_finite.size(0) - is_finite.sum()).item()\n\n if num_nans > 0:\n self.logger.warning(f'Epoch {epoch} {mode}: {num_nans} NaN values present in {num_slices} slices.'\n f'Turning on anomaly detection.')\n # Turn on anomaly detection for finding where the nan values are.\n torch.autograd.set_detect_anomaly(True)\n epoch_loss = torch.mean(epoch_loss[is_finite]).item()\n else:\n epoch_loss = torch.mean(epoch_loss).item()\n\n for key, value in epoch_metrics.items():\n epoch_metric = torch.stack(value)\n is_finite = torch.isfinite(epoch_metric)\n num_nans = (is_finite.size(0) - is_finite.sum()).item()\n\n if num_nans > 0:\n self.logger.warning(f'Epoch {epoch} {mode} {key}: {num_nans} NaN values present in {num_slices} slices.'\n f'Turning on anomaly detection.')\n epoch_metrics[key] = torch.mean(epoch_metric[is_finite]).item()\n else:\n epoch_metrics[key] = torch.mean(epoch_metric).item()\n\n return epoch_loss, epoch_metrics\n\n def _log_step_outputs(self, epoch, step, step_loss, step_metrics, training=True):\n mode = 'Training' if training else 'Validation'\n self.logger.info(f'Epoch {epoch:03d} Step {step:03d} {mode} loss: {step_loss.item():.4e}')\n for key, value in step_metrics.items():\n self.logger.info(f'Epoch {epoch:03d} Step {step:03d}: {mode} {key}: {value.item():.4e}')\n\n def _log_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, elapsed_secs, training=True):\n mode = 'Training' if training else 'Validation'\n self.logger.info(f'Epoch {epoch:03d} {mode}. loss: {epoch_loss:.4e}, '\n f'Time: {elapsed_secs // 60} min {elapsed_secs % 60} sec')\n self.writer.add_scalar(f'{mode} epoch_loss', scalar_value=epoch_loss, global_step=epoch)\n\n for key, value in epoch_metrics.items():\n self.logger.info(f'Epoch {epoch:03d} {mode}. 
{key}: {value:.4e}')\n # The tag separator matters here: '{mode}/...' is grouped under a section in TensorBoard, while '{mode}_...' stays a flat top-level tag.\n if 'loss' in key:\n self.writer.add_scalar(f'{mode}/epoch_{key}', scalar_value=value, global_step=epoch)\n else:\n self.writer.add_scalar(f'{mode}_epoch_{key}', scalar_value=value, global_step=epoch)\n\n if not training: # Record learning rate.\n for idx, group in enumerate(self.optimizer.param_groups, start=1):\n self.writer.add_scalar(f'learning_rate_{idx}', group['lr'], global_step=epoch)\n"
] | [
[
"torch.stack",
"torch.mean",
"torch.multiprocessing.get_start_method",
"torch.autograd.set_grad_enabled",
"torch.no_grad",
"torch.multiprocessing.set_start_method",
"torch.autograd.set_detect_anomaly",
"torch.nn.ModuleDict",
"torch.isfinite"
]
] |
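Note on the `_get_epoch_outputs` pattern in the trainer above: per-step losses are kept as tensors, stacked at epoch end, and non-finite entries are masked out with `torch.isfinite` before averaging, so one divergent step cannot poison the epoch statistics. A minimal, self-contained sketch of that idea (the helper name `finite_mean` is mine, not from the file):

```python
import torch

def finite_mean(step_values):
    """Average a list of 0-dim tensors while ignoring NaN/Inf entries.

    Mirrors the masking in `_get_epoch_outputs`: stack the per-step
    values, build a finiteness mask, and average only the finite ones.
    Returns (mean, number_of_dropped_entries).
    """
    stacked = torch.stack(step_values)            # shape: (num_steps,)
    finite = torch.isfinite(stacked)              # bool mask of valid entries
    num_bad = int((~finite).sum().item())         # steps that produced NaN/Inf
    mean = stacked[finite].mean().item() if num_bad else stacked.mean().item()
    return mean, num_bad

# One diverged step should be dropped, not averaged in.
losses = [torch.tensor(0.5), torch.tensor(float('nan')), torch.tensor(0.3)]
print(finite_mean(losses))  # (0.4, 1), up to float rounding
```

If every entry is non-finite the masked mean is empty and itself returns NaN; the trainer sidesteps this by also switching on `torch.autograd.set_detect_anomaly(True)` as soon as NaNs appear, so the offending backward pass can be located.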
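The constructor's scheduler check is another reusable idiom: in the PyTorch versions this file targets, `ReduceLROnPlateau` does not inherit from `_LRScheduler`, hence the explicit `isinstance` branch, and it must be stepped with the validation metric while ordinary schedulers are stepped once per epoch with no arguments. A hedged sketch of that dispatch, with a toy model and a synthetic validation loss standing in for real training:

```python
import torch
from torch import nn, optim

model = nn.Linear(4, 1)
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=1)

# Same dispatch the trainer caches as `self.metric_scheduler`.
metric_scheduler = isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau)

for epoch in range(1, 4):
    val_loss = 1.0 / epoch  # synthetic validation loss for the demo
    if metric_scheduler:
        scheduler.step(metrics=val_loss)  # plateau scheduler needs the metric
    else:
        scheduler.step()                  # ordinary schedulers step blindly
```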
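Relatedly, the epoch-logging code above distinguishes `{mode}/...` from `{mode}_...` tags. TensorBoard groups scalar charts by the prefix before the first `/`, so the choice of separator decides whether plots share a collapsible section or sit at the top level. A tiny demonstration (the `runs/tag_demo` directory is an arbitrary choice, not from the file):

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/tag_demo')  # arbitrary log directory
# 'Validation/epoch_loss' lands inside a collapsible 'Validation' section;
# 'Validation_epoch_ssim' shows up as a flat, top-level tag instead.
writer.add_scalar('Validation/epoch_loss', 0.123, global_step=1)
writer.add_scalar('Validation_epoch_ssim', 0.9, global_step=1)
writer.close()
```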
LemonNoel/Paddle | [
"1cb511d1488bb86ebb587330902840cb01c79c0d"
] | [
"python/paddle/tensor/math.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nmath functions\n\"\"\"\nfrom __future__ import print_function\nimport numpy as np\n\nfrom paddle.common_ops_import import VarDesc\nfrom paddle.common_ops_import import dygraph_only\nfrom paddle.common_ops_import import OpProtoHolder\nfrom paddle.common_ops_import import templatedoc\nfrom paddle.common_ops_import import dygraph_utils\n\nfrom paddle.tensor import cast\nfrom paddle.tensor.attribute import _complex_to_real_dtype\nimport paddle\nfrom ..fluid import layers\nfrom ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_\nfrom ..fluid.layer_helper import LayerHelper\nfrom ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype\nfrom ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn\nfrom ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only\n\n# TODO: define math functions\n# yapf: disable\nfrom ..fluid.layers import abs # noqa: F401\nfrom ..fluid.layers import acos # noqa: F401\nfrom ..fluid.layers import asin # noqa: F401\nfrom ..fluid.layers import ceil # noqa: F401\nfrom ..fluid.layers import ceil_ # noqa: F401\nfrom ..fluid.layers import cos # noqa: F401\nfrom ..fluid.layers import tan # noqa: F401\nfrom ..fluid.layers import sinh # noqa: F401\nfrom ..fluid.layers import cosh # noqa: F401\nfrom ..fluid.layers import exp # noqa: F401\nfrom ..fluid.layers import exp_ # noqa: F401\nfrom ..fluid.layers import expm1 # noqa: F401\nfrom ..fluid.layers import floor # noqa: F401\nfrom ..fluid.layers import floor_ # noqa: F401\nfrom ..fluid.layers import log # noqa: F401\nfrom ..fluid.layers import reciprocal # noqa: F401\nfrom ..fluid.layers import reciprocal_ # noqa: F401\nfrom ..fluid.layers import round # noqa: F401\nfrom ..fluid.layers import round_ # noqa: F401\nfrom ..fluid.layers import rsqrt # noqa: F401\nfrom ..fluid.layers import rsqrt_ # noqa: F401\nfrom ..fluid.layers import scale # noqa: F401\nfrom ..fluid.layers import square # noqa: F401\nfrom ..fluid.layers import stanh # noqa: F401\nfrom ..fluid.layers import atan # noqa: F401\nfrom ..fluid.layers import erf # noqa: F401\nfrom ..fluid.layers import sqrt # noqa: F401\nfrom ..fluid.layers import sqrt_ # noqa: F401\nfrom ..fluid.layers import sin # noqa: F401\nfrom ..fluid.layers import lgamma # noqa: F401\n\nfrom ..fluid.layers import multiplex # noqa: F401\nfrom ..fluid import layers\nfrom paddle import _C_ops\n\n__all__ = []\n\n_supported_int_dtype_ = [\n VarDesc.VarType.UINT8,\n VarDesc.VarType.INT8,\n VarDesc.VarType.INT16,\n VarDesc.VarType.INT32,\n VarDesc.VarType.INT64,\n]\n\n_supported_float_dtype_ = [\n VarDesc.VarType.FP32,\n VarDesc.VarType.FP64,\n]\n\n\n@inplace_apis_in_dygraph_only\ndef scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):\n \"\"\"\n Inplace version of ``scale`` API, the output Tensor will be 
inplaced with input ``x``.\n Please refer to :ref:`api_tensor_scale`.\n \"\"\"\n _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale\n return _C_ops.scale_(x, 'scale',\n float(_scale), 'bias',\n float(bias), 'bias_after_scale', bias_after_scale)\n\n\ndef pow(x, y, name=None):\n \"\"\"\n Compute the power of tensor elements. The equation is:\n\n .. math::\n out = x^{y} \n\n **Note**:\n ``paddle.pow`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n\n Args:\n x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.\n y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n N-D Tensor. A location into which the result is stored. Its dimension and data type are the same as `x`.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([1, 2, 3], dtype='float32')\n\n # example 1: y is a float or int\n res = paddle.pow(x, 2)\n print(res)\n # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [1., 4., 9.])\n res = paddle.pow(x, 2.5)\n print(res)\n # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [1. , 5.65685415 , 15.58845711])\n\n # example 2: y is a Tensor\n y = paddle.to_tensor([2], dtype='float32')\n res = paddle.pow(x, y)\n print(res)\n # Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [1., 4., 9.])\n\n \"\"\"\n # in dynamic graph mode\n if in_dygraph_mode():\n if isinstance(y, (int, float)):\n return _C_ops.pow(x, 'factor', y)\n elif isinstance(y, (paddle.Tensor, Variable)):\n return _elementwise_op_in_dygraph(\n x, y, axis=-1, act=None, op_name='elementwise_pow')\n else:\n raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))\n # in static graph mode\n else:\n if isinstance(y, (int, float)):\n helper = LayerHelper('pow', **locals())\n inputs = {'X': x}\n attrs = {'factor': y}\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)\n return out\n elif isinstance(y, (paddle.Tensor, Variable)):\n # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here\n helper = LayerHelper('elementwise_pow', **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n return _elementwise_op(LayerHelper('elementwise_pow', **locals()))\n else:\n raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))\n\n\n\n@dygraph_only\ndef _elementwise_op_in_dygraph(x,\n y,\n axis=-1,\n act=None,\n use_mkldnn=False,\n op_name=None):\n op = getattr(_C_ops, op_name)\n out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)\n\n return dygraph_utils._append_activation_in_dygraph(\n out, act, use_mkldnn=use_mkldnn)\n\n\ndef _elementwise_op(helper):\n op_type = helper.layer_type\n original_op_type = helper.kwargs.get('original_op_type', op_type)\n x = helper.kwargs.get('x', None)\n y = helper.kwargs.get('y', None)\n\n out = helper.kwargs.get('out', None)\n\n assert x is not None, 'x cannot be None in {}'.format(original_op_type)\n assert y is not None, 'y cannot be None in {}'.format(original_op_type)\n check_variable_and_dtype(\n x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],\n original_op_type)\n 
check_variable_and_dtype(\n y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],\n original_op_type)\n\n axis = helper.kwargs.get('axis', -1)\n use_mkldnn = helper.kwargs.get('use_mkldnn', False)\n name = helper.kwargs.get('name', None)\n\n if out is None:\n if name is None:\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n else:\n out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)\n\n helper.append_op(\n type=op_type,\n inputs={'X': x,\n 'Y': y},\n outputs={'Out': out},\n attrs={'axis': axis,\n 'use_mkldnn': use_mkldnn})\n return helper.append_activation(out)\n\n\ndef add(x, y, name=None):\n \"\"\"\n Examples:\n\n .. code-block:: python\n\n import paddle\n x = paddle.to_tensor([2, 3, 4], 'float64')\n y = paddle.to_tensor([1, 5, 2], 'float64')\n z = paddle.add(x, y)\n print(z) # [3., 8., 6. ]\n\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.elementwise_add(x, y)\n\n return _elementwise_op(LayerHelper('elementwise_add', **locals()))\n\n\n@inplace_apis_in_dygraph_only\ndef add_(x, y, name=None):\n \"\"\"\n Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_tensor_add`.\n \"\"\"\n op_type = 'elementwise_add_'\n axis = -1\n\n out_shape = broadcast_shape(x.shape, y.shape)\n if out_shape != x.shape:\n raise ValueError(\"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.\".format(out_shape, x.shape))\n\n out = _elementwise_op_in_dygraph(\n x, y, axis=axis, op_name=op_type)\n return out\n\n\ndef subtract(x, y, name=None):\n \"\"\"\n Substract two tensors element-wise. The equation is:\n\n .. math::\n out = x - y\n\n **Note**:\n ``paddle.subtract`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n\n x = paddle.to_tensor([[1, 2], [7, 8]])\n y = paddle.to_tensor([[5, 6], [3, 4]])\n res = paddle.subtract(x, y)\n print(res)\n # [[-4, -4],\n # [4, 4]]\n\n x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])\n y = paddle.to_tensor([1, 0, 4])\n res = paddle.subtract(x, y)\n print(res)\n # [[[ 0, 2, -1],\n # [ 0, 2, -1]]]\n\n x = paddle.to_tensor([2, np.nan, 5], dtype='float32')\n y = paddle.to_tensor([1, 4, np.nan], dtype='float32')\n res = paddle.subtract(x, y)\n print(res)\n # [ 1., nan, nan]\n\n x = paddle.to_tensor([5, np.inf, -np.inf], dtype='float64')\n y = paddle.to_tensor([1, 4, 5], dtype='float64')\n res = paddle.subtract(x, y)\n print(res)\n # [ 4., inf., -inf.]\n\n \"\"\"\n op_type = 'elementwise_sub'\n axis = -1\n act = None\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name=op_type)\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\n\n@inplace_apis_in_dygraph_only\ndef subtract_(x, y, name=None):\n \"\"\"\n Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_tensor_subtract`.\n \"\"\"\n axis = -1\n act = None\n\n out_shape = broadcast_shape(x.shape, y.shape)\n if out_shape != x.shape:\n raise ValueError(\"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.\".format(out_shape, x.shape))\n\n out = _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name='elementwise_sub_')\n return out\n\n\ndef divide(x, y, name=None):\n \"\"\"\n Divide two tensors element-wise. The equation is:\n\n .. math::\n out = x / y\n\n **Note**:\n ``paddle.divide`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([2, 3, 4], dtype='float64')\n y = paddle.to_tensor([1, 5, 2], dtype='float64')\n z = paddle.divide(x, y)\n print(z) # [2., 0.6, 2.]\n\n \"\"\"\n op_type = 'elementwise_div'\n axis = -1\n act = None\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name=op_type)\n\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\n\ndef floor_divide(x, y, name=None):\n \"\"\"\n Floor divide two tensors element-wise. The equation is:\n\n .. math::\n out = x // y\n\n **Note**:\n ``paddle.floor_divide`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be int32, int64.\n y (Tensor): the input tensor, it's data type should be int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. 
It's dimension equals with $x$.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([2, 3, 8, 7])\n y = paddle.to_tensor([1, 5, 3, 3])\n z = paddle.floor_divide(x, y)\n print(z) # [2, 0, 2, 2]\n\n \"\"\"\n op_type = 'elementwise_floordiv'\n axis = -1\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, op_name=op_type)\n\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\n\ndef remainder(x, y, name=None):\n r\"\"\"\n Mod two tensors element-wise. The equation is:\n\n .. math::\n\n out = x \\% y\n\n **Note**:\n ``paddle.remainder`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([2, 3, 8, 7])\n y = paddle.to_tensor([1, 5, 3, 3])\n z = paddle.remainder(x, y)\n print(z) # [0, 3, 2, 1]\n\n \"\"\"\n op_type = 'elementwise_mod'\n axis = -1\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, op_name=op_type)\n\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\n\nmod = remainder # noqa: F841\nfloor_mod = remainder # noqa: F841\n\n\ndef multiply(x, y, name=None):\n \"\"\"\n multiply two tensors element-wise. The equation is:\n\n .. math::\n out = x * y\n\n **Note**:\n ``paddle.multiply`` supports broadcasting. If you would like to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.\n y (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1, 2], [3, 4]])\n y = paddle.to_tensor([[5, 6], [7, 8]])\n res = paddle.multiply(x, y)\n print(res) # [[5, 12], [21, 32]]\n\n x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])\n y = paddle.to_tensor([2])\n res = paddle.multiply(x, y)\n print(res) # [[[2, 4, 6], [2, 4, 6]]]\n\n \"\"\"\n op_type = 'elementwise_mul'\n act = None\n axis = -1\n\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name=op_type)\n\n if x.dtype != y.dtype:\n raise TypeError(\n 'Input tensors must be same type, but received type of x: %s, type of y: %s '\n % (x.dtype, y.dtype))\n\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\ndef maximum(x, y, name=None):\n \"\"\"\n Compare two tensors and returns a new tensor containing the element-wise maxima. 
The equation is:\n\n .. math::\n out = max(x, y)\n\n **Note**:\n ``paddle.maximum`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. code-block:: python\n\n import numpy as np\n import paddle\n\n x = paddle.to_tensor([[1, 2], [7, 8]])\n y = paddle.to_tensor([[3, 4], [5, 6]])\n res = paddle.maximum(x, y)\n print(res)\n # [[3, 4],\n # [7, 8]]\n\n x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])\n y = paddle.to_tensor([3, 0, 4])\n res = paddle.maximum(x, y)\n print(res)\n # [[3, 2, 4],\n # [3, 2, 4]]\n\n x = paddle.to_tensor([2, 3, 5], dtype='float32')\n y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')\n res = paddle.maximum(x, y)\n print(res)\n # [ 2., nan, nan]\n\n x = paddle.to_tensor([5, 3, np.inf], dtype='float32')\n y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')\n res = paddle.maximum(x, y)\n print(res)\n # [ 5., 3., inf.]\n \"\"\"\n op_type = 'elementwise_max'\n axis = -1\n act = None\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name=op_type)\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\ndef minimum(x, y, name=None):\n \"\"\"\n Compare two tensors and returns a new tensor containing the element-wise minima. The equation is:\n\n .. math::\n out = min(x, y)\n\n **Note**:\n ``paddle.minimum`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .\n\n Args:\n x (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n y (Tensor): the input tensor, it's data type should be float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n N-D Tensor. A location into which the result is stored. If x, y have different shapes and are \"broadcastable\", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.\n\n Examples:\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n\n x = paddle.to_tensor([[1, 2], [7, 8]])\n y = paddle.to_tensor([[3, 4], [5, 6]])\n res = paddle.minimum(x, y)\n print(res)\n # [[1, 2],\n # [5, 6]]\n\n x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])\n y = paddle.to_tensor([3, 0, 4])\n res = paddle.minimum(x, y)\n print(res)\n # [[[1, 0, 3],\n # [1, 0, 3]]]\n\n x = paddle.to_tensor([2, 3, 5], dtype='float32')\n y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')\n res = paddle.minimum(x, y)\n print(res)\n # [ 1., nan, nan]\n\n x = paddle.to_tensor([5, 3, np.inf], dtype='float64')\n y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')\n res = paddle.minimum(x, y)\n print(res)\n # [ 1., -inf., 5.]\n \"\"\"\n op_type = 'elementwise_min'\n axis = -1\n act = None\n if in_dygraph_mode():\n return _elementwise_op_in_dygraph(\n x, y, axis=axis, act=act, op_name=op_type)\n return _elementwise_op(LayerHelper(op_type, **locals()))\n\nfor func in [\n add,\n multiply\n]:\n proto_dict = {'add': 'elementwise_add', 'multiply': 'elementwise_mul'}\n op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])\n\n additional_args_lines = [\n \"name (string, optional): Name of the output. \\\n Default is None. It's used to print debug info for developers. Details: \\\n :ref:`api_guide_Name` \"\n ]\n\n func.__doc__ = _generate_doc_string_(\n op_proto,\n additional_args_lines=additional_args_lines,\n skip_attrs_set={\"x_data_format\", \"y_data_format\", \"axis\",\n \"use_quantizer\", \"mkldnn_data_type\", \"Scale_x\", \"Scale_y\", \"Scale_out\"\n }) + \"\"\"\\n\"\"\" + str(func.__doc__)\n\n\ndef sum(x, axis=None, dtype=None, keepdim=False, name=None):\n \"\"\"\n Computes the sum of tensor elements over the given dimension.\n\n Args:\n x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.\n axis (int|list|tuple, optional): The dimensions along which the sum is performed. If\n :attr:`None`, sum all elements of :attr:`x` and return a\n Tensor with a single element, otherwise must be in the\n range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,\n the dimension to reduce is :math:`rank + axis[i]`.\n dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype\n of output is the same as input Tensor `x`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result Tensor will have one fewer dimension\n than the :attr:`x` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: Results of summation operation on the specified axis of input Tensor `x`,\n if `x.dtype='bool'`, `x.dtype='int32'`, it's data type is `'int64'`, \n otherwise it's data type is the same as `x`.\n\n Raises:\n TypeError: The type of :attr:`axis` must be int, list or tuple.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n # x is a Tensor with following elements:\n # [[0.2, 0.3, 0.5, 0.9]\n # [0.1, 0.2, 0.6, 0.7]]\n # Each example is followed by the corresponding output tensor.\n x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],\n [0.1, 0.2, 0.6, 0.7]])\n out1 = paddle.sum(x) # [3.5]\n out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6]\n out3 = paddle.sum(x, axis=-1) # [1.9, 1.6]\n out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]]\n\n # y is a Tensor with shape [2, 2, 2] and elements as below:\n # [[[1, 2], [3, 4]],\n # [[5, 6], [7, 8]]]\n # Each example is followed by the corresponding output tensor.\n y = paddle.to_tensor([[[1, 2], [3, 4]], \n [[5, 6], [7, 8]]])\n out5 = paddle.sum(y, axis=[1, 2]) # [10, 26]\n out6 = paddle.sum(y, axis=[0, 1]) # [16, 20]\n \n # x is a Tensor with following elements:\n # [[True, True, True, True]\n # [False, False, False, False]]\n # Each example is followed by the corresponding output tensor.\n x = paddle.to_tensor([[True, True, True, True],\n [False, False, False, False]])\n out7 = paddle.sum(x) # [4]\n out8 = paddle.sum(x, axis=0) # [1, 1, 1, 1]\n out9 = paddle.sum(x, axis=1) # [4, 0]\n \"\"\"\n if axis is not None and not isinstance(axis, (list, tuple)):\n axis = [axis]\n\n if not axis:\n reduce_all_flag = True\n else:\n if len(axis) == len(x.shape):\n reduce_all_flag = True\n else:\n reduce_all_flag = False\n\n def get_dtype(x, dtype):\n if dtype is not None:\n return (True, dtype)\n src_type = convert_dtype(x.dtype)\n if src_type in ['bool','int32', 'int64']:\n return (True, 'int64')\n return (False, src_type)\n\n dtype_flag, dtype = get_dtype(x, dtype)\n if in_dygraph_mode():\n axis = axis if axis != None and axis != [] else [0]\n if dtype_flag:\n return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all_flag, 'in_dtype',\n x.dtype, 'out_dtype',\n convert_np_dtype_to_dtype_(dtype))\n else:\n return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all_flag)\n\n attrs = {\n 'dim': axis if axis != None and axis != [] and axis != () else [0],\n 'keep_dim': keepdim,\n 'reduce_all': reduce_all_flag\n }\n\n if dtype_flag:\n attrs.update({\n 'in_dtype': x.dtype,\n 'out_dtype': convert_np_dtype_to_dtype_(dtype)\n })\n\n check_variable_and_dtype(\n x, 'x', ['bool', 'float16', 'float32', 'float64',\n 'int32', 'int64', 'complex64', 'complex128',\n u'bool', u'float16', u'float32', u'float64',\n u'int32', u'int64', u'complex64', u'complex128'], 'sum')\n\n check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum')\n\n helper = LayerHelper('sum', **locals())\n if dtype_flag:\n out = helper.create_variable_for_type_inference(\n dtype=convert_np_dtype_to_dtype_(dtype))\n else:\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='reduce_sum',\n inputs={'X': x},\n outputs={'Out': out},\n attrs=attrs)\n return out\n\n\n@templatedoc(op_type=\"sum\")\ndef add_n(inputs, name=None):\n \"\"\"\n This OP is used to sum one or more Tensor of the input.\n \n For example:\n\n .. 
code-block:: text\n \n Case 1:\n\n Input:\n input.shape = [2, 3]\n input = [[1, 2, 3],\n [4, 5, 6]]\n\n Output:\n output.shape = [2, 3]\n output = [[1, 2, 3],\n [4, 5, 6]]\n\n Case 2:\n \n Input:\n First input:\n input1.shape = [2, 3]\n Input1 = [[1, 2, 3],\n [4, 5, 6]]\n\n The second input:\n input2.shape = [2, 3]\n input2 = [[7, 8, 9],\n [10, 11, 12]]\n\n Output:\n output.shape = [2, 3]\n output = [[8, 10, 12],\n [14, 16, 18]]\n\n Args:\n inputs (Tensor|list[Tensor]|tuple[Tensor]): A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.\n Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor, the sum of input :math:`inputs` , its shape and data types are consistent with :math:`inputs`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')\n input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')\n output = paddle.add_n([input0, input1])\n # [[8., 10., 12.], \n # [14., 16., 18.]]\n \"\"\"\n if in_dygraph_mode():\n if isinstance(inputs, Variable):\n inputs = [inputs]\n return _C_ops.sum(inputs, 'use_mkldnn', False)\n\n helper = LayerHelper('add_n', **locals())\n check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')\n if isinstance(inputs, list) or isinstance(inputs, tuple):\n if len(inputs) > 0:\n for input in inputs:\n check_variable_and_dtype(input, \"inputs\", \\\n ['float32', 'float64', 'int32', 'int64'], 'add_n')\n else:\n check_variable_and_dtype(inputs, \"inputs\", \\\n ['float32', 'float64', 'int32', 'int64'], 'add_n')\n\n\n out = helper.create_variable_for_type_inference(\n dtype=helper.input_dtype('inputs'))\n helper.append_op(\n type='sum',\n inputs={'X': inputs},\n outputs={'Out': out},\n attrs={'use_mkldnn': False})\n\n return out\n\n\ndef trunc(input, name=None):\n '''\n This API is used to returns a new tensor with the truncated integer values of input.\n \n Args:\n input (Tensor): The input tensor, it's data type should be int32, int64, float32, float64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n Tensor: The output Tensor of trunc.\n \n Examples:\n .. 
code-block:: python\n\n import paddle\n\n input = paddle.rand([2,2],'float32')\n print(input)\n # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[0.02331470, 0.42374918],\n # [0.79647720, 0.74970269]])\n\n output = paddle.trunc(input)\n print(output)\n # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[0., 0.],\n # [0., 0.]]))\n '''\n if in_dygraph_mode():\n return _C_ops.trunc(input)\n else:\n inputs = {\"X\": input}\n attrs = {}\n\n helper = LayerHelper(\"trunc\", **locals())\n check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc')\n out = helper.create_variable_for_type_inference(dtype=input.dtype)\n\n helper.append_op(\n type=\"trunc\", inputs=inputs, attrs=attrs, outputs={\"Out\": out})\n return out\n\n\n\ndef mm(input, mat2, name=None):\n \"\"\"\n\n Applies matrix multiplication to two tensors.\n\n Currently, the input tensors' rank can be any, but when the rank of any\n inputs is bigger than 3, this two inputs' rank should be equal.\n\n\n Also note that if the raw tensor :math:`x` or :math:`mat2` is rank-1 and\n nontransposed, the prepended or appended dimension :math:`1` will be\n removed after matrix multiplication.\n\n Args:\n input (Tensor): The input tensor which is a Tensor.\n mat2 (Tensor): The input tensor which is a Tensor.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: The product Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')\n mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')\n out = paddle.mm(input, mat2)\n print(out)\n # [[11., 14., 17., 20.],\n # [23., 30., 37., 44.],\n # [35., 46., 57., 68.]])\n\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.matmul_v2(input, mat2)\n\n def __check_input(x, y):\n var_names = {'x': x, 'y': y}\n for name, val in var_names.items():\n check_variable_and_dtype(val, name,\n ['float16', 'float32', 'float64'], 'mm')\n x_shape = list(x.shape)\n y_shape = list(y.shape)\n if len(x_shape) == 1:\n x_shape = [1] + x_shape\n if len(y_shape) == 1:\n y_shape = y_shape + [1]\n\n # check the inner 2 dimensions\n if x_shape[-1] != y_shape[-2]:\n if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):\n raise ValueError(\n \"After performing an optional transpose, Input X's width should be \"\n \"equal to Y's width for multiplication \"\n \"prerequisites. But received X's shape: %s, Y's shape: %s\\n\"\n % (x_shape, y_shape))\n\n if len(y_shape) > 2 and len(x_shape) > 2:\n for i, dim_x in enumerate(x_shape[:-2]):\n # don't check neg shape\n if dim_x < 0 or y_shape[i] < 0:\n continue\n if dim_x != y_shape[i]:\n raise ValueError(\n \"When the matrix is larger than 2 dimensions, the higher \"\n \"dimensional values of the two matrices need to be equal. \"\n \"But received x_shape[%d] != y_shape[%d]. 
X's shape: %s, \"\n \"Y's shape: %s.\\n\" % (i, i, x_shape, y_shape))\n\n __check_input(input, mat2)\n\n helper = LayerHelper('mm', **locals())\n out = helper.create_variable_for_type_inference(dtype=input.dtype)\n helper.append_op(\n type='matmul_v2', inputs={'X': input,\n 'Y': mat2}, outputs={'Out': out})\n return out\n\n\ndef addmm(input, x, y, beta=1.0, alpha=1.0, name=None):\n \"\"\"\n **addmm**\n\n This operator is used to perform matrix multiplication for input $x$ and $y$.\n $input$ is added to the final result.\n The equation is:\n\n .. math::\n Out = alpha * x * y + beta * input\n\n $Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.\n\n Args:\n input (Tensor): The input Tensor to be added to the final result.\n x (Tensor): The first input Tensor for matrix multiplication.\n y (Tensor): The second input Tensor for matrix multiplication.\n beta (float): Coefficient of $input$.\n alpha (float): Coefficient of $x*y$.\n name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.\n\n Returns:\n Tensor: The output Tensor of addmm op.\n\n Examples:\n .. code-block:: python\n \n import paddle\n\n x = paddle.ones([2,2])\n y = paddle.ones([2,2])\n input = paddle.ones([2,2])\n\n out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )\n\n print(out)\n # [[10.5 10.5]\n # [10.5 10.5]]\n \"\"\"\n input_shape = input.shape\n x_shape = x.shape\n y_shape = y.shape\n if not len(input_shape) == len(x_shape) == len(y_shape) == 2:\n raise ValueError(\"The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}\".format(input_shape, x_shape, y_shape))\n if input_shape[0] != x_shape[0]:\n if input_shape[0] != 1:\n raise ValueError( \"When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}\".format(input_shape[0]))\n if input_shape[1] != y_shape[1] and input_shape[1] != 1:\n raise ValueError( \"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}\".format(input_shape[1]))\n if input_shape[1] != y_shape[1]:\n if input_shape[1] != 1:\n raise ValueError( \"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}\".format(input_shape[1]))\n if input_shape[0] != x_shape[0] and input_shape[0] != 1:\n raise ValueError( \"When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}\".format(input_shape[0]))\n if x_shape[1] != y_shape[0]:\n raise ValueError(\"The input Variable x's width must be equal with Variable y' height. 
But received x's shape = {}, y's shape = {}.\".format(x_shape, y_shape))\n\n\n\n if in_dygraph_mode():\n out = _C_ops.addmm(input, x, y, \"Alpha\", alpha, \"Beta\", beta)\n return out\n\n inputs = {'Input': input, \"X\": x, \"Y\": y}\n attrs = {'Alpha': alpha, 'Beta': beta}\n\n helper = LayerHelper(\"addmm\", **locals())\n check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')\n check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')\n check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type=\"addmm\", inputs=inputs, attrs=attrs, outputs={\"Out\": out})\n return out\n\n\ndef logsumexp(x, axis=None, keepdim=False, name=None):\n r\"\"\"\n This OP calculates the log of the sum of exponentials of ``x`` along ``axis`` .\n\n .. math::\n logsumexp(x) = \\\\log\\\\sum exp(x)\n\n Args:\n x (Tensor): The input Tensor with data type float32 or float64, which \n have no more than 4 dimensions.\n axis (int|list|tuple, optional): The axis along which to perform\n logsumexp calculations. ``axis`` should be int, list(int) or\n tuple(int). If ``axis`` is a list/tuple of dimension(s), logsumexp\n is calculated along all element(s) of ``axis`` . ``axis`` or\n element(s) of ``axis`` should be in range [-D, D), where D is the\n dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is\n less than 0, it works the same way as :math:`axis + D` . If\n ``axis`` is None, logsumexp is calculated along all elements of\n ``x``. Default is None.\n keepdim (bool, optional): Whether to reserve the reduced dimension(s)\n in the output Tensor. If ``keep_dim`` is True, the dimensions of\n the output Tensor is the same as ``x`` except in the reduced\n dimensions(it is of size 1 in this case). Otherwise, the shape of\n the output Tensor is squeezed in ``axis`` . Default is False.\n name (str, optional): Name for the operation (optional, default is None).\n For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, results of logsumexp along ``axis`` of ``x``, with the same data\n type as ``x``.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])\n out1 = paddle.logsumexp(x) # [3.4691226]\n out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602]\n\n \"\"\"\n if isinstance(axis, int):\n axis = [axis]\n reduce_all = True if axis is None \\\n or len(axis)==0 \\\n or len(axis) == len(x.shape) else False\n if axis is None or len(axis) == 0:\n axis = [0]\n\n if in_dygraph_mode():\n return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)\n\n check_variable_and_dtype(x, 'x',\n ['float32', 'float64'],\n 'logsumexp')\n\n helper = LayerHelper('logsumexp', **locals())\n attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all}\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(\n type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef inverse(x, name=None):\n \"\"\"\n Takes the inverse of the square matrix. A square matrix is a matrix with\n the same number of rows and columns. The input can be a square matrix\n (2-D Tensor) or batches of square matrices.\n\n Args:\n x (Tensor): The input tensor. The last two\n dimensions should be equal. When the number of dimensions is\n greater than 2, it is treated as batches of square matrix. 
The data\n type can be float32 and float64.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information,\n please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: A Tensor holds the inverse of x. The shape and data type\n is the same as x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')\n inv = paddle.inverse(mat)\n print(inv) # [[0.5, 0], [0, 0.5]]\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.inverse(x)\n\n def _check_input(x):\n check_variable_and_dtype(x, 'x',\n ['float32', 'float64'], 'inverse')\n if len(x.shape) < 2:\n raise ValueError(\n \"The input of inverse is expected to be a Tensor whose number \"\n \"of dimensions is no less than 2. But reviced: %d, \"\n \"x's shape: %s.\" % (len(x.shape), x.shape))\n _check_input(x)\n helper = LayerHelper('inverse', **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})\n return out\n\n\ndef max(x, axis=None, keepdim=False, name=None):\n \"\"\"\n\n Computes the maximum of tensor elements over the given axis.\n\n Args:\n x(Tensor): A tensor, the data type is float32,\n float64, int32, int64.\n axis(int|list|tuple, optional): The axis along which the maximum is computed.\n If :attr:`None`, compute the maximum over all elements of\n `x` and return a Tensor with a single element,\n otherwise must be in the range :math:`[-x.ndim(x), x.ndim(x))`.\n If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.\n keepdim(bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result tensor will have one fewer dimension\n than the `x` unless :attr:`keepdim` is true, default\n value is False.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor, results of maximum on the specified axis of input tensor,\n it's data type is the same as `x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n # data_x is a Tensor with shape [2, 4]\n # the axis is a int element\n\n x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],\n [0.1, 0.2, 0.6, 0.7]])\n result1 = paddle.max(x)\n print(result1)\n #[0.9]\n result2 = paddle.max(x, axis=0)\n print(result2)\n #[0.2 0.3 0.6 0.9]\n result3 = paddle.max(x, axis=-1)\n print(result3)\n #[0.9 0.7]\n result4 = paddle.max(x, axis=1, keepdim=True)\n print(result4)\n #[[0.9]\n # [0.7]]\n\n # data_y is a Tensor with shape [2, 2, 2]\n # the axis is list \n\n y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]])\n result5 = paddle.max(y, axis=[1, 2])\n print(result5)\n #[4. 8.]\n result6 = paddle.max(y, axis=[0, 1])\n print(result6)\n #[7. 
8.]\n \"\"\"\n\n if axis is not None and not isinstance(axis, list):\n if isinstance(axis, tuple):\n axis = list(axis)\n elif isinstance(axis, int):\n axis= [axis]\n else:\n raise TypeError(\n \"The type of axis must be int, list or tuple, but received {}\".format(type(axis)))\n\n reduce_all = True if axis == None or axis == [] else False\n axis = axis if axis != None and axis != [] else [0]\n if in_dygraph_mode():\n return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all)\n\n helper = LayerHelper('max', **locals())\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')\n\n out = helper.create_variable_for_type_inference(\n dtype=x.dtype)\n helper.append_op(\n type='reduce_max',\n inputs={'X': x},\n outputs={'Out': out},\n attrs={\n 'dim': axis,\n 'keep_dim': keepdim,\n 'reduce_all': reduce_all\n })\n return out\n\ndef min(x, axis=None, keepdim=False, name=None):\n \"\"\"\n\n Computes the minimum of tensor elements over the given axis\n\n Args:\n x(Tensor): A tensor, the data type is float32, float64, int32, int64.\n axis(int|list|tuple, optional): The axis along which the minimum is computed.\n If :attr:`None`, compute the minimum over all elements of\n `x` and return a Tensor with a single element,\n otherwise must be in the range :math:`[-x.ndim, x.ndim)`.\n If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.\n keepdim(bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result tensor will have one fewer dimension\n than the `x` unless :attr:`keepdim` is true, default\n value is False.\n name(str, optional): The default value is None. Normally there is no need for \n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor, results of minimum on the specified axis of input tensor,\n it's data type is the same as input's Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n # x is a tensor with shape [2, 4]\n # the axis is a int element\n x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],\n [0.1, 0.2, 0.6, 0.7]])\n result1 = paddle.min(x)\n print(result1)\n #[0.1]\n result2 = paddle.min(x, axis=0)\n print(result2)\n #[0.1 0.2 0.5 0.7]\n result3 = paddle.min(x, axis=-1)\n print(result3)\n #[0.2 0.1]\n result4 = paddle.min(x, axis=1, keepdim=True)\n print(result4)\n #[[0.2]\n # [0.1]]\n\n # y is a Tensor with shape [2, 2, 2]\n # the axis is list \n y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]])\n result5 = paddle.min(y, axis=[1, 2])\n print(result5)\n #[1. 5.]\n result6 = paddle.min(y, axis=[0, 1])\n print(result6)\n #[1. 
2.]\n \"\"\"\n\n if axis is not None and not isinstance(axis, list):\n if isinstance(axis, tuple):\n axis = list(axis)\n elif isinstance(axis, int):\n axis= [axis]\n else:\n raise TypeError(\n \"The type of axis must be int, list or tuple, but received {}\".format(type(axis)))\n reduce_all = True if axis == None or axis == [] else False\n axis = axis if axis != None and axis != [] else [0]\n if in_dygraph_mode():\n return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all)\n\n helper = LayerHelper('min', **locals())\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')\n\n out = helper.create_variable_for_type_inference(\n dtype=x.dtype)\n helper.append_op(\n type='reduce_min',\n inputs={'X': x},\n outputs={'Out': out},\n attrs={\n 'dim': axis,\n 'keep_dim': keepdim,\n 'reduce_all': reduce_all\n })\n return out\n\n\ndef log1p(x, name=None):\n r\"\"\"\n Calculates the natural log of the given input tensor, element-wise.\n\n .. math::\n Out = \\\\ln(x+1)\n\n Args:\n x (Tensor): Input Tensor. Must be one of the following types: float32, float64.\n name(str, optional): The default value is None. Normally there is no need for \n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n Returns:\n Tensor, the natural log of the input Tensor computed element-wise.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data = paddle.to_tensor([[0], [1]], dtype='float32')\n res = paddle.log1p(data)\n # [[0.], [0.6931472]]\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.log1p(x)\n\n check_variable_and_dtype(x, 'x', ['float32', 'float64'], \"log1p\")\n inputs = {'X': [x]}\n helper = LayerHelper('log1p', **locals())\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(type=\"log1p\", inputs={\"X\": x}, outputs={\"Out\": out})\n return out\n\ndef log2(x, name=None):\n r\"\"\"\n Calculates the log to the base 2 of the given input tensor, element-wise.\n\n .. math::\n\n Out = \\\\log_2x\n\n Args:\n x (Tensor): Input tensor must be one of the following types: float32, float64.\n name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n\n Returns:\n Tensor: The log to the base 2 of the input Tensor computed element-wise.\n\n Examples:\n\n .. code-block:: python\n \n import paddle\n\n # example 1: x is a float\n x_i = paddle.to_tensor([[1.0], [2.0]])\n res = paddle.log2(x_i) # [[0.], [1.0]]\n\n # example 2: x is float32\n x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')\n paddle.to_tensor(x_i)\n res = paddle.log2(x_i)\n print(res) # [1.0]\n\n # example 3: x is float64\n x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')\n paddle.to_tensor(x_i)\n res = paddle.log2(x_i)\n print(res) # [1.0]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.log2(x)\n\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], \"log2\")\n inputs = {'X': [x]}\n helper = LayerHelper('log2', **locals())\n dtype = helper.input_dtype(input_param_name='x')\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(type=\"log2\", inputs={\"X\": x}, outputs={\"Out\": out})\n return out\n\n\ndef log10(x, name=None):\n r\"\"\"\n Calculates the log to the base 10 of the given input tensor, element-wise.\n\n .. 
math::\n\n        Out = \\\\log_{10}x\n\n    Args:\n        x (Tensor): Input tensor must be one of the following types: float32, float64.\n        name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n\n    Returns:\n        Tensor: The log to the base 10 of the input Tensor computed element-wise.\n\n    Examples:\n\n        .. code-block:: python\n        \n            import paddle\n\n            # example 1: x is a float32 Tensor\n            x_i = paddle.to_tensor([[1.0], [10.0]])\n            res = paddle.log10(x_i) # [[0.], [1.0]]\n\n            # example 2: x is float32\n            x_i = paddle.full(shape=[1], fill_value=10, dtype='float32')\n            res = paddle.log10(x_i)\n            print(res) # [1.0]\n\n            # example 3: x is float64\n            x_i = paddle.full(shape=[1], fill_value=10, dtype='float64')\n            res = paddle.log10(x_i)\n            print(res) # [1.0]\n    \"\"\"\n    if in_dygraph_mode():\n        return _C_ops.log10(x)\n\n    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], \"log10\")\n    inputs = {'X': [x]}\n    helper = LayerHelper('log10', **locals())\n    dtype = helper.input_dtype(input_param_name='x')\n    out = helper.create_variable_for_type_inference(dtype)\n    helper.append_op(type=\"log10\", inputs={\"X\": x}, outputs={\"Out\": out})\n    return out\n\n\ndef clip(x, min=None, max=None, name=None):\n    \"\"\"\n    This operator clips all elements in input into the range [ min, max ] and returns\n    a resulting tensor as the following equation:\n\n    .. math::\n\n        Out = MIN(MAX(x, min), max)\n\n    Args:\n        x (Tensor): An N-D Tensor with data type float32, float64, int32 or int64.\n        min (float|int|Tensor): The lower bound with type ``float`` , ``int`` or a ``Tensor``\n            with shape [1] and type ``int32``, ``float32``, ``float64``.\n        max (float|int|Tensor): The upper bound with type ``float``, ``int`` or a ``Tensor``\n            with shape [1] and type ``int32``, ``float32``, ``float64``.\n        name (str, optional): The default value is None. Normally there is no\n            need for user to set this property. For more information, please\n            refer to :ref:`api_guide_Name`.\n\n    Returns:\n        Tensor: A Tensor with the same data type and data shape as input.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')\n            out1 = paddle.clip(x1, min=3.5, max=5.0)\n            out2 = paddle.clip(x1, min=2.5)\n            print(out1)\n            # [[3.5, 3.5]\n            # [4.5, 5.0]]\n            print(out2)\n            # [[2.5, 3.5]\n            # [4.5, 6.4]]\n    \"\"\"\n\n    x_dtype = str(x.dtype)\n    if x_dtype == 'paddle.int32':\n        min_ = np.iinfo(np.int32).min\n        max_ = np.iinfo(np.int32).max - 2**7\n    elif x_dtype == 'paddle.int64':\n        min_ = np.iinfo(np.int64).min\n        max_ = np.iinfo(np.int64).max - 2**39\n    else:\n        min_ = float(np.finfo(np.float32).min)\n        max_ = float(np.finfo(np.float32).max)\n\n    if in_dygraph_mode():\n        if isinstance(min, Variable):\n            min = min.numpy().item(0)\n        if isinstance(max, Variable):\n            max = max.numpy().item(0)\n        min = min_ if min is None else min\n        max = max_ if max is None else max\n        return _C_ops.clip(x, \"min\", min, \"max\", max)\n\n    if min is not None:\n        check_type(min, 'min', (float, int, Variable), 'clip')\n        if isinstance(min, Variable):\n            check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],\n                        'clip', '(When the type of min in clip is Variable.)')\n    if max is not None:\n        check_type(max, 'max', (float, int, Variable), 'clip')\n        if isinstance(max, Variable):\n            check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],\n                        'clip', '(When the type of max in clip is Variable.)')\n\n    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip')\n\n    inputs = {'X': x}\n    attrs = {'min': min_, 'max': max_}\n\n    if isinstance(min, Variable):\n        min.stop_gradient = True\n        inputs['Min'] = min\n    elif min is not None:\n        attrs['min'] = min\n\n    if isinstance(max, Variable):\n        max.stop_gradient = True\n        inputs['Max'] = max\n    elif max is not None:\n        attrs['max'] = max\n\n    helper = LayerHelper('clip', **locals())\n    output = helper.create_variable_for_type_inference(\n        dtype=helper.input_dtype('x'))\n    helper.append_op(\n        type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)\n\n    return output\n\n\n@inplace_apis_in_dygraph_only\ndef clip_(x, min=None, max=None, name=None):\n    \"\"\"\n    Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``.\n    Please refer to :ref:`api_tensor_clip`.\n    \"\"\"\n    fmin = float(np.finfo(np.float32).min)\n    fmax = float(np.finfo(np.float32).max)\n    if isinstance(min, Variable):\n        min = min.numpy().item(0)\n    if isinstance(max, Variable):\n        max = max.numpy().item(0)\n    min = fmin if min is None else min\n    max = fmax if max is None else max\n    return _C_ops.clip_(x, \"min\", min, \"max\", max)\n\n\n\ndef trace(x, offset=0, axis1=0, axis2=1, name=None):\n    \"\"\"\n    **trace**\n\n    This OP computes the sum along diagonals of the input tensor x.\n\n    If ``x`` is 2D, returns the sum of the diagonal.\n\n    If ``x`` has larger dimensions, then returns a tensor of diagonal sums, with diagonals taken from\n    the 2D planes specified by axis1 and axis2. By default, the 2D planes are formed by the first and second axes\n    of the input tensor x.\n\n    The argument ``offset`` determines where diagonals are taken from input tensor x:\n\n    - If offset = 0, it is the main diagonal.\n    - If offset > 0, it is above the main diagonal.\n    - If offset < 0, it is below the main diagonal.\n    - Note that if offset is out of input's shape indicated by axis1 and axis2, 0 will be returned.\n\n    Args:\n        x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.\n        offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).\n        axis1(int, optional): The first axis with respect to take diagonal. Default: 0.\n        axis2(int, optional): The second axis with respect to take diagonal. Default: 1.\n        name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.\n\n    Returns:\n        Tensor: the output data type is the same as input data type.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            case1 = paddle.randn([2, 3])\n            case2 = paddle.randn([3, 10, 10])\n            case3 = paddle.randn([3, 10, 5, 10])\n            data1 = paddle.trace(case1) # data1.shape = [1]\n            data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]\n            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]\n    \"\"\"\n    def __check_input(input, offset, dim1, dim2):\n        check_dtype(x.dtype, 'Input',\n                    ['int32', 'int64', 'float16', 'float32', 'float64'],\n                    'trace')\n\n        input_shape = list(x.shape)\n        assert len(input_shape) >= 2, \\\n                \"The x must be at least 2-dimensional, \" \\\n                \"But received Input x's dimensional: %s.\\n\" % \\\n                len(input_shape)\n\n        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1\n        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2\n\n        assert ((0 <= axis1_) and (axis1_ < len(input_shape))), \\\n            \"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\\n\" \\\n            % (-(len(input_shape)), len(input_shape) - 1, axis1)\n\n        assert ((0 <= axis2_) and (axis2_ < len(input_shape))), \\\n            \"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\\n\" \\\n            % (-(len(input_shape)), len(input_shape) - 1, axis2)\n\n\n        assert  axis1_ != axis2_, \\\n            \"axis1 and axis2 cannot be the same axis.\" \\\n            \"But received axis1 = %d, axis2 = %d\\n\"%(axis1, axis2)\n\n    __check_input(x, offset, axis1, axis2)\n    if in_dygraph_mode():\n        return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)\n\n    inputs = {'Input': [x]}\n    attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}\n    helper = LayerHelper('trace', **locals())\n\n    out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n    helper.append_op(\n        type='trace',\n        inputs={'Input': [x]},\n        attrs={'offset': offset,\n               'axis1': axis1,\n               'axis2': axis2},\n        outputs={'Out': [out]})\n    return out\n\ndef diagonal(x, offset=0, axis1=0, axis2=1, name=None):\n    \"\"\"\n    This OP computes the diagonals of the input tensor x.\n\n    If ``x`` is 2D, returns the diagonal.\n    If ``x`` has larger dimensions, diagonals are taken from the 2D planes specified by axis1 and axis2. \n    By default, the 2D planes are formed by the first and second axis of the input tensor x.\n\n    The argument ``offset`` determines where diagonals are taken from input tensor x:\n\n    - If offset = 0, it is the main diagonal.\n    - If offset > 0, it is above the main diagonal.\n    - If offset < 0, it is below the main diagonal.\n    \n    Args:\n        x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be bool, int32, int64, float16, float32, float64.\n        offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).\n        axis1(int, optional): The first axis with respect to take diagonal. Default: 0.\n        axis2(int, optional): The second axis with respect to take diagonal. Default: 1.\n        name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.\n\n    Returns:\n        Tensor: a partial view of the input tensor in the two specified dimensions, the output data type is the same as input data type.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            x = paddle.rand([2,2,3],'float32')\n            print(x)\n            # Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #        [[[0.45661032, 0.03751532, 0.90191704],\n            #          [0.43760979, 0.86177313, 0.65221709]],\n\n            #         [[0.17020577, 0.00259554, 0.28954273],\n            #          [0.51795638, 0.27325270, 0.18117726]]])\n\n            out1 = paddle.diagonal(x)\n            print(out1)\n            #Tensor(shape=[3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #       [[0.45661032, 0.51795638],\n            #        [0.03751532, 0.27325270],\n            #        [0.90191704, 0.18117726]])\n\n            out2 = paddle.diagonal(x, offset=0, axis1=2, axis2=1)\n            print(out2)\n            #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #       [[0.45661032, 0.86177313],\n            #        [0.17020577, 0.27325270]])\n\n            out3 = paddle.diagonal(x, offset=1, axis1=0, axis2=1)\n            print(out3)\n            #Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #       [[0.43760979],\n            #        [0.86177313],\n            #        [0.65221709]])\n\n            out4 = paddle.diagonal(x, offset=0, axis1=1, axis2=2)\n            print(out4)\n            #Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #       [[0.45661032, 0.86177313],\n            #        [0.17020577, 0.27325270]])\n            \n    \"\"\"\n    if in_dygraph_mode():\n        return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)\n\n    def __check_input(input, offset, dim1, dim2):\n        check_dtype(x.dtype, 'Input',\n                    ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],\n                    'diagonal')\n\n        input_shape = list(x.shape)\n        assert len(input_shape) >= 2, \\\n                \"The x must be at least 2-dimensional, \" \\\n                \"But received Input x's dimensional: %s.\\n\" % \\\n                len(input_shape)\n\n        axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1\n        axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2\n\n        assert axis1_ < len(input_shape), \\\n            \"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\\n\" \\\n            % (-(len(input_shape)), len(input_shape) - 1, axis1)\n\n        assert axis2_ < len(input_shape), \\\n            \"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\\n\" \\\n            % (-(len(input_shape)), len(input_shape) - 1, axis2)\n\n        assert  axis1_ != axis2_, \\\n            \"axis1 and axis2 cannot be the same axis.\" \\\n            \"But received axis1 = %d, axis2 = %d\\n\"%(axis1, axis2)\n\n    __check_input(x, offset, axis1, axis2)\n    helper = LayerHelper('diagonal', **locals())\n    out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n    helper.append_op(\n        type='diagonal',\n        inputs={'Input': [x]},\n        attrs={'offset': offset,\n               'axis1': axis1,\n               'axis2': axis2},\n        outputs={'Out': [out]})\n    return out\n\n\n@templatedoc(op_type=\"kron\")\ndef kron(x, y, name=None):\n    \"\"\"\n\n${comment}\n\n    Args:\n        x (Tensor): the first operand of kron op, data type: float16, float32,\n            float64, int32 or int64.\n        y (Tensor): the second operand of kron op, data type: float16,\n            float32, float64, int32 or int64. Its data type should be the same\n            with x.\n        name(str, optional): The default value is None. Normally there is no\n            need for user to set this property. For more information, please\n            refer to :ref:`api_guide_Name`.\n\n    Returns:\n        Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as that of x.\n\n    Examples:\n        .. 
code-block:: python\n\n import paddle\n x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')\n y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')\n out = paddle.kron(x, y)\n print(out)\n # [[1, 2, 3, 2, 4, 6],\n # [ 4, 5, 6, 8, 10, 12],\n # [ 7, 8, 9, 14, 16, 18],\n # [ 3, 6, 9, 4, 8, 12],\n # [12, 15, 18, 16, 20, 24],\n # [21, 24, 27, 28, 32, 36]])\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.kron(x, y)\n\n helper = LayerHelper('kron', **locals())\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')\n check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')\n\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(type=\"kron\", inputs={\"X\": x, \"Y\": y}, outputs={\"Out\": out})\n return out\n\n\ndef cumsum(x, axis=None, dtype=None, name=None):\n \"\"\"\n The cumulative sum of the elements along a given axis. \n \n **Note**:\n The first element of the result is the same of the first element of the input. \n\n Args:\n x (Tensor): The input tensor needed to be cumsumed.\n axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.\n dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None. \n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, the result of cumsum operator. \n\n Examples:\n .. code-block:: python\n \n import paddle\n \n data = paddle.arange(12)\n data = paddle.reshape(data, (3, 4))\n\n y = paddle.cumsum(data)\n # [ 0 1 3 6 10 15 21 28 36 45 55 66]\n\n y = paddle.cumsum(data, axis=0)\n # [[ 0 1 2 3]\n # [ 4 6 8 10]\n # [12 15 18 21]]\n \n y = paddle.cumsum(data, axis=-1)\n # [[ 0 1 3 6]\n # [ 4 9 15 22]\n # [ 8 17 27 38]]\n\n y = paddle.cumsum(data, dtype='float64')\n print(y.dtype)\n # VarType.FP64\n \"\"\"\n if axis is None:\n flatten = True\n else:\n flatten = False\n if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):\n x = layers.cast(x, dtype)\n\n if in_dygraph_mode():\n if axis is None:\n return _C_ops.cumsum(x, 'flatten', flatten)\n else:\n return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)\n\n check_type(x, 'x', (Variable), 'cumsum')\n locals_var = locals().copy()\n kwargs = dict()\n for name, val in locals_var.items():\n if val is not None:\n kwargs[name] = val\n _cum_sum_ = generate_layer_fn('cumsum')\n return _cum_sum_(**kwargs)\n\ndef cumprod(x, dim=None, dtype=None, name=None):\n \"\"\"\n Compute the cumulative product of the input tensor x along a given dimension dim.\n\n **Note**:\n The first element of the result is the same as the first element of the input.\n\n Args:\n x (Tensor): the input tensor need to be cumproded.\n dim (int): the dimension along which the input tensor will be accumulated. It need to be in the range of [-x.rank, x.rank), where x.rank means the dimensions of the input tensor x and -1 means the last dimension.\n dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64, complex64, complex128. If specified, the input tensor is casted to dtype before the operation is performed. This is useful for preventing data type overflows. 
The default value is None.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, the result of cumprod operator.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data = paddle.arange(12)\n data = paddle.reshape(data, (3, 4))\n # [[ 0 1 2 3 ]\n # [ 4 5 6 7 ]\n # [ 8 9 10 11]]\n\n y = paddle.cumprod(data, dim=0)\n # [[ 0 1 2 3]\n # [ 0 5 12 21]\n # [ 0 45 120 231]]\n\n y = paddle.cumprod(data, dim=-1)\n # [[ 0 0 0 0]\n # [ 4 20 120 840]\n # [ 8 72 720 7920]]\n\n y = paddle.cumprod(data, dim=1, dtype='float64')\n # [[ 0. 0. 0. 0.]\n # [ 4. 20. 120. 840.]\n # [ 8. 72. 720. 7920.]]\n\n print(y.dtype)\n # paddle.float64\n\n \"\"\"\n\n if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):\n x = layers.cast(x, dtype)\n\n if in_dygraph_mode():\n return _C_ops.cumprod(x, 'dim', dim)\n\n check_variable_and_dtype(x, \"x\", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')\n check_type(dim, 'dim', int, 'cumprod')\n\n helper = LayerHelper('cumprod', **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim})\n return out\n\ndef isfinite(x, name=None):\n \"\"\"\n\n Return whether every element of input tensor is finite number or not.\n\n Args:\n x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n `Tensor`, the bool result which shows every element of `x` whether it is finite number or not.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])\n out = paddle.tensor.isfinite(x)\n print(out) # [False True True False True False False]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.isfinite_v2(x)\n helper = LayerHelper(\"isfinite_v2\", **locals())\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')\n out = helper.create_variable_for_type_inference('bool')\n helper.append_op(type=\"isfinite_v2\", inputs={\"X\": x}, outputs={\"Out\": out})\n return out\n\ndef isinf(x, name=None):\n \"\"\"\n\n Return whether every element of input tensor is `+/-INF` or not.\n\n Args:\n x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n `Tensor`, the bool result which shows every element of `x` whether it is `+/-INF` or not.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])\n out = paddle.tensor.isinf(x)\n print(out) # [ True False False True False False False]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.isinf_v2(x)\n helper = LayerHelper(\"isinf_v2\", **locals())\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')\n out = helper.create_variable_for_type_inference(dtype='bool')\n helper.append_op(type=\"isinf_v2\", inputs={\"X\": x}, outputs={\"Out\": out})\n return out\n\ndef isnan(x, name=None):\n \"\"\"\n\n Return whether every element of input tensor is `NaN` or not.\n\n Args:\n x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n `Tensor`, the bool result which shows every element of `x` whether it is `NaN` or not.\n\n Examples:\n .. code-block:: python\n\n import paddle\n x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])\n out = paddle.tensor.isnan(x)\n print(out) # [False False False False False True True]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.isnan_v2(x)\n helper = LayerHelper(\"isnan_v2\", **locals())\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')\n out = helper.create_variable_for_type_inference(dtype='bool')\n helper.append_op(type=\"isnan_v2\", inputs={\"X\": x}, outputs={\"Out\": out})\n return out\n\n\ndef prod(x, axis=None, keepdim=False, dtype=None, name=None):\n \"\"\"\n Compute the product of tensor elements over the given axis.\n\n Args:\n x(Tensor): The input tensor, its data type should be float32, float64, int32, int64.\n axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`, \n multiply all elements of `x` and return a Tensor with a single element, \n otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`, \n the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.\n dtype(str|np.dtype, optional): The desired date type of returned tensor, can be float32, float64, \n int32, int64. If specified, the input tensor is casted to dtype before operator performed. \n This is very useful for avoiding data type overflows. The default value is None, the dtype \n of output is the same as input Tensor `x`.\n keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result \n tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.\n name(string, optional): The default value is None. Normally there is no need for user to set this property.\n For more information, please refer to :ref:`api_guide_Name` .\n\n Returns:\n Tensor, result of product on the specified dim of input tensor.\n\n Raises:\n ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.\n TypeError: The type of :attr:`axis` must be int, list or tuple.\n \n Examples:\n .. 
code-block:: python\n\n import paddle\n\n # the axis is a int element\n x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],\n [0.1, 0.2, 0.6, 0.7]])\n out1 = paddle.prod(x)\n # [0.0002268]\n\n out2 = paddle.prod(x, -1)\n # [0.027 0.0084]\n\n out3 = paddle.prod(x, 0)\n # [0.02 0.06 0.3 0.63]\n\n out4 = paddle.prod(x, 0, keepdim=True)\n # [[0.02 0.06 0.3 0.63]]\n\n out5 = paddle.prod(x, 0, dtype='int64')\n # [0 0 0 0]\n\n # the axis is list\n y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]])\n out6 = paddle.prod(y, [0, 1])\n # [105. 384.]\n\n out7 = paddle.prod(y, (1, 2))\n # [ 24. 1680.]\n\n \"\"\"\n if dtype is not None:\n check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')\n if x.dtype != convert_np_dtype_to_dtype_(dtype):\n x = layers.cast(x, dtype)\n\n return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)\n\n\ndef sign(x, name=None):\n \"\"\"\n This OP returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.\n\n Args:\n x(Tensor): The input tensor. The data type can be float16, float32 or float64.\n name (str, optional): The default value is None. Normally there is no need for user to\n set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')\n out = paddle.sign(x=x)\n print(out) # [1.0, 0.0, -1.0, 1.0]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.sign(x)\n\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')\n helper = LayerHelper(\"sign\", **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})\n\n return out\n\n\ndef tanh(x, name=None):\n r\"\"\"\n Tanh Activation Operator.\n\n .. math::\n out = \\\\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}\n\n Args:\n x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Output of Tanh operator, a Tensor with same data type and shape as input.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])\n out = paddle.tanh(x)\n print(out)\n # [-0.37994896 -0.19737532 0.09966799 0.29131261]\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.tanh(x)\n\n check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')\n check_type(x, 'x', (Variable), 'tanh')\n helper = LayerHelper('tanh', **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})\n return out\n\n@inplace_apis_in_dygraph_only\ndef tanh_(x, name=None):\n r\"\"\"\n Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.\n Please refer to :ref:`api_tensor_tanh`.\n \"\"\"\n return _C_ops.tanh_(x)\n\n\ndef increment(x, value=1.0, name=None):\n \"\"\"\n The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.\n Notice that the number of elements in :attr:`x` must be equal to 1.\n\n Args:\n x (Tensor): A tensor that must always contain only one element, its data type supports float32, float64, int32 and int64.\n value(float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, the elementwise-incremented tensor with the same shape and data type as :attr:`x`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data = paddle.zeros(shape=[1], dtype='float32')\n counter = paddle.increment(data)\n # [1.]\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.increment(x, 'step', value)\n\n check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],\n 'increment')\n helper = LayerHelper(\"increment\", **locals())\n helper.append_op(\n type='increment',\n inputs={'X': [x]},\n outputs={'Out': [x]},\n attrs={'step': float(value)})\n return x\n\n\ndef all(x, axis=None, keepdim=False, name=None):\n \"\"\"\n Computes the the ``logical and`` of tensor elements over the given dimension.\n\n Args:\n x (Tensor): An N-D Tensor, the input data type should be `bool`.\n axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is compute. If\n :attr:`None`, and all elements of :attr:`x` and return a\n Tensor with a single element, otherwise must be in the\n range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,\n the dimension to reduce is :math:`rank + axis[i]`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result Tensor will have one fewer dimension\n than the :attr:`x` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: Results the ``logical and`` on the specified axis of input Tensor `x`, it's data type is bool.\n\n Raises:\n ValueError: If the data type of `x` is not bool.\n TypeError: The type of :attr:`axis` must be int, list or tuple.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import numpy as np\n \n # x is a bool Tensor with following elements:\n # [[True, False]\n # [True, True]]\n x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))\n print(x)\n x = paddle.cast(x, 'bool')\n \n # out1 should be [False]\n out1 = paddle.all(x) # [False]\n print(out1)\n \n # out2 should be [True, False]\n out2 = paddle.all(x, axis=0) # [True, False]\n print(out2)\n \n # keep_dim=False, out3 should be [False, True], out.shape should be (2,)\n out3 = paddle.all(x, axis=-1) # [False, True]\n print(out3)\n \n # keep_dim=True, out4 should be [[False], [True]], out.shape should be (2,1)\n out4 = paddle.all(x, axis=1, keepdim=True)\n out4 = paddle.cast(out4, 'int32') # [[False], [True]]\n print(out4)\n \n \"\"\"\n if axis is not None and not isinstance(axis, (list, tuple)):\n axis = [axis]\n\n if not axis:\n reduce_all_flag = True\n else:\n if len(axis) == len(x.shape):\n reduce_all_flag = True\n else:\n reduce_all_flag = False\n\n if in_dygraph_mode():\n axis = axis if axis != None and axis != [] else [0]\n return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,\n 'reduce_all', reduce_all_flag)\n\n attrs = {\n 'dim': axis if axis != None and axis != [] and axis != () else [0],\n 'keep_dim': keepdim,\n 'reduce_all': reduce_all_flag\n }\n check_variable_and_dtype(x, 'x', ['bool'], 'all')\n\n\n check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')\n\n helper = LayerHelper('all', **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='reduce_all',\n inputs={'X': x},\n outputs={'Out': out},\n attrs=attrs)\n return out\n\n\ndef any(x, axis=None, keepdim=False, name=None):\n \"\"\"\n Computes the the ``logical or`` of tensor elements over the given dimension.\n\n Args:\n x (Tensor): An N-D Tensor, the input data type should be `bool`.\n axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is compute. If\n :attr:`None`, and all elements of :attr:`x` and return a\n Tensor with a single element, otherwise must be in the\n range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,\n the dimension to reduce is :math:`rank + axis[i]`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result Tensor will have one fewer dimension\n than the :attr:`x` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Tensor: Results the ``logical or`` on the specified axis of input Tensor `x`, it's data type is bool.\n\n Raises:\n ValueError: If the data type of `x` is not bool.\n TypeError: The type of :attr:`axis` must be int, list or tuple.\n\n Examples:\n .. 
code-block:: python\n\n            import paddle\n            import numpy as np\n            \n            # x is a bool Tensor with the following elements:\n            #  [[True, False]\n            #   [True, True]]\n            x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))\n            print(x)\n            x = paddle.cast(x, 'bool')\n            \n            # out1 should be [True]\n            out1 = paddle.any(x)  # [True]\n            print(out1)\n            \n            # out2 should be [True, True]\n            out2 = paddle.any(x, axis=0)  # [True, True]\n            print(out2)\n            \n            # keep_dim=False, out3 should be [True, True], out.shape should be (2,)\n            out3 = paddle.any(x, axis=-1)  # [True, True]\n            print(out3)\n            \n            # keep_dim=True, result should be [[True], [True]], out.shape should be (2,1)\n            out4 = paddle.any(x, axis=1, keepdim=True)\n            out4 = paddle.cast(out4, 'int32')  # [[True], [True]]\n            print(out4)\n            \n    \"\"\"\n    if axis is not None and not isinstance(axis, (list, tuple)):\n        axis = [axis]\n\n    if not axis:\n        reduce_all_flag = True\n    else:\n        if len(axis) == len(x.shape):\n            reduce_all_flag = True\n        else:\n            reduce_all_flag = False\n\n    if in_dygraph_mode():\n        axis = axis if axis != None and axis != [] else [0]\n        return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,\n                                 'reduce_all', reduce_all_flag)\n\n    attrs = {\n        'dim': axis if axis != None and axis != [] and axis != () else [0],\n        'keep_dim': keepdim,\n        'reduce_all': reduce_all_flag\n    }\n\n    check_variable_and_dtype(x, 'x', ['bool'], 'any')\n\n\n    check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')\n\n    helper = LayerHelper('any', **locals())\n    out = helper.create_variable_for_type_inference(dtype=x.dtype)\n    helper.append_op(\n        type='reduce_any',\n        inputs={'X': x},\n        outputs={'Out': out},\n        attrs=attrs)\n    return out\n\ndef broadcast_shape(x_shape, y_shape):\n    \"\"\"\n    The function returns the shape of doing operation with broadcasting on tensors of x_shape and y_shape, please refer to :ref:`user_guide_broadcasting` for more details.\n\n    Args:\n        x_shape (list[int]|tuple[int]): A shape of tensor.\n        y_shape (list[int]|tuple[int]): A shape of tensor.\n        \n\n    Returns:\n        list[int], the result shape.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1])\n            # [2, 3, 3]\n            \n            # shape = paddle.broadcast_shape([2, 1, 3], [3, 3, 1])\n            # ValueError (terminated with error message).\n\n    \"\"\"\n\n    return core.broadcast_shape(x_shape, y_shape)\n\ndef conj(x, name=None):\n    r\"\"\"\n    This function computes the conjugate of the Tensor elementwisely.\n\n    Args:\n        x (Tensor): The input tensor which holds the complex numbers. \n            Optional data types are: complex64, complex128, float32, float64, int32 or int64.\n        name (str, optional): The default value is None. Normally there is no need for\n            user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n    Returns:\n        out (Tensor): The conjugate of input. The shape and data type is the same with input.\n            If the elements of tensor is real type such as float32, float64, int32 or int64, the out is the same with input.\n\n    Examples:\n        .. 
code-block:: python\n\n import paddle\n data=paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]])\n #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,\n # [[(1+1j), (2+2j), (3+3j)],\n # [(4+4j), (5+5j), (6+6j)]])\n\n conj_data=paddle.conj(data)\n #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,\n # [[(1-1j), (2-2j), (3-3j)],\n # [(4-4j), (5-5j), (6-6j)]])\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.conj(x)\n\n check_variable_and_dtype(x, \"x\", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')\n\n helper = LayerHelper('conj', **locals())\n out = helper.create_variable_for_type_inference(\n dtype=helper.input_dtype())\n\n helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})\n return out\n\ndef digamma(x, name=None):\n r\"\"\"\n Calculates the digamma of the given input tensor, element-wise.\n\n .. math::\n Out = \\Psi(x) = \\frac{ \\Gamma^{'}(x) }{ \\Gamma(x) }\n\n Args:\n x (Tensor): Input Tensor. Must be one of the following types: float32, float64.\n name(str, optional): The default value is None. Normally there is no need for \n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n Returns:\n Tensor, the digamma of the input Tensor, the shape and data type is the same with input.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n data = paddle.to_tensor([[1, 1.5], [0, -2.2]], dtype='float32')\n res = paddle.digamma(data)\n print(res)\n # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[-0.57721591, 0.03648996],\n # [ nan , 5.32286835]])\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.digamma(x)\n\n check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')\n helper = LayerHelper('digamma', **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})\n return out\n\ndef neg(x, name=None):\n \"\"\"\n This function computes the negative of the Tensor elementwisely.\n\n Args:\n x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])\n out = paddle.neg(x)\n print(out)\n # [0.4 0.2 -0.1 -0.3]\n \"\"\"\n\n return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)\n\ndef atan2(x, y, name=None):\n r\"\"\"\n Element-wise arctangent of x/y with consideration of the quadrant.\n\n Equation:\n .. math::\n\n atan2(x,y)=\\left\\{\\begin{matrix}\n & tan^{-1}(\\frac{x}{y}) & y > 0 \\\\\n & tan^{-1}(\\frac{x}{y}) + \\pi & x>=0, y < 0 \\\\\n & tan^{-1}(\\frac{x}{y}) - \\pi & x<0, y < 0 \\\\\n & +\\frac{\\pi}{2} & x>0, y = 0 \\\\\n & -\\frac{\\pi}{2} & x<0, y = 0 \\\\\n &\\text{undefined} & x=0, y = 0\n \\end{matrix}\\right.\n\n Args:\n x (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.\n y (Tensor): An N-D Tensor, must have the same type as `x`.\n name (str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float64 when the input data type is int).\n\n Examples:\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')\n #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [-1, 1, 1, -1])\n\n y = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')\n #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [-1, -1, 1, 1])\n\n out = paddle.atan2(x, y)\n #Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [-2.35619450, 2.35619450, 0.78539819, -0.78539819])\n\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.atan2(x, y)\n else:\n check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')\n check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')\n\n helper = LayerHelper('atan2', **locals())\n inputs = {'X1' : x, 'X2' : y}\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='atan2', inputs=inputs, outputs={'Out': out})\n return out\n\ndef lerp(x, y, weight, name=None):\n r\"\"\"\n Does a linear interpolation between x and y based on weight.\n\n Equation:\n .. math::\n\n lerp(x, y, weight) = x + weight * (y - x).\n\n Args:\n x (Tensor): An N-D Tensor, the data type is float32, float64.\n y (Tensor): An N-D Tensor, the data type is float32, float64.\n weight (float|Tensor): the weight for the interpolation formula.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n out (Tensor): An N-D Tensor, the shape and data type is the same with input.\n\n Example:\n .. 
code-block:: python\n\n            import paddle\n            \n            x = paddle.arange(1., 5., dtype='float32')\n            y = paddle.empty([4], dtype='float32')\n            y.fill_(10.)\n            out = paddle.lerp(x, y, 0.5)\n            # out: [5.5, 6., 6.5, 7.]\n\n    \"\"\"\n    if in_dygraph_mode():\n        check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')\n        if isinstance(weight, float):\n            weight = paddle.to_tensor(weight, dtype=x.dtype)\n        return _C_ops.lerp(x, y, weight)\n\n    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')\n    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')\n    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')\n\n    helper = LayerHelper('lerp', **locals())\n    inputs = {'X': x, 'Y': y, 'Weight': weight}\n    out = helper.create_variable_for_type_inference(dtype=x.dtype)\n    helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})\n    return out\n\n@inplace_apis_in_dygraph_only\ndef lerp_(x, y, weight, name=None):\n    r\"\"\"\n    Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.\n    Please refer to :ref:`api_tensor_lerp`.\n    \"\"\"\n    out_shape = broadcast_shape(x.shape, y.shape)\n    check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')\n    if isinstance(weight, float):\n        weight = paddle.to_tensor([weight], dtype=x.dtype)\n    elif isinstance(weight, (paddle.Tensor, Variable)):\n        out_shape = broadcast_shape(out_shape, weight.shape)\n    if out_shape != x.shape:\n        raise ValueError(\"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.\".format(out_shape, x.shape))\n    return _C_ops.lerp_(x, y, weight)\n\ndef rad2deg(x, name=None):\n    \"\"\"\n    Convert each of the elements of input x from angles in radians to degrees.\n    \n    Equation:\n        .. math::\n\n            rad2deg(x)=180/ \\\\pi * x\n\n    Args:\n        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.\n        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Returns:\n        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n            import numpy as np\n            \n            x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])\n            result1 = paddle.rad2deg(x1)\n            print(result1)\n            # Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #         [180.02334595, -180.02334595,  359.98937988, -359.98937988,\n            #           89.95437622 , -89.95437622])\n\n            x2 = paddle.to_tensor(np.pi/2)\n            result2 = paddle.rad2deg(x2)\n            print(result2)\n            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #         [90.])\n                     \n            x3 = paddle.to_tensor(1)\n            result3 = paddle.rad2deg(x3)\n            print(result3)\n            # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n            #         [57.29578018])\n    \"\"\"\n    rad2deg_scale = 180 / np.pi\n    if in_dygraph_mode():\n        if convert_dtype(x.dtype) in ['int32', 'int64']:\n            x = cast(x, dtype=\"float32\")\n        return _C_ops.scale(x, 'scale', rad2deg_scale)\n    else:\n        check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg')\n        helper = LayerHelper('rad2deg', **locals())\n        out_cast = x\n        if convert_dtype(x.dtype) in ['int32', 'int64']:\n            out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)\n            helper.append_op(\n                type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})\n        out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)\n        helper.append_op(\n            type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale})\n        return out\n\ndef deg2rad(x, name=None):\n    \"\"\"\n    Convert each of the elements of input x from degrees to angles in radians.\n    \n    Equation:\n        .. math::\n\n            deg2rad(x)=\\\\pi * x / 180\n\n    Args:\n        x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.\n        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Returns:\n        out (Tensor): An N-D Tensor, the shape and data type is the same with input (The output data type is float32 when the input data type is int).\n\n    Examples:\n        .. 
code-block:: python\n\n import paddle\n import numpy as np\n \n x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0])\n result1 = paddle.deg2rad(x1)\n print(result1)\n # Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [3.14159274, -3.14159274, 6.28318548, -6.28318548, 1.57079637,\n # -1.57079637])\n\n x2 = paddle.to_tensor(180)\n result2 = paddle.deg2rad(x2)\n print(result2)\n # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [3.14159274])\n \"\"\"\n deg2rad_scale = np.pi / 180.0\n if in_dygraph_mode():\n if convert_dtype(x.dtype) in ['int32', 'int64']:\n x = cast(x, dtype=\"float32\")\n return _C_ops.scale(x, 'scale', deg2rad_scale)\n else:\n check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad')\n helper = LayerHelper('deg2rad', **locals())\n out_cast = x\n if convert_dtype(x.dtype) in ['int32', 'int64']:\n out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)\n helper.append_op(\n type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})\n out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)\n helper.append_op(\n type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale})\n return out\n\ndef diff(x, n=1, axis=-1, prepend=None, append=None, name=None):\n r\"\"\"\n Computes the n-th forward difference along the given axis.\n The first-order differences is computed by using the following formula: \n\n .. math::\n\n out[i] = x[i+1] - x[i]\n \n Higher-order differences are computed by using paddle.diff() recursively. \n Only n=1 is currently supported.\n\n Args:\n x(Tensor): The input tensor to compute the forward difference on\n n(int, optional): The number of times to recursively compute the difference. \n Only support n=1. Default:1\n axis(int, optional): The axis to compute the difference along. Default:-1\n prepend(Tensor, optional): The tensor to prepend to input along axis before computing the difference.\n It's dimensions must be equivalent to that of x, \n and its shapes must match x's shape except on axis.\n append(Tensor, optional): The tensor to append to input along axis before computing the difference, \n It's dimensions must be equivalent to that of x, \n and its shapes must match x's shape except on axis.\n name(str|None): A name for this layer(optional). If set None, \n the layer will be named automatically.\n \n Returns:\n Tensor: The output tensor with same dtype with x.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([1, 4, 5, 2])\n out = paddle.diff(x)\n print(out)\n # out:\n # [3, 1, -3]\n\n y = paddle.to_tensor([7, 9])\n out = paddle.diff(x, append=y)\n print(out)\n # out: \n # [3, 1, -3, 5, 2]\n\n z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])\n out = paddle.diff(z, axis=0)\n print(out)\n # out:\n # [[3, 3, 3]]\n out = paddle.diff(z, axis=1)\n print(out)\n # out:\n # [[1, 1], [1, 1]]\n \"\"\"\n\n if axis < 0:\n axis = axis + len(x.shape)\n if axis > len(x.shape):\n axis = len(x.shape)\n if axis < 0:\n axis = 0\n dtype = x.dtype\n axes = [axis]\n infer_flags = list(1 for i in range(len(axes)))\n if in_dygraph_mode():\n has_pend = False\n input_list = []\n if prepend is not None and append is not None:\n input_list = [prepend, x, append]\n has_pend = True\n elif prepend is not None:\n input_list = [prepend, x]\n has_pend = True\n elif append is not None:\n input_list = [x, append]\n has_pend = True\n if has_pend:\n new_input = _C_ops.concat(input_list, 'axis', axis)\n else:\n new_input = x\n\n attrs_1 = ()\n attrs_2 = ()\n\n dim_len = new_input.shape[axis]\n\n starts_1 = [0]\n attrs_1 += ('starts', starts_1)\n ends_1 = [dim_len - 1]\n attrs_1 += ('ends', ends_1)\n input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \\\n 'infer_flags', infer_flags, *attrs_1)\n starts_2 = [1]\n attrs_2 += ('starts', starts_2)\n ends_2 = [dim_len]\n attrs_2 += ('ends', ends_2)\n input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \\\n 'infer_flags', infer_flags, *attrs_2)\n\n if x.dtype == paddle.bool:\n op = getattr(_C_ops, \"logical_xor\")\n out = op(input_back, input_front)\n else:\n out = layers.elementwise_sub(input_back, input_front, axis=axis)\n return out\n else:\n check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')\n check_type(axis, 'axis', (int), 'diff')\n helper = LayerHelper('diff', **locals())\n has_pend = False\n input_list = []\n if prepend is not None and append is not None:\n input_list = [prepend, x, append]\n has_pend = True\n elif prepend is not None:\n input_list = [prepend, x]\n has_pend = True\n elif append is not None:\n input_list = [x, append]\n has_pend = True\n\n if has_pend:\n new_input = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis}\n )\n else:\n new_input = x\n\n dim_len = new_input.shape[axis]\n attrs_1 = {'axes': axes}\n starts_1 = [0]\n ends_1 = [dim_len - 1]\n attrs_1['starts'] = starts_1\n attrs_1['ends'] = ends_1\n input_front = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front}\n )\n attrs_2 = {'axes': axes}\n starts_2 = [1]\n ends_2 = [dim_len]\n attrs_2['starts'] = starts_2\n attrs_2['ends'] = ends_2\n input_back = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back}\n )\n\n if dtype == paddle.bool:\n out = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='logical_xor', inputs={\"X\": input_back, \"Y\": input_front}, outputs={\"Out\": out}\n )\n else:\n out = layers.elementwise_sub(input_back, input_front, axis=axis)\n\n return out\n\n\ndef angle(x, name=None):\n r\"\"\"\n Element-wise angle of complex numbers. 
For non-negative real numbers, the angle is 0 while \n    for negative real numbers, the angle is :math:`\\pi`.\n\n    Equation:\n        .. math::\n\n            angle(x)=arctan2(x.imag, x.real)\n\n    Args:\n        x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64.\n        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n    Returns:\n        out (Tensor): An N-D Tensor of real data type with the same precision as that of x's data type.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')\n            y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')\n            z = x + 1j * y\n            print(z.numpy())\n            # [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]\n            #  [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]\n            #  [ 0.-2.j  0.-1.j  0.+0.j  0.+1.j]\n            #  [ 1.-2.j  1.-1.j  1.+0.j  1.+1.j]]\n\n            theta = paddle.angle(z)\n            print(theta.numpy())\n            # [[-2.3561945 -2.6779451  3.1415927  2.6779451]\n            #  [-2.0344439 -2.3561945  3.1415927  2.3561945]\n            #  [-1.5707964 -1.5707964  0.         1.5707964]\n            #  [-1.1071488 -0.7853982  0.         0.7853982]]\n    \"\"\"\n\n    if in_dygraph_mode():\n        return _C_ops.angle(x)\n\n    check_variable_and_dtype(x, 'x',\n        ['float32', 'float64', 'complex64', 'complex128'], 'angle')\n    op_type = \"angle\"\n    helper = LayerHelper(op_type, **locals())\n    inputs = {\"X\": x}\n    out = helper.create_variable_for_type_inference(\n        dtype=_complex_to_real_dtype(x.dtype))\n    outputs = {\"Out\": out}\n    helper.append_op(type=op_type, inputs=inputs, outputs=outputs)\n    return out\n"
] | [
[
"numpy.finfo",
"numpy.iinfo"
]
] |
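Editor's note: the `numpy.finfo` / `numpy.iinfo` calls listed in the `apis` column above are how `clip` in this record derives default bounds when `min`/`max` are omitted. A minimal, self-contained sketch of that pattern, assuming the same headroom constants as the record (the helper name `default_clip_bounds` is hypothetical, not part of the dataset):

import numpy as np

def default_clip_bounds(dtype_name):
    # Integer dtypes: take the representable range from np.iinfo, minus the
    # headroom the record's clip() subtracts (2**7 for int32, 2**39 for int64).
    if dtype_name == 'int32':
        info = np.iinfo(np.int32)
        return info.min, info.max - 2**7
    if dtype_name == 'int64':
        info = np.iinfo(np.int64)
        return info.min, info.max - 2**39
    # Float dtypes fall back to the finite float32 range via np.finfo.
    info = np.finfo(np.float32)
    return float(info.min), float(info.max)

print(default_clip_bounds('int32'))    # (-2147483648, 2147483519)
print(default_clip_bounds('float32'))  # approximately (-3.4028235e+38, 3.4028235e+38)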
TheOpponent/st3-translation-notes | [
"c78d7c2347611c07677ec5e293bbd6351800f438"
] | [
"Scripts/convert_png_tiles.py"
] | [
"# This script reads a PNG file containing a single row of 26 x 26 tiles and outputs binary data.\n# NumPy and Pillow are required as dependencies.\n#\n# Specify an input PNG file and an optional output file as arguments.\n# If an output file is not given, the binary data will be written in the console.\n#\n# The original graphic format is 4 bits per pixel, with each byte representing two pixels stacked vertically. \n# The left nybble represents the lower pixel and the right nybble represents the upper pixel.\n# 13 rows of these bytes create a 26 x 26 tile.\n# \n# To create replacement tiles, create a non-transparent image with the following 16-color palette:\n# 000000 101010 202020 303030 404040 505050 606060 707070 808080 909090 A0A0A0 B0B0B0 C0C0C0 D0D0D0 E0E0E0 F0F0F0\n#\n# Although the resulting image will be grayscale, this image should be saved as 8-bit RGB.\n# Image editors will frequently override indexed palettes when converting to grayscale,\n# so creating RGB images is recommended to guarantee the palette will not be changed.\n# The first channel (red) of this file will be read and used as pixel data.\n#\n# Overwrite SKFONT.CG with the output starting at the tile offset to replace.\n\nimport struct\nimport sys\nimport numpy as np\nfrom PIL import Image\n\ndef main():\n\n if len(sys.argv) < 2:\n print(\"Specify input PNG file.\")\n return\n\n with Image.open(sys.argv[1]) as input_file:\n output = b''\n\n # Read image and split into equal number of 26 x 26 arrays.\n image = list(input_file.getdata(0))\n image_size = input_file.size\n image_2d = np.empty((image_size[1],image_size[0]),dtype=\"uint8\")\n \n # rows = image[2]\n try:\n for i in range(0,25):\n image_2d[i] = image[i * image_size[0]:(i + 1) * image_size[0]]\n\n # Split into individual tiles.\n tiles = np.hsplit(image_2d,image_size[0] / 26)\n for i in tiles:\n # Bitwise shift 4 to the right to obtain 0-F value for each pixel.\n tile = np.right_shift(i,4)\n\n # Divide each tile into 26 x 2 arrays.\n tile_row_pairs = np.vsplit(tile,13)\n\n for row_pair in tile_row_pairs: \n for column in range(0,26):\n # Upper pixel is right nybble; lower pixel is left nybble.\n upper_pixel = row_pair[0][column]\n lower_pixel = row_pair[1][column] << 4\n pixels = upper_pixel + lower_pixel\n\n output += struct.pack(\"=B\",pixels)\n\n except ValueError:\n print(\"Input PNG file must be 8-bit, no transparency, and have a height of 26 pixels and width a multiple of 26 pixels.\")\n return\n\n if len(sys.argv) >= 3:\n with open(sys.argv[2],\"wb\") as output_file:\n output_file.write(output)\n print(f\"Paste the contents of {sys.argv[2]} into SKFONT.CG starting at the tile(s) to replace.\")\n\n else:\n print(output.hex())\n print(\"\\nPaste the above hex into SKFONT.CG starting at the tile(s) to replace.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.vsplit",
"numpy.hsplit",
"numpy.empty",
"numpy.right_shift"
]
] |
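Editor's note: the header comment in `convert_png_tiles.py` above pins down the tile packing exactly (13 byte-rows per 26 x 26 tile; each byte holds two vertically stacked 4-bit pixels, lower pixel in the left nybble, upper pixel in the right), so the transform is easy to invert. A sketch of the reverse direction under those stated assumptions; `decode_tile` is a hypothetical name, not part of the record:

import numpy as np

def decode_tile(data: bytes) -> np.ndarray:
    # One tile is 13 rows x 26 columns of packed bytes = 338 bytes.
    assert len(data) == 13 * 26
    packed = np.frombuffer(data, dtype=np.uint8).reshape(13, 26)
    upper = packed & 0x0F   # right nybble -> upper pixel of each vertical pair
    lower = packed >> 4     # left nybble  -> lower pixel of each vertical pair
    tile = np.empty((26, 26), dtype=np.uint8)
    tile[0::2] = upper      # even tile rows come from the upper pixels
    tile[1::2] = lower      # odd tile rows come from the lower pixels
    return tile << 4        # scale 0x0-0xF back to the 0x00-0xF0 gray palette

Round-tripping a tile through the script's packing loop and this decoder should reproduce the red-channel values of the source image.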
JRC1995/SocialMediaNER | [
"236b22ded48f64516ebf0577c3b9d9d907db84e0"
] | [
"generate_eval_file.py"
] | [
"import numpy as np\nimport random\nfrom dataLoader.batch import batcher\nfrom transformers import BertTokenizerFast, ElectraTokenizerFast\nfrom configs.WNUT_configs import *\nfrom utils.ml_utils import *\nfrom utils.data_utils import *\nfrom utils.metric_utils import *\nimport argparse\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport os\nimport torch as T\nimport torch.nn as nn\nfrom models.BigTransformerTagger import BigTransformerTagger\nfrom models.CSETagger import CSETagger\nfrom models.layers.BigTransformers.BERT import BertModel\nfrom models.layers.BigTransformers.ELECTRA import ElectraModel\nfrom models.cse_generator import CSEGenerator\nimport json\nimport sys\nimport re\n\n\"\"\"\nFUTURE STUFF TO KEEP IN MIND:\n\"\"\"\n\"\"\"\nTRY SAVE BY LOSS IN THE FUTURE\n\"\"\"\n\"\"\"\nIN FUTURE CHECK IF KEEPING TRUE CASES HARMS OR HELPS BERT\n\"\"\"\n\"\"\"\nCHECK WORD 2 VEC OOV STUFF\n\"\"\"\n\"\"\"\nCHECK CLASS WEIGHING\n\"\"\"\n\"\"\"\nCHECK FOR QA CHECK WITHOUT NEGATIVE EXAMPLES\n\"\"\"\n\"\"\"\nCHECK FOR QA IN FULL MODE\n\"\"\"\n\"\"\"\nIMPORT MODEL HERE\n\"\"\"\n\"\"\"\nFIX LSTM AND TRY ORDERED MEMORY AND GCDT AND STUFFS\n\"\"\"\n\n\ndevice = T.device('cuda' if T.cuda.is_available() else 'cpu')\n\nparser = argparse.ArgumentParser(description='Model Name and stuff')\nparser.add_argument('--model', type=str, default=\"ELECTRA_extra_BiLSTM_CRF\",\n choices=[\"BERT\",\n \"BERT_CRF\",\n \"BERT_BiLSTM_CRF\",\n \"BERT_w2v_BiLSTM_CRF\",\n \"BERT_extra_BiLSTM_CRF\",\n \"ELECTRA\",\n \"ELECTRA_CRF\",\n \"ELECTRA_fine_tune_CRF\",\n \"ELECTRA_BiLSTM_CRF\",\n \"ELECTRA_w2v_BiLSTM_CRF\",\n \"ELECTRA_extra_BiLSTM_CRF\",\n \"ELECTRA_extra_CRF\",\n \"ELECTRA_extra\",\n \"ELECTRA_w2v_extra_BiLSTM_CRF\",\n \"ELECTRA_extra_BiLSTM_DSC\",\n \"CSE\",\n \"CSE_CRF\",\n \"CSE_BiLSTM_CRF\",\n \"CSE_w2v_BiLSTM_CRF\",\n \"CSE_w2v_extra_BiLSTM_CRF\",\n \"CSE_extra_BiLSTM_CRF\"])\n\nparser.add_argument('--dataset', type=str, default=\"WNUT_2017\")\nparser.add_argument('--display_step', type=int, default=30)\nparser.add_argument('--lr', type=float, default=-1)\nparser.add_argument('--fine_tune_lr', type=float, default=-1)\nparser.add_argument('--times', type=int, default=1)\nparser.add_argument('--mixed_case_training', type=str, default=\"no\",\n choices=[\"yes\", \"no\"])\n\nflags = parser.parse_args()\nSEED_base_value = 101\n\n\"\"\"\nCREATE MAPPINGS HERE\n\"\"\"\n\nif re.match(\"^BERT|^ELECTRA\", flags.model):\n model_dict = {flags.model: BigTransformerTagger}\nelif re.match(\"^CSE\", flags.model):\n model_dict = {flags.model: CSETagger}\nelse:\n raise ValueError(\"Invalid model\")\n\n\nconfig_dict = {flags.model: eval(\"{0}_config\".format(flags.model))}\n\n\"\"\"\nmodel_dict = {'BERT': BigTransformerTagger,\n 'ELECTRA': BigTransformerTagger,\n 'ELECTRA_CRF': BigTransformerTagger,\n \"ELECTRA_BiLSTM_CRF\": BigTransformerTagger,\n 'ELECTRA_w2v_BiLSTM_CRF': BigTransformerTagger,\n \"ELECTRA_w2v_extra_BiLSTM_CRF\": BigTransformerTagger,\n \"ELECTRA_extra_BiLSTM_CRF\": BigTransformerTagger,\n \"ELECTRA_extra\": BigTransformerTagger,\n \"ELECTRA_extra_CRF\": BigTransformerTagger}\n\nconfig_dict = {'BERT': BERT_config,\n 'ELECTRA': ELECTRA_config,\n 'ELECTRA_CRF': ELECTRA_CRF_config,\n \"ELECTRA_BiLSTM_CRF\": ELECTRA_BiLSTM_CRF_config,\n 'ELECTRA_w2v_BiLSTM_CRF': ELECTRA_w2v_BiLSTM_CRF_config,\n 'ELECTRA_w2v_extra_BiLSTM_CRF': ELECTRA_w2v_extra_BiLSTM_CRF_config,\n \"ELECTRA_extra_BiLSTM_CRF\": ELECTRA_extra_BiLSTM_CRF_config,\n \"ELECTRA_extra\": ELECTRA_extra_config,\n \"ELECTRA_extra_CRF\": 
ELECTRA_extra_CRF_config}\n\"\"\"\n\nconfig = config_dict[flags.model]\nconfig = config()\n\nif flags.lr >= 0:\n config.lr = flags.lr\n\nif flags.fine_tune_lr >= 0:\n config.fine_tune_lr = flags.fine_tune_lr\n\ndisplay_step = flags.display_step\n\nprint('Dataset: {}'.format(flags.dataset))\nprint(\"Model Name: {}\".format(flags.model))\nprint(\"Total Runs: {}\".format(flags.times))\nprint(\"Learning Rate: {}\".format(config.lr))\nprint(\"Fine-Tune Learning Rate: {}\".format(config.fine_tune_lr))\nprint(\"Mixed-Case Training: {}\".format(flags.mixed_case_training))\nprint(\"Display Step: {}\".format(flags.display_step))\nprint(\"SEED base value: {}\".format(SEED_base_value))\n\n\ncommon_data_path = \"processed_data/{}/vocab_and_embd.pkl\".format(flags.dataset)\nif flags.mixed_case_training.lower() == \"no\":\n train_data_path = \"processed_data/{}/train_data.json\".format(flags.dataset)\nelse:\n train_data_path = \"processed_data/{}/train_mixed_data.json\".format(flags.dataset)\ndev_data_path = \"processed_data/{}/dev_data.json\".format(flags.dataset)\ntest_data_path = \"processed_data/{}/test_data.json\".format(flags.dataset)\n\ncheckpoint_directory = \"saved_params/{}/\".format(flags.dataset)\nPath(checkpoint_directory).mkdir(parents=True, exist_ok=True)\n\nPath(\"output/\").mkdir(parents=True, exist_ok=True)\n\nlog_directory = os.path.join(\"logs\", \"{}\".format(flags.dataset))\nPath(log_directory).mkdir(parents=True, exist_ok=True)\n\nkeys = ['labels2idx', 'segment_labels2idx',\n 'w2v_vocab2idx', 'ft_vocab2idx', 'ipa2idx', 'pos2idx',\n 'w2v_embeddings', 'ft_embeddings']\n\nlabels2idx, segment_labels2idx,\\\n w2v_vocab2idx, ft_vocab2idx, ipa2idx, pos2idx, \\\n w2v_embeddings, ft_embeddings = load_data(common_data_path, 'rb', 'pickle', keys=keys)\n\n\nidx2labels = {v: k for k, v in labels2idx.items()}\n\n\"\"\"\nDETERMINES WHAT TO LOAD AND IN WHICH ORDER. NEEDS TO MAKE CHANGES IF YOU WANT TO LOAD SOMETHING ELSE\n\"\"\"\nkeys = [\"sequence\",\n \"w2v_feats\", \"fasttext_feats\",\n \"pos_tags\",\n \"ipa_feats\", \"phono_feats\",\n \"labels\", \"segment_labels\"]\n\n\"\"\"\nsequence = variable length natural language sequences\nw2v_feats = variable length sequences in int format where int id correspond to a word2vec vector (mapped to a word in w2v_vocab2idx)\nfasttext_feats = same as above but for fasttext\npos_tags = same as above but int id corresponds to the pos tag of the corresponding word. the id is associated to pos2idx (mapping between id and pos tags). Need to create random embeddings for pos tags.\nipa_feats = character level features will be padded and batched to batch_size x sequence_len x word_len. int format where id correspond to a specific ipa alphabet in ipa2idx mapping. Need to create a randomly initialized embedding.\nphono_feats = same as above but each character is represented as a float vector of 22 dimensions instead (can be directly treated as char-level embeddings)\nlabels = variable length sequence labels for the corresponding sequences. int format. id correspond to a particular label (mapping in labels2idx)\nsegment_label = we can ignore it for now. 
Can be later used for multi-tasking for entity-segmentation task (where we do not predict the type of the entity just the boundaries)\n\"\"\"\n\n\"\"\"\nFor more about load_data see: utils/data_utils.py\n\"\"\"\ntrain_sample_tuples = load_data(train_data_path, 'r', 'json', keys=keys)\nval_sample_tuples = load_data(dev_data_path, 'r', 'json', keys=keys)\ntest_sample_tuples = load_data(test_data_path, 'r', 'json', keys=keys)\n\nMAX_CHAR_LEN = len(train_sample_tuples[4][0][0])\n\nIPA_PAD = [0]*MAX_CHAR_LEN\n\n\nPHONO_PAD = [0]*config.phono_feats_dim\nPHONO_PAD = [PHONO_PAD]*MAX_CHAR_LEN\n\nif \"bert\" in flags.model.lower() or \"electra\" in flags.model.lower():\n if \"bert\" in flags.model.lower():\n BigModel = BertModel.from_pretrained(config.embedding_path,\n output_hidden_states=True,\n output_attentions=False)\n\n tokenizer = BertTokenizerFast.from_pretrained(config.embedding_path,\n output_hidden_states=True,\n output_attentions=False)\n elif \"electra\" in flags.model.lower():\n\n BigModel = ElectraModel.from_pretrained(config.embedding_path,\n output_hidden_states=True,\n output_attentions=False)\n\n tokenizer = ElectraTokenizerFast.from_pretrained(config.embedding_path,\n output_hidden_states=True,\n output_attentions=False)\n\n pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],\n pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx[\"O\"], segment_labels2idx[\"O\"]]\n\nelse:\n cse_gen = CSEGenerator(config.use_forward, config.use_backward)\n tokenizer = None\n \"\"\"\n Probably need to do nothing for CSE here\n text sequences will not be padded (can be padded later after embedding)\n will need to change things if using precomputed embeddings\n \"\"\"\n pad_types = [None, w2v_vocab2idx['<pad>'], ft_vocab2idx['<pad>'],\n pos2idx['G'], IPA_PAD, PHONO_PAD, labels2idx[\"O\"], segment_labels2idx[\"O\"]]\n\n\ndef run(time, display_params=False):\n\n global model_dict\n global flags\n global config\n global device\n global checkpoint_directory, log_directory\n global BigModel\n global w2v_embeddings, ft_embeddings\n global ft_vocab2idx, w2v_vocab2idx, pos2idx, ipa2idx, labels2idx\n\n mixed_string = \"\" if flags.mixed_case_training.lower() == \"no\" else \"mixed_case_\"\n\n checkpoint_path = os.path.join(\n checkpoint_directory, \"{}_{}run{}.pt\".format(flags.model, mixed_string, time))\n\n log_path = os.path.join(log_directory,\n \"{}_{}run{}.json\".format(flags.model, mixed_string, time))\n\n # print(checkpoint_path)\n\n # print(\"Model: {}\".format(config.model_name))\n\n NamedEntitiyRecognizer = model_dict[flags.model]\n\n \"\"\"\n May need to make changes here and may be some conditional statements\n \"\"\"\n\n if 'bert' in flags.model.lower() or 'electra' in flags.model.lower():\n\n if config.use_w2v:\n classic_embeddings = w2v_embeddings\n word_pad_id = w2v_vocab2idx['<pad>']\n elif config.use_fasttext:\n classic_embeddings = ft_embeddings\n word_pad_id = ft_vocab2idx['<pad>']\n else:\n classic_embeddings = None\n word_pad_id = None\n\n if config.use_pos_tags:\n pos_vocab_size = len(pos2idx)\n else:\n pos_vocab_size = None\n\n if config.use_char_feats:\n ipa_vocab_size = len(ipa2idx)\n else:\n ipa_vocab_size = None\n\n model = NamedEntitiyRecognizer(BigTransformer=BigModel,\n classes_num=len(labels2idx),\n negative_index=labels2idx['O'],\n config=config,\n device=device,\n classic_embeddings=classic_embeddings,\n word_pad_id=word_pad_id,\n pos_vocab_size=pos_vocab_size,\n ipa_vocab_size=ipa_vocab_size)\n\n else:\n \"\"\"\n Put CSE code here\n\n \"\"\"\n\n if config.use_w2v:\n 
classic_embeddings = w2v_embeddings\n word_pad_id = w2v_vocab2idx['<pad>']\n elif config.use_fasttext:\n classic_embeddings = ft_embeddings\n word_pad_id = ft_vocab2idx['<pad>']\n else:\n classic_embeddings = None\n word_pad_id = None\n\n if config.use_pos_tags:\n pos_vocab_size = len(pos2idx)\n else:\n pos_vocab_size = None\n\n if config.use_char_feats:\n ipa_vocab_size = len(ipa2idx)\n else:\n ipa_vocab_size = None\n\n model = NamedEntitiyRecognizer(cse_gen,\n classes_num=len(labels2idx),\n config=config,\n device=device,\n classic_embeddings=classic_embeddings,\n word_pad_id=word_pad_id,\n ipa_vocab_size=ipa_vocab_size,\n pos_vocab_size=pos_vocab_size)\n\n model = model.to(device)\n\n parameters = [p for p in model.parameters() if p.requires_grad]\n parameter_count = param_count(parameters)\n\n print(\"\\n\\nParameter Count: {}\\n\\n\".format(parameter_count))\n if display_params:\n param_display_fn(model)\n\n print(\"RUN: {}\\n\\n\".format(time))\n\n run_epochs(model, config, checkpoint_path, log_path)\n\n\ndef run_epochs(model, config, checkpoint_path, log_path):\n \"\"\"\n\n raise ValueError(\n \"Have you remembered to save the whole epoch log? (both dump output and in a dict)\")\n \"\"\"\n\n global train_sample_tuples, val_sample_tuples, test_sample_tuples\n\n train_actual_iters = count_actual_iterations(train_sample_tuples[0], config)\n val_actual_iters = count_actual_iterations(val_sample_tuples[0], config)\n test_actual_iters = count_actual_iterations(test_sample_tuples[0], config)\n\n train_effective_iters = count_effective_iterations(train_sample_tuples[0], config)\n val_effective_iters = count_effective_iterations(val_sample_tuples[0], config)\n test_effective_iters = count_effective_iterations(test_sample_tuples[0], config)\n\n # print(train_iters)\n\n optimizer = load_LRangerMod(model,\n config=config) # misleading just running AdamW now\n\n print('Loading pre-trained weights for the model...')\n\n checkpoint = T.load(checkpoint_path)\n model.load_state_dict(checkpoint['model_state_dict'])\n print('\\nRESTORATION COMPLETE\\n')\n\n optimizer.zero_grad()\n\n # with tqdm(total=config.epochs-past_epoch, desc='Epoch', position=0) as pbar:\n\n print(\"TESTING\\n\")\n\n test_loss, test_F1 = run_batches(test_sample_tuples,\n epoch=0,\n model=model,\n optimizer=optimizer,\n config=config,\n generator_len=test_actual_iters,\n train=False,\n desc='Test Batch')\n\n # print(test_F1)\n\n\ndef run_batches(sample_tuples, epoch,\n model, optimizer, config,\n generator_len,\n train=True, scheduler=None,\n desc=None):\n\n global display_step\n global pad_types\n global tokenizer\n global idx2labels\n global flags\n\n accu_step = config.total_batch_size//config.train_batch_size\n\n if desc is None:\n desc = 'Batch'\n\n losses = []\n F1s = []\n\n total_tp = 0\n total_pred_len = 0\n total_gold_len = 0\n\n # copy_tuples = copy.deepcopy(sample_tuples)\n\n f = open(\"output/out_{}.txt\".format(flags.model), \"w\")\n f.write('')\n f.close()\n\n with tqdm(total=generator_len, desc=desc, position=0) as pbar:\n\n i = 0\n\n for batch, batch_masks in batcher(sample_tuples,\n pad_types,\n config.train_batch_size,\n sort_by_idx=1):\n\n # pbar = tqdm(total=generator_len, desc='Batch', position=0)\n\n batch_texts = batch[0]\n batch_w2v_idx = batch[1]\n batch_ft_idx = batch[2]\n batch_pos_idx = batch[3]\n batch_ipa_idx = batch[4]\n batch_phono = batch[5]\n batch_labels = batch[6]\n batch_segment_labels = batch[7]\n\n batch_mask = batch_masks[1]\n\n \"\"\"\n IMPLEMENT INSIDE utils/ml_utils.py\n \"\"\"\n\n 
predictions, loss = predict_NER(model=model,\n tokenizer=tokenizer,\n batch_texts=batch_texts,\n batch_w2v_idx=batch_w2v_idx,\n batch_ft_idx=batch_ft_idx,\n batch_pos_idx=batch_pos_idx,\n batch_ipa_idx=batch_ipa_idx,\n batch_phono=batch_phono,\n batch_labels=batch_labels,\n batch_segment_labels=batch_segment_labels,\n batch_mask=batch_mask,\n device=device,\n config=config,\n train=train)\n losses.append(loss.item())\n\n if train:\n\n loss = loss/accu_step\n loss.backward()\n\n if (i+1) % accu_step == 0: # Update accumulated gradients\n\n T.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)\n optimizer.step()\n optimizer.zero_grad()\n\n tp, pred_len, gold_len = eval_stats(predictions,\n batch_labels,\n batch_mask,\n idx2labels)\n\n prec, rec, F1 = compute_F1(tp, pred_len, gold_len)\n\n F1s.append(F1)\n\n if i % display_step == 0:\n\n pbar.write(\"Model: {}, Epoch: {:3d}, Iter: {:5d}, \".format(config.model_name, epoch, i) +\n \"Loss: {:.3f}, F1: {:.3f}\".format(loss, F1))\n\n else:\n\n f = open(\"output/out_{}.txt\".format(flags.model), \"a\")\n for prediction_sample, gold_sample, mask in zip(predictions, batch_labels, batch_mask):\n true_seq_len = sum(mask)\n prediction_sample = prediction_sample[0:true_seq_len]\n gold_sample = gold_sample[0:true_seq_len]\n for pred, gold in zip(prediction_sample, gold_sample):\n f.write(\"test NNP \"+str(idx2labels[gold])+\" \"+str(idx2labels[pred])+\"\\n\")\n f.close()\n tp, pred_len, gold_len = eval_stats(predictions,\n batch_labels,\n batch_mask,\n idx2labels)\n\n prec, rec, F1 = compute_F1(tp, pred_len, gold_len)\n\n total_tp += tp\n total_pred_len += pred_len\n total_gold_len += gold_len\n\n if i % display_step == 0:\n\n pbar.write(\"Model: {}, Epoch: {:3d}, Iter: {:5d}, \".format(config.model_name, epoch, i) +\n \"Loss: {:.3f}\".format(loss))\n\n i += 1\n pbar.update(1)\n\n # print(\"generator_len\", generator_len)\n # print(\"i\", i)\n\n print(\"\\n\\n\")\n\n if train:\n F1 = np.mean(F1s)\n else:\n prec, rec, F1 = compute_F1(total_tp, total_pred_len, total_gold_len)\n\n # del copy_tuples\n\n return np.mean(losses), F1\n\n\nif __name__ == '__main__':\n time = 0\n while time < flags.times:\n\n if time == 0:\n \"\"\"\n time_str = input(\"\\nStarting time (0,1,2.....times): \")\n try:\n time = int(time_str)\n except:\n time = 0\n \"\"\"\n time = 0\n\n SEED = SEED_base_value+time\n T.manual_seed(SEED)\n random.seed(SEED)\n T.backends.cudnn.deterministic = True\n T.backends.cudnn.benchmark = False\n np.random.seed(SEED)\n\n run(time, display_params=True)\n time += 1\n"
] | [
[
"torch.load",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.is_available",
"numpy.mean"
]
] |
oliviermirat/Scientizen | [
"e06515acbdc2cc2dc22445489dec2df4af454920"
] | [
"scripts/oldScripts2019/3_analyzeDataKnee_Participant1.py"
] | [
"# This scripts assumes that the dataframe has been created and saved in data.txt\n\nimport pickle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom dataFrameUtilities import addInsultIntensityColumns, getInsultAboveThreshold, getPainAboveThreshold, selectColumns,selectTime\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Getting data\n\ninput = open(\"../data/preprocessed/preprocessedDataParticipant1.txt\", \"rb\")\ndata = pickle.load(input)\ninput.close()\n\ntimeSelected = selectTime(data, \"2016-09-01\", \"2019-10-20\")\n\n\n# Removing \"steps\" caused by scooter riding\n\ntimeSelected[\"steps\"] = timeSelected[\"steps\"] - 37 * timeSelected[\"scooterRiding\"]\ntimeSelected[\"steps\"][timeSelected[\"steps\"] < 0] = 0\n\n\n# Getting knee pain information\n\nkneePain = selectColumns(timeSelected, [\"kneePain\"])\n\nthres = kneePain.copy()\nthres[:] = 3.3\n\n\n# Calculating knee stress over time\n\nenv = addInsultIntensityColumns(timeSelected, [\"steps\", \"kneePain\"], 21, 30)\nenvRollingMean = selectColumns(env, [\"stepsInsultIntensity\"])\nenvMaxInsultDiff = selectColumns(env, [\"stepsMaxInsultDiff\"])\n\nkneePainRollingMean = selectColumns(env, [\"kneePainInsultIntensity\"])\nkneePainRollingMean = kneePainRollingMean.replace(0, 0.4)\nscaler = MinMaxScaler()\nkneePainRollingMeanArray = scaler.fit_transform(kneePainRollingMean)\nfor i in range(0, len(kneePainRollingMean)):\n kneePainRollingMean[\"kneePainInsultIntensity\"][i] = kneePainRollingMeanArray[i]\nkneePainRollingMean = kneePainRollingMean.replace(0.0, 0.4)\n\nthres2 = kneePain.copy()\nthres2[:] = 1.1\nfor i in range(0, 300):\n thres2[\"kneePain\"][i] = 1.2\nfor i in range(810, len(thres2)):\n thres2[\"kneePain\"][i] = 1.8\n\nenvBrut = selectColumns(env, [\"steps\"])\n\nbetterMaxInsult = envMaxInsultDiff.copy()\nscaler = MinMaxScaler()\nbetterMaxInsultArray = scaler.fit_transform(betterMaxInsult)\nfor i in range(0, len(betterMaxInsult)):\n betterMaxInsult[\"stepsMaxInsultDiff\"][i] = betterMaxInsultArray[i] + envBrut[\"steps\"][i] + kneePainRollingMean[\"kneePainInsultIntensity\"][i]\n\n\n# Finding time points where knee pain and knee stress are above a certain threshold\n\npainAboveThresh = getPainAboveThreshold(kneePain, \"kneePain\", 3.3)\npainAboveThresh = selectColumns(painAboveThresh, [\"kneePainThreshed\"])\n\nstepsMaxInsultDiffThresh = getInsultAboveThreshold(betterMaxInsult, \"stepsMaxInsultDiff\", thres2)\nstepsMaxInsultDiffThresh = selectColumns(stepsMaxInsultDiffThresh, [\"stepsMaxInsultDiffThreshed\"])\n\n\n# Plotting results\n\nfig, axes = plt.subplots(nrows=3, ncols=1)\n\nselectColumns(kneePain, [\"kneePain\"]).rename(columns={\"kneePain\": \"knee pain\"}).plot(ax=axes[0])\nthres.rename(columns={\"kneePain\": \"pain threshold\"}).plot(ax=axes[0])\n\nselectColumns(betterMaxInsult, [\"stepsMaxInsultDiff\"]).rename(columns={\"stepsMaxInsultDiff\": \"knee stress\"}).plot(ax=axes[1])\nthres2.rename(columns={\"kneePain\": \"knee stress threshold\"}).plot(ax=axes[1])\n\npainAboveThresh.rename(columns={\"kneePainThreshed\": \"knee pain is above threshold\"}).plot(ax=axes[2])\nstepsMaxInsultDiffThresh = 0.95 * stepsMaxInsultDiffThresh\nstepsMaxInsultDiffThresh.rename(columns={\"stepsMaxInsultDiffThreshed\": \"knee stress is above threshold\"}).plot(ax=axes[2])\n\nleg = plt.legend(loc=\"best\")\nleg.set_draggable(True)\nplt.show()\n"
] | [
[
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots"
]
] |
juanCastrillo/gluon2pytorch | [
"dc73055f0c74dbc45a70f21057fa161123826d86"
] | [
"tests/convert_softmax.py"
] | [
"import torch\nimport mxnet as mx\nimport numpy as np\nfrom gluon2pytorch import gluon2pytorch\n\n\nclass SoftmaxTest(mx.gluon.nn.HybridSequential):\n def __init__(self):\n super(SoftmaxTest, self).__init__()\n from mxnet.gluon import nn\n with self.name_scope():\n self.conv1 = nn.Conv2D(3, 32)\n\n def hybrid_forward(self, F, x):\n x = F.softmax(self.conv1(x))\n return x\n\n\ndef check_error(gluon_output, pytorch_output, epsilon=1e-5):\n pytorch_output = pytorch_output.data.numpy()\n gluon_output = gluon_output.asnumpy()\n\n error = np.max(pytorch_output - gluon_output)\n print('Error:', error)\n\n assert error < epsilon\n return error\n\n\nif __name__ == '__main__':\n print('Test softmax:')\n\n net = SoftmaxTest()\n\n # Make sure it's hybrid and initialized\n net.hybridize()\n net.collect_params().initialize()\n\n pytorch_model = gluon2pytorch(net, [(1, 3, 224, 224)], dst_dir=None, pytorch_module_name='SoftmaxTest')\n\n input_np = np.random.uniform(-1, 1, (1, 3, 224, 224))\n\n gluon_output = net(mx.nd.array(input_np))\n pytorch_output = pytorch_model(torch.FloatTensor(input_np))\n check_error(gluon_output, pytorch_output)\n"
] | [
[
"numpy.random.uniform",
"torch.FloatTensor",
"numpy.max"
]
] |
mkennard-aquaveo/modflow6 | [
"73a0553636362c90f7d134318e1f5d902dbdc4d3"
] | [
"autotest/test_gwf_lakobs01.py"
] | [
"# Test for checking lak observation input. The following observation types:\n# 'lak', 'wetted-area', and 'conductance,' require that ID2 be provided when\n# ID is an integer corresponding to a lake number and not BOUNDNAME.\n# See table in LAK Package section of mf6io.pdf for an explanation of ID,\n# ID2, and Observation Type.\n\n\nimport os\nimport pytest\nimport sys\nimport numpy as np\n\ntry:\n import flopy\nexcept:\n msg = \"Error. FloPy package is not available.\\n\"\n msg += \"Try installing using the following command:\\n\"\n msg += \" pip install flopy\"\n raise Exception(msg)\n\nfrom framework import testing_framework\nfrom simulation import Simulation\nimport targets\n\nmf6_exe = os.path.abspath(targets.target_dict[\"mf6\"])\n\nex = \"gwf_lakobs_01a\"\nexdir = os.path.join(\"temp\", ex)\n\n\n# store global gwf for subsequent plotting\ngwf = None\n\n\ndef get_idomain(nlay, nrow, ncol, lakend):\n idomain = np.ones((nlay, nrow, ncol), dtype=int)\n for k, j in enumerate(lakend):\n idomain[k, 0, 0:j] = 0\n\n return idomain\n\n\ndef build_model():\n lx = 300.0\n lz = 45.0\n nlay = 45\n nrow = 1\n ncol = 30\n nper = 1\n delc = 1.0\n delr = lx / ncol\n delz = lz / nlay\n top = 5.0\n botm = [top - (k + 1) * delz for k in range(nlay)]\n\n perlen = [20.0]\n nstp = [1]\n tsmult = [1.0]\n\n Kh = 1.0\n Kv = 1.0\n\n tdis_rc = []\n for i in range(nper):\n tdis_rc.append((perlen[i], nstp[i], tsmult[i]))\n\n nouter, ninner = 700, 300\n hclose, rclose, relax = 1e-8, 1e-6, 0.97\n\n name = ex\n\n # build MODFLOW 6 files\n ws = exdir\n sim = flopy.mf6.MFSimulation(\n sim_name=name, version=\"mf6\", exe_name=mf6_exe, sim_ws=ws\n )\n\n # create tdis package\n tdis = flopy.mf6.ModflowTdis(\n sim, time_units=\"DAYS\", nper=nper, perioddata=tdis_rc\n )\n\n # create gwf model\n gwfname = name\n global gwf\n gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, newtonoptions=\"NEWTON\")\n\n imsgwf = flopy.mf6.ModflowIms(\n sim,\n print_option=\"SUMMARY\",\n outer_dvclose=hclose,\n outer_maximum=nouter,\n under_relaxation=\"NONE\",\n inner_maximum=ninner,\n inner_dvclose=hclose,\n rcloserecord=rclose,\n linear_acceleration=\"BICGSTAB\",\n scaling_method=\"NONE\",\n reordering_method=\"NONE\",\n relaxation_factor=relax,\n filename=\"{}.ims\".format(gwfname),\n )\n\n # number of columns to be a lake for layer 1, 2, , ... 
len(lakend)\n lakend = [10, 9, 8, 7, 6]\n idomain = get_idomain(nlay, nrow, ncol, lakend)\n dis = flopy.mf6.ModflowGwfdis(\n gwf,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n idomain=idomain,\n )\n\n # initial conditions\n strt = np.zeros((nlay, nrow, ncol), dtype=float)\n strt += top\n ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)\n\n # node property flow\n npf = flopy.mf6.ModflowGwfnpf(\n gwf,\n xt3doptions=False,\n save_flows=True,\n save_specific_discharge=True,\n icelltype=1,\n k=Kh,\n k33=Kv,\n )\n\n sy = 0.3\n ss = np.zeros((nlay, nrow, ncol), dtype=float)\n # ss[0, :, :] = sy\n idx = np.where(idomain == 0)\n for k, i, j in zip(*idx):\n ss[k + 1, i, j] = 0.0 # sy\n sto = flopy.mf6.ModflowGwfsto(gwf, sy=sy, ss=ss, iconvert=1)\n\n irch = np.zeros((nrow, ncol), dtype=int)\n lake_vconnect = []\n idx = np.where(idomain == 0)\n for k, i, j in zip(*idx):\n if idomain[k + 1, i, j] == 1:\n lake_vconnect.append((k + 1, i, j))\n irch[i, j] = k + 1\n nlakeconn = len(lake_vconnect)\n\n # pak_data = [lakeno, strt, nlakeconn]\n initial_stage = 0.1\n pak_data = [(0, initial_stage, nlakeconn)]\n\n bedleak = 100.0 # \"None\"\n belev = 0.0\n con_data = [\n (0, i, idx, \"VERTICAL\", bedleak, belev, -99, -99, -99)\n for i, idx in enumerate(lake_vconnect)\n ]\n\n # period data\n p_data = [\n (0, \"STATUS\", \"ACTIVE\"),\n ]\n\n # note: for specifying lake number, use fortran indexing!\n fname = \"{}.lak.obs.csv\".format(gwfname)\n lak_obs = {\n fname: [\n (\"lakestage\", \"stage\", 1),\n (\"lakevolume\", \"volume\", 1),\n (\"lak1\", \"lak\", 1),\n ],\n \"digits\": 10,\n }\n\n lak = flopy.mf6.modflow.ModflowGwflak(\n gwf,\n surfdep=0.0,\n save_flows=True,\n print_input=True,\n print_flows=True,\n print_stage=True,\n stage_filerecord=\"{}.lak.bin\".format(gwfname),\n budget_filerecord=\"{}.lak.bud\".format(gwfname),\n nlakes=len(pak_data),\n ntables=0,\n packagedata=pak_data,\n pname=\"LAK-1\",\n connectiondata=con_data,\n perioddata=p_data,\n observations=lak_obs,\n )\n\n chdspd = [((0, 0, ncol - 1), 5.0)]\n chd = flopy.mf6.modflow.ModflowGwfchd(gwf, stress_period_data=chdspd)\n\n rech = 0.0001 * np.ones((nrow, ncol), dtype=float)\n # rech[:, 0:20] = 0.\n rch = flopy.mf6.modflow.ModflowGwfrcha(\n gwf, print_flows=True, save_flows=True, recharge=rech, irch=irch\n )\n\n # output control\n oc = flopy.mf6.ModflowGwfoc(\n gwf,\n budget_filerecord=\"{}.cbc\".format(gwfname),\n head_filerecord=\"{}.hds\".format(gwfname),\n headprintrecord=[(\"COLUMNS\", 10, \"WIDTH\", 15, \"DIGITS\", 6, \"GENERAL\")],\n saverecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n printrecord=[(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")],\n )\n\n return sim\n\n\n# - No need to change any code below\ndef test_mf6model():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n sim = build_model()\n\n # write model input\n sim.write_simulation()\n\n # attempt to run model, should fail\n sim.run_simulation()\n\n # ensure that the error msg is contained in the mfsim.lst file\n f = open(os.path.join(exdir, \"mfsim.lst\"), \"r\")\n lines = f.readlines()\n error_count = 0\n expected_msg = False\n for line in lines:\n if \"ID2 (iconn) is missing\" in line:\n expected_msg = True\n error_count += 1\n\n assert error_count == 1, (\n \"error count = \" + str(error_count) + \"but should equal 1\"\n )\n\n # fix the error and attempt to rerun model\n orig_fl = os.path.join(exdir, ex + \".lak.obs\")\n new_fl = os.path.join(exdir, ex + \".lak.obs.new\")\n sr = open(orig_fl, 
\"r\")\n sw = open(new_fl, \"w\")\n\n lines = sr.readlines()\n error_free_line = \" lak1 lak 1 1\\n\"\n for line in lines:\n if \" lak \" in line:\n sw.write(error_free_line)\n else:\n sw.write(line)\n\n sr.close()\n sw.close()\n\n # delete original and replace with corrected lab obs input\n os.remove(orig_fl)\n os.rename(new_fl, orig_fl)\n\n # rerun the model, should be no errors\n sim.run_simulation()\n\n return\n\n\ndef main():\n # initialize testing framework\n test = testing_framework()\n\n # build the models\n sim = build_model()\n\n # write model input\n sim.write_simulation()\n\n # attempt to run model, should fail\n sim.run_simulation()\n\n # ensure that the error msg is contained in the mfsim.lst file\n f = open(os.path.join(exdir, \"mfsim.lst\"), \"r\")\n lines = f.readlines()\n error_count = 0\n expected_msg = False\n for line in lines:\n if \"ID2 (iconn) is missing\" in line:\n expected_msg = True\n error_count += 1\n\n assert error_count == 1, (\n \"error count = \" + str(error_count) + \", but should equal 1\"\n )\n\n # fix the error and attempt to rerun model\n orig_fl = os.path.join(exdir, ex + \".lak.obs\")\n new_fl = os.path.join(exdir, ex + \".lak.obs.new\")\n sr = open(orig_fl, \"r\")\n sw = open(new_fl, \"w\")\n\n lines = sr.readlines()\n error_free_line = \" lak1 lak 1 1\\n\"\n for line in lines:\n if \" lak \" in line:\n sw.write(error_free_line)\n else:\n sw.write(line)\n\n sr.close()\n sw.close()\n\n # delete original and replace with corrected lab obs input\n os.remove(orig_fl)\n os.rename(new_fl, orig_fl)\n\n # rerun the model, should be no errors\n sim.run_simulation()\n\n return\n\n\nif __name__ == \"__main__\":\n # print message\n print(\"standalone run of {}\".format(os.path.basename(__file__)))\n\n # run main routine\n main()\n"
] | [
[
"numpy.ones",
"numpy.where",
"numpy.zeros"
]
] |
argsim/argsim | [
"e5407acf7e47f2bf517b0c580fcdee3654d31089"
] | [
"src/explore.py"
] | [
"import tensorflow as tf\nfrom model import vAe, decode\nimport util_sp as sp\nfrom util_io import load_txt\nimport numpy as np\n\n\ndef analyze(z, use_dim=[], seed=25):\n ''' z = np.array[2, dim], mu of two sentences'''\n ''' use_dim = list of int describing which dimension should be used '''\n\n # select random path from z1 to z2\n np.random.seed(seed)\n if use_dim == []:\n rdm_path = np.arange(len(z[0]))\n else:\n rdm_path = use_dim\n np.random.shuffle(rdm_path)\n\n # walk the path and print at every step\n path = np.copy(z[0])\n for idx,dim in enumerate(rdm_path):\n path[dim] = z[1][dim]\n output = decode(sess, vae, [z[0], path, z[1]]).tolist()\n _ = [vocab.decode_ids(output[idx]) for idx in range(3)]\n print(idx,dim, _[1])\n #print(\"{}\\n{}\\n{}\\n{}\\n\".format(idx,_[0],_[1],_[2])) #print: sentence1, path, sentence2\n\n\npath_vocab = \"../trial/data/vocab.model\"\npath_txt = \"../data/test_data.txt\"\npath_ckpt = \"../trial/ckpt/kudo18\"\npath_use_dim = \"../data/useful_dimension.npy\"\n\n# load and restore model\nvae = vAe('infer')\nsess = tf.InteractiveSession()\ntf.train.Saver().restore(sess, path_ckpt)\n\n# load vocab and text\nvocab = sp.load_spm(path_vocab)\ntext = list(load_txt(path_txt))\n\n#pick 2 random sentences to explore\nnp.random.seed(23)\nsen_idx = np.random.random_integers(0, len(text), 2)\nsentences = [text[idx] for idx in sen_idx]\nprint(\"sentence 1: {}\\nsentence 2: {}\".format(sentences[0], sentences[1]))\n\n# encode sentences with sentence piece model\ndata = sp.encode(vocab, sentences)\n\n### full high dimensional space\nz = vae.z.eval({vae.tgt: data})\nanalyze(z)\n\n### only the dimensions that turned out usefull for our task\nuse_dim = np.load(path_use_dim)\nanalyze(z, use_dim)\n"
] | [
[
"numpy.load",
"numpy.random.shuffle",
"numpy.random.seed",
"numpy.copy",
"tensorflow.InteractiveSession",
"tensorflow.train.Saver"
]
] |
countBMB/BenjiRepo | [
"79d882263baaf2a11654ca67d2e5593074d36dfa"
] | [
"venv/Lib/site-packages/caffe2/python/onnx/backend.py"
] | [
"## @package onnx\n# Module caffe2.python.onnx.backend\n\n\"\"\"Backend for running ONNX on Caffe2\n\nTo run this, you will need to have Caffe2 installed as well.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport collections\nfrom subprocess import Popen, PIPE\nimport sys\nimport zipfile\nimport itertools\n\n# When onnx is built against a version of protobuf that is older than\n# that which is vendored with caffe2, onnx will crash if caffe2's\n# vendored protobuf is loaded first. We can work around this by\n# importing onnx first, which will cause it to go out and pick up the\n# system protobuf.\nimport onnx.backend\n\nimport caffe2\nfrom caffe2.python import core, workspace, rnn_cell, gru_cell\nfrom caffe2.python.compatibility import container_abcs\nfrom caffe2.python.model_helper import ModelHelper\nfrom caffe2.proto import caffe2_pb2\nimport caffe2.python.utils\nimport numpy as np\nimport onnx\nfrom onnx import checker, GraphProto, TensorProto, AttributeProto, ModelProto\nimport onnx.numpy_helper\nimport onnx.defs\nimport onnx.optimizer\nimport onnx.shape_inference\nimport onnx.utils\nfrom onnx.backend.base import Backend, Device, DeviceType, namedtupledict\n\nfrom caffe2.python.onnx.workspace import Workspace\nfrom caffe2.python.onnx.backend_rep import Caffe2Rep\nfrom caffe2.python.onnx.backend_cpp_rep import Caffe2CppRep\n\nimport caffe2.python._import_c_extension as C\n\nimport warnings\n\ndef force_unicode(s):\n try:\n return s.decode('utf-8')\n except AttributeError:\n return s\n\ndef get_device_option(device):\n m = {DeviceType.CPU: caffe2_pb2.CPU,\n DeviceType.CUDA: workspace.GpuDeviceType}\n return core.DeviceOption(m[device.type], device.device_id)\n\n\nclass OnnxAttributes(dict):\n \"\"\"\n This is a more convenient way to work with ONNX/Caffe2 attributes\n that is not the protobuf representation.\n \"\"\"\n @staticmethod\n def from_onnx(args):\n d = OnnxAttributes()\n for arg in args:\n d[arg.name] = convertAttributeProto(arg)\n return d\n\n def caffe2(self, kmap=lambda k: k):\n for k, v in self.items():\n if kmap(k) != '':\n yield caffe2.python.utils.MakeArgument(kmap(k), v)\n\n# TODO: Move this into ONNX main library\ndef convertAttributeProto(onnx_arg):\n \"\"\"\n Convert an ONNX AttributeProto into an appropriate Python object\n for the type.\n\n NB: Tensor attribute gets returned as the straight proto.\n \"\"\"\n if onnx_arg.HasField('f'):\n return onnx_arg.f\n elif onnx_arg.HasField('i'):\n return onnx_arg.i\n elif onnx_arg.HasField('s'):\n return onnx_arg.s\n elif onnx_arg.HasField('t'):\n return onnx_arg.t # this is a proto!\n elif onnx_arg.HasField('g'):\n return Caffe2Backend._graph_to_net(onnx_arg.g, Caffe2Backend._known_opset_version)\n elif len(onnx_arg.floats):\n return list(onnx_arg.floats)\n elif len(onnx_arg.ints):\n return list(onnx_arg.ints)\n elif len(onnx_arg.strings):\n return list(onnx_arg.strings)\n elif len(onnx_arg.graphs):\n retval = []\n # TODO: this doesn't work with RNN ops\n for g in onnx_arg.graphs:\n retval.append(Caffe2Backend._graph_to_net(g, Caffe2Backend._known_opset_version))\n return retval\n else:\n raise ValueError(\"Unsupported ONNX attribute: {}\".format(onnx_arg))\n\n\n# TODO: Move this into ONNX main library\nclass OnnxNode(object):\n \"\"\"\n Reimplementation of NodeProto from ONNX, but in a form\n more convenient to work with from Python.\n\n We may temporarily edit these nodes to get them into 
Caffe2 form,\n before actually translating into the Caffe2 protobuf, since this\n is easier than decomposing everything, and putting it back together\n when we're ready.\n \"\"\"\n def __init__(self, node):\n self.name = str(node.name)\n self.op_type = str(node.op_type)\n self.attrs = OnnxAttributes.from_onnx(node.attribute)\n self.inputs = list(node.input)\n self.outputs = list(node.output)\n\n\nCaffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])\n\n\nclass Caffe2Backend(Backend):\n\n # The greatest version of the ONNX operator set which we are aware of.\n # Models whose version is larger than this will cause us to emit a warning\n # that we are attempting to translate on a \"best effort\" basis.\n #\n # If you increase this, make SURE you cross-reference all BC-breaking\n # changes from one version to the next, and any that you did not\n # implement, mark as broken in _broken_operators\n _known_opset_version = 9\n\n # This dictionary will record operators which are KNOWN to be\n # broken, so we give a good error message rather than do something\n # bogus and then fail.\n _broken_operators = {\n # 'BrokenOp': version_it_was_broken_in\n }\n\n # Operators that are different between Caffe2 and\n # ONNX but only in their name.\n # In most cases, this should be empty - as the effort of ONNX is\n # to unify the operator definitions.\n _renamed_operators = {\n 'GlobalMaxPool': 'MaxPool',\n 'GlobalAveragePool': 'AveragePool',\n 'Pad': 'PadImage',\n 'Neg': 'Negative',\n 'BatchNormalization': 'SpatialBN',\n 'InstanceNormalization': 'InstanceNorm',\n 'MatMul': 'BatchMatMul',\n 'Upsample': 'ResizeNearest',\n 'Identity': 'Copy',\n 'InstanceNormalization': 'InstanceNorm',\n 'Equal': 'EQ',\n 'Less': 'LT',\n 'Greater': 'GT',\n 'Unsqueeze': 'ExpandDims',\n 'Loop': 'ONNXWhile',\n 'Tile': 'NumpyTile',\n 'RandomNormal': 'GaussianFill',\n 'RandomUniform': 'UniformFill',\n }\n\n _global_renamed_attrs = {'kernel_shape': 'kernels'}\n _per_op_renamed_attrs = {\n 'Squeeze': {'axes': 'dims'},\n 'Unsqueeze': {'axes': 'dims'},\n 'Transpose': {'perm': 'axes'},\n 'Upsample': {'mode': '',\n 'scales': ''},\n 'ConvTranspose': {'output_padding': 'adjs'},\n 'Selu': {'gamma': 'scale'},\n 'If': {'then_branch': 'then_net',\n 'else_branch': 'else_net'},\n 'RandomUniform': {'low': 'min',\n 'high': 'max'}\n }\n\n # operators whose behavior is different beyond renaming\n # the value is an attribute of this class that is a\n # function from ToffeIR node_def to caffe2 op_def\n _special_operators = {\n 'LSTM': '_create_rnn_variant',\n 'GRU': '_create_rnn_variant',\n 'RNN': '_create_rnn_variant',\n 'Loop': '_create_loop',\n 'If': '_create_if',\n 'Upsample': '_create_upsample',\n 'RandomNormal': '_create_gaussian_fill'\n }\n\n # Dummy name generator\n _dummy_name = C.DummyName()\n\n @classmethod\n def dummy_name(cls):\n return cls._dummy_name.new_dummy_name()\n\n # NB: By default, you will use the LATEST definition of the operator,\n # so this interface MAY make BC-breaking changes. 
Specify an\n # opset_version if you don't want this to version.\n @classmethod\n def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):\n super(Caffe2Backend, cls).run_node(node, inputs, device=device,\n outputs_info=outputs_info, opset_version=opset_version)\n\n value_infos = []\n device_option = get_device_option(Device(device))\n ws = Workspace()\n with core.DeviceScope(device_option): # temporary!\n if isinstance(inputs, dict):\n for key, value in inputs.items():\n ws.FeedBlob(key, value)\n value_infos.append(onnx.helper.make_tensor_value_info(\n name=key,\n elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],\n shape=value.shape).SerializeToString())\n else:\n assert len(node.input) == len(inputs), \"{}: expected {} but got {}\".format(\n node.op_type, len(node.input), len(inputs))\n for key, value in zip(node.input, inputs):\n ws.FeedBlob(key, value)\n value_infos.append(onnx.helper.make_tensor_value_info(\n name=key,\n elem_type=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],\n shape=value.shape).SerializeToString())\n\n ops = []\n cbackend = C.Caffe2Backend(cls._dummy_name)\n ops_str = cbackend.convert_node(node.SerializeToString(), value_infos, opset_version)\n for s in ops_str[0] + ops_str[1]:\n op = caffe2_pb2.OperatorDef()\n op.ParseFromString(s)\n op.device_option.CopyFrom(device_option)\n ops.append(op)\n ws.RunOperatorsOnce(ops)\n output_values = [ws.FetchBlob(name) for name in node.output]\n return namedtupledict('Outputs', node.output)(*output_values)\n\n @classmethod\n def _create_tensor_filling_op(cls, onnx_tensor, name=None):\n \"\"\"\n Given an Onnx TensorProto, translate it into a Caffe2 operator\n which produces the given tensor filling op.\n \"\"\"\n assert name or onnx_tensor.name\n name = name or onnx_tensor.name\n\n c2_op = caffe2_pb2.OperatorDef()\n\n c2_values = c2_op.arg.add()\n c2_values.name = \"values\"\n\n def tensor2list(onnx_tensor):\n # Use the onnx.numpy_helper because the data may be raw\n return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()\n\n if onnx_tensor.data_type in [TensorProto.FLOAT]:\n c2_op.type = 'GivenTensorFill'\n c2_values.floats.extend(tensor2list(onnx_tensor))\n elif onnx_tensor.data_type in [TensorProto.DOUBLE]:\n c2_op.type = 'GivenTensorDoubleFill'\n c2_values.floats.extend(tensor2list(onnx_tensor))\n elif onnx_tensor.data_type in [TensorProto.INT64,\n TensorProto.UINT32]:\n c2_op.type = 'GivenTensorInt64Fill'\n c2_values.ints.extend(tensor2list(onnx_tensor))\n elif onnx_tensor.data_type in [TensorProto.UINT8,\n TensorProto.INT8,\n TensorProto.UINT16,\n TensorProto.INT16,\n TensorProto.INT32]:\n c2_op.type = 'GivenTensorIntFill'\n c2_values.ints.extend(tensor2list(onnx_tensor))\n elif onnx_tensor.data_type == TensorProto.BOOL:\n c2_op.type = 'GivenTensorBoolFill'\n c2_values.ints.extend(tensor2list(onnx_tensor))\n elif onnx_tensor.data_type == TensorProto.STRING:\n c2_op.type = 'GivenTensorStringFill'\n c2_values.strings.extend(onnx_tensor.string_data)\n else:\n raise RuntimeError(\n \"unrecognized tensor type {}\".format(onnx_tensor.data_type))\n\n c2_shape = c2_op.arg.add()\n c2_shape.name = \"shape\"\n c2_shape.ints.extend(onnx_tensor.dims)\n\n c2_op.output.append(name)\n\n return c2_op\n\n @classmethod\n def _rnn_reform_weights(cls, reforms, name, hidden_size, init_net, gates, reorder_indices):\n for name_from, name_to, do_concat, extra_dims in reforms:\n gate_blobs = ['%s/%s_%s' % (name, prefix, name_to) for prefix in gates]\n for i, x in 
enumerate(gate_blobs):\n dim0 = i * hidden_size, (i+1) * hidden_size\n starts, ends = zip(dim0, *extra_dims)\n init_net.Slice(name_from, x, starts=starts, ends=ends)\n if do_concat:\n reordered_gate_blobs = [gate_blobs[i] for i in reorder_indices]\n init_net.Concat(reordered_gate_blobs, ['%s/%s' % (name, name_to), cls.dummy_name()], axis=0)\n\n @classmethod\n def _make_rnn_direction(cls, input_blob, B, W, R, initial_states_and_names, sequence_lens,\n pred_mh, init_net,\n input_size, hidden_size, num_gates, direction_offset,\n Bi, Br, W_, R_,\n reform, make_cell, keep_outputs):\n name = cls.dummy_name()\n\n # input and recurrence biases are squashed together in onnx\n # but not in caffe2\n gates_hidden_size = num_gates * hidden_size\n bias_offset = 2 * direction_offset * gates_hidden_size\n weight_offset = direction_offset * gates_hidden_size\n Bi = init_net.Slice(B, name + Bi,\n starts=[bias_offset + 0 * gates_hidden_size],\n ends =[bias_offset + 1 * gates_hidden_size])\n Br = init_net.Slice(B, name + Br,\n starts=[bias_offset + 1 * gates_hidden_size],\n ends =[bias_offset + 2 * gates_hidden_size])\n W_ = init_net.Slice(W, name + W_,\n starts=[weight_offset + 0 * gates_hidden_size, 0],\n ends =[weight_offset + 1 * gates_hidden_size,-1])\n R_ = init_net.Slice(R, name + R_,\n starts=[weight_offset + 0 * gates_hidden_size, 0],\n ends =[weight_offset + 1 * gates_hidden_size,-1])\n\n initial_states_sliced = []\n for initial_state, name_suffix in initial_states_and_names:\n initial_states_sliced.append(\n pred_mh.net.Slice(initial_state, name + name_suffix,\n starts=[direction_offset + 0, 0, 0],\n ends =[direction_offset + 1,-1,-1]))\n\n if direction_offset == 1:\n if sequence_lens is not None:\n seq_lens_for_reverse = sequence_lens\n else:\n input_shape = pred_mh.net.Shape(input_blob, name + '/input_shape')\n batch_size = pred_mh.net.Slice(input_shape, name + '/batch_size_slice', starts=[1], ends=[2])\n seq_len = pred_mh.net.Slice(input_shape, name + '/seq_len_slice', starts=[0], ends=[1])\n dummy_sequence_lens = pred_mh.net.Tile([seq_len, batch_size], name + '/dummy_sequence_lens', axis=0)\n pred_mh.net.Reshape(dummy_sequence_lens, [dummy_sequence_lens, cls.dummy_name()], shape=[-1])\n seq_lens_for_reverse = pred_mh.net.Cast(dummy_sequence_lens, name + '/seq_lens_for_reverse', to=core.DataType.INT32)\n reform(Bi, Br, W_, R_, name, hidden_size, init_net)\n\n if direction_offset == 1:\n input = pred_mh.net.ReversePackedSegs(\n [input_blob, seq_lens_for_reverse], name + \"/input-reversed\")\n else:\n input = input_blob\n\n outputs = keep_outputs(list(make_cell(\n pred_mh,\n input,\n sequence_lens,\n initial_states_sliced,\n input_size,\n hidden_size,\n name,\n drop_states=False,\n forward_only=True,\n )))\n\n if direction_offset == 1:\n outputs[0] = pred_mh.net.ReversePackedSegs(\n [outputs[0], seq_lens_for_reverse], name + \"/output-reversed\")\n\n return outputs\n\n @classmethod\n def _create_rnn_variant(cls, init_model, pred_model, n, opset_version):\n assert init_model is not None, \"cannot convert RNNs without access to the full model\"\n assert pred_model is not None, \"cannot convert RNNs without access to the full model\"\n\n attrs = dict(n.attrs) # make a copy, which is safe to mutate\n hidden_size = attrs.pop('hidden_size')\n direction = force_unicode(attrs.pop('direction', 'forward'))\n\n if n.op_type == 'RNN':\n activation = force_unicode(attrs.pop('activations', ('tanh',))[0].lower())\n elif n.op_type == 'GRU':\n linear_before_reset = attrs.pop('linear_before_reset', 0)\n\n assert 
not attrs, \"unsupported RNN attributes: \" + str(attrs.keys())\n assert direction in ['forward', 'bidirectional'], \"unsupported backwards RNN/GRU/LSTM\"\n\n if n.op_type in ['RNN', 'GRU']:\n input_blob, W, R, B, sequence_lens, initial_h = n.inputs\n elif n.op_type == 'LSTM':\n input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs\n\n if sequence_lens == \"\":\n sequence_lens = None\n\n for x in itertools.chain(init_model.graph.input,\n init_model.graph.value_info,\n pred_model.graph.input,\n pred_model.graph.value_info):\n if x.name == W:\n input_size = x.type.tensor_type.shape.dim[2].dim_value\n break\n else:\n raise RuntimeError(\"best-effort shape inference for RNN/GRU/LSTM failed\")\n\n pred_mh = ModelHelper()\n init_net = core.Net(\"init-net\")\n\n init_net.Reshape(W, [W, cls.dummy_name()], shape=[1,-1,0])\n init_net.Squeeze(W, W, dims=[0])\n init_net.Reshape(R, [R, cls.dummy_name()], shape=[1,-1,0])\n init_net.Squeeze(R, R, dims=[0])\n init_net.Reshape(B, [B, cls.dummy_name()], shape=[1,-1])\n init_net.Squeeze(B, B, dims=[0])\n\n if n.op_type == 'RNN':\n def reform(*args):\n pass\n\n def make_cell(*args, **kwargs):\n return rnn_cell.BasicRNN(*args, activation=activation, **kwargs)\n\n def make_rnn(direction_offset):\n return cls._make_rnn_direction(\n input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,\n pred_mh, init_net, input_size, hidden_size, 1, direction_offset,\n \"/i2h_b\", \"/gates_t_b\", \"/i2h_w\", \"/gates_t_w\",\n reform, make_cell, lambda x: x)\n\n elif n.op_type == 'GRU':\n def reform(Bi, Br, W_, R_, name, hidden_size, init_net):\n # caffe2 has a different order from onnx. We need to rearrange\n # z r h -> r z h\n reforms = ((W_, 'i2h_w', True, [(0,-1)]),\n (R_, 'gate_t_w', False, [(0,-1)]),\n (Bi, 'i2h_b', True, []),\n (Br, 'gate_t_b', False, []))\n cls._rnn_reform_weights(reforms, name, hidden_size, init_net,\n ['update', 'reset', 'output'], [1, 0, 2])\n\n def make_cell(*args, **kwargs):\n return gru_cell.GRU(*args, linear_before_reset=linear_before_reset, **kwargs)\n\n def make_rnn(direction_offset):\n return cls._make_rnn_direction(\n input_blob, B, W, R, [(initial_h, '/initial_h')], sequence_lens,\n pred_mh, init_net, input_size, hidden_size, 3, direction_offset,\n \"_bias_i2h\", \"_bias_gates\", \"/i2h_w_pre\", \"/gates_t_w_pre\",\n reform, make_cell, lambda x: x)\n\n elif n.op_type == 'LSTM':\n def reform(Bi, Br, W_, R_, name, hidden_size, init_net):\n # caffe2 has a different order from onnx. We need to rearrange\n # i o f c -> i f o c\n reforms = ((W_, 'i2h_w', True, [(0, -1)]),\n (R_, 'gates_t_w', True, [(0, -1)]),\n (Bi, 'i2h_b' , True, []),\n (Br, 'gates_t_b', True, []))\n cls._rnn_reform_weights(reforms, name, hidden_size, init_net,\n ['input', 'output', 'forget', 'cell'], [0, 2, 1, 3])\n\n def make_cell(*args, **kwargs):\n return rnn_cell.LSTM(*args, **kwargs)\n\n def make_rnn(direction_offset):\n return cls._make_rnn_direction(\n input_blob, B, W, R, [(initial_h, '/initial_h'), (initial_c, '/initial_c')], sequence_lens,\n pred_mh, init_net, input_size, hidden_size, 4, direction_offset,\n \"/i2h_b\", \"/gates_t_b\", \"/i2h_w\", \"/gates_t_w\",\n reform, make_cell, lambda x: [x[0], x[1], x[3]])\n\n if direction == 'forward':\n outputs = make_rnn(0)\n\n # in the forward case, storage is shared between the\n # last outputs. 
We need to decouple them so that the\n # VariableLengthSequencePadding only mutates\n # n.outputs[0]\n for i in range(1, len(outputs)):\n pred_mh.net.Copy(outputs[i], n.outputs[i])\n\n if sequence_lens is not None:\n pred_mh.net.VariableLengthSequencePadding(\n [outputs[0], sequence_lens], [outputs[0]])\n pred_mh.net.ExpandDims([outputs[0]], [n.outputs[0]], dims=[1])\n elif direction == 'bidirectional':\n outputs_f = make_rnn(0)\n outputs_b = make_rnn(1)\n\n concatted_output, _ = pred_mh.net.Concat(\n [outputs_f[0], outputs_b[0]], [cls.dummy_name(), cls.dummy_name()], axis=2)\n if sequence_lens is not None:\n pred_mh.net.VariableLengthSequencePadding(\n [concatted_output, sequence_lens], [concatted_output])\n reshaped_output, _ = pred_mh.net.Reshape(concatted_output, [cls.dummy_name(), cls.dummy_name()], shape=[0,0,-1,2])\n pred_mh.net.Transpose(reshaped_output, n.outputs[0], axes=[0,2,1,3])\n for i in range(1, len(n.outputs)):\n pred_mh.net.Concat([outputs_f[i], outputs_b[i]],\n [n.outputs[i], cls.dummy_name()], axis=0)\n\n # We want to decide whether to put all of our weight-reshaping\n # operators in the init net or the predict net. We can put\n # them in the init net iff the inputs to those operators are\n # already available, either as graph initializers, or as the\n # output of other operators in the init net. The latter case\n # occurs, for example, when exporting from pytorch to onnx.\n # In most production use, we expect has_initializers to be\n # true.\n initializers = {i.name for i in init_model.graph.initializer}\n outputs = {output for node in init_model.graph.node for output in node.output}\n has_initializers = all(x in initializers or x in outputs for x in (W, R, B))\n\n pred_ops = []\n init_ops = []\n (init_ops if has_initializers else pred_ops).extend(init_net.Proto().op)\n pred_ops.extend(pred_mh.Proto().op)\n\n return Caffe2Ops(pred_ops, init_ops, list(pred_mh.Proto().external_input))\n\n @classmethod\n def _create_control_op(cls, init_model, pred_model, n, opset_version):\n control_inputs = []\n if '__control_inputs' in n.attrs:\n control_inputs.extend(n.attrs['__control_inputs'])\n node = cls._common_onnx_node_to_caffe2_op(init_model, pred_model, n, opset_version)\n node.control_input.extend(control_inputs)\n return Caffe2Ops([node], [], [])\n\n @classmethod\n def _remove_ssa(cls, net, remap_dict):\n for op in net.op:\n for i, name in enumerate(op.output):\n if name in remap_dict:\n op.output[i] = remap_dict[name]\n for i, out in enumerate(net.external_output):\n if out in remap_dict:\n net.external_output[i] = remap_dict[out]\n\n @classmethod\n def _create_if(cls, init_model, pred_model, n, opset_version):\n ops = cls._create_control_op(init_model, pred_model, n, opset_version)\n assert ops[0][0].type == 'If'\n if_op = ops[0][0]\n then_net = else_net = None\n control_inputs = []\n for arg in if_op.arg:\n if arg.name == 'then_net':\n then_net = arg.n\n if arg.name == 'else_net':\n else_net = arg.n\n if arg.name == '__control_inputs':\n control_inputs = arg.strings\n\n assert then_net and else_net\n then_net_outs = then_net.external_output\n else_net_outs = else_net.external_output\n op_outputs = if_op.output\n assert len(then_net_outs) == len(else_net_outs)\n assert len(else_net_outs) == len(op_outputs)\n\n for arg in if_op.arg:\n if arg.name == 'then_net':\n arg.n.external_input.extend(control_inputs)\n if arg.name == 'else_net':\n arg.n.external_input.extend(control_inputs)\n\n return ops\n\n @classmethod\n def _create_loop(cls, init_model, pred_model, n, 
opset_version):\n ops = cls._create_control_op(init_model, pred_model, n, opset_version)\n assert ops[0][0].type == 'ONNXWhile'\n while_op = ops[0][0]\n while_op.arg.extend([caffe2.python.utils.MakeArgument('has_trip_count', True)])\n while_op.arg.extend([caffe2.python.utils.MakeArgument('has_cond', True)])\n while_op.arg.extend([caffe2.python.utils.MakeArgument('disable_scopes', True)])\n control_inputs = []\n for arg in while_op.arg:\n if arg.name == '__control_inputs':\n control_inputs = arg.strings\n num_loop_carried_deps = 0\n for arg in while_op.arg:\n if arg.name == 'body':\n num_loop_carried_deps = len(arg.n.external_input) - 2\n arg.n.external_input.extend(control_inputs)\n while_op.arg.extend([\n caffe2.python.utils.MakeArgument('num_loop_carried_deps',\n num_loop_carried_deps)\n ])\n\n return ops\n\n @classmethod\n def _substitute_raw_value(cls, tp, raw_values_dict):\n if tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL'):\n if tp.name not in raw_values_dict:\n raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))\n else:\n tp.raw_data = raw_values_dict[tp.name]\n\n @classmethod\n def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):\n for node in nodes:\n for attr in node.attribute:\n if attr.HasField('t'):\n cls._substitute_raw_value(attr.t, raw_values_dict)\n for t in attr.tensors:\n cls._substitute_raw_value(t, raw_values_dict)\n if attr.HasField('g'):\n cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)\n for g in attr.graphs:\n cls._visit_and_substitute_raw_values(g.node, raw_values_dict)\n\n @classmethod\n def _external_value_resolution_pass(cls, model, raw_values_dict):\n for init in model.graph.initializer:\n cls._substitute_raw_value(init, raw_values_dict)\n\n cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)\n\n\n @classmethod\n def _direct_initialize_parameters(cls, initializer, ws, device_option):\n for tp in initializer:\n ws.FeedBlob(tp.name, onnx.numpy_helper.to_array(tp), device_option)\n\n @classmethod\n def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):\n for value_info in inputs:\n if value_info.name in initialized:\n continue\n shape = list(d.dim_value for d in value_info.type.tensor_type.shape.dim)\n ws.FeedBlob(\n value_info.name,\n np.ones(shape, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[value_info.type.tensor_type.elem_type]),\n device_option)\n\n @staticmethod\n def optimize_onnx(input, init=False, predict=False):\n passes = ['fuse_consecutive_transposes',\n 'eliminate_nop_transpose',\n 'fuse_transpose_into_gemm',\n 'lift_lexical_references']\n if init:\n passes.append('split_init')\n if predict:\n passes.append('split_predict')\n out = onnx.optimizer.optimize(input, passes)\n return out\n\n @classmethod\n def prepare_zip_archive(cls, file, device='CPU', **kwargs):\n with zipfile.ZipFile(file, mode='r') as z:\n with z.open('__MODEL_PROTO', 'r') as f:\n model = onnx.load(f);\n blob_names = set(z.namelist()) - set('__MODEL_PROTO')\n # TODO: make this more efficient\n raw_values_dict = {}\n for name in blob_names:\n with z.open(name, 'r') as blob_file:\n raw_values_dict[name] = blob_file.read()\n\n return cls.prepare(model, device, raw_values_dict=raw_values_dict, **kwargs)\n\n @classmethod\n def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):\n '''\n For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph,\n\n for example, if \"img\" is the input 
blob for the predict_net, we require that in init_graph and in\n initializer of the predict_graph, \"img\" is not initalized. We don't have a check for this, since\n there is no way we can know which blob is the input of the predict_graph.\n '''\n if not kwargs.pop('no_check_UNSAFE', False):\n super(Caffe2Backend, cls).prepare(model, device, **kwargs)\n opset_version = None\n for imp in model.opset_import:\n if not imp.HasField(\"domain\") or imp.domain == \"\":\n opset_version = imp.version\n if imp.version > cls._known_opset_version:\n warnings.warn(\"This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.\".format(cls._known_opset_version, imp.version))\n else:\n warnings.warn(\"Unrecognized operator set {}\".format(imp.domain))\n if opset_version is None:\n if model.ir_version >= 0x00000003:\n raise RuntimeError(\"Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)\")\n else:\n opset_version = 1\n\n model = onnx.shape_inference.infer_shapes(model)\n\n ws = Workspace()\n device_option = get_device_option(Device(device))\n\n init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)\n\n if raw_values_dict:\n cls._external_value_resolution_pass(model, raw_values_dict)\n\n # Directly load initializer data into blobs in workspace\n cls._direct_initialize_parameters(\n model.graph.initializer,\n ws,\n device_option,\n )\n\n initialized = {init.name for init in model.graph.initializer}\n\n cls._direct_initialize_inputs(\n model.graph.input,\n initialized,\n ws,\n device_option,\n )\n\n uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]\n\n retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)\n return retval\n\n\n @classmethod\n # TODO: This method needs a refactor for clarity\n def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):\n cbackend = C.Caffe2Backend(cls._dummy_name)\n if cbackend.support_onnx_import(node_def.op_type):\n\n # extract value infos from pred model (value infos of\n # node's inputs that are in init model should be all\n # available in pred model)\n value_infos = []\n for name in node_def.input:\n if pred_model is not None:\n for vi in itertools.chain(pred_model.graph.input,\n pred_model.graph.output,\n pred_model.graph.value_info):\n if vi.name == name:\n value_infos.append(vi.SerializeToString())\n\n op_strs = cbackend.convert_node(node_def.SerializeToString(), value_infos, opset_version)\n init_ops = []\n for s in op_strs[0]:\n op = caffe2_pb2.OperatorDef()\n op.ParseFromString(s)\n init_ops.append(op)\n ops = []\n for s in op_strs[1]:\n op = caffe2_pb2.OperatorDef()\n op.ParseFromString(s)\n ops.append(op)\n return Caffe2Ops(ops, init_ops, [])\n\n if node_def.op_type in cls._special_operators:\n translator = getattr(cls, cls._special_operators[node_def.op_type])\n else:\n translator = cls._common_onnx_node_to_caffe2_op\n ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)\n if isinstance(ops, Caffe2Ops):\n return ops\n if not isinstance(ops, container_abcs.Iterable):\n ops = [ops]\n return Caffe2Ops(ops, [], [])\n\n _broadcast_operators = {\n 'Add',\n 'Sub',\n }\n\n @classmethod\n def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):\n 
\"\"\"\n This translator performs the basic translation of ONNX nodes into\n Caffe2 operators. Besides doing a straightforward marshalling from\n one format to another, it also does these extra things:\n\n - Renames operators based on '_renamed_operators'\n - Renames attributes based on '_global_renamed_attrs' and\n '_per_op_renamed_attrs'\n\n If you're writing a custom translator, consider calling this first,\n and then fixing things up further.\n \"\"\"\n c2_op = caffe2_pb2.OperatorDef()\n\n c2_op.input.extend(onnx_node.inputs)\n c2_op.output.extend(onnx_node.outputs)\n c2_op.name = onnx_node.name\n\n\n onnx_op_type = onnx_node.op_type\n broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))\n if broken_version <= opset_version:\n raise ValueError(\n \"Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})\".format(onnx_op_type, opset_version, broken_version))\n c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)\n if not core.IsOperator(c2_op.type):\n raise ValueError(\n \"Don't know how to translate op {}\".format(onnx_op_type))\n\n def kmap(k):\n if (onnx_op_type in cls._per_op_renamed_attrs and\n k in cls._per_op_renamed_attrs[onnx_op_type]):\n return cls._per_op_renamed_attrs[onnx_op_type][k]\n if k in cls._global_renamed_attrs:\n return cls._global_renamed_attrs[k]\n return k\n c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))\n\n if opset_version < 7:\n # onnx opset 7 and newest caffe2 have adopted full onnx broadcast semantics\n # so we don't need this hack anymore\n if c2_op.type in cls._broadcast_operators:\n already_broadcast = False\n for arg in c2_op.arg:\n if arg.name == 'broadcast':\n already_broadcast = True\n if not already_broadcast:\n c2_op.arg.extend([caffe2.python.utils.MakeArgument('broadcast', 1)])\n\n return c2_op\n\n @staticmethod\n def _all_names_in_graph(graph):\n if graph is None:\n return set()\n\n names = set()\n names.update(value_info.name for value_info in graph.input)\n names.update(value_info.name for value_info in graph.output)\n for node in graph.node:\n names.update(node.input)\n names.update(node.output)\n return names\n\n @classmethod\n def _graph_to_net(cls, onnx_graph, opset_version):\n net = caffe2_pb2.NetDef()\n for node in onnx_graph.node:\n try:\n c2ops = cls._onnx_node_to_caffe2_op(\n None, None, node, opset_version)\n except Exception as e:\n print('ONNX FATAL:', e)\n continue\n net.op.extend(c2ops.init_ops)\n net.op.extend(c2ops.ops)\n net.external_input.extend(c2ops.interface_blobs)\n net.external_output.extend(\n value_info.name for value_info in onnx_graph.output)\n net.external_input.extend(\n value_info.name for value_info in onnx_graph.input)\n return net\n\n @classmethod\n def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):\n device_option = get_device_option(Device(device))\n\n onnx_model = onnx.utils.polish_model(onnx_model)\n init_model = cls.optimize_onnx(onnx_model, init=True)\n pred_model = cls.optimize_onnx(onnx_model, predict=True)\n\n init_net = caffe2_pb2.NetDef()\n pred_net = caffe2_pb2.NetDef()\n\n init_net.name = onnx_model.graph.name + '_init'\n pred_net.name = onnx_model.graph.name + '_predict'\n\n if include_initializers:\n init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)\n\n cls._dummy_name.reset(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))\n\n errors = []\n for net, model in ( (init_net, init_model), (pred_net, pred_model) 
):\n net.device_option.CopyFrom(device_option)\n for node in model.graph.node:\n try:\n c2ops = cls._onnx_node_to_caffe2_op(\n init_model, pred_model, node, opset_version)\n except Exception as e:\n msg = 'Error while processing node: {}. Exception: {}'.format(node, e)\n errors.append(msg)\n print('ONNX FATAL:', msg, file=sys.stderr)\n continue\n init_net.op.extend(c2ops.init_ops)\n net.op.extend(c2ops.ops)\n net.external_input.extend(c2ops.interface_blobs)\n net.external_output.extend(\n value_info.name for value_info in model.graph.output)\n net.external_input.extend(\n value_info.name for value_info in model.graph.input)\n\n if len(errors) > 0:\n raise RuntimeError(\n \"ONNX conversion failed, encountered {} errors:\\n\\n{}\".format(\n len(errors), \"\\n\\n\".join(errors)))\n\n return init_net, pred_net\n\n # wrapper for backwards compatability\n @classmethod\n def onnx_graph_to_caffe2_net(cls, model, device=\"CPU\", opset_version=_known_opset_version):\n return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)\n\n @classmethod\n def supports_device(cls, device_str):\n device = Device(device_str)\n if device.type == DeviceType.CPU:\n return True\n elif core.IsGPUDeviceType(device.type):\n return workspace.has_gpu_support\n return False\n\n @classmethod\n def is_compatible(cls, model, device='CPU', **kwargs):\n if hasattr(super(Caffe2Backend, cls), 'is_compatible') \\\n and callable(super(Caffe2Backend, cls).is_compatible):\n if not super(Caffe2Backend, cls).is_compatible(model, device, **kwargs):\n return False\n # TODO: should have an unspported list of operators, be optimistic for now\n return True\n\nprepare = Caffe2Backend.prepare\n\nprepare_zip_archive = Caffe2Backend.prepare_zip_archive\n\nrun_node = Caffe2Backend.run_node\n\nrun_model = Caffe2Backend.run_model\n\nsupports_device = Caffe2Backend.supports_device # noqa\n\nis_compatible = Caffe2Backend.is_compatible\n"
] | [
[
"numpy.ones"
]
] |
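The `kmap` closure in the code above resolves ONNX-to-Caffe2 attribute renames, with per-operator tables taking precedence over the global table and unknown keys passing through unchanged. A minimal standalone sketch of that lookup order (the table entries here are illustrative, not the real caffe2 tables):

    _global_renamed_attrs = {'kernel_shape': 'kernels'}      # illustrative entries only
    _per_op_renamed_attrs = {'Squeeze': {'axes': 'dims'}}

    def kmap(op_type, k):
        # per-op renames win over global renames; unknown keys pass through
        if op_type in _per_op_renamed_attrs and k in _per_op_renamed_attrs[op_type]:
            return _per_op_renamed_attrs[op_type][k]
        return _global_renamed_attrs.get(k, k)

    assert kmap('Squeeze', 'axes') == 'dims'
    assert kmap('Conv', 'kernel_shape') == 'kernels'
    assert kmap('Conv', 'group') == 'group'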
andreiaugustin/tinygrad | [
"c0c2c0b0414dec0862aa442c60e905f39958f572"
] | [
"accel/cherry/tinygrad/ops_cherry.py"
] | [
"import numpy as np\nfrom tinygrad.tensor import Function\nfrom extra.cherry import *\n\n# ************* unary ops *************\n\nclass ReLU(Function):\n def forward(ctx, input):\n ctx.save_for_backward(input)\n return cherry_unop(input, UnaryOps.RELU)\n\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return cherry_binop(grad_output, cherry_unop(input, UnaryOps.GT0), BinaryOps.MUL)\n\nclass Log(Function):\n def forward(ctx, input):\n ctx.save_for_backward(input)\n return cherry_unop(input, UnaryOps.LOG)\n\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n return cherry_binop(grad_output, input, BinaryOps.DIV)\n\nclass Exp(Function):\n def forward(ctx, input):\n ret = cherry_unop(input, UnaryOps.EXP)\n ctx.save_for_backward(ret)\n return ret\n\n def backward(ctx, grad_output):\n ret, = ctx.saved_tensors\n return cherry_binop(grad_output, ret, BinaryOps.MUL)\n\n# ************* reduce ops *************\n\nclass Sum(Function):\n def forward(ctx, input, axis=None):\n ctx.save_for_backward(input, axis)\n return cherry_reduceop(input, ReduceOps.SUM, axis)\n\n def backward(ctx, grad_output):\n input, axis = ctx.saved_tensors\n if isinstance(axis, int): axis = [axis]\n shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]\n return cherry_binop(grad_output.reshape(shape), np.zeros_like(input), BinaryOps.ADD)\n\nclass Max(Function):\n def forward(ctx, inp, axis=None):\n if isinstance(axis, int): axis = [axis]\n #ret = np.amax(inp, axis=None if axis is None else tuple(axis), keepdims=True)\n ret = cherry_reduceop(inp, ReduceOps.MAX, None if axis is None else tuple(axis), keepdims=True)\n ctx.save_for_backward(inp, axis, ret)\n if axis is not None:\n ret = ret.reshape([inp.shape[i] for i in range(len(inp.shape)) if i not in axis])\n return ret\n\n def backward(ctx, grad_output):\n input, axis, ret = ctx.saved_tensors\n shape = [1 if axis is None or i in axis else input.shape[i] for i in range(len(input.shape))]\n ret2 = (input==ret.reshape(shape))\n #div = ret2.sum(axis=None if axis is None else tuple(axis), keepdims=True)\n #return ret2*grad_output.reshape(shape)/div\n div = cherry_reduceop(ret2, ReduceOps.SUM, axis=None if axis is None else tuple(axis), keepdims=True)\n return cherry_binop(cherry_binop(ret2, grad_output.reshape(shape), BinaryOps.MUL), div, BinaryOps.DIV)\n\n# ************* binary ops *************\n\ndef unbroadcast(out, in_sh):\n # adjoint operation to broadcast is sum. 
Need to sum all axis with 1 = in_sh[i] < out.shape[i]\n sum_axis = tuple([i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1]) if in_sh != (1,) else None\n return cherry_reduceop(out, ReduceOps.SUM, sum_axis).reshape(in_sh)\n\nclass Add(Function):\n def forward(ctx, x, y):\n ctx.save_for_backward(x.shape, y.shape)\n return cherry_binop(x, y, BinaryOps.ADD)\n\n def backward(ctx, grad_output):\n shape_x, shape_y = ctx.saved_tensors\n return unbroadcast(grad_output, shape_x), unbroadcast(grad_output, shape_y)\n\nclass Sub(Function):\n def forward(ctx, x, y):\n ctx.save_for_backward(x.shape, y.shape)\n return cherry_binop(x, y, BinaryOps.SUB)\n\n def backward(ctx, grad_output):\n shape_x, shape_y = ctx.saved_tensors\n return unbroadcast(grad_output, shape_x), unbroadcast(-grad_output, shape_y)\n\nclass Mul(Function):\n def forward(ctx, x, y):\n ctx.save_for_backward(x, y)\n return cherry_binop(x, y, BinaryOps.MUL)\n\n def backward(ctx, grad_output):\n x,y = ctx.saved_tensors\n return unbroadcast(y*grad_output, x.shape), unbroadcast(x*grad_output, y.shape)\n\nclass Pow(Function):\n def forward(ctx, x, y):\n ctx.save_for_backward(x, y)\n return cherry_binop(x, y, BinaryOps.POW)\n\n def backward(ctx, grad_output):\n x,y = ctx.saved_tensors\n return unbroadcast(y * (x**(y-1.0)) * grad_output, x.shape), \\\n unbroadcast((x**y) * np.log(x) * grad_output, y.shape)\n\n# ************* processing ops *************\n\nclass Matmul(Function):\n def forward(ctx, input, weight):\n ctx.save_for_backward(input, weight)\n return cherry_matmul(input, weight)\n\n def backward(ctx, grad_output):\n input, weight = ctx.saved_tensors\n grad_input = cherry_matmul(grad_output, weight, transpose_w=True)\n grad_weight = cherry_matmul(input, grad_output, transpose_x=True)\n return grad_input, grad_weight\n\nclass Conv2D(Function):\n def forward(ctx, x, w, stride=1, groups=1):\n if type(ctx.stride) == int:\n ctx.stride = (ctx.stride, ctx.stride)\n cout,cin,H,W = w.shape\n ys,xs = ctx.stride\n bs,cin_ = x.shape[0], x.shape[1]\n iy,ix = x.shape[2],x.shape[3]\n oy,ox = (x.shape[2]-(H-ys))//ys, (x.shape[3]-(W-xs))//xs\n assert cin*ctx.groups == cin_\n assert cout % ctx.groups == 0\n rcout = cout//ctx.groups\n\n # if H == 1 and W == 1 and ctx.groups == 1 and ctx.stride == (1,1):\n\n gx = x.reshape(bs,ctx.groups,cin,x.shape[2],x.shape[3])\n tx = np.lib.stride_tricks.as_strided(gx,\n shape=(bs, ctx.groups, cin, oy, ox, H, W),\n strides=(*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]),\n writeable=False,\n )\n tw = w.reshape(ctx.groups, rcout, cin, H, W)\n ctx.save_for_backward(tx, tw, x.shape)\n\n print((*gx.strides[0:3], gx.strides[3]*ys, gx.strides[4]*xs, *gx.strides[3:5]))\n\n \"\"\"\n ret = np.zeros((bs,ctx.groups,oy,ox,rcout),dtype=x.dtype)\n for g in range(ctx.groups):\n #ijYXyx,kjyx -> iYXk ->ikYX\n ret[:,g] += np.tensordot(tx[:,g], tw[g], ((1,4,5),(1,2,3)))\n\n print(bs, ctx.groups, cin)\n return np.moveaxis(ret,4,2).reshape(bs, cout, oy, ox)\n \"\"\"\n\n cherry_dmar(SLOT(0), x) # bs, groups, cin, x.shape[2], x.shape[3]\n cherry_dmar(SLOT(1), w) # groups, rcout, cin, H, W\n\n cherry_reset_counts()\n print(bs, ctx.groups, rcout, oy, ox, cin, H, W)\n\n for B in range(0, bs):\n if cin == 1 and rcout == 1 and ctx.groups > 1:\n # hmm, this doesn't work, it's not a matmul\n # you always have to loop over the groups, since they aren't joint\n # the idea would be to collapse the HxW into the matmul, but you'd be limited to 9 for 3x3\n # and while the load is easy in the weight matrix, it's 
hard in the image matrix (3 strides)\n # and only the diagonal of the matrix would be useful! groups aren't channels!\n # [(1, 144, 58, 58), (144, 1, 3, 3)] -> (1, 144, 56, 56)\n\n # what does a grouped 1x1 conv look like?\n # bs x groups x yx -- groups x 1 --> bs x groups x yx\n # it looks like a broadcasted multiply\n\n #print(\"opt1\")\n\n # x: bs x groups x iy x ix\n # w: groups x H x W\n # out: bs x groups x oy x ox\n # ix x groups x groups\n for g in range(0, groups, SZ):\n for Y in range(0, oy):\n for X in range(0, ox, SZ):\n IY,IX = Y*ys,X*xs\n riski_zero(Reg.MATMUL_ACC)\n for y in range(IY, IY+H):\n for x in range(IX, IX+W):\n riski_load(Reg.MATMUL_INPUT,\n SLOT(0) + B*groups*iy*ix + g*iy*ix + y*ix + x,\n xs, iy*ix, min(SZ, ox-X), min(SZ, groups-g))\n # 0 here is for broadcasting\n riski_load(Reg.MATMUL_WEIGHTS,\n SLOT(1) + g*H*W + (y-IY)*W + (x-IX),\n 0, H*W, SZ, min(SZ, groups-g))\n riski_mulacc()\n #risk_regdump()\n riski_store(Reg.MATMUL_ACC,\n SLOT(2) + B*groups*oy*ox + g*oy*ox + Y*ox + X,\n 1, oy*ox, min(SZ, ox-X), min(SZ, groups-g))\n\n elif H == 1 and W == 1 and xs == 1 and ys == 1:\n #print(\"opt2\")\n # oxy x cin x rcout -- unstrided 1x1\n # this is a simple matmul\n for g in range(0, groups):\n for c in range(0, rcout, SZ):\n yx = oy*ox\n assert yx == iy*ix\n for YX in range(0, oy*ox, SZ): # these are next to each other\n # inner conv\n riski_zero(Reg.MATMUL_ACC)\n for ci in range(0, cin, SZ):\n riski_load(Reg.MATMUL_INPUT,\n SLOT(0) + B*groups*cin*yx + g*cin*yx + ci*yx + YX,\n 1, yx, min(SZ, yx-YX), min(SZ, cin-ci))\n riski_load(Reg.MATMUL_WEIGHTS,\n SLOT(1) + g*rcout*cin + c*cin + ci,\n 1, cin, min(SZ, cin-ci), min(SZ, rcout-c))\n riski_matmul()\n riski_store(Reg.MATMUL_ACC,\n SLOT(2) + B*groups*rcout*yx + g*rcout*yx + c*yx + YX,\n 1, yx, min(SZ, yx-YX), min(SZ, rcout-c))\n else:\n #print(\"unoptimized\")\n # ox x cin x rcout -- unoptimized\n for g in range(0, groups):\n for c in range(0, rcout, SZ):\n for Y in range(0, oy):\n for X in range(0, ox, SZ):\n IY,IX = Y*ys,X*xs\n\n # inner conv\n riski_zero(Reg.MATMUL_ACC)\n for ci in range(0, cin, SZ):\n # not a loop in 1x1 convs, 9 in 3x3, 25 in 5x5\n for y in range(IY, IY+H):\n for x in range(IX, IX+W):\n riski_load(Reg.MATMUL_INPUT,\n SLOT(0) + B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x,\n xs, iy*ix, min(SZ, ox-X), min(SZ, cin-ci))\n riski_load(Reg.MATMUL_WEIGHTS,\n SLOT(1) + g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX),\n H*W, cin*H*W, min(SZ, cin-ci), min(SZ, rcout-c))\n riski_matmul()\n riski_store(Reg.MATMUL_ACC,\n SLOT(2) + B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X,\n 1, oy*ox, min(SZ, ox-X), min(SZ, rcout-c))\n cherry_print_counts()\n\n #print(x.shape, w.shape, \"->\", ret.shape)\n return cherry_dmaw(SLOT(2), (bs, cout, oy, ox))\n\n def backward(ctx, grad_output):\n bs,_,oy,ox = grad_output.shape\n tx, tw, x_shape = ctx.saved_tensors\n _,rcout,cin,H,W = tw.shape\n ys,xs = ctx.stride\n OY,OX = x_shape[2:4]\n\n ggg = grad_output.reshape(bs,ctx.groups,rcout,oy,ox)\n\n gdw = np.zeros((ctx.groups,rcout,cin,H,W), dtype=tx.dtype)\n\n if cin >= 16:\n # optimize for large channel count\n for g in range(ctx.groups):\n #'ikYX,ijYXyx -> kjyx'\n for i in range(ggg[:,g].shape[1]):\n for m in range(tx[:,g].shape[4]):\n for n in range(tx[:,g].shape[5]):\n # Use transposes to ensure reshape keeps the correct dimension (channel dimension) when multiple dimensions have the same size\n big_matrix = np.transpose(tx[:,g][:, :, :, :, m, n], (1, 0, 2, 3)).reshape(tx[:,g].shape[1], -1).T\n 
gdw[g][i, :, m, n] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()\n else:\n # unoptimized\n for g in range(ctx.groups):\n #'ikYX,ijYXyx -> kjyx'\n for i in range(ggg[:,g].shape[1]):\n for j in range(tx[:,g].shape[1]):\n for m in range(tx[:,g].shape[4]):\n big_matrix = tx[:,g][:,j, :, :, m].reshape(-1, tx[:,g].shape[5])\n gdw[g][i, j, m] = cherry_matmul(ggg[:,g][:,i].reshape(1, -1), big_matrix).flatten()\n\n # needs to be optimized separately for large oy and ox, versus large ctx.groups\n gdx = np.zeros((bs,ctx.groups,cin,OY,OX), dtype=tx.dtype)\n for k in range(oy*ox):\n Y, X = k//ox, k%ox\n iY,iX = Y*ys, X*xs\n big_matrix = []\n for g in range(ctx.groups):\n big_matrix.append(cherry_matmul(ggg[:,g,:,Y,X].reshape(bs, -1), tw[g].reshape(rcout, -1)).reshape((bs, cin, H, W)))\n gdx[:, :, :, iY:iY+H, iX:iX+W] = cherry_binop(gdx[:, :, :, iY:iY+H, iX:iX+W], np.array(np.transpose(big_matrix, (1, 0, 2, 3, 4))), BinaryOps.ADD)\n\n return gdx.reshape((bs, ctx.groups*cin, OY, OX)), gdw.reshape((ctx.groups*rcout, cin, H, W))\n\n"
] | [
[
"numpy.zeros_like",
"numpy.transpose",
"numpy.zeros",
"numpy.lib.stride_tricks.as_strided",
"numpy.log"
]
] |
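`Conv2D.forward` above extracts its convolution windows with `numpy.lib.stride_tricks.as_strided` before accumulating matmuls. A self-contained sketch of that windowing trick for a single-channel, stride-1 case (sizes made up):

    import numpy as np

    x = np.arange(16, dtype=np.float32).reshape(4, 4)
    H = W = 3
    oy, ox = x.shape[0] - H + 1, x.shape[1] - W + 1
    # each (oy, ox) position views an HxW window of x without copying
    tx = np.lib.stride_tricks.as_strided(
        x, shape=(oy, ox, H, W),
        strides=(*x.strides, *x.strides), writeable=False)
    w = np.ones((H, W), dtype=np.float32)
    out = np.tensordot(tx, w, ((2, 3), (0, 1)))  # (oy, ox) convolution output
    assert out.shape == (2, 2)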
kouroshHakha/fist | [
"328c098789239fd892e17edefd799fc1957ab637",
"328c098789239fd892e17edefd799fc1957ab637"
] | [
"spirl/rl/components/agent.py",
"spirl/data/block_stacking/src/demo_gen/block_demo_policy.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom torch.optim import Adam, SGD\n\nfrom spirl.utils.general_utils import ParamDict, get_clipped_optimizer, AttrDict, prefix_dict, map_dict, \\\n nan_hook, np2obj, ConstantSchedule\nfrom spirl.utils.pytorch_utils import RAdam, remove_grads, map2np, map2torch\nfrom spirl.utils.vis_utils import add_caption_to_img, add_captions_to_seq\nfrom spirl.rl.components.normalization import DummyNormalizer\nfrom spirl.rl.components.policy import Policy\nfrom spirl.components.checkpointer import CheckpointHandler\nfrom spirl.rl.utils.mpi import sync_grads\n\n\nclass BaseAgent(nn.Module):\n def __init__(self, config):\n super().__init__()\n self._hp = self._default_hparams().overwrite(config)\n self.device = self._hp.device\n self._is_train = True # indicates whether agent should sample in training mode\n self._rand_act_mode = False # indicates whether agent should act randomly (for warmup collection)\n self._rollout_mode = False # indicates whether agent is run in rollout mode (omit certain policy outputs)\n self._obs_normalizer = self._hp.obs_normalizer(self._hp.obs_normalizer_params)\n\n def _default_hparams(self):\n default_dict = ParamDict({\n 'device': None, # pytorch device\n 'discount_factor': 0.99, # discount factor for RL update\n 'optimizer': 'adam', # supported: 'adam', 'radam', 'rmsprop', 'sgd'\n 'gradient_clip': None, # max grad norm, if None no clipping\n 'momentum': 0, # momentum in RMSProp / SGD optimizer\n 'adam_beta': 0.9, # beta1 param in Adam\n 'update_iterations': 1, # number of iteration steps per one call to 'update(...)'\n 'target_network_update_factor': 5e-3, # percentage of new weights that are carried over\n 'batch_size': 64, # size of the experience batch used for updates\n 'obs_normalizer': DummyNormalizer, # observation normalization class\n 'obs_normalizer_params': {}, # parameters for optimization norm class\n 'obs_norm_log_groups': {}, # (optional) dict defining separation of state space for obsNormLog\n 'log_videos': True, # whether to log videos during logging\n 'log_video_caption': False, # whether to add captions to video\n 'num_workers': None, # number of independent workers --> whether grads need sync\n })\n return default_dict\n\n def act(self, obs):\n \"\"\"Returns policy output dict given observation (random action if self._rand_act_mode is set).\"\"\"\n if self._rand_act_mode:\n return self._act_rand(obs)\n else:\n return self._act(obs)\n\n def _act(self, obs):\n \"\"\"Implements act method in child class.\"\"\"\n raise NotImplementedError\n\n def _act_rand(self, obs):\n \"\"\"Returns random action with proper dimension. Implemented in child class.\"\"\"\n raise NotImplementedError\n\n def update(self, experience_batch):\n \"\"\"Updates the policy given a batch of experience.\"\"\"\n raise NotImplementedError\n\n def add_experience(self, experience_batch):\n \"\"\"Provides interface for adding additional experience to agent replay, needs to be overwritten by child.\"\"\"\n print(\"### This agent does not support additional experience! 
###\")\n\n def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):\n \"\"\"Visualizes/logs all training outputs.\"\"\"\n logger.log_scalar_dict(logging_stats, prefix='train' if self._is_train else 'val', step=step)\n\n if log_images:\n assert rollout_storage is not None # need rollout data for image logging\n # log rollout videos with info captions\n if 'image' in rollout_storage and self._hp.log_videos:\n if self._hp.log_video_caption:\n vids = [np.stack(add_captions_to_seq(rollout.image, np2obj(rollout.info))).transpose(0, 3, 1, 2)\n for rollout in rollout_storage.get()[-logger.n_logged_samples:]]\n else:\n vids = [np.stack(rollout.image).transpose(0, 3, 1, 2)\n for rollout in rollout_storage.get()[-logger.n_logged_samples:]]\n logger.log_videos(vids, name=\"rollouts\", step=step)\n self.visualize(logger, rollout_storage, step)\n\n def visualize(self, logger, rollout_storage, step):\n \"\"\"Optionally allows to further visualize the internal state of agent (e.g. replay buffer etc.)\"\"\"\n pass\n\n def reset(self):\n \"\"\"Can be used for any initializations of agent's state at beginning of episode.\"\"\"\n pass\n\n def save_state(self, save_dir):\n \"\"\"Provides interface to save any internal state variables (like replay buffers) to disk.\"\"\"\n pass\n\n def load_state(self, save_dir):\n \"\"\"Provides interface to load any internal state variables (like replay buffers) from disk.\"\"\"\n pass\n\n def sync_networks(self):\n \"\"\"Syncs network parameters across workers.\"\"\"\n raise NotImplementedError\n\n def _soft_update_target_network(self, target, source):\n \"\"\"Copies weights from source to target with weight [0,1].\"\"\"\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(self._hp.target_network_update_factor * param.data +\n (1 - self._hp.target_network_update_factor) * target_param.data)\n\n def _copy_to_target_network(self, target, source):\n \"\"\"Completely copies weights from source to target.\"\"\"\n for target_param, source_param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(source_param.data)\n\n def _get_optimizer(self, optimizer, model, lr):\n \"\"\"Returns an instance of the specified optimizers on the parameters of the model with specified learning rate.\"\"\"\n if optimizer == 'adam':\n get_optim = partial(get_clipped_optimizer, optimizer_type=Adam, betas=(self._hp.adam_beta, 0.999))\n elif optimizer == 'radam':\n get_optim = partial(get_clipped_optimizer, optimizer_type=RAdam, betas=(self._hp.adam_beta, 0.999))\n elif optimizer == 'sgd':\n get_optim = partial(get_clipped_optimizer, optimizer_type=SGD, momentum=self._hp.momentum)\n else:\n raise ValueError(\"Optimizer '{}' not supported!\".format(optimizer))\n optim = partial(get_optim, gradient_clip=self._hp.gradient_clip)\n return optim(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)\n\n def _perform_update(self, loss, opt, network):\n \"\"\"Performs one backward gradient step on the loss using the given optimizer. 
Also syncs gradients.\"\"\"\n nan_hook(loss)\n opt.zero_grad()\n loss.backward()\n\n grads = [p.grad for p in network.parameters()]\n nan_hook(grads)\n\n opt.step()\n\n def _get_obs_norm_info(self):\n if isinstance(self._obs_normalizer, DummyNormalizer): return {}\n mean, std = self._obs_normalizer.mean, self._obs_normalizer.std\n if not self._hp.obs_norm_log_groups:\n self._hp.obs_norm_log_groups = AttrDict(all=np.arange(mean.shape[0]))\n info = {}\n for group_key in self._hp.obs_norm_log_groups:\n info['obs_norm_' + group_key + '_mean'] = mean[self._hp.obs_norm_log_groups[group_key]].mean()\n info['obs_norm_' + group_key + '_std'] = std[self._hp.obs_norm_log_groups[group_key]].mean()\n return info\n\n @staticmethod\n def load_model_weights(model, checkpoint, epoch='latest'):\n \"\"\"Loads weights for a given model from the given checkpoint directory.\"\"\"\n checkpoint_dir = checkpoint if os.path.basename(checkpoint) == 'weights' \\\n else os.path.join(checkpoint, 'weights') # checkpts in 'weights' dir\n checkpoint_path = CheckpointHandler.get_resume_ckpt_file(epoch, checkpoint_dir)\n CheckpointHandler.load_weights(checkpoint_path, model=model)\n\n @staticmethod\n def _remove_batch(d):\n \"\"\"Adds batch dimension to all tensors in d.\"\"\"\n return map_dict(lambda x: x[0] if (isinstance(x, torch.Tensor) or \n isinstance(x, np.ndarray)) else x, d)\n\n @contextmanager\n def val_mode(self):\n \"\"\"Sets validation parameters if desired. To be used like: with agent.val_mode(): ...<do something>...\"\"\"\n self._is_train = False\n self.call_children(\"switch_to_val\", Policy)\n yield\n self._is_train = True\n self.call_children(\"switch_to_train\", Policy)\n\n @contextmanager\n def rand_act_mode(self):\n \"\"\"Performs random actions within context. To be used like: with agent.rand_act_mode(): ...<do something>...\"\"\"\n self._rand_act_mode = True\n yield\n self._rand_act_mode = False\n\n @contextmanager\n def rollout_mode(self):\n \"\"\"Sets rollout parameters if desired.\"\"\"\n self._rollout_mode = True\n self.call_children(\"switch_to_rollout\", Policy)\n yield\n self._rollout_mode = False\n self.call_children(\"switch_to_non_rollout\", Policy)\n\n def call_children(self, fn, cls):\n \"\"\"Call function with name fn in all submodules of class cls.\"\"\"\n def conditional_fn(module):\n if isinstance(module, cls):\n getattr(module, fn).__call__()\n\n self.apply(conditional_fn)\n\n\nclass HierarchicalAgent(BaseAgent):\n \"\"\"Implements a basic hierarchical agent with high-level and low-level policy/policies.\"\"\"\n def __init__(self, config):\n super().__init__(config)\n self.hl_agent = self._hp.hl_agent(self._hp.overwrite(self._hp.hl_agent_params))\n self.ll_agent = self._hp.ll_agent(self._hp.overwrite(self._hp.ll_agent_params))\n self._last_hl_output = None # stores last high-level output to feed to low-level during intermediate steps\n\n def _default_hparams(self):\n default_dict = ParamDict({\n 'hl_agent': None, # high-level agent class\n 'hl_agent_params': None, # parameters of the high-level agent\n 'll_agent': None, # low-level agent class\n 'll_agent_params': None, # parameters of the low-level agent(s)\n 'update_hl': True, # whether to update high-level agent\n 'update_ll': True, # whether to update low-level agent(s)\n 'll_subgoal_reaching_reward': False, # whether to count ll subgoal reaching reward in training\n 'll_subgoal_reaching_reward_weight': 1e3, # weight for the subgoal reaching reward\n })\n return super()._default_hparams().overwrite(default_dict)\n\n def act(self, 
obs):\n \"\"\"Output dict contains is_hl_step in case high-level action was performed during this action.\"\"\"\n obs_input = obs[None] if len(obs.shape) == 1 else obs # need batch input for agents\n output = AttrDict()\n if self._perform_hl_step_now:\n # perform step with high-level policy\n self._last_hl_output = self.hl_agent.act(obs_input)\n output.is_hl_step = True\n if len(obs_input.shape) == 2 and len(self._last_hl_output.action.shape) == 1:\n self._last_hl_output.action = self._last_hl_output.action[None] # add batch dim if necessary\n self._last_hl_output.log_prob = self._last_hl_output.log_prob[None]\n else:\n output.is_hl_step = False\n output.update(prefix_dict(self._last_hl_output, 'hl_'))\n\n # perform step with low-level policy\n assert self._last_hl_output is not None\n output.update(self.ll_agent.act(self.make_ll_obs(obs_input, self._last_hl_output.action)))\n\n return self._remove_batch(output) if len(obs.shape) == 1 else output\n\n def update(self, experience_batches):\n \"\"\"Updates high-level and low-level agents depending on which parameters are set.\"\"\"\n assert isinstance(experience_batches, AttrDict) # update requires batches for both HL and LL\n update_outputs = AttrDict()\n if self._hp.update_hl:\n hl_update_outputs = self.hl_agent.update(experience_batches.hl_batch)\n update_outputs.update(prefix_dict(hl_update_outputs, \"hl_\"))\n if self._hp.update_ll:\n ll_update_outputs = self.ll_agent.update(experience_batches.ll_batch)\n update_outputs.update(ll_update_outputs)\n return update_outputs\n\n def log_outputs(self, logging_stats, rollout_storage, logger, log_images, step):\n \"\"\"Additionally provides option ot visualize hierarchical agents.\"\"\"\n super().log_outputs(logging_stats, rollout_storage, logger, log_images, step)\n if log_images:\n self.hl_agent.visualize(logger, rollout_storage, step)\n self.ll_agent.visualize(logger, rollout_storage, step)\n\n def _act_rand(self, obs):\n \"\"\"Performs random actions with high-level policy. Low-level policy operates normally.\"\"\"\n with self.hl_agent.rand_act_mode():\n return self.act(obs)\n\n def make_ll_obs(self, obs, hl_action):\n \"\"\"Creates low-level agent's observation from env observation and HL action.\"\"\"\n return np.concatenate((obs, hl_action), axis=-1)\n\n def add_experience(self, experience_batch):\n self.hl_agent.add_experience(experience_batch.hl_batch)\n self.ll_agent.add_experience(experience_batch.ll_batch)\n\n def sync_networks(self):\n self.hl_agent.sync_networks()\n self.ll_agent.sync_networks()\n\n def state_dict(self, *args, **kwargs):\n return {'hl_agent': self.hl_agent.state_dict(*args, **kwargs),\n 'll_agent': self.ll_agent.state_dict(*args, **kwargs)}\n\n def load_state_dict(self, state_dict, *args, **kwargs):\n self.hl_agent.load_state_dict(state_dict.pop('hl_agent'), *args, **kwargs)\n self.ll_agent.load_state_dict(state_dict.pop('ll_agent'), *args, **kwargs)\n\n def save_state(self, save_dir):\n self.hl_agent.save_state(os.path.join(save_dir, 'hl_agent'))\n self.ll_agent.save_state(os.path.join(save_dir, 'll_agent'))\n\n def load_state(self, save_dir):\n self.hl_agent.load_state(os.path.join(save_dir, 'hl_agent'))\n self.ll_agent.load_state(os.path.join(save_dir, 'll_agent'))\n\n def reset(self):\n super().reset()\n self.hl_agent.reset()\n self.ll_agent.reset()\n\n @contextmanager\n def rand_act_mode(self):\n \"\"\"Performs random actions within context. 
To be used like: with agent.rand_act_mode(): ...<do something>...\"\"\"\n self._rand_act_mode = True\n self.hl_agent._rand_act_mode = True\n self.ll_agent._rand_act_mode = True\n yield\n self._rand_act_mode = False\n self.hl_agent._rand_act_mode = False\n self.ll_agent._rand_act_mode = False\n\n @property\n def _perform_hl_step_now(self):\n \"\"\"Indicates whether the high-level policy should be executed in the current time step.\"\"\"\n raise NotImplementedError # should be implemented by child class!\n\n\nclass FixedIntervalHierarchicalAgent(HierarchicalAgent):\n \"\"\"Hierarchical agent that executes high-level actions in fixed temporal intervals.\"\"\"\n def __init__(self, config):\n super().__init__(config)\n self._steps_since_hl = 0 # number of steps since last high-level step\n\n def _default_hparams(self):\n default_dict = ParamDict({\n 'hl_interval': 3, # temporal interval at which high-level actions are executed\n })\n return super()._default_hparams().overwrite(default_dict)\n\n def act(self, *args, **kwargs):\n output = super().act(*args, **kwargs)\n self._steps_since_hl += 1\n return output\n\n @property\n def _perform_hl_step_now(self):\n return self._steps_since_hl % self._hp.hl_interval == 0\n\n def reset(self):\n super().reset()\n self._steps_since_hl = 0 # start new episode with high-level step\n",
"import numpy as np\nfrom collections import deque\nimport copy\n\nfrom spirl.utils.general_utils import AttrDict, split_along_axis\nfrom spirl.data.block_stacking.src.utils.utils import quat2euler\nfrom spirl.data.block_stacking.src.block_stacking_env import BlockStackEnv\n\n\nclass BlockStackDemoPolicy:\n \"\"\"Follows plan on given env.\"\"\"\n GRASP_OFFSET = 0.08 # offset between robot pos and block pos for grasping\n PICK_OFFSET = 0.14 # additional vertical offset btw robot and block for placing\n PLACE_OFFSET = 0.17 # additional vertical offset btw robot and block for placing\n ACT_RANGE = [0.05, 0.05, 0.05, np.pi/10, 0.5] # maximum action scale for each action dimension\n GRAVITY_SUPPORT = 0.01 # z dimension action when noop to prevent robot from falling\n GRIPPER_OPEN = 1.\n GRIPPER_CLOSED = 0.\n MULTIPLIER = 20.\n EPS = 0.01\n\n def __init__(self, env_params):\n \"\"\"\n :param hl_plan: list of HL index tuples indicating which block should get stacked (e.g. [(1,2), (3,5)])\n \"\"\"\n # TODO consider whether to make task/hl_plan a proper class with transition subclass (to make reuse for kitchen easier)\n self.env_params = env_params\n self.lift_height = env_params.table_size[-1] + env_params.block_size * 2 * env_params.max_tower_height + 0.2\n self.block_height = env_params.block_size * 2\n\n self._hl_plan = None\n self._hl_plan_to_run = deque()\n self._action_plan = None\n self._u_obs = None # this stores env state when planning action sequence\n self._update_robot_state = True\n\n def reset(self):\n self._hl_plan = self.env_params.get_task()\n self._action_plan = None\n self._hl_plan_to_run = deque(self._hl_plan)\n self._u_obs = None\n\n def act(self, obs):\n if self.execution_finished: # should not call 'act' if execution is already finished\n return None\n self._u_obs = BlockUnflattenWrapper(BlockStackEnv.unflatten_block_obs(copy.deepcopy(obs),\n include_quat=self.env_params.include_quat,\n include_vel=self.env_params.include_vel))\n while True:\n if self._action_plan is None:\n if not self._hl_plan_to_run:\n self._action_plan = None\n ac = np.zeros(5,)\n break\n # generate new action plan\n self._action_plan = self._plan_actions()\n try:\n ac = next(self._action_plan)\n break\n except (StopIteration, IndexError): # generator exhausted\n self._action_plan = None\n ac = self._post_process(ac)\n return ac\n\n @property\n def execution_finished(self):\n \"\"\"Checks whether the plan execution has been finished.\"\"\"\n return self._action_plan is None and not self._hl_plan_to_run\n\n def _plan_actions(self):\n \"\"\"Plans LL actions given HL action plan and current env state.\"\"\"\n # generate pick-place plan for one stacking subtask\n bottom_block, top_block = self._hl_plan_to_run.popleft()\n raw_plan = self._pick_place(bottom_block, top_block)\n\n for ac in split_along_axis(raw_plan, axis=0):\n yield ac\n\n def _pick_place(self, bottom_block, top_block):\n \"\"\"Plans action sequence for pick&place of single block.\"\"\"\n action_plan = []\n\n # pick up block\n pick_target_pos = self._get_pick_target(top_block)\n top_block_quat = self._u_obs.block_quat(top_block)\n action_plan.append(self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN)[0])\n action_plan.append(self._grasp())\n\n # place block\n place_target_pos = self._get_place_target(bottom_block)\n bottom_block_quat = self._u_obs.block_quat(bottom_block)\n action_plan.append(self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED)[0])\n action_plan.append(self._place())\n\n return 
np.concatenate(action_plan)\n\n def _get_pick_target(self, block):\n block_pos = self._u_obs.block_pos(block)\n block_pos[2] += self.PICK_OFFSET\n return block_pos\n\n def _get_place_target(self, block):\n block_pos = self._u_obs.block_pos(block)\n block_pos[2] += self.PLACE_OFFSET\n return block_pos\n\n def _move_to(self, target_block_pos, target_block_quat, gripper, waypoints=None):\n \"\"\"\n Plans action sequence for moving robot arm to block.\n :param gripper: indicates whether gripper should be ['open', 'closed'] during execution\n :param waypoints: (optional) list of precomputed waypoints\n \"\"\"\n block_angle = quat2euler(*target_block_quat)[0] # assume single-axis rotation\n robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle\n if waypoints is None:\n waypoints = [\n [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos],\n [robot_pos[0], robot_pos[1], self.lift_height, robot_angle, gripper],\n [target_block_pos[0], target_block_pos[1], self.lift_height, robot_angle, gripper],\n [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET, block_angle, gripper],\n ]\n\n # add disturbed subgoals in between waypoints for better state coverage\n subgoals = [\n self._sample_disturbed_subgoal(robot_pos,\n [robot_pos[0], robot_pos[1], self.lift_height])\n + [robot_angle, gripper],\n self._sample_disturbed_subgoal([robot_pos[0], robot_pos[1], self.lift_height],\n [target_block_pos[0], target_block_pos[1], self.lift_height])\n + [robot_angle, gripper],\n self._sample_disturbed_subgoal([target_block_pos[0], target_block_pos[1], self.lift_height],\n [target_block_pos[0], target_block_pos[1], target_block_pos[2] + self.GRASP_OFFSET])\n + [block_angle, gripper],\n ]\n\n # assemble final waypoint list\n waypoints = [waypoints[0], subgoals[0], waypoints[1], subgoals[1], waypoints[2], subgoals[2], waypoints[3]]\n else:\n waypoints = [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self._u_obs.gripper_finger_pos]] \\\n + waypoints\n\n if self._update_robot_state:\n self._u_obs.gripper_pos, self._u_obs.gripper_angle, self._u_obs.gripper_finger_pos = \\\n np.array(waypoints[-1][:3]), waypoints[-1][3], gripper # update robot state\n return self._waypoints2plan(waypoints, absolute_dims=[-1]), waypoints[1:]\n\n def _grasp(self):\n \"\"\"Moves robot GRASP-offset down, closes gripper, moves GRASP-offset up.\"\"\"\n robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle\n waypoints = [\n [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],\n [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_OPEN],\n [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED]]\n waypoints += [waypoints[-1]] * 3 # noop\n waypoints += [[robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED]]\n if self._update_robot_state:\n self._u_obs.gripper_finger_pos = self.GRIPPER_CLOSED # update robot state\n return self._waypoints2plan(waypoints, absolute_dims=[-1])\n\n def _place(self):\n \"\"\"Moves robot GRASP-offset down, opens gripper, moves GRASP-offset up.\"\"\"\n robot_pos, robot_angle = self._u_obs.gripper_pos, self._u_obs.gripper_angle\n waypoints = [\n [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_CLOSED],\n [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, robot_angle, self.GRIPPER_CLOSED],\n [robot_pos[0], robot_pos[1], robot_pos[2] - self.GRASP_OFFSET, 
robot_angle, self.GRIPPER_OPEN],\n [robot_pos[0], robot_pos[1], robot_pos[2], robot_angle, self.GRIPPER_OPEN],\n [robot_pos[0], robot_pos[1], self.lift_height, robot_angle, self.GRIPPER_OPEN]\n ]\n if self._update_robot_state:\n self._u_obs.gripper_finger_pos = self.GRIPPER_OPEN # update robot state\n return self._waypoints2plan(waypoints, absolute_dims=[-1])\n\n def _waypoints2plan(self, waypoints, absolute_dims=None):\n plan = np.concatenate([self._interpolate(waypoints[i], waypoints[i+1], absolute_dims)\n for i in range(len(waypoints) - 1)])\n return plan\n\n def _interpolate(self, start, goal, absolute_dims=None):\n \"\"\"\n Interpolates between start and goal linearly while taking max_actions into account.\n Since action effect is smaller than actual action scale we need a multiplier to treat the distance farther than the actual one.\n :param absolute_dims: list of dimensions for which action will be set to goal state.\n \"\"\"\n diff = np.array(goal) - np.array(start)\n n_steps = int(np.max(np.ceil(np.divide(np.abs(diff), np.array(self.ACT_RANGE)))))\n for dim in absolute_dims if absolute_dims is not None else []:\n diff[dim] = goal[dim] * n_steps # hack to make dims action values absolute\n if n_steps > 0:\n actions = [diff / n_steps for _ in range(n_steps)]\n return actions\n else:\n return np.zeros([0, diff.shape[-1]])\n\n def _post_process(self, ac):\n # scale action\n ac[:3] *= self.MULTIPLIER # scale lateral actions to make them reach the target states\n\n # add gravity support for noop\n if np.sum(ac[:-1]) == 0:\n ac[2] += self.GRAVITY_SUPPORT\n\n # crop action dimensions according to env params\n if not self.env_params.allow_rotate:\n ac = np.concatenate([ac[:3], ac[4:]])\n if self.env_params.dimension == 2:\n ac = ac[1:]\n\n return ac\n\n def _sample_disturbed_subgoal(self, start_pos, goal_pos, max_displacement_ratio=0.2):\n \"\"\"Samples a subgoal with some offset to the direct connection line.\"\"\"\n start_pos, goal_pos = np.array(start_pos), np.array(goal_pos)\n diff = goal_pos - start_pos\n\n # generate unit vector that's orthogonal to diff\n noise = np.asarray([diff[0], diff[2], -diff[1]])\n noise /= np.linalg.norm(noise) # normalize it\n\n # sample random offset along connection line + random length\n length = (np.random.rand() * 2 * max_displacement_ratio - max_displacement_ratio) * np.linalg.norm(diff)\n offset = (np.random.rand() * 0.6 + 0.2) * diff\n\n # compute subgoal position\n subgoal_pos = start_pos + offset + length * noise\n return [coord for coord in subgoal_pos]\n\n\n\nclass ClosedLoopBlockStackDemoPolicy(BlockStackDemoPolicy):\n PICK_OFFSET = 0.11\n\n def __init__(self, env_params):\n super().__init__(env_params)\n self._update_robot_state = False\n\n def _plan_actions(self):\n # generate pick-place plan for one stacking subtask\n bottom_block, top_block = self._hl_plan_to_run.popleft()\n top_block_init_pos = self._u_obs.block_pos(top_block)\n\n waypoints = None\n while not self._lifted(top_block):\n while not self._reached(self._get_pick_target(top_block)):\n pick_target_pos = self._get_pick_target(top_block)\n top_block_quat = self._u_obs.block_quat(top_block)\n actions, waypoints = self._move_to(pick_target_pos, top_block_quat, self.GRIPPER_OPEN, waypoints)\n if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:\n waypoints = waypoints[1:]\n if len(actions) > 0:\n yield actions[0]\n else:\n break\n\n grasp_plan = split_along_axis(self._grasp(), axis=0)\n for i, action in enumerate(grasp_plan):\n yield action\n\n waypoints = None\n while not 
self._reached(self._get_place_target(bottom_block)):\n place_target_pos = self._get_place_target(bottom_block)\n bottom_block_quat = self._u_obs.block_quat(bottom_block)\n actions, waypoints = self._move_to(place_target_pos, bottom_block_quat, self.GRIPPER_CLOSED, waypoints)\n if self._reached_waypoint(waypoints[0]) and len(waypoints) > 1:\n waypoints = waypoints[1:]\n if len(actions) > 0:\n yield actions[0]\n else:\n break\n\n while not self._stacked(top_block, bottom_block):\n for action in split_along_axis(self._place(), axis=0):\n yield action\n\n def _lifted(self, top_block):\n top_block_pos = self._u_obs.block_pos(top_block)\n gripper_pos = self._u_obs.gripper_pos\n\n lifted = True\n\n x_dist = np.abs(gripper_pos[0] - top_block_pos[0])\n lifted &= x_dist < self.env_params.block_size\n\n y_dist = np.abs(gripper_pos[1] - top_block_pos[1])\n lifted &= y_dist < self.env_params.block_size\n\n z_vec = gripper_pos[-1] - top_block_pos[-1]\n lifted &= z_vec < 0.14\n lifted &= z_vec > 0.08\n\n return lifted\n\n def _stacked(self, top_block, bottom_block):\n top_pos = self._u_obs.block_pos(top_block)\n bottom_pos = self._u_obs.block_pos(bottom_block)\n x_dist = np.linalg.norm(top_pos[0] - bottom_pos[0])\n y_dist = np.linalg.norm(top_pos[0] - bottom_pos[0])\n x_dist_correct = x_dist < self.env_params.block_size\n y_dist_correct = y_dist < self.env_params.block_size\n\n z_vec = top_pos[2] - bottom_pos[2]\n z_vec_correct = np.abs(z_vec - 2 * self.env_params.block_size) < 0.005\n\n return x_dist_correct and y_dist_correct and z_vec_correct\n\n def _reached(self, pos):\n target_pos = pos\n target_pos[2] += self.GRASP_OFFSET\n return np.linalg.norm(pos - self._u_obs.gripper_pos) < self.EPS\n\n def _reached_waypoint(self, waypoint):\n return np.linalg.norm(np.array(waypoint[:3]) - self._u_obs.gripper_pos) < self.EPS\n\n\nclass BlockUnflattenWrapper(AttrDict):\n def block_pos(self, idx):\n return list(self['block_pos'][idx])\n\n def block_quat(self, idx):\n return list(self['block_quat'][idx])\n\n def set_block_pos(self, idx, val):\n self['block_pos'][idx] = val\n\n def set_block_quat(self, idx, val):\n self['block_quat'][idx] = val\n\n\nif __name__ == \"__main__\":\n from spirl.data.block_stacking.src.block_task_generator import SingleTowerBlockTaskGenerator\n obs = AttrDict(\n block_pos=np.random.rand(4*3),\n block_quat=np.random.rand(4*4),\n gripper_pos=np.random.rand(3),\n gripper_angle=np.random.rand(),\n gripper_finger_pos=np.random.rand(),\n )\n task_gen = SingleTowerBlockTaskGenerator({}, 4)\n task = task_gen.sample()\n policy = BlockStackDemoPolicy(task)\n print(policy.act(obs))\n # print(policy._plan_actions(obs))\n\n\n"
] | [
[
"numpy.arange",
"numpy.stack",
"numpy.concatenate"
],
[
"numpy.sum",
"numpy.zeros",
"numpy.abs",
"numpy.asarray",
"numpy.random.rand",
"numpy.array",
"numpy.concatenate",
"numpy.linalg.norm"
]
] |
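`_soft_update_target_network` in `agent.py` above is a standard Polyak (soft target) update. A minimal self-contained sketch, assuming plain PyTorch modules and a made-up update factor:

    import torch.nn as nn

    def soft_update(target: nn.Module, source: nn.Module, tau: float = 5e-3):
        # target <- tau * source + (1 - tau) * target, parameter by parameter
        for t, s in zip(target.parameters(), source.parameters()):
            t.data.copy_(tau * s.data + (1 - tau) * t.data)

    net, tgt = nn.Linear(4, 2), nn.Linear(4, 2)
    soft_update(tgt, net)  # tgt drifts slowly toward net across repeated calls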
robert-anderson/pyscf | [
"cdc56e168cb15f47e8cdc791a92d689fa9b655af",
"cdc56e168cb15f47e8cdc791a92d689fa9b655af"
] | [
"pyscf/nao/tddft_iter_x_zip.py",
"pyscf/nao/m_overlap_lil.py"
] | [
"from __future__ import print_function, division\nfrom numpy import array, argmax\nfrom pyscf.nao import tddft_iter\n\n\nclass tddft_iter_x_zip(tddft_iter):\n \"\"\" Iterative TDDFT with a high-energy part of the KS eigenvectors compressed \"\"\"\n\n def __init__(self, **kw):\n from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n \n tddft_iter.__init__(self, **kw)\n self.x_zip = kw['x_zip'] if 'x_zip' in kw else False\n self.x_zip_eps = kw['x_zip_eps'] if 'x_zip_eps' in kw else 0.05\n self.x_zip_emax = kw['x_zip_emax'] if 'x_zip_emax' in kw else 0.25\n\n if self.x_zip: # redefine the eigenvectors\n sm2e,sma2x = self.build_x_zip()\n if self.verbosity>0: \n print(__name__, 'self.mo_energy.shape =', self.mo_energy.shape)\n print(__name__, 'sm2e.shape =', sm2e.shape)\n self.ksn2e = array([sm2e])\n ksn2fd = fermi_dirac_occupations(self.telec, self.ksn2e, self.fermi_energy)\n for s,n2fd in enumerate(ksn2fd[0]):\n if not all(n2fd>self.nfermi_tol): continue\n print(self.telec, s, nfermi_tol, n2fd)\n raise RuntimeError(__name__, 'telec is too high?')\n \n self.ksn2f = (3-self.nspin)*ksn2fd\n self.nfermi = array([argmax(ksn2fd[0,s,:]<self.nfermi_tol) for s in range(self.nspin)], dtype=int)\n self.vstart = array([argmax(1.0-ksn2fd[0,s,:]>=self.nfermi_tol) for s in range(self.nspin)], dtype=int)\n self.xocc = [ma2x[:nfermi,:] for ma2x,nfermi in zip(sma2x,self.nfermi)]\n self.xvrt = [ma2x[vstart:,:] for ma2x,vstart in zip(sma2x,self.vstart)]\n\n def build_x_zip(self):\n \"\"\" define compressed eigenvectors \"\"\"\n from pyscf.nao.m_x_zip import x_zip\n sm2e = []\n sma2x = []\n for n2e,na2x in zip(self.mo_energy[0], self.mo_coeff[0,:,:,:,0]):\n vst, i2w,i2dos, m2e, ma2x = x_zip(n2e, na2x, eps=self.x_zip_eps, emax=self.x_zip_emax)\n sm2e.append(m2e)\n sma2x.append(ma2x)\n sm2e = array(sm2e) \n return sm2e, sma2x\n",
"# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, division\nfrom pyscf.nao.m_overlap_ni import overlap_ni\n\ndef overlap_lil(sv, ao_log=None, funct=overlap_ni,**kvargs):\n \"\"\"\n Computes the overlap matrix and returns it in List of Lists format (easy to index)\n Args:\n sv : (System Variables), this must have arrays of coordinates and species, etc\n Returns:\n overlap (real-space overlap) for the whole system\n \"\"\"\n from pyscf.nao.m_ao_matelem import ao_matelem_c\n from scipy.sparse import lil_matrix\n from numpy import array, int64, zeros\n\n aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)\n me = aome.init_one_set(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)\n atom2s = zeros((sv.natm+1), dtype=int64)\n for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]\n sp2rcut = array([max(mu2rcut) for mu2rcut in me.ao1.sp_mu2rcut])\n\n n = atom2s[-1]\n lil_mat = lil_matrix((n,n))\n\n for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):\n for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):\n if (sp2rcut[sp1]+sp2rcut[sp2])**2<=sum((rv1-rv2)**2) : continue\n lil_mat[s1:f1,s2:f2] = funct(me,sp1,rv1,sp2,rv2,**kvargs)\n\n return lil_mat\n"
] | [
[
"numpy.array",
"numpy.argmax"
],
[
"scipy.sparse.lil_matrix",
"numpy.zeros"
]
] |
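`overlap_lil` above skips atom-pair blocks whose cutoff radii cannot overlap before filling the `lil_matrix`. A toy sketch of that screening pattern, with fabricated coordinates and cutoffs and a constant block standing in for `funct(...)`:

    import numpy as np
    from scipy.sparse import lil_matrix

    coords = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [9.0, 0.0, 0.0]])
    rcut = np.array([2.0, 2.0, 2.0])   # made-up per-atom cutoff radii
    n_per_atom = 2                      # pretend each atom carries 2 orbitals
    n = n_per_atom * len(coords)
    mat = lil_matrix((n, n))
    for i, ri in enumerate(coords):
        for j, rj in enumerate(coords):
            if (rcut[i] + rcut[j])**2 <= np.sum((ri - rj)**2):
                continue  # cutoff spheres do not overlap: leave the block zero
            s1, s2 = i * n_per_atom, j * n_per_atom
            mat[s1:s1 + n_per_atom, s2:s2 + n_per_atom] = np.ones((n_per_atom, n_per_atom))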
ITNano/soundserver | [
"b84cbfd821987ad8af72a6c2677caa0b949abff6"
] | [
"audiostream.py"
] | [
"import numpy\nimport wave\n \nclass Audiostream(object):\n \n def __init__(self, volume_prio=1):\n self.volume_prio = volume_prio\n \n def get_data(self, frame_count, channels, width, rate):\n return \"\".join([\"\\x00\"]*frames*self.channels*self.width)\n \n def get_volume_priority(self):\n return self.volume_prio\n \n \nclass WaveAudioStream(Audiostream):\n \n def __init__(self, file, volume_prio=1):\n Audiostream.__init__(self, volume_prio)\n self.wf = wave.open(file)\n \n def get_data(self, frame_count, channels, width, rate, format):\n data = self.wf.readframes(frame_count)\n if len(data) > 0:\n return numpy.fromstring(data, format)\n else:\n return None\n \n \nclass FeedAudioStream(Audiostream):\n \n def __init__(self, keep_open=False, volume_prio=1):\n Audiostream.__init__(self, volume_prio)\n self.keep_open = keep_open\n self.closed = False\n self.data = []\n self.offset = 0\n \n def feed(self, data):\n if self.closed:\n print(\"WARNING: Trying to add data to a closed stream.\")\n self.data.append(data)\n \n def clean(self):\n self.data = self.data[self.offset:]\n self.offset = 0\n \n def get_data(self, frame_count, channels, width, rate, format):\n size = min(len(self.data)-self.offset, frame_count*channels)\n if size == 0 and not self.keep_open:\n self.closed = True\n return None\n data = numpy.array(self.data[self.offset:self.offset+size])\n self.offset += size\n if self.offset > rate:\n self.clean()\n return data\n "
] | [
[
"numpy.array",
"numpy.fromstring"
]
] |
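A small usage sketch for the `FeedAudioStream` above, assuming `audiostream.py` is on the path; it feeds individual samples (since `feed` appends its argument as a single element) and reads them back in frame-sized chunks:

    from audiostream import FeedAudioStream

    stream = FeedAudioStream(keep_open=False)
    for sample in (0.0, 0.1, 0.2, 0.3):
        stream.feed(sample)
    chunk = stream.get_data(frame_count=2, channels=1, width=2, rate=44100, format='f')
    print(chunk)          # numpy array holding the first two samples
    print(stream.closed)  # False: data was still available on this read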
sksg/parallize | [
"58d211fd92a4cac97b1d7795932157b839e42b2b"
] | [
"parallize.py"
] | [
"import numpy as np\nfrom numpy.core.numerictypes import typecodes\nimport inspect\nimport functools\nimport re\nimport builtins\nimport os\nfrom concurrent.futures import ThreadPoolExecutor as thread_pool\nfrom concurrent.futures import ProcessPoolExecutor as process_pool\nfrom concurrent.futures import as_completed\n\n\ndef _iterable(y):\n try:\n iter(y)\n except TypeError:\n return False\n return True\n\n# We use an extended version of:\n# http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html\n_DIMENSION_NAME = r'\\w+'\n_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)\n_VECTOR_ARGUMENT = r'(\\({}\\))'.format(_CORE_DIMENSION_LIST)\n_EXCLUDED_ARGUMENT = r'(_)'\n_ARGUMENT = r'(?:{0:}|{1:})'.format(_VECTOR_ARGUMENT, _EXCLUDED_ARGUMENT)\n_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)\n_OUT_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_VECTOR_ARGUMENT)\n_SIGNATURE = '^{0:}->{1:}$'.format(_ARGUMENT_LIST, _OUT_ARGUMENT_LIST)\n\n\ndef _parse_signature(signature):\n if not re.match(_SIGNATURE, signature):\n raise ValueError(\n 'not a valid gufunc signature: {}'.format(signature))\n inargs, outargs = [], []\n _in, _out = signature.split('->')\n for arg in re.findall(_ARGUMENT, _in):\n if arg[1] == \"_\":\n inargs.append(None)\n else:\n inarg = []\n for match in re.findall(_DIMENSION_NAME, arg[0]):\n try:\n inarg.append(int(match))\n except:\n inarg.append(match)\n inargs.append(tuple(inarg))\n\n for arg in re.findall(_ARGUMENT, _out):\n if arg[1] == \"_\":\n outargs.append(None)\n else:\n outarg = []\n for match in re.findall(_DIMENSION_NAME, arg[0]):\n try:\n outarg.append(int(match))\n except:\n outarg.append(match)\n outargs.append(tuple(outarg))\n return inargs, outargs\n\n\ndef _update_dim_sizes(dim_sizes, arg, core_dims):\n if not core_dims:\n return\n num_core_dims = len(core_dims)\n if arg.ndim < num_core_dims:\n raise ValueError('%d-dimensional argument does not have enough '\n 'dimensions for all core dimensions %r'\n % (arg.ndim, core_dims))\n core_shape = arg.shape[-num_core_dims:]\n for dim, size in zip(core_dims, core_shape):\n if dim in dim_sizes:\n if size != dim_sizes[dim]:\n raise ValueError('inconsistent size for core dimension'\n ' %r: %r vs %r'\n % (dim, size, dim_sizes[dim]))\n elif isinstance(dim, str):\n dim_sizes[dim] = size\n elif dim != size:\n raise ValueError('inconsistent size for core dimension: %r vs %r'\n % (dim, size))\n\n\ndef _parse_input_dimensions(args, arg_dims):\n dim_sizes = {}\n broadcast_args = []\n for a, dims in zip(args, arg_dims):\n if dims is None:\n broadcast_args.append(None)\n continue\n _update_dim_sizes(dim_sizes, a, dims)\n ndim = a.ndim - len(dims)\n dummy_array = np.lib.stride_tricks.as_strided(0, a.shape[:ndim])\n broadcast_args.append(dummy_array)\n broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)\n return broadcast_shape, dim_sizes\n\n\ndef _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):\n return [(broadcast_shape + tuple((dim_sizes[dim]\n if isinstance(dim, str) else dim)\n for dim in core_dims)\n if core_dims is not None else None)\n for core_dims in list_of_core_dims]\n\n\ndef _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):\n shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)\n arrays = tuple(np.empty(shape, dtype=dtype)\n for shape, dtype in zip(shapes, dtypes))\n return arrays\n\n\ndef parallize(signature, otypes=None, doc=None, default='parallelenv',\n evn='MEGA_PARALLIZE', isvec=False, parallel='threads',\n 
sendindex=False):\n def wrap_parallized(pyfunc):\n return parallized(pyfunc, signature, otypes, doc, default,\n evn, isvec, parallel, sendindex)\n return wrap_parallized\n\n\nclass parallized(object): # inspired by np.vectorize\n def __init__(self, pyfunc, signature, otypes=None, doc=None,\n default='parallel', evn='MEGA_PARALLIZE', isvec=False,\n parallel_type='threads', sendindex=False):\n self.signature = signature\n self.default = default\n self.evn = evn\n self.isvec = isvec\n self.parallel_type = parallel_type\n self.sendindex = sendindex\n self._ufunc = None # Caching to improve default performance\n\n if doc is not None:\n self.__doc__ = doc\n else:\n self.__doc__ = pyfunc.__doc__\n\n if isinstance(otypes, str):\n for char in otypes:\n if char not in typecodes['All']:\n raise ValueError(\"Invalid otype specified: %s\" % (char,))\n elif _iterable(otypes):\n otypes = ''.join([np.dtype(x).char for x in otypes])\n elif otypes is not None:\n raise ValueError(\"Invalid otype specification\")\n self.otypes = otypes\n\n self._in, self._out = _parse_signature(signature)\n self.excluded = [(a is None) for a in self._in]\n\n self.pyfunc = pyfunc\n self.__wrapped__ = pyfunc\n self.parameters = [k for k in inspect.signature(pyfunc).parameters]\n if self.sendindex:\n self.parameters = self.parameters[1:]\n\n def _process_args(self, args, kwargs):\n givenargs = list(args)\n allargs = []\n for p in self.parameters:\n if p in kwargs:\n allargs.append(kwargs.pop(p))\n else:\n if len(args) == 0:\n msg = 'expected {}, got {}'.format(len(self.parameters),\n len(givenargs))\n raise TypeError(\"Missing positional arguments: \" + msg)\n allargs.append(args[0])\n args = args[1:]\n\n if len(kwargs) != 0:\n raise TypeError(\"Unknown keyword arguments {}!\".format(kwargs))\n if len(args) != 0:\n msg = 'expected {}, got {}'.format(len(self.parameters),\n len(givenargs))\n raise TypeError(\"Too many positional arguments: \" + msg)\n\n args = tuple((np.asanyarray(a) if not ex else a)\n for a, ex in zip(allargs, self.excluded))\n\n broadcast_shape, dim_sizes = _parse_input_dimensions(args, self._in)\n input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, self._in)\n args = [(np.broadcast_to(arg, shape, subok=True)\n if shape is not None else arg)\n for arg, shape in zip(args, input_shapes)]\n return broadcast_shape, dim_sizes, args\n\n def __call__(self, *args, **kwargs):\n if self.default is 'parallel':\n return self.parallel(*args, **kwargs)\n if self.default is 'sequential':\n return self.sequential(*args, **kwargs)\n if self.default is 'vectorized':\n return self.vectorized(*args, **kwargs)\n if self.default is 'parallelenv':\n if self.evn in os.environ and not os.environ[self.evn]:\n return self.vectorized(*args, **kwargs)\n else:\n return self.parallel(*args, **kwargs)\n\n def vectorized(self, *args, **kwargs):\n if self.isvec:\n if self.sendindex:\n return self.pyfunc(None, *args, **kwargs)\n else:\n return self.pyfunc(*args, **kwargs)\n else:\n return self.sequential(*args, **kwargs)\n\n def sequential(self, *args, **kwargs):\n broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)\n\n outputs = None\n otypes = self.otypes\n nout = len(self._out)\n\n for index in np.ndindex(*broadcast_shape):\n i_args = ((arg[index] if _in is not None else arg)\n for _in, arg in zip(self._in, args))\n if self.sendindex:\n results = self.pyfunc(index, *i_args)\n else:\n results = self.pyfunc(*i_args)\n\n n_results = len(results) if isinstance(results, tuple) else 1\n\n if nout != n_results:\n raise 
ValueError(\n 'wrong number of outputs from pyfunc: expected %r, got %r'\n % (nout, n_results))\n\n if nout == 1:\n results = (results,)\n\n if outputs is None:\n for result, core_dims in zip(results, self._out):\n _update_dim_sizes(dim_sizes, result, core_dims)\n\n if otypes is None:\n otypes = [np.asarray(result).dtype for result in results]\n\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n self._out, otypes)\n\n for output, result in zip(outputs, results):\n output[index] = result\n\n if outputs is None:\n # did not call the function even once\n if otypes is None:\n raise ValueError('cannot call `vectorize` on size 0 inputs '\n 'unless `otypes` is set')\n if builtins.any(dim not in dim_sizes\n for dims in self._out\n for dim in dims):\n raise ValueError('cannot call `vectorize` with a signature '\n 'including new output dimensions on size 0 '\n 'inputs')\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n self._out, otypes)\n\n return outputs[0] if nout == 1 else outputs\n\n def parallel(self, *args, **kwargs):\n broadcast_shape, dim_sizes, args = self._process_args(args, kwargs)\n\n outputs = None\n otypes = self.otypes\n nout = len(self._out)\n\n if self.parallel_type == 'threads':\n pool = thread_pool(os.cpu_count())\n elif self.parallel_type == 'processes':\n pool = process_pool(os.cpu_count())\n futures = {}\n\n for index in np.ndindex(*broadcast_shape):\n i_args = ((arg[index] if _in is not None else arg)\n for _in, arg in zip(self._in, args))\n if self.sendindex:\n futures[pool.submit(self.pyfunc, index, *i_args)] = index\n else:\n futures[pool.submit(self.pyfunc, *i_args)] = index\n\n for f in as_completed(futures):\n index = futures[f]\n results = f.result()\n\n n_results = len(results) if isinstance(results, tuple) else 1\n\n if nout != n_results:\n raise ValueError(\n 'wrong number of outputs from pyfunc: expected %r, got %r'\n % (nout, n_results))\n\n if nout == 1:\n results = (results,)\n\n if outputs is None:\n for result, core_dims in zip(results, self._out):\n _update_dim_sizes(dim_sizes, result, core_dims)\n\n if otypes is None:\n otypes = [np.asarray(result).dtype for result in results]\n\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n self._out, otypes)\n\n for output, result in zip(outputs, results):\n output[index] = result\n\n if outputs is None:\n # did not call the function even once\n if otypes is None:\n raise ValueError('cannot call `vectorize` on size 0 inputs '\n 'unless `otypes` is set')\n if builtins.any(dim not in dim_sizes\n for dims in self._out\n for dim in dims):\n raise ValueError('cannot call `vectorize` with a signature '\n 'including new output dimensions on size 0 '\n 'inputs')\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n self._out, otypes)\n\n return outputs[0] if nout == 1 else outputs\n\n\nclass asparallel(object):\n def __init__(self, pyfunc, default='parallelenv', evn='MEGA_PARALLIZE'):\n self.pyfunc = pyfunc\n self.default = default\n self.evn = evn\n self.__wrapped__ = pyfunc\n\n def __call__(self, *args, **kwargs):\n if self.default is 'parallel':\n return self.parallel(*args, **kwargs)\n if self.default is 'sequential':\n return self.sequential(*args, **kwargs)\n if self.default is 'vectorized':\n return self.vectorized(*args, **kwargs)\n if self.default is 'parallelenv':\n if self.evn in os.environ and not os.environ[self.evn]:\n return self.vectorized(*args, **kwargs)\n else:\n return self.parallel(*args, **kwargs)\n\n def parallel(self, *args, **kwargs):\n def wrap_parallels(parallelfunc):\n return 
parallelfunc.parallel\n return self.pyfunc(wrap_parallels, *args, **kwargs)\n\n def sequential(self, *args, **kwargs):\n def wrap_parallels(parallelfunc):\n return parallelfunc.sequential\n return self.pyfunc(wrap_parallels, *args, **kwargs)\n\n def vectorized(self, *args, **kwargs):\n def wrap_parallels(parallelfunc):\n return parallelfunc.vectorized\n return self.pyfunc(wrap_parallels, *args, **kwargs)\n"
] | [
[
"numpy.empty",
"numpy.dtype",
"numpy.lib.stride_tricks._broadcast_shape",
"numpy.asanyarray",
"numpy.asarray",
"numpy.lib.stride_tricks.as_strided",
"numpy.broadcast_to",
"numpy.ndindex"
]
] |
StanfordVL/Lasersuite | [
"8b78c3d202f2a4b8712c5f228feaf5fae61f16e9"
] | [
"robosuite/models/robots/panda_robot.py"
] | [
"import numpy as np\nfrom .robot_model import RobotModel\nfrom ...utils.mjcf_utils import xml_path_completion\n\n\nclass Panda(RobotModel):\n \"\"\"Panda is a sensitive single-arm robot designed by Franka.\"\"\"\n\n def __init__(self, idn=0, bottom_offset=(0, 0, -0.913)):\n \"\"\"\n Args:\n idn (int or str): Number or some other unique identification string for this robot instance\n bottom_offset (3-list/tuple): x,y,z offset desired from initial coordinates\n \"\"\"\n super().__init__(xml_path_completion(\"robots/panda/robot.xml\"), idn=idn, bottom_offset=bottom_offset)\n\n # Set joint damping\n self.set_joint_attribute(attrib=\"damping\", values=np.array((0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.01)))\n\n @property\n def dof(self):\n return 7\n\n @property\n def gripper(self):\n return \"PandaGripper\"\n\n @property\n def default_controller_config(self):\n return \"default_panda\"\n\n @property\n def init_qpos(self):\n return np.array([0, np.pi / 16.0, 0.00, -np.pi / 2.0 - np.pi / 3.0, 0.00, np.pi - 0.2, np.pi/4])\n\n @property\n def base_xpos_offset(self):\n return {\n \"bins\": (-0.5, 0.3, 0),\n \"empty\": (-0.6, 0, 0),\n \"pegs\": (-0.5, 0.15, 0),\n \"table\": lambda table_length: (-0.16 - table_length / 2, 0, 0)\n }\n\n @property\n def arm_type(self):\n return \"single\"\n\n @property\n def _joints(self):\n return [\"joint1\", \"joint2\", \"joint3\", \"joint4\", \"joint5\", \"joint6\", \"joint7\"]\n\n @property\n def _eef_name(self):\n return \"right_hand\"\n\n @property\n def _robot_base(self):\n return \"base\"\n\n @property\n def _actuators(self):\n return {\n \"pos\": [], # No position actuators for panda\n \"vel\": [], # No velocity actuators for panda\n \"torq\": [\"torq_j1\", \"torq_j2\", \"torq_j3\", \"torq_j4\", \"torq_j5\", \"torq_j6\", \"torq_j7\"]\n }\n\n @property\n def _contact_geoms(self):\n return [\"link1_collision\", \"link2_collision\", \"link3_collision\", \"link4_collision\",\n \"link5_collision\", \"link6_collision\", \"link7_collision\"]\n\n @property\n def _root(self):\n return \"link0\"\n\n @property\n def _links(self):\n return [\"link1\", \"link2\", \"link3\", \"link4\", \"link5\", \"link6\", \"link7\"]\n"
] | [
[
"numpy.array"
]
] |
jiasenwu/gan | [
"f92aeca269365180125d4e4c57c53cbf5e679299"
] | [
"tensorflow_gan/examples/stargan_estimator/train_test.py"
] | [
"# coding=utf-8\n# Copyright 2019 The TensorFlow GAN Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for stargan_estimator.train.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_gan.examples.stargan_estimator import train_lib\n\nmock = tf.compat.v1.test.mock\n\n\ndef _test_generator(input_images, _):\n \"\"\"Simple generator function.\"\"\"\n return input_images * tf.compat.v1.get_variable('dummy_g', initializer=2.0)\n\n\ndef _test_discriminator(inputs, num_domains):\n \"\"\"Differentiable dummy discriminator for StarGAN.\"\"\"\n hidden = tf.compat.v1.layers.flatten(inputs)\n output_src = tf.reduce_mean(input_tensor=hidden, axis=1)\n output_cls = tf.compat.v1.layers.dense(inputs=hidden, units=num_domains)\n return output_src, output_cls\n\n\nclass TrainTest(tf.test.TestCase):\n\n @mock.patch.object(train_lib.data_provider, 'provide_data', autospec=True)\n @mock.patch.object(\n train_lib.data_provider, 'provide_celeba_test_set', autospec=True)\n def test_main(self, mock_provide_celeba_test_set, mock_provide_data):\n hparams = train_lib.HParams(\n batch_size=1,\n patch_size=8,\n output_dir='/tmp/tfgan_logdir/stargan/',\n generator_lr=1e-4,\n discriminator_lr=1e-4,\n max_number_of_steps=0,\n steps_per_eval=1,\n adam_beta1=0.5,\n adam_beta2=0.999,\n gen_disc_step_ratio=0.2,\n master='',\n ps_tasks=0,\n task=0)\n num_domains = 3\n\n # Construct mock inputs.\n images_shape = [\n hparams.batch_size, hparams.patch_size, hparams.patch_size, 3\n ]\n img_list = [np.zeros(images_shape, dtype=np.float32)] * num_domains\n # Create a list of num_domains arrays of shape [batch_size, num_domains].\n # Note: assumes hparams.batch_size <= num_domains.\n lbl_list = [np.eye(num_domains)[:hparams.batch_size, :]] * num_domains\n mock_provide_data.return_value = (img_list, lbl_list)\n mock_provide_celeba_test_set.return_value = np.zeros(\n [3, hparams.patch_size, hparams.patch_size, 3])\n\n train_lib.train(hparams, _test_generator, _test_discriminator)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.eye",
"numpy.zeros",
"tensorflow.reduce_mean",
"tensorflow.compat.v1.layers.flatten",
"tensorflow.compat.v1.layers.dense",
"tensorflow.test.main",
"tensorflow.compat.v1.get_variable"
]
] |
LuisCerdenoMota/SHERLOCK | [
"5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1"
] | [
"experimental/inject.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division, absolute_import\n\n#::: modules\nimport numpy as np\nimport os, sys\nimport ellc\nfrom transitleastsquares import catalog_info\nimport astropy.constants as ac\nimport astropy.units as u\nimport lightkurve as lk\nimport pandas as pd\n\n\nnp.random.seed(42)\n\n#::: load data and set the units correctly\nTIC_ID = 85400193 # TIC_ID of our candidate\nlcf= lk.search_lightcurvefile('TIC '+str(TIC_ID), mission=\"tess\").download_all()\nab, mass, massmin, massmax, radius, radiusmin, radiusmax = catalog_info(TIC_ID=TIC_ID)\n\n#units for ellc\nrstar=radius*u.R_sun\nmstar=mass*u.M_sun\n#mass and radius for the TLS\n#rstar=radius\n#mstar=mass\nmstar_min = mass-massmin\nmstar_max = mass+massmax\nrstar_min = radius-radiusmin\nrstar_max = radius+radiusmax\n\n#uncomment the following lines to check that the parameters used are correct.\n\n#print('\\n STELLAR PROPERTIES FOR THE SIGNAL SEARCH')\n#print('================================================\\n')\n#print('limb-darkening estimates using quadratic LD (a,b)=', ab)\n#print('mass =', format(mstar,'0.5f'))\n#print('mass_min =', format(mstar_min,'0.5f'))\n#print('mass_max =', format(mstar_max,'0.5f'))\n#print('radius =', format(rstar,'0.5f'))\n#print('radius_min =', format(rstar_min,'0.5f'))\n#print('radius_max =', format(rstar_max,'0.5f'))\n\n\nlc=lcf.PDCSAP_FLUX.stitch().remove_nans() # remove of the nans\nlc_new=lk.LightCurve(time=lc.time, flux=lc.flux,flux_err=lc.flux_err)\nclean=lc_new.remove_outliers(sigma_lower=float('inf'), sigma_upper=3) #remove outliers over 3sigma\nflux0=clean.flux\ntime=clean.time\nflux_err = clean.flux_err\n#period_maximum=(max(time)-min(time))/2.\n#time, flux0 = np.genfromtxt('TESS_phot.csv', delimiter=',', unpack=True)\n#rstar = 0.211257 * 41.46650444642 #in Rearth\n\n#::: make model \ndef make_model(epoch, period, rplanet):\n #a = (7.495e-6 * period**2)**(1./3.)*u.au #in AU\n P1=period*u.day\n a = np.cbrt((ac.G*mstar*P1**2)/(4*np.pi**2)).to(u.au)\n #print(\"radius_1 =\", rstar.to(u.au) / a) #star radius convert from AU to in units of a \n #print(\"radius_2 =\", rplanet.to(u.au) / a)\n texpo=2./60./24.\n #print(\"T_expo = \", texpo,\"dy\")\n #tdur=t14(R_s=radius, M_s=mass,P=period,small_planet=False) #we define the typical duration of a small planet in this star\n #print(\"transit_duration= \", tdur*24*60,\"min\" )\n model = ellc.lc(\n t_obs = time,\n radius_1 = rstar.to(u.au) / a, #star radius convert from AU to in units of a\n radius_2 = rplanet.to(u.au) / a, #convert from Rearth (equatorial) into AU and then into units of a\n sbratio = 0,\n incl = 90,\n light_3 = 0,\n t_zero = epoch,\n period = period,\n a = None,\n q = 1e-6,\n f_c = None, f_s = None,\n ldc_1=[0.2755,0.5493], ldc_2 = None,\n gdc_1 = None, gdc_2 = None,\n didt = None,\n domdt = None,\n rotfac_1 = 1, rotfac_2 = 1,\n hf_1 = 1.5, hf_2 = 1.5,\n bfac_1 = None, bfac_2 = None,\n heat_1 = None, heat_2 = None,\n lambda_1 = None, lambda_2 = None,\n vsini_1 = None, vsini_2 = None,\n t_exp=texpo, n_int=None,\n grid_1='default', grid_2='default',\n ld_1='quad', ld_2=None,\n shape_1='sphere', shape_2='sphere',\n spots_1=None, spots_2=None,\n exact_grav=False, verbose=1)\n\n flux_t = flux0 + model - 1.\n if model[0] > 0:\n flux = flux_t\n flux_err_model = flux_err\n time_custom = time\n else:\n flux = []\n time_custom = []\n flux_err_model = []\n return time_custom, flux, flux_err_model\n #minutes=10\n #print(len(time))\n #print(min(time),max(time))\n 
#bins=len(time)*2./minutes\n #print(bins)\n #bin_means, bin_edges, binnumber = stats.binned_statistic(time, flux, statistic='mean', bins=bins)\n #bin_stds, _, _ = stats.binned_statistic(time, flux, statistic='std', bins=bins)\n #bin_width = (bin_edges[1] - bin_edges[0])\n #bin_centers = bin_edges[1:] - bin_width/2\n #print('RMS PDCSAP flux (ppm): ',np.std(flux0[~np.isnan(flux0)])*1e6)\n #print('RMS model (ppm): ',np.std(flux[~np.isnan(flux)])*1e6)\n #print('RMS 10min bin detrended (ppm): ',np.std(bin_means[~np.isnan(bin_means)])*1e6)\n \n #fig, (ax1,ax2,ax3) = plt.subplots(3, 1, figsize=(10,5), constrained_layout=True)\n ##ax1\n #ax1.plot(time, flux0, linewidth=0.05 ,color='black', alpha=0.4)\n ##ax1.legend(bbox_to_anchor=(0.85, 0.95), loc=2, borderaxespad=0.,fontsize=8)\n #ax1.set_ylabel(\"Normalized flux\")\n #ax1.set_xlim(1766,1769)\n ##ax2\n #ax2.plot(time, flux0, linewidth=0.05 ,color='black', alpha=0.4)\n ##ax2.plot(time, model, linewidth=0.9 ,color='firebrick', alpha=1)\n #ax2.errorbar(time, model, marker='.', markersize=2, color='firebrick', alpha=1, linestyle='none')\n #ax2.set_ylabel(\"Normalized flux\")\n #ax2.set_xlim(1766,1769)\n ##ax3\n #ax3.plot(time, flux, linewidth=0.1 ,color='teal', alpha=0.5)\n #ax3.errorbar(bin_centers, bin_means, marker='.', markersize=4, color='darkorange', alpha=1, linestyle='none')\n #ax3.set_ylabel(\"Normalized flux\")\n #ax3.set_xlabel(\"Time (days)\")\n #ax3.set_xlim(1766,1769)\n #plt.savefig('model.png', dpi=200)\n\n\n\ndef logprint(*text):\n# print(*text)\n original = sys.stdout\n with open( os.path.join('tls/'+'P = '+str(period)+' days, Rp = '+str(rplanet)+'.log'), 'a' ) as f:\n sys.stdout = f\n print(*text)\n sys.stdout = original\n\n \n#::: iterate through grid of periods and rplanet\ndir = \"/home/pozuelos/martin/curves\"\nif not os.path.isdir(dir):\n os.mkdir(dir)\nmax_period = 10\nmin_period = 0.5\nfor period in np.arange(min_period, max_period, 0.5):\n for t0 in np.arange(time[60], time[60] + period - 0.1, period / 5):\n for rplanet in np.arange(4, 0.65, -0.1):\n rplanet = np.around(rplanet, decimals=2)*u.R_earth\n print('\\n')\n print('P = '+str(period)+' days, Rp = '+str(rplanet) + \", T0 = \" + str(t0))\n time_model, flux_model, flux_err_model = make_model(t0, period, rplanet)\n file_name = os.path.join(dir + '/P' + str(period) + '_R' + str(rplanet.value) + '_' + str(t0) + '.csv')\n lc_df = pd.DataFrame(columns=['#time', 'flux', 'flux_err'])\n lc_df['#time'] = time_model\n lc_df['flux'] = flux_model\n lc_df['flux_err'] = flux_err_model\n lc_df.to_csv(file_name, index=False)\n"
] | [
[
"numpy.cbrt",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.arange",
"numpy.around"
]
] |
kungfu-team/mindspore-bert | [
"71501cf52ae01db9d6a73fb64bcfe68a6509dc32",
"71501cf52ae01db9d6a73fb64bcfe68a6509dc32"
] | [
"mindspore/nn/metrics/confusion_matrix.py",
"mindspore/common/initializer.py"
] | [
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ConfusionMatrixMetric & ConfusionMatrix.\"\"\"\nimport numpy as np\nfrom mindspore._checkparam import Validator as validator\nfrom .metric import Metric\n\n\nclass ConfusionMatrix(Metric):\n r\"\"\"\n Computes the confusion matrix. The performance matrix of measurement classification model is the model whose output\n is binary or multi class. The confusion matrix is calculated. An array of shape [BC4] is returned.\n The third dimension represents each channel of each sample in the input batch.Where B is the batch size and C is\n the number of classes to be calculated.\n\n If you only want to find confusion matrix, use this class. If you want to find 'PPV', 'TPR', 'TNR', etc., use class\n 'mindspore.metrics.ConfusionMatrixMetric'.\n\n Args:\n num_classes (int): Number of classes in the dataset.\n normalize (str): The parameter of calculating ConfusionMatrix supports four Normalization modes, Choose from:\n\n - **'no_norm'** (None) - No normalization is used. Default: None.\n - **'target'** (str) - Normalization based on target value.\n - **'prediction'** (str) - Normalization based on predicted value.\n - **'all'** (str) - Normalization over the whole matrix.\n\n threshold (float): A threshold, which is used to compare with the input tensor. Default: 0.5.\n\n Examples:\n >>> x = Tensor(np.array([1, 0, 1, 0]))\n >>> y = Tensor(np.array([1, 0, 0, 1]))\n >>> metric = nn.ConfusionMatrix(num_classes=2, normalize=\"no_norm\", threshold=0.5)\n >>> metric.clear()\n >>> metric.update(x, y)\n >>> output = metric.eval()\n >>> print(output)\n [[1. 1.]\n [1. 1.]]\n \"\"\"\n TARGET = \"target\"\n PREDICTION = \"prediction\"\n ALL = \"all\"\n NO_NORM = \"no_norm\"\n\n def __init__(self, num_classes, normalize=NO_NORM, threshold=0.5):\n super(ConfusionMatrix, self).__init__()\n\n self.num_classes = validator.check_value_type(\"num_classes\", num_classes, [int])\n if normalize != ConfusionMatrix.TARGET and normalize != ConfusionMatrix.PREDICTION and \\\n normalize != ConfusionMatrix.ALL and normalize is not ConfusionMatrix.NO_NORM:\n raise ValueError(\n 'The normalize way should be in [all, prediction, label, None], but got {}.'.format(normalize)\n )\n\n self.normalize = normalize\n self.threshold = validator.check_value_type(\"threshold\", threshold, [float])\n self.clear()\n\n def clear(self):\n \"\"\"Clears the internal evaluation result.\"\"\"\n self.confusion_matrix = np.zeros((self.num_classes, self.num_classes))\n self._is_update = False\n\n def update(self, *inputs):\n \"\"\"\n Update state with y_pred and y.\n\n Args:\n inputs: Input `y_pred` and `y`. 
`y_pred` and `y` are a `Tensor`, a list or an array.\n `y_pred` is the predicted value, `y` is the true value.\n The shape of `y_pred` is :math:`(N, C, ...)` or :math:`(N, ...)`.\n The shape of `y` is :math:`(N, ...)`.\n\n Raises:\n ValueError: If the number of the inputs is not 2.\n \"\"\"\n if len(inputs) != 2:\n raise ValueError('ConfusionMatrix need 2 inputs (y_pred, y), but got {}.'.format(len(inputs)))\n\n y_pred = self._convert_data(inputs[0])\n y = self._convert_data(inputs[1])\n\n if not (y_pred.ndim == y.ndim or y_pred.ndim == y.ndim + 1):\n raise ValueError(\"y_pred and y should have the same number of dimensions, or the dimension of y_pred \"\n \"equals the dimension of y add 1.\")\n\n if y_pred.ndim == y.ndim + 1:\n y_pred = np.argmax(y_pred, axis=1)\n\n if y_pred.ndim == y.ndim and y_pred.dtype in (np.float16, np.float32, np.float64):\n y_pred = (y_pred >= self.threshold).astype(int)\n\n trans = (y.reshape(-1) * self.num_classes + y_pred.reshape(-1)).astype(int)\n bincount = np.bincount(trans, minlength=self.num_classes ** 2)\n confusion_matrix = bincount.reshape(self.num_classes, self.num_classes)\n self.confusion_matrix += confusion_matrix\n self._is_update = True\n\n def eval(self):\n \"\"\"\n Computes confusion matrix.\n\n Returns:\n numpy.ndarray, the computed result.\n \"\"\"\n\n if not self._is_update:\n raise RuntimeError('Call the update method before calling eval.')\n\n confusion_matrix = self.confusion_matrix.astype(float)\n\n matrix_target = confusion_matrix / confusion_matrix.sum(axis=1, keepdims=True)\n matrix_pred = confusion_matrix / confusion_matrix.sum(axis=0, keepdims=True)\n matrix_all = confusion_matrix / confusion_matrix.sum()\n normalize_dict = {ConfusionMatrix.TARGET: matrix_target,\n ConfusionMatrix.PREDICTION: matrix_pred,\n ConfusionMatrix.ALL: matrix_all}\n\n if self.normalize == ConfusionMatrix.NO_NORM:\n return confusion_matrix\n\n matrix = normalize_dict.get(self.normalize)\n if matrix[np.isnan(matrix)].size != 0:\n matrix[np.isnan(matrix)] = 0\n\n return matrix\n\n\nclass ConfusionMatrixMetric(Metric):\n r\"\"\"\n The performance matrix of measurement classification model is the model whose output is binary or multi class.\n The correlation measure of confusion matrix was calculated from the full-scale tensor, and the average values of\n batch, class channel and iteration were collected. This function supports the calculation of all measures described\n below: the metric name in parameter metric_name.\n\n If you want to use confusion matrix to calculate, such as 'PPV', 'TPR', 'TNR', use this class.\n If you only want to calculate confusion matrix, please use 'mindspore.metrics.ConfusionMatrix'.\n\n Args:\n skip_channel (bool): Whether to skip the measurement calculation on the first channel of the predicted output.\n Default: True.\n metric_name (str): The names of indicators are in the following range. Of course, you can also set the industry\n common aliases for these indicators. Choose from:\n [\"sensitivity\", \"specificity\", \"precision\", \"negative predictive value\", \"miss rate\",\n \"fall out\", \"false discovery rate\", \"false omission rate\", \"prevalence threshold\",\n \"threat score\", \"accuracy\", \"balanced accuracy\", \"f1 score\",\n \"matthews correlation coefficient\", \"fowlkes mallows index\", \"informedness\", \"markedness\"].\n calculation_method (bool): If true, the measurement for each sample is calculated first. If it is false, the\n confusion matrix of all samples is accumulated first. 
As for classification task,\n 'calculation_method' should be False. Default: False.\n decrease (str): Define the mode to reduce the calculation result of one batch of data. Decrease is used only if\n calculation_method is True. Default: \"mean\". Choose from:\n [\"none\", \"mean\", \"sum\", \"mean_batch\", \"sum_batch\", \"mean_channel\", \"sum_channel\"].\n\n Examples:\n >>> metric = ConfusionMatrixMetric(skip_channel=True, metric_name=\"tpr\",\n ... calculation_method=False, decrease=\"mean\")\n >>> metric.clear()\n >>> x = Tensor(np.array([[[0], [1]], [[1], [0]]]))\n >>> y = Tensor(np.array([[[0], [1]], [[0], [1]]]))\n >>> metric.update(x, y)\n >>> x = Tensor(np.array([[[0], [1]], [[1], [0]]]))\n >>> y = Tensor(np.array([[[0], [1]], [[1], [0]]]))\n >>> avg_output = metric.eval()\n >>> print(avg_output)\n [0.5]\n \"\"\"\n def __init__(self,\n skip_channel=True,\n metric_name=\"sensitivity\",\n calculation_method=False,\n decrease=\"mean\"):\n super(ConfusionMatrixMetric, self).__init__()\n\n self.confusion_matrix = _ConfusionMatrix(skip_channel=skip_channel, metric_name=metric_name,\n calculation_method=calculation_method, decrease=decrease)\n self.skip_channel = validator.check_value_type(\"skip_channel\", skip_channel, [bool])\n self.calculation_method = validator.check_value_type(\"calculation_method\", calculation_method, [bool])\n self.metric_name = validator.check_value_type(\"metric_name\", metric_name, [str])\n decrease_list = [\"none\", \"mean\", \"sum\", \"mean_batch\", \"sum_batch\", \"mean_channel\", \"sum_channel\"]\n decrease = validator.check_value_type(\"decrease\", decrease, [str])\n self.decrease = validator.check_string(decrease, decrease_list, \"decrease\")\n self.clear()\n\n def clear(self):\n \"\"\"Clears the internal evaluation result.\"\"\"\n self._total_num = 0\n self._class_num = 0\n self._total_tp = 0.0\n self._total_fp = 0.0\n self._total_tn = 0.0\n self._total_fn = 0.0\n\n def update(self, *inputs):\n \"\"\"\n Update state with predictions and targets.\n\n inputs:\n Input `y_pred` and `y`. `y_pred` and `y` are a `Tensor`, a list or an array.\n\n - **y_pred** (ndarray) - Input data to compute. It must be one-hot format and first dim is batch.\n The shape of `y_pred` is :math:`(N, C, ...)` or :math:`(N, ...)`.\n As for classification tasks, `y_pred` should has the shape [BN] where N is larger than 1.\n As for segmentation tasks, the shape should be [BNHW] or [BNHWD].\n - **y** (ndarray) - Compute the true value of the measure. 
It must be one-hot format and first dim is batch.\n The shape of `y` is :math:`(N, C, ...)`.\n\n Raises:\n ValueError: If the number of the inputs is not 2.\n \"\"\"\n if len(inputs) != 2:\n raise ValueError('ConfusionMatrixMetric need 2 inputs (y_pred, y), but got {}.'.format(len(inputs)))\n\n y_pred = self._convert_data(inputs[0])\n y = self._convert_data(inputs[1])\n\n if self.calculation_method is True:\n score, not_nans = self.confusion_matrix(y_pred, y)\n not_nans = int(not_nans.item())\n self._total_num += score.item() * not_nans\n self._class_num += not_nans\n else:\n confusion_matrix = self.confusion_matrix(y_pred, y)\n confusion_matrix, _ = _decrease_metric(confusion_matrix, \"sum\")\n self._total_tp += confusion_matrix[0].item()\n self._total_fp += confusion_matrix[1].item()\n self._total_tn += confusion_matrix[2].item()\n self._total_fn += confusion_matrix[3].item()\n\n def eval(self):\n \"\"\"\n Computes confusion matrix metric.\n\n Returns:\n ndarray, the computed result.\n \"\"\"\n\n if self.calculation_method is True:\n if self._class_num == 0:\n raise RuntimeError(\"ConfusionMatrixMetric must have at least one example before it can be computed.\")\n\n return self._total_num / self._class_num\n\n confusion_matrix = np.array([self._total_tp, self._total_fp, self._total_tn, self._total_fn])\n return _compute_confusion_matrix_metric(self.metric_name, confusion_matrix)\n\n\nclass _ConfusionMatrix:\n \"\"\"\n Compute confusion matrix related metrics.\n\n Args:\n skip_channel (bool): Whether to skip the measurement calculation on the first channel of the predicted\n output. Default: True.\n metric_name (str): The names of indicators are in the following range. Of course, you can also set the industry\n common aliases for these indicators.\n calculation_method (bool): If true, the measurement for each sample is calculated first. If it is false, the\n confusion matrix for each image (the output of function '_get_confusion_matrix')\n will be returned. In this way, users should achieve the confusion matrixes for all\n images during an epochand then use '_compute_confusion_matrix_metric' to calculate\n the metric. Default: False.\n decrease (Union[DecreaseMetric, str]): [\"none\", \"mean\", \"sum\", \"mean_batch\", \"sum_batch\", \"mean_channel\",\n \"sum_channel\"]\n Define the mode to reduce the calculation result of one batch of data.\n Decrease is used only if calculation_method is True. Default: \"mean\".\n \"\"\"\n\n def __init__(self, skip_channel=True, metric_name=\"hit_rate\", calculation_method=False,\n decrease=\"mean\"):\n super().__init__()\n self.skip_channel = skip_channel\n self.metric_name = metric_name\n self.calculation_method = calculation_method\n self.decrease = decrease\n\n def __call__(self, y_pred, y):\n \"\"\"\n 'y_preds' is expected to have binarized predictions and 'y' should be in one-hot format.\n\n Args:\n - **y_pred** (ndarray) - Input data to compute. It must be one-hot format and first dim is batch.\n - **y** (ndarray) - Ground truth to compute the metric. 
It must be one-hot format and first dim is batch.\n\n Raises:\n ValueError: If `metric_name` is empty.\n ValueError: when `y_pred` has less than two dimensions.\n \"\"\"\n if not np.all(y.astype(np.uint8) == y):\n raise ValueError(\"y should be a binarized ndarray.\")\n\n dims = y_pred.ndim\n if dims < 2:\n raise ValueError(\"y_pred should have at least two dimensions.\")\n\n if dims == 2 or (dims == 3 and y_pred.shape[-1] == 1):\n if self.calculation_method:\n self.calculation_method = False\n\n confusion_matrix = _get_confusion_matrix(y_pred=y_pred, y=y, skip_channel=self.skip_channel)\n\n if self.calculation_method:\n if isinstance(self.metric_name, str):\n confusion_matrix = _compute_confusion_matrix_metric(self.metric_name, confusion_matrix)\n chart, not_nans = _decrease_metric(confusion_matrix, self.decrease)\n return chart, not_nans\n\n if not self.metric_name:\n raise ValueError(\"There should be at least one metric name.\")\n\n results = []\n for metric_name in self.metric_name:\n sub_confusion_matrix = _compute_confusion_matrix_metric(metric_name, confusion_matrix)\n chart, not_nans = _decrease_metric(sub_confusion_matrix, self.decrease)\n results.append(chart)\n results.append(not_nans)\n return results\n\n return confusion_matrix\n\n\ndef _get_confusion_matrix(y_pred, y, skip_channel=True):\n \"\"\"\n The confusion matrix is calculated. An array of shape [BC4] is returned. The third dimension represents each channel\n of each sample in the input batch.Where B is the batch size and C is the number of classes to be calculated.\n\n Args:\n y_pred (ndarray): input data to compute. It must be one-hot format and first dim is batch.\n The values should be binarized.\n y (ndarray): ground truth to compute the metric. It must be one-hot format and first dim is batch.\n The values should be binarized.\n skip_channel (bool): whether to skip metric computation on the first channel of the predicted output.\n Default: True.\n\n Raises:\n ValueError: when `y_pred` and `y` have different shapes.\n \"\"\"\n\n if not skip_channel:\n y = y[:, 1:] if y.shape[1] > 1 else y\n y_pred = y_pred[:, 1:] if y_pred.shape[1] > 1 else y_pred\n\n y = y.astype(float)\n y_pred = y_pred.astype(float)\n validator.check('y_shape', y.shape, 'y_pred_shape', y_pred.shape)\n batch_size, n_class = y_pred.shape[:2]\n y_pred = y_pred.reshape(batch_size, n_class, -1)\n y = y.reshape(batch_size, n_class, -1)\n tp = ((y_pred + y) == 2).astype(float)\n tn = ((y_pred + y) == 0).astype(float)\n tp = tp.sum(axis=2)\n tn = tn.sum(axis=2)\n p = y.sum(axis=2)\n n = y.shape[-1] - p\n fn = p - tp\n fp = n - tn\n\n return np.stack([tp, fp, tn, fn], axis=-1)\n\n\ndef _decrease_mean(not_nans, chart):\n not_nans = not_nans.sum(axis=1)\n chart = np.where(not_nans > 0, chart.sum(axis=1) / not_nans, np.zeros(1, dtype=float))\n\n not_nans = (not_nans > 0).astype(float).sum(axis=0)\n chart = np.where(not_nans > 0, chart.sum(axis=0) / not_nans, np.zeros(1, dtype=float))\n\n return not_nans, chart\n\n\ndef _decrease_sum(not_nans, chart):\n not_nans = not_nans.sum(axis=(0, 1))\n chart = np.sum(chart, axis=(0, 1))\n\n return not_nans, chart\n\n\ndef _decrease_mean_batch(not_nans, chart):\n not_nans = not_nans.sum(axis=0)\n chart = np.where(not_nans > 0, chart.sum(axis=0) / not_nans, np.zeros(1, dtype=float))\n\n return not_nans, chart\n\n\ndef _decrease_sum_batch(not_nans, chart):\n not_nans = not_nans.sum(axis=0)\n chart = chart.sum(axis=0)\n\n return not_nans, chart\n\n\ndef _decrease_mean_channel(not_nans, chart):\n not_nans = 
not_nans.sum(axis=1)\n chart = np.where(not_nans > 0, chart.sum(axis=1) / not_nans, np.zeros(1, dtype=float))\n\n return not_nans, chart\n\n\ndef _decrease_sum_channel(not_nans, chart):\n not_nans = not_nans.sum(axis=1)\n chart = chart.sum(axis=1)\n\n return not_nans, chart\n\n\ndef _decrease_none(not_nans, chart):\n return not_nans, chart\n\n\ndef _decrease_metric(chart, decrease=\"mean\"):\n \"\"\"\n This function is used to reduce the calculated metrics for each class of each example.\n\n Args:\n chart (ndarray): A data table containing the calculated measurement scores for each batch and class.\n The first two dims should be batch and class.\n decrease (str): Define the mode to reduce computation result of 1 batch data. Decrease will only be employed\n when 'calculation_method' is True. Default: \"mean\".\n \"\"\"\n\n nans = np.isnan(chart)\n not_nans = (~nans).astype(float)\n chart[nans] = 0\n\n decrease_dict = {\"mean\": _decrease_mean(not_nans, chart),\n \"sum\": _decrease_sum(not_nans, chart),\n \"mean_batch\": _decrease_mean_batch,\n \"sum_batch\": _decrease_sum_batch(not_nans, chart),\n \"mean_channel\": _decrease_mean_channel(not_nans, chart),\n \"sum_channel\": _decrease_sum_channel(not_nans, chart),\n \"none\": _decrease_none(not_nans, chart)}\n not_nans, chart = decrease_dict.get(decrease)\n\n return chart, not_nans\n\n\ndef _calculate_tpr(tp, p):\n \"\"\"Calculate tpr.\"\"\"\n return tp, p\n\n\ndef _calculate_tnr(tn, n):\n \"\"\"Calculate tnr.\"\"\"\n return tn, n\n\n\ndef _calculate_ppv(tp, fp):\n \"\"\"Calculate ppv.\"\"\"\n return tp, (tp + fp)\n\n\ndef _calculate_npv(tn, fn):\n \"\"\"Calculate npv.\"\"\"\n return tn, (tn + fn)\n\n\ndef _calculate_fnr(fn, p):\n \"\"\"Calculate fnr.\"\"\"\n return fn, p\n\n\ndef _calculate_fpr(fp, n):\n \"\"\"Calculate fpr.\"\"\"\n return fp, n\n\n\ndef _calculate_fdr(tp, fp):\n \"\"\"Calculate fdr.\"\"\"\n return fp, (fp + tp)\n\n\ndef _calculate_for(tn, fn):\n \"\"\"Calculate for.\"\"\"\n return fn, (fn + tn)\n\n\ndef _calculate_pt(tp, tn, p, n):\n \"\"\"Calculate pt.\"\"\"\n tpr = np.where(p > 0, tp / p, np.array(float(\"nan\")))\n tnr = np.where(n > 0, tn / n, np.array(float(\"nan\")))\n numerator = np.sqrt(tpr * (1.0 - tnr)) + tnr - 1.0\n denominator = tpr + tnr - 1.0\n\n return numerator, denominator\n\n\ndef _calculate_ts(tp, fp, fn):\n \"\"\"Calculate ts.\"\"\"\n return tp, (tp + fn + fp)\n\n\ndef _calculate_acc(tp, tn, p, n):\n \"\"\"Calculate acc.\"\"\"\n return (tp + tn), (p + n)\n\n\ndef _calculate_ba(tp, tn, p, n):\n \"\"\"Calculate ba.\"\"\"\n tpr = np.where(p > 0, tp / p, np.array(float(\"nan\")))\n tnr = np.where(n > 0, tn / n, np.array(float(\"nan\")))\n numerator, denominator = (tpr + tnr), 2.0\n\n return numerator, denominator\n\n\ndef _calculate_f1(tp, fp, fn):\n \"\"\"Calculate f1.\"\"\"\n return tp * 2.0, (tp * 2.0 + fn + fp)\n\n\ndef _calculate_mcc(tp, fp, tn, fn):\n \"\"\"Calculate mcc.\"\"\"\n numerator = tp * tn - fp * fn\n denominator = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n\n return numerator, denominator\n\n\ndef _calculate_fm(tp, fp, p):\n \"\"\"Calculate fm.\"\"\"\n tpr = np.where(p > 0, tp / p, np.array(float(\"nan\")))\n ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float(\"nan\")))\n numerator = np.sqrt(ppv * tpr)\n denominator = 1.0\n\n return numerator, denominator\n\n\ndef _calculate_bm(tp, tn, p, n):\n \"\"\"Calculate bm.\"\"\"\n tpr = np.where(p > 0, tp / p, np.array(float(\"nan\")))\n tnr = np.where(n > 0, tn / n, np.array(float(\"nan\")))\n numerator = tpr + tnr - 1.0\n 
denominator = 1.0\n\n return numerator, denominator\n\n\ndef _calculate_mk(tp, fp, tn, fn):\n \"\"\"Calculate mk.\"\"\"\n ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float(\"nan\")))\n npv = np.where((tn + fn) > 0, tn / (tn + fn), np.array(float(\"nan\")))\n npv = tn / (tn + fn)\n numerator = ppv + npv - 1.0\n denominator = 1.0\n\n return numerator, denominator\n\n\ndef _compute_confusion_matrix_metric(metric_name, confusion_matrix):\n \"\"\"\n This function is used to compute confusion matrix related metric.\n\n Args:\n metric_name (str): Refer to conflusionmatrixmetric 'metric_name'. Some of the metrics have multiple aliases\n (as shown in the wikipedia page aforementioned), and you can also input those names instead.\n confusion_matrix (ndarray): Refer to '_get_confusion_matrix'.\n\n Raises:\n ValueError: when the size of the last dimension of confusion_matrix is not 4.\n NotImplementedError: when specify a not implemented metric_name.\n\n \"\"\"\n\n metric = _check_metric_name(metric_name)\n\n input_dim = confusion_matrix.ndim\n if input_dim == 1:\n confusion_matrix = np.expand_dims(confusion_matrix, 0)\n if confusion_matrix.shape[-1] != 4:\n raise ValueError(\"The size of the last dimension of confusion_matrix should be 4.\")\n\n tp = confusion_matrix[..., 0]\n fp = confusion_matrix[..., 1]\n tn = confusion_matrix[..., 2]\n fn = confusion_matrix[..., 3]\n p = tp + fn\n n = fp + tn\n\n metric_name_dict = {\"tpr\": _calculate_tpr(tp, p),\n \"tnr\": _calculate_tnr(tn, n),\n \"ppv\": _calculate_ppv(tp, fp),\n \"npv\": _calculate_npv(tn, fn),\n \"fnr\": _calculate_fnr(fn, p),\n \"fpr\": _calculate_fpr(fp, n),\n \"fdr\": _calculate_fdr(tp, fp),\n \"for\": _calculate_for(tn, fn),\n \"pt\": _calculate_pt(tp, tn, p, n),\n \"ts\": _calculate_ts(tp, fp, fn),\n \"acc\": _calculate_acc(tp, tn, p, n),\n \"ba\": _calculate_ba(tp, tn, p, n),\n \"f1\": _calculate_f1(tp, fp, fn),\n \"mcc\": _calculate_mcc(tp, fp, tn, fn),\n \"fm\": _calculate_fm(tp, fp, p),\n \"bm\": _calculate_bm(tp, tn, p, n),\n \"mk\": _calculate_mk(tp, fp, tn, fn)\n }\n numerator, denominator = metric_name_dict.get(metric)\n\n if isinstance(denominator, np.ndarray):\n result = np.where(denominator != 0, numerator / denominator, np.array(float(\"nan\")))\n else:\n result = numerator / denominator\n return result\n\n\ndef _check_metric_name(metric_name):\n \"\"\"\n There are many metrics related to confusion matrix, and some of the metrics have more than one names. In addition,\n some of the names are very long. 
Therefore, this function is used to check and simplify the name.\n\n Returns:\n Simplified metric name.\n\n Raises:\n NotImplementedError: when the metric is not implemented.\n \"\"\"\n metric_name = metric_name.replace(\" \", \"_\")\n metric_name = metric_name.lower()\n metric_name_dict = {\"sensitivity\": \"tpr\",\n \"recall\": \"tpr\",\n \"hit_rate\": \"tpr\",\n \"true_positive_rate\": \"tpr\",\n \"tpr\": \"tpr\",\n \"specificity\": \"tnr\",\n \"selectivity\": \"tnr\",\n \"true_negative_rate\": \"tnr\",\n \"tnr\": \"tnr\",\n \"precision\": \"ppv\",\n \"positive_predictive_value\": \"ppv\",\n \"ppv\": \"ppv\",\n \"negative_predictive_value\": \"npv\",\n \"npv\": \"npv\",\n \"miss_rate\": \"fnr\",\n \"false_negative_rate\": \"fnr\",\n \"fnr\": \"fnr\",\n \"fall_out\": \"fpr\",\n \"false_positive_rate\": \"fpr\",\n \"fpr\": \"fpr\",\n \"false_discovery_rate\": \"fdr\",\n \"fdr\": \"fdr\",\n \"false_omission_rate\": \"for\",\n \"for\": \"for\",\n \"prevalence_threshold\": \"pt\",\n \"pt\": \"pt\",\n \"threat_score\": \"ts\",\n \"critical_success_index\": \"ts\",\n \"ts\": \"ts\",\n \"csi\": \"ts\",\n \"accuracy\": \"acc\",\n \"acc\": \"acc\",\n \"balanced_accuracy\": \"ba\",\n \"ba\": \"ba\",\n \"f1_score\": \"f1\",\n \"f1\": \"f1\",\n \"matthews_correlation_coefficient\": \"mcc\",\n \"mcc\": \"mcc\",\n \"fowlkes_mallows_index\": \"fm\",\n \"fm\": \"fm\",\n \"informedness\": \"bm\",\n \"bookmaker_informedness\": \"bm\",\n \"bm\": \"bm\",\n \"markedness\": \"mk\",\n \"deltap\": \"mk\",\n \"mk\": \"mk\"\n }\n\n metric_name_info = metric_name_dict.get(metric_name)\n\n if metric_name_info is None:\n raise NotImplementedError(\"The metric is not implemented.\")\n\n return metric_name_info\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Initializer for cell parameters.\"\"\"\nimport numbers\nimport math\n\nfrom functools import reduce\nimport numpy as np\nfrom scipy.stats import truncnorm\nfrom .seed import get_seed, _get_graph_seed\nfrom . import dtype as mstype\nfrom .tensor import Tensor\nfrom .._c_expression import random_normal\n\n_INITIALIZER_ALIAS = dict()\n\n\nclass Initializer:\n \"\"\"\n The base class of the initializer.\n Initialization of tensor basic attributes and model weight values.\n\n Args:\n kwargs (dict): Keyword arguments for Initializer.\n\n Returns:\n Array, an array after being initialized.\n \"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n self._seed = None\n\n @property\n def seed(self):\n if self._seed is None:\n seed, seed2 = _get_graph_seed(get_seed(), \"init\")\n else:\n seed, seed2 = self._seed + 1, 0\n return seed, seed2\n\n @seed.setter\n def seed(self, value):\n self._seed = value\n\n def _initialize(self, *kwargs):\n raise NotImplementedError('Must be overridden!')\n\n def __call__(self, arr):\n return self._initialize(arr)\n\ndef _register(*aliases):\n \"\"\"Return the alias register.\"\"\"\n def alias_reg(cls):\n name = cls.__name__\n name = name.lower()\n if name not in _INITIALIZER_ALIAS:\n _INITIALIZER_ALIAS[name] = cls\n\n for alias in aliases:\n if alias not in _INITIALIZER_ALIAS:\n _INITIALIZER_ALIAS[alias] = cls\n\n return cls\n\n return alias_reg\n\n\ndef _assignment(arr, num):\n \"\"\"Assign the value of `num` to `arr`.\"\"\"\n if arr.shape == ():\n arr = arr.reshape((1))\n arr[:] = num\n arr = arr.reshape(())\n else:\n if isinstance(num, np.ndarray):\n arr[:] = num[:]\n else:\n arr[:] = num\n return arr\n\n\n@_register('zeros')\nclass Zero(Initializer):\n \"\"\"\n Initialize the array to zero.\n\n Args:\n arr (Array): The array to be assigned.\n\n Returns:\n Array, an array after being assigned.\n \"\"\"\n def _initialize(self, arr):\n _assignment(arr, 0)\n\n\n@_register('ones')\nclass One(Initializer):\n \"\"\"\n Initialize the array to one.\n\n Args:\n arr (Array): The array to be assigned.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def _initialize(self, arr):\n _assignment(arr, 1)\n\n\ndef _calculate_fan_in_and_fan_out(shape):\n \"\"\"\n calculate fan_in and fan_out\n\n Args:\n shape (tuple): input shape.\n\n Returns:\n Tuple, a tuple with two elements, the first element is `n_in` and the second element is `n_out`.\n \"\"\"\n dimensions = len(shape)\n if dimensions < 2:\n raise ValueError(\"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions\")\n if dimensions == 2: # Linear\n fan_in = shape[1]\n fan_out = shape[0]\n else:\n num_input_fmaps = shape[1]\n num_output_fmaps = shape[0]\n receptive_field_size = 1\n if dimensions > 2:\n receptive_field_size = shape[2] * shape[3]\n fan_in = num_input_fmaps * receptive_field_size\n fan_out = 
num_output_fmaps * receptive_field_size\n return fan_in, fan_out\n\n\ndef _calculate_correct_fan(shape, mode):\n \"\"\"\n Calculate fan.\n\n Args:\n shape (tuple): input shape.\n mode (str): only support fan_in and fan_out.\n\n Returns:\n fan_in or fan_out.\n \"\"\"\n mode = mode.lower()\n valid_modes = ['fan_in', 'fan_out']\n if mode not in valid_modes:\n raise ValueError(\"Mode {} not supported, please use one of {}\".format(mode, valid_modes))\n fan_in, fan_out = _calculate_fan_in_and_fan_out(shape)\n return fan_in if mode == 'fan_in' else fan_out\n\n\ndef _calculate_gain(nonlinearity, param=None):\n \"\"\"\n Calculate gain.\n\n Args:\n nonlinearity (str): nonlinearity function.\n param (str): used to calculate negative_slope.\n\n Returns:\n number.\n \"\"\"\n linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']\n if nonlinearity in linear_fns or nonlinearity == 'sigmoid':\n res = 1\n elif nonlinearity == 'tanh':\n res = 5.0 / 3\n elif nonlinearity == 'relu':\n res = math.sqrt(2.0)\n elif nonlinearity == 'leaky_relu':\n if param is None:\n negative_slope = 0.01\n elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):\n # True/False are instances of int, hence check above\n negative_slope = param\n else:\n raise ValueError(\"negative_slope {} not a valid number\".format(param))\n res = math.sqrt(2.0 / (1 + negative_slope ** 2))\n else:\n raise ValueError(\"Unsupported nonlinearity {}\".format(nonlinearity))\n return res\n\n\ndef _calculate_in_and_out(arr):\n \"\"\"\n Calculate n_in and n_out.\n\n Args:\n arr (Array): Input array.\n\n Returns:\n Tuple, a tuple with two elements, the first element is `n_in` and the second element is `n_out`.\n \"\"\"\n dim = len(arr.shape)\n if dim < 2:\n raise ValueError(\"If initialize data with xavier uniform, the dimension of data must be greater than 1.\")\n\n n_in = arr.shape[1]\n n_out = arr.shape[0]\n\n if dim > 2:\n counter = reduce(lambda x, y: x * y, arr.shape[2:])\n n_in *= counter\n n_out *= counter\n return n_in, n_out\n\n\n@_register('xavier_uniform')\nclass XavierUniform(Initializer):\n r\"\"\"\n Initialize the array with xavier uniform algorithm, and from a uniform distribution collect samples within\n U[-boundary, boundary] The boundary is defined as:\n\n .. math::\n boundary = gain * \\sqrt{\\frac{6}{n_{in} + n_{out}}}\n\n - where :math:`n_{in}` is the number of input units in the weight tensor.\n - where :math:`n_{out}` is the number of output units in the weight tensor.\n\n Args:\n gain (float): An optional scaling factor. Default: 1.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def __init__(self, gain=1):\n super(XavierUniform, self).__init__(gain=gain)\n self.gain = gain\n\n def _initialize(self, arr):\n n_in, n_out = _calculate_fan_in_and_fan_out(arr.shape)\n\n boundary = self.gain * math.sqrt(6.0 / (n_in + n_out))\n data = np.random.uniform(-boundary, boundary, arr.shape)\n\n _assignment(arr, data)\n\n\n@_register('he_uniform')\nclass HeUniform(Initializer):\n r\"\"\"\n Initialize the array with He kaiming uniform algorithm, and from a uniform distribution collect samples within\n U[-boundary, boundary] The boundary is defined as:\n\n .. math::\n boundary = \\sqrt{\\frac{6}{(1 + a^2) \\times \\text{fan_in}}}\n\n Args:\n negative_slope (int, float, bool): The negativa slope of the rectifier used after this layer\n (only used when `nonlinearity` is 'leaky_relu'). Default: 0.\n mode (str): Either 'fan_in' or 'fan_out'. 
Choosing 'fan_in' preserves the magnitude of the\n variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes\n in the backwards pass. Default: fan_in.\n nonlinearity (str): The non-linear function, recommended to use only with 'relu' or 'leaky_relu'.\n Default: leaky_relu.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def __init__(self, negative_slope=0, mode='fan_in', nonlinearity='leaky_relu'):\n super(HeUniform, self).__init__(negative_slope=negative_slope, mode=mode, nonlinearity=nonlinearity)\n self.negative_slope = negative_slope\n self.mode = mode\n self.nonlinearity = nonlinearity\n\n def _initialize(self, arr):\n fan = _calculate_correct_fan(arr.shape, self.mode)\n gain = _calculate_gain(self.nonlinearity, self.negative_slope)\n std = gain / math.sqrt(fan)\n boundary = math.sqrt(3.0) * std\n data = np.random.uniform(-boundary, boundary, arr.shape)\n\n _assignment(arr, data)\n\n\n@_register('he_normal')\nclass HeNormal(Initializer):\n r\"\"\"\n Initialize the array with He kaiming Normal algorithm, and from a normal distribution collect samples within\n N(0, sigma).\n\n Args:\n negative_slope (int, float, bool): The negativa slope of the rectifier used after this layer\n (only used when `nonlinearity` is 'leaky_relu'). Default: 0.\n mode (str): Either 'fan_in' or 'fan_out'. Choosing 'fan_in' preserves the magnitude of the\n variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes\n in the backwards pass. Default: fan_in.\n nonlinearity (str): The non-linear function, recommended to use only with 'relu' or 'leaky_relu'.\n Default: leaky_relu.\n\n Returns:\n Array, assigned array.\n \"\"\"\n def __init__(self, negative_slope=0, mode='fan_in', nonlinearity='leaky_relu'):\n super(HeNormal, self).__init__(negative_slope=negative_slope, mode=mode, nonlinearity=nonlinearity)\n self.negative_slope = negative_slope\n self.mode = mode\n self.nonlinearity = nonlinearity\n\n def _initialize(self, arr):\n fan = _calculate_correct_fan(arr.shape, self.mode)\n gain = _calculate_gain(self.nonlinearity, self.negative_slope)\n std = gain / math.sqrt(fan)\n data = np.random.normal(0, std, arr.shape)\n\n _assignment(arr, data)\n\n\nclass Constant(Initializer):\n \"\"\"\n Initialize a constant.\n\n Args:\n value (Union[int, numpy.ndarray]): The value to initialize.\n\n Returns:\n Array, an array after being assigned.\n \"\"\"\n def __init__(self, value):\n super(Constant, self).__init__(value=value)\n self.value = value\n\n def _initialize(self, arr):\n _assignment(arr, self.value)\n\n\n@_register()\nclass Uniform(Initializer):\n \"\"\"\n Initialize a uniform array, and obtain values U(-scale, scale) from the uniform distribution\n to fill the input tensor.\n\n Args:\n scale (float): The scale of the array. Default: 0.07.\n\n Returns:\n Array, uniform array.\n \"\"\"\n def __init__(self, scale=0.07):\n super(Uniform, self).__init__(scale=scale)\n self.scale = scale\n\n def _initialize(self, arr):\n tmp = np.random.uniform(-self.scale, self.scale, arr.shape)\n _assignment(arr, tmp)\n\n\n@_register()\nclass Normal(Initializer):\n \"\"\"\n Initialize a normal array, and obtain values N(0, sigma) from the uniform distribution\n to fill the input tensor.\n\n Args:\n sigma (float): The sigma of the array. 
Default: 0.01.\n\n Returns:\n Array, normal array.\n \"\"\"\n def __init__(self, sigma=0.01):\n super(Normal, self).__init__(sigma=sigma)\n self.sigma = sigma\n\n def _initialize(self, arr):\n seed, seed2 = self.seed\n output_tensor = Tensor(np.zeros(arr.shape, dtype=np.float32))\n random_normal(0, self.sigma, arr.shape, seed, seed2, output_tensor)\n output_data = output_tensor.asnumpy()\n output_data *= self.sigma\n _assignment(arr, output_data)\n\n@_register()\nclass TruncatedNormal(Initializer):\n \"\"\"\n Initialize a truncated normal distribution which is a bounded normal distribution within N(low, high).\n\n Args:\n sigma (float): The sigma of the array. Default: 0.01.\n\n Returns:\n Array, truncated normal array.\n \"\"\"\n def __init__(self, sigma=0.01):\n super(TruncatedNormal, self).__init__(sigma=sigma)\n self.sigma = sigma\n\n def _initialize(self, arr):\n tmp = truncnorm.rvs(-2, 2, loc=0, scale=self.sigma, size=arr.shape, random_state=None)\n _assignment(arr, tmp)\n\n\ndef initializer(init, shape=None, dtype=mstype.float32):\n \"\"\"\n Create and initialize a tensor.\n\n Args:\n init (Union[Tensor, str, Initializer, numbers.Number]): Initialize value.\n\n - `str`: The `init` should be the alias of the class inheriting from `Initializer` and the corresponding\n class will be called.\n\n - `Initializer`: The `init` should be the class inheriting from `Initializer` to initialize tensor.\n\n - `numbers.Number`: The `Constant` will be called to initialize tensor.\n\n shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of\n output. Default: None.\n dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mindspore.float32.\n\n Returns:\n Union[Tensor], return is Tensor object.\n\n\n Examples:\n >>> import mindspore\n >>> from mindspore.common.initializer import initializer, One\n >>> tensor = initializer('ones', [1, 2, 3], mindspore.float32)\n >>> tensor = initializer(One(), [1, 2, 3], mindspore.float32)\n >>> tensor = initializer(0, [1, 2, 3], mindspore.float32)\n \"\"\"\n if not isinstance(init, (Tensor, numbers.Number, str, Initializer)):\n raise TypeError(\"Unsupported init type '{}'.\".format(type(init)))\n\n if isinstance(init, Tensor):\n init_shape = init.shape\n shape = shape if isinstance(shape, (tuple, list)) else [shape]\n if shape is not None and init_shape != tuple(shape):\n raise ValueError(\"The shape of init should be same as variable shape, but got the shape of init {} and \"\n \"the variable shape {}.\".format(list(init.shape), shape))\n return init\n\n if isinstance(shape, list):\n shape = tuple(shape)\n elif isinstance(shape, numbers.Number):\n shape = (shape,)\n\n for value in shape if shape is not None else ():\n if not isinstance(value, int) or value <= 0:\n raise ValueError(f\"shape is invalid, shape value must be positive integer, shape:{shape}\")\n\n if isinstance(init, str):\n init = _INITIALIZER_ALIAS[init.lower()]()\n if init is None:\n raise ValueError(\"The class corresponding to '{}' was not found.\".format(init))\n elif isinstance(init, numbers.Number):\n init = Constant(init)\n shape = shape if shape is not None else init.shape\n init_obj = Tensor(dtype=dtype, shape=shape, init=init)\n return init_obj\n\n__all__ = [\n 'Initializer',\n 'initializer',\n 'TruncatedNormal',\n 'Normal',\n 'Uniform',\n 'HeUniform',\n 'HeNormal',\n 'XavierUniform',\n 'One',\n 'Zero',\n 'Constant']\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.bincount",
"numpy.zeros",
"numpy.argmax",
"numpy.expand_dims",
"numpy.isnan",
"numpy.stack",
"numpy.array"
],
[
"numpy.random.uniform",
"numpy.random.normal",
"numpy.zeros",
"scipy.stats.truncnorm.rvs"
]
] |
Balavignesh/badminton-elo-dashboard | [
"df380afb26c89827111f7316df381408d7d19298"
] | [
"multiBatelo/multielo.py"
] | [
"import numpy as np\nfrom typing import Union, List, Callable\nimport logging\n\nfrom multiBatelo.score_functions import create_exponential_score_function\n\n\nDEFAULT_K_VALUE = 32\nDEFAULT_D_VALUE = 400\nDEFAULT_SCORING_FUNCTION_BASE = 1\n\n_default_logger = logging.getLogger(\"multielo.multielo\")\n\n\nclass MultiElo:\n \"\"\"\n Generalized Elo for multiplayer matchups (also simplifies to standard Elo for 1-vs-1 matchups).\n Does not allow ties.\n \"\"\"\n\n def __init__(\n self,\n k_value: float = DEFAULT_K_VALUE,\n d_value: float = DEFAULT_D_VALUE,\n score_function_base: float = DEFAULT_SCORING_FUNCTION_BASE,\n custom_score_function: Callable = None,\n log_base: int = 10,\n logger: logging.Logger = None,\n ):\n \"\"\"\n :param k_value: K parameter in Elo algorithm that determines how much ratings increase or decrease\n after each match\n :param d_value: D parameter in Elo algorithm that determines how much Elo difference affects win\n probability\n :param score_function_base: base value to use for scoring function; scores are approximately\n multiplied by this value as you improve from one place to the next (minimum allowed value is 1,\n which results in a linear scoring function)\n :param custom_score_function: a function that takes an integer input and returns a numpy array\n of monotonically decreasing values summing to 1\n :param log_base: base to use for logarithms throughout the Elo algorithm. Traditionally Elo\n uses base-10 logs\n :param logger: logger to use (optional)\n \"\"\"\n self.k = k_value\n self.d = d_value\n self._score_func = custom_score_function or create_exponential_score_function(base=score_function_base)\n self._log_base = log_base\n self.logger = logger or _default_logger\n\n def get_new_ratings(\n self,\n initial_ratings: Union[List[float], np.ndarray],\n result_order: List[int] = None,\n ) -> np.ndarray:\n \"\"\"\n Update ratings based on results. Takes an array of ratings before the matchup and returns an array with\n the updated ratings. Provided array should be ordered by the actual results (first place finisher's\n initial rating first, second place next, and so on).\n\n Example usage:\n >>> elo = MultiElo()\n >>> elo.get_new_ratings([1200, 1000])\n array([1207.68809835, 992.31190165])\n >>> elo.get_new_ratings([1200, 1000, 1100, 900])\n array([1212.01868209, 1012.15595083, 1087.84404917, 887.98131791])\n\n :param initial_ratings: array of ratings (float values) in order of actual results\n :param result_order: list where each value indicates the place the player in the same index of\n initial_ratings finished in. Lower is better. Identify ties by entering the same value for players\n that tied. For example, [1, 2, 3] indicates that the first listed player won, the second listed player\n finished 2nd, and the third listed player finished 3rd. [1, 2, 2] would indicate that the second\n and third players tied for 2nd place. 
(default = range(len(initial_ratings))\n :return: array of updated ratings (float values) in same order as input\n \"\"\"\n if not isinstance(initial_ratings, np.ndarray):\n initial_ratings = np.array(initial_ratings)\n n = len(initial_ratings) # number of players\n actual_scores = self.get_actual_scores(n, result_order)\n expected_scores = self.get_expected_scores(initial_ratings)\n scale_factor = self.k * (n - 1)\n #print(f\"scale factor: {scale_factor}\")\n return initial_ratings + scale_factor * (actual_scores - expected_scores)\n\n def get_actual_scores(self, n: int, result_order: List[int] = None) -> np.ndarray:\n \"\"\"\n Return the scores to be awarded to the players based on the results.\n\n :param n: number of players in the matchup\n :param result_order: list indicating order of finish (see docstring for MultiElo.get_new_ratings\n for more details\n :return: array of length n of scores to be assigned to first place, second place, and so on\n \"\"\"\n # calculate actual scores according to score function, then sort in order of finish\n result_order = result_order or list(range(n))\n scores = self._score_func(n)\n scores = scores[np.argsort(np.argsort(result_order))]\n\n # if there are ties, average the scores of all tied players\n distinct_results = set(result_order)\n if len(distinct_results) != n:\n for place in distinct_results:\n idx = [i for i, x in enumerate(result_order) if x == place]\n scores[idx] = scores[idx].mean()\n\n self._validate_actual_scores(scores, result_order)\n # print(f\"calculated actual scores: {scores}\")\n return scores\n\n @staticmethod\n def _validate_actual_scores(scores: np.ndarray, result_order: List[int]):\n if not np.allclose(1, sum(scores)):\n raise ValueError(\"scoring function does not return scores summing to 1\")\n if min(scores) != 0:\n # tie for last place means minimum score doesn't have to be zero,\n # so only raise error if there isn't a tie for last place\n last_place = max(result_order)\n if result_order.count(last_place) == 1:\n raise ValueError(\"scoring function does not return minimum value of 0\")\n if not np.all(np.diff(scores[np.argsort(result_order)]) <= 0):\n raise ValueError(\"scoring function does not return monotonically decreasing values\")\n\n def get_expected_scores(self, ratings: Union[List[float], np.ndarray]) -> np.ndarray:\n \"\"\"\n Get the expected scores for all players given their ratings before the matchup.\n\n :param ratings: array of ratings for each player in a matchup\n :return: array of expected scores for all players\n \"\"\"\n #print(f\"computing expected scores for {ratings}\")\n if not isinstance(ratings, np.ndarray):\n ratings = np.array(ratings)\n if ratings.ndim > 1:\n raise ValueError(f\"ratings should be 1-dimensional array (received {ratings.ndim})\")\n\n # get all pairwise differences\n diff_mx = ratings - ratings[:, np.newaxis]\n print(f\"diff_mx = \\n{diff_mx}\")\n\n # get individual contributions to expected score using logistic function\n logistic_mx = 1 / (1 + self._log_base ** (diff_mx / self.d))\n np.fill_diagonal(logistic_mx, 0)\n #print(f\"logistic_mx = \\n{logistic_mx}\")\n\n # get each expected score (sum individual contributions, then scale)\n expected_scores = logistic_mx.sum(axis=1)\n n = len(ratings)\n denom = n * (n - 1) / 2 # number of individual head-to-head matchups between n players\n expected_scores = expected_scores / denom\n\n # this should be guaranteed, but check to make sure\n if not np.allclose(1, sum(expected_scores)):\n raise ValueError(\"expected scores do not sum 
to 1\")\n #print(f\"calculated expected scores: {expected_scores}\")\n return expected_scores\n\n def simulate_win_probabilities(\n self,\n ratings: Union[List[float], np.ndarray],\n n_sim: int = int(1e5),\n seed: int = None,\n ) -> np.ndarray:\n \"\"\"\n Estimate the probability of each player finishing in each possible\n place using a simulation. Returns a matrix where (i, j) values are the\n probability that player i finishes in place j.\n\n To simulate a game including players in the\n ratings array, we generate a score for each player using a Gumbel\n distribution. If a player has rating R, then that player's score is\n sampled from a Gumbel(R, D) distribution, where D is the Elo D\n parameter. Then we rank the players in descending order of their\n scores to determine first place, second place, ..., last place. We\n count the number of times each player finishes in each place and then\n divide by the number of simulations to calculate the proportions.\n\n We generate scores using Gumbel distributions because of the property:\n ~~ Gumbel(a_1, b) - Gumbel(a_2, b) ~ Logistic(a_1 - a_2, b) ~~\n\n The Logistic(a_1 - a_2, b) distribution is the same distribution that\n describes the pairwise win probability if two payers have Elo ratings\n a_1 and a_2. In other words, a score sampled from Gumbel(a_1, b) will\n be greater than a score sampled from Gumbel(a_2, b) with the same\n probability that a player with Elo rating a_1 will beat a player with\n Elo rating a_2 in a 1-on-1 matchup.\n\n :param ratings: array of ratings of the players involved\n :param n_sim: number of simulations to run\n :param seed: (optional) seed for random number generation\n\n :return: matrix (a numpy array) where (i, j) values are the probability\n that player i finishes in place j\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n\n # sort so we always get the same result for same distinct ratings, but\n # keep track of original order\n idx = np.argsort(ratings)\n ratings = sorted(ratings)\n\n # simulate n_sim scores for each player from Gumbel distributions\n n_players = len(ratings)\n n_sim = int(n_sim)\n scores = np.zeros((n_players, n_sim))\n #print(f\"simulating {n_sim:,} scores for each player\")\n for i, rating in enumerate(ratings):\n scores[idx[i], :] = _gumbel_sample(\n loc=rating,\n scale=self.d,\n size=int(n_sim),\n base=self._log_base\n )\n #print(f\"finished sampling {n_sim:,} scores for player {i+1} of {n_players}\")\n\n # use the scores to decide the order of finish (highest score wins) and\n # create matrix with proportion of times each player finishes in each place\n result_mx = self._convert_scores_to_result_proportions(scores)\n #print(f\"finished simulation\")\n return result_mx\n\n @staticmethod\n def _convert_scores_to_result_proportions(scores: np.ndarray) -> np.ndarray:\n \"\"\"\n Take an array of scores with one row per player and one column per\n simulation, and return a matrix with one row per player and one column\n per place. 
Each (row, col) value in the returned matrix is the count of\n times player \"row\" finished in place \"col\".\n \"\"\"\n # sort scores from high to low for each simulation\n results = np.argsort(-scores, axis=0)\n\n # put it into a matrix where row = player, column = place, value = count\n # of times player finished in place\n n = scores.shape[0]\n count_mx = np.zeros((n, n))\n for i, x in enumerate(results):\n counts = np.bincount(x, minlength=n)\n count_mx[:, i] = counts\n\n proportion_mx = count_mx / scores.shape[1]\n return proportion_mx\n\n\ndef _gumbel_sample(\n loc: float,\n scale: float,\n size: int = 1,\n base: float = np.exp(1),\n) -> np.ndarray:\n \"\"\"\n Sample from a Gumbel distribution (optionally with a different log base).\n\n :param loc: location parameter for distribution\n :param scale: scale parameter for distribution (> 0)\n :param size: number of samples to draw\n :param base: base for logarithm (defaults to natural log)\n\n :return: sample(s) from Gumbel distribution\n \"\"\"\n if scale <= 0:\n raise ValueError(\"scale parameter for Gumbel distribution must be > 0\")\n p = np.random.rand(int(size))\n return loc - scale * _log(-_log(p, base=base), base=base)\n\n\ndef _log(x, base=np.exp(1)):\n return np.log(x) / np.log(base)"
] | [
[
"numpy.bincount",
"numpy.zeros",
"numpy.argsort",
"numpy.random.seed",
"numpy.exp",
"numpy.log",
"numpy.fill_diagonal",
"numpy.array"
]
] |
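A quick numerical check of the Gumbel-vs-logistic property quoted in the simulate_win_probabilities docstring above — a minimal sketch using only numpy, with illustrative ratings 1200/1000, base 10, and D = 400 (the classic Elo constants; not taken from this row):

import numpy as np

# P(G1 > G2) for G_i ~ Gumbel(a_i, D) sampled with log base 10 should equal
# the Elo expected score 1 / (1 + 10 ** ((a2 - a1) / D)).
rng = np.random.default_rng(0)
a1, a2, d, n = 1200.0, 1000.0, 400.0, 200_000
u1, u2 = rng.random(n), rng.random(n)
g1 = a1 - d * np.log10(-np.log10(u1))  # base-10 Gumbel via inverse transform
g2 = a2 - d * np.log10(-np.log10(u2))
print((g1 > g2).mean())                  # ~0.76 empirically
print(1 / (1 + 10 ** ((a2 - a1) / d)))   # 0.7597... analytically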
squalidux/stable-baselines3 | [
"72690b3ed0635c68f037b3dc121bd9987a6e82a8"
] | [
"stable_baselines3/sac/sac.py"
] | [
"from typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport gym\nimport numpy as np\nimport torch as th\nfrom torch.nn import functional as F\n\nfrom stable_baselines3.common.buffers import ReplayBuffer\nfrom stable_baselines3.common.noise import ActionNoise\nfrom stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm\nfrom stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom stable_baselines3.common.utils import polyak_update\nfrom stable_baselines3.sac.policies import SACPolicy\n\n\nclass SAC(OffPolicyAlgorithm):\n \"\"\"\n Soft Actor-Critic (SAC)\n Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,\n This implementation borrows code from original implementation (https://github.com/haarnoja/sac)\n from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo\n (https://github.com/rail-berkeley/softlearning/)\n and from Stable Baselines (https://github.com/hill-a/stable-baselines)\n Paper: https://arxiv.org/abs/1801.01290\n Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html\n\n Note: we use double q target and not value target as discussed\n in https://github.com/hill-a/stable-baselines/issues/270\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: learning rate for adam optimizer,\n the same learning rate will be used for all networks (Q-Values, Actor and Value function)\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param ent_coef: Entropy regularization coefficient. (Equivalent to\n inverse of reward scale in the original SAC paper.) 
Controlling exploration/exploitation trade-off.\n Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)\n :param target_update_interval: update the target network every ``target_network_update_freq``\n gradient steps.\n :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[SACPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 3e-4,\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 1,\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[ReplayBuffer] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n ent_coef: Union[str, float] = \"auto\",\n target_update_interval: int = 1,\n target_entropy: Union[str, float] = \"auto\",\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(SAC, self).__init__(\n policy,\n env,\n SACPolicy,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n action_noise,\n replay_buffer_class=replay_buffer_class,\n replay_buffer_kwargs=replay_buffer_kwargs,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n use_sde_at_warmup=use_sde_at_warmup,\n optimize_memory_usage=optimize_memory_usage,\n supported_action_spaces=(gym.spaces.Box),\n support_multi_env=True,\n )\n\n self.target_entropy = target_entropy\n self.log_ent_coef = None # type: Optional[th.Tensor]\n # Entropy coefficient / Entropy temperature\n # Inverse of the reward scale\n self.ent_coef = ent_coef\n self.target_update_interval = target_update_interval\n self.ent_coef_optimizer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(SAC, self)._setup_model()\n self._create_aliases()\n # Target entropy is used when learning the entropy coefficient\n if 
self.target_entropy == \"auto\":\n # automatically set target entropy if needed\n self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)\n else:\n # Force conversion\n # this will also throw an error for unexpected string\n self.target_entropy = float(self.target_entropy)\n\n # The entropy coefficient or entropy can be learned automatically\n # see Automating Entropy Adjustment for Maximum Entropy RL section\n # of https://arxiv.org/abs/1812.05905\n if isinstance(self.ent_coef, str) and self.ent_coef.startswith(\"auto\"):\n # Default initial value of ent_coef when learned\n init_value = 1.0\n if \"_\" in self.ent_coef:\n init_value = float(self.ent_coef.split(\"_\")[1])\n assert init_value > 0.0, \"The initial value of ent_coef must be greater than 0\"\n\n # Note: we optimize the log of the entropy coeff which is slightly different from the paper\n # as discussed in https://github.com/rail-berkeley/softlearning/issues/37\n self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)\n self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))\n else:\n # Force conversion to float\n # this will throw an error if a malformed string (different from 'auto')\n # is passed\n self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)\n\n def _create_aliases(self) -> None:\n self.actor = self.policy.actor\n self.critic = self.policy.critic\n self.critic_target = self.policy.critic_target\n\n def train(self, gradient_steps: int, batch_size: int = 64) -> None:\n # Switch to train mode (this affects batch norm / dropout)\n self.policy.set_training_mode(True)\n # Update optimizers learning rate\n optimizers = [self.actor.optimizer, self.critic.optimizer]\n if self.ent_coef_optimizer is not None:\n optimizers += [self.ent_coef_optimizer]\n\n # Update learning rate according to lr schedule\n self._update_learning_rate(optimizers)\n\n ent_coef_losses, ent_coefs = [], []\n actor_losses, critic_losses = [], []\n\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n # We need to sample because `log_std` may have changed between two gradient steps\n if self.use_sde:\n self.actor.reset_noise()\n\n # Action by the current actor for the sampled state\n actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)\n log_prob = log_prob.reshape(-1, 1)\n\n ent_coef_loss = None\n if self.ent_coef_optimizer is not None:\n # Important: detach the variable from the graph\n # so we don't change it with other losses\n # see https://github.com/rail-berkeley/softlearning/issues/60\n ent_coef = th.exp(self.log_ent_coef.detach())\n ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()\n ent_coef_losses.append(ent_coef_loss.item())\n else:\n ent_coef = self.ent_coef_tensor\n\n ent_coefs.append(ent_coef.item())\n\n # Optimize entropy coefficient, also called\n # entropy temperature or alpha in the paper\n if ent_coef_loss is not None:\n self.ent_coef_optimizer.zero_grad()\n ent_coef_loss.backward()\n self.ent_coef_optimizer.step()\n\n with th.no_grad():\n # Select action according to policy\n next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)\n # Compute the next Q values: min over all critics targets\n next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)\n next_q_values, _ = th.min(next_q_values, dim=1, 
keepdim=True)\n # add entropy term\n next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)\n # td error + entropy term\n target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values\n\n # Get current Q-values estimates for each critic network\n # using action from the replay buffer\n current_q_values = self.critic(replay_data.observations, replay_data.actions)\n\n # Compute critic loss\n critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])\n critic_losses.append(critic_loss.item())\n\n # Optimize the critic\n self.critic.optimizer.zero_grad()\n critic_loss.backward()\n self.critic.optimizer.step()\n\n # Compute actor loss\n # Alternative: actor_loss = th.mean(log_prob - qf1_pi)\n # Mean over all critic networks\n q_values_pi = th.cat(self.critic.forward(replay_data.observations, actions_pi), dim=1)\n min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)\n actor_loss = (ent_coef * log_prob - min_qf_pi).mean()\n actor_losses.append(actor_loss.item())\n\n # Optimize the actor\n self.actor.optimizer.zero_grad()\n actor_loss.backward()\n self.actor.optimizer.step()\n\n # Update target networks\n if gradient_step % self.target_update_interval == 0:\n polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)\n\n self._n_updates += gradient_steps\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/ent_coef\", np.mean(ent_coefs))\n self.logger.record(\"train/actor_loss\", np.mean(actor_losses))\n self.logger.record(\"train/critic_loss\", np.mean(critic_losses))\n if len(ent_coef_losses) > 0:\n self.logger.record(\"train/ent_coef_loss\", np.mean(ent_coef_losses))\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"SAC\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> OffPolicyAlgorithm:\n\n return super(SAC, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super(SAC, self)._excluded_save_params() + [\"actor\", \"critic\", \"critic_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"actor.optimizer\", \"critic.optimizer\"]\n if self.ent_coef_optimizer is not None:\n saved_pytorch_variables = [\"log_ent_coef\"]\n state_dicts.append(\"ent_coef_optimizer\")\n else:\n saved_pytorch_variables = [\"ent_coef_tensor\"]\n return state_dicts, saved_pytorch_variables\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.min",
"torch.ones",
"torch.no_grad",
"numpy.prod",
"numpy.mean"
]
] |
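A minimal, hedged usage sketch for the SAC class in this row via the public stable-baselines3 entry points; the environment id and hyperparameters are illustrative, not taken from the source:

import gym
from stable_baselines3 import SAC

# SAC only supports continuous (gym.spaces.Box) action spaces, per the check above.
env = gym.make("Pendulum-v1")
model = SAC("MlpPolicy", env, learning_rate=3e-4, ent_coef="auto", verbose=1)
model.learn(total_timesteps=10_000, log_interval=4)
model.save("sac_pendulum")  # hypothetical output path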
pizzahan/lingvo | [
"9b85b7ba5d037701302efa807841c05223bc7d1d"
] | [
"lingvo/core/wpm_encoder.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Encode using wordpiece models.\n\nImplements the segmentation algorithm described in the last paragraph of\np. 5150, in the following publication:\n\nM. Schuster and K. Nakajima, \"Japanese and Korean voice\nsearch,\" 2012 IEEE International Conference on Acoustics,\nSpeech and Signal Processing, 2012\n\nhttps://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport tensorflow as tf\n\nfrom lingvo.core.ops import py_x_ops\n\n# Must be a large ID.\nNO_TOKEN = 1 << 31 - 1\nNO_TOKEN_STRING = '<unk>'\n\nSENTENCE_START_STRING = '<s>'\nSENTENCE_END_STRING = '</s>'\n\nBOW_STR = '▁'\n\n\nclass WpmEncoder(object):\n\n def __init__(self, wpm_filepath, merge_prob=1.):\n \"\"\"Create a WPM encoder.\n\n Args:\n wpm_filepath: a path to the file containing the vocabulary.\n merge_prob: the probability of merging tokens while encoding.\n \"\"\"\n # Load vocabulary file.\n self._pieces = []\n with tf.gfile.Open(wpm_filepath, 'r') as f:\n for line in f.readlines():\n line = line.decode('utf-8')\n piece = line.strip().split('\\t')[0]\n self._pieces.append(piece)\n self._merge_prob = merge_prob\n\n def _TokenToString(self, token):\n return py_x_ops.vocab_id_to_token(token, vocab=self._pieces)\n\n def _StringToToken(self, tokstr):\n return tf.where(\n py_x_ops.token_in_vocab(tokstr, vocab=self._pieces),\n py_x_ops.vocab_token_to_id(tokstr, vocab=self._pieces),\n tf.broadcast_to(NO_TOKEN, tf.shape(tokstr)))\n\n def _MergeTokens(self, tokens):\n return self._StringToToken(\n self._TokenToString(tokens[0]) + self._TokenToString(tokens[1]))\n\n def _EncodeToIds(self, word):\n # Below:\n # * a token is a wordpiece ID.\n # * the tokens array will be merged in-place.\n # * the candidates array is an array of size len(tokens) - 1.\n # It contains the token for the merged wordpiece, if it exists,\n # -1 otherwise. 
For instance, candidate[3] = id(token[3] + token[4]).\n # First, split into basic UTF-8 characters (letters).\n chars = tf.strings.unicode_split(word, 'UTF-8')\n tokens = self._StringToToken(chars)\n tokens = tf.where(\n tf.equal(tokens, NO_TOKEN),\n # Unseen character.\n tf.broadcast_to(self.unk_id, tf.shape(tokens)),\n tokens)\n # Create initial candidate list.\n candidates = tf.map_fn(\n self._MergeTokens, (tokens[:-1], tokens[1:]), dtype=tokens.dtype)\n\n def _ShouldMerge(unused_tokens, candidates):\n \"\"\"Merge until not possible, or we abort early according to merge_prob.\"\"\"\n return tf.logical_and(\n tf.reduce_any(tf.not_equal(candidates, NO_TOKEN)),\n tf.random.uniform([]) < self._merge_prob)\n\n def _MergeOneToken(tokens, i):\n return tf.expand_dims(\n self._MergeTokens((tokens[i], tokens[i + 1])), axis=-1)\n\n def _MergeCandidates(tokens, candidates):\n \"\"\"Merge in the reverse binary tree.\"\"\"\n best_id = tf.argmin(candidates, output_type=tf.int32)\n # Perform the merge at position best_id.\n tokens = tf.concat(\n [tokens[:best_id], [candidates[best_id]], tokens[best_id + 2:]],\n axis=0)\n # Recompute the merge candidates.\n # Only the neighbors of best_id need to be recomputed.\n empty = tf.zeros([0], dtype=candidates.dtype)\n\n def _MergeLeft():\n return tf.concat(\n [candidates[:best_id - 1],\n _MergeOneToken(tokens, best_id - 1)],\n axis=0)\n\n left_candidates = tf.cond(tf.equal(best_id, 0), lambda: empty, _MergeLeft)\n\n def _MergeRight():\n return tf.concat(\n [_MergeOneToken(tokens, best_id), candidates[best_id + 2:]], axis=0)\n\n right_candidates = tf.cond(\n tf.greater_equal(best_id,\n tf.size(tokens) - 1), lambda: empty, _MergeRight)\n\n candidates = tf.concat([left_candidates, right_candidates], axis=0)\n return tokens, candidates\n\n return tf.while_loop(\n _ShouldMerge,\n _MergeCandidates, (tokens, candidates),\n parallel_iterations=1,\n back_prop=False)[0]\n\n def Encode(self, text):\n \"\"\"Converts string `text` to integer ids and the encoded string.\n\n Encoding includes prefixing the beginning-of-word token to each word.\n\n Returns:\n ids: the encoded integer ids.\n tokens: the encoded string.\n \"\"\"\n words = tf.sparse.to_dense(tf.strings.split([text]), default_value='')[0]\n num_words = tf.size(words)\n ids_ta = tf.TensorArray(tf.int32, 0, dynamic_size=True)\n\n def _WordsToIds(i, words, ids_ta):\n encoded_ids = self._EncodeToIds(BOW_STR + words[i])\n ids_ta = ids_ta.scatter(\n tf.range(ids_ta.size(),\n ids_ta.size() + tf.size(encoded_ids)), encoded_ids)\n return i + 1, words, ids_ta\n\n _, _, ids_ta = tf.while_loop(\n lambda i, *_: i < num_words,\n _WordsToIds,\n loop_vars=(tf.constant(0, tf.int32), words, ids_ta),\n parallel_iterations=30,\n back_prop=False)\n\n ids = ids_ta.stack()\n return ids, self._TokenToString(ids)\n\n def Decode(self, ids):\n txt = tf.strings.reduce_join(self._TokenToString(ids))\n txt = tf.strings.regex_replace(txt, BOW_STR, ' ')\n # Note that this strips spaces from the end of the input as well.\n # We assume no inputs rely on the existence of trailing whitespace.\n txt = tf.strings.strip(txt)\n return txt\n\n @property\n def sentence_start_id(self):\n return self._pieces.index(SENTENCE_START_STRING)\n\n @property\n def sentence_start_string(self):\n return SENTENCE_START_STRING\n\n @property\n def sentence_end_id(self):\n return self._pieces.index(SENTENCE_END_STRING)\n\n @property\n def sentence_end_string(self):\n return SENTENCE_END_STRING\n\n @property\n def unk_id(self):\n return 
self._pieces.index(NO_TOKEN_STRING)\n"
] | [
[
"tensorflow.size",
"tensorflow.zeros",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.strings.unicode_split",
"tensorflow.map_fn",
"tensorflow.strings.split",
"tensorflow.while_loop",
"tensorflow.TensorArray",
"tensorflow.concat",
"tensorflow.strings.strip",
"tensorflow.strings.regex_replace",
"tensorflow.not_equal",
"tensorflow.constant",
"tensorflow.gfile.Open",
"tensorflow.argmin"
]
] |
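The TensorFlow while-loop in _EncodeToIds above implements a greedy bottom-up merge; here is a hedged plain-Python sketch of the same idea (split a word into characters, then repeatedly merge an adjacent pair whose concatenation is in the vocabulary). Note the TF version selects the merge with the smallest wordpiece id via tf.argmin and honors merge_prob; this sketch simply takes the first mergeable pair:

def encode_to_pieces(word, vocab):
    # Start from single characters, as the TF code does with unicode_split.
    pieces = list(word)
    while True:
        merge_at = next((i for i in range(len(pieces) - 1)
                         if pieces[i] + pieces[i + 1] in vocab), None)
        if merge_at is None:
            return pieces
        pieces = (pieces[:merge_at]
                  + [pieces[merge_at] + pieces[merge_at + 1]]
                  + pieces[merge_at + 2:])

print(encode_to_pieces("hello", {"he", "ll", "hell", "hello"}))  # ['hello']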
igilitschenski/quaternion | [
"44dd138fa7e95e55d0ccd4a7620a3587cf314b4f"
] | [
"setup.py"
] | [
"#!/usr/bin/env python\n\n# Copyright (c) 2018, Michael Boyle\n# See LICENSE file for details: <https://github.com/moble/quaternion/blob/master/LICENSE>\n\n# Construct the version number from the date and time this python version was created.\nfrom os import environ\nfrom sys import platform\non_windows = ('win' in platform.lower() and not 'darwin' in platform.lower())\nif \"package_version\" in environ:\n version = environ[\"package_version\"]\n print(\"Setup.py using environment version='{0}'\".format(version))\nelse:\n print(\"The variable 'package_version' was not present in the environment\")\n try:\n # For cases where this is being installed from git. This gives the true version number.\n from subprocess import check_output\n if on_windows:\n version = check_output(\"\"\"git log -1 --format=%cd --date=format:'%Y.%m.%d.%H.%M.%S'\"\"\", shell=False)\n version = version.decode('ascii').strip().replace('.0', '.').replace(\"'\", \"\")\n else:\n version = check_output(\"\"\"git log -1 --format=%cd --date=format:'%Y.%-m.%-d.%-H.%-M.%-S'\"\"\", shell=True).decode('ascii').rstrip()\n print(\"Setup.py using git log version='{0}'\".format(version))\n except:\n # For cases where this isn't being installed from git. This gives the wrong version number,\n # but at least it provides some information.\n try:\n from time import strftime, gmtime\n try:\n version = strftime(\"%Y.%-m.%-d.%-H.%-M.%-S\", gmtime())\n except ValueError: # because Windows\n version = strftime(\"%Y.%m.%d.%H.%M.%S\", gmtime()).replace('.0', '.')\n print(\"Setup.py using strftime version='{0}'\".format(version))\n except:\n version = '0.0.0'\n print(\"Setup.py failed to determine the version; using '{0}'\".format(version))\nwith open('_version.py', 'w') as f:\n f.write('__version__ = \"{0}\"'.format(version))\n\n\nlong_description = \"\"\"\\\nThis package creates a quaternion type in python, and further enables numpy to create and manipulate arrays of\nquaternions. The usual algebraic operations (addition and multiplication) are available, along with numerous\nproperties like norm and various types of distance measures between two quaternions. There are also\nadditional functions like \"squad\" and \"slerp\" interpolation, and conversions to and from axis-angle, matrix,\nand Euler-angle representations of rotations. 
The core of the code is written in C for speed.\n\"\"\"\n\n\nif __name__ == \"__main__\":\n import numpy\n from setuptools import setup, Extension\n # from distutils.core import setup, Extension\n from distutils.errors import DistutilsError\n if numpy.__dict__.get('quaternion') is not None:\n raise DistutilsError('The target NumPy already has a quaternion type')\n extension = Extension(\n name='quaternion.numpy_quaternion', # This is the name of the object file that will be compiled\n sources=['quaternion.c', 'numpy_quaternion.c'],\n extra_compile_args=['/O2' if on_windows else '-O3'],\n depends=['quaternion.c', 'quaternion.h', 'numpy_quaternion.c'],\n include_dirs=[numpy.get_include()]\n )\n extension2 = Extension(\n name='quaternion.numpy_dual_quaternion', # This is the name of the object file that will be compiled\n sources=['dual_quaternion.c', 'numpy_dual_quaternion.c'],\n extra_compile_args=['/O2' if on_windows else '-O3'],\n depends=['dual_quaternion.c', 'dual_quaternion.h', 'numpy_dual_quaternion.c'],\n include_dirs=[numpy.get_include()]\n )\n setup(name='numpy-quaternion', # Uploaded to pypi under this name\n packages=['quaternion'], # This is the actual package name\n package_dir={'quaternion': ''},\n ext_modules=[extension, extension2],\n version=version,\n install_requires=[\n 'numpy>=1.13',\n ],\n url='https://github.com/moble/quaternion',\n author='Michael Boyle',\n author_email='[email protected]',\n description='Add built-in support for quaternions to numpy',\n long_description=long_description,\n )\n"
] | [
[
"numpy.__dict__.get",
"numpy.get_include"
]
] |
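A hedged sketch of the non-git fallback used above to build a date-based version string; the .replace('.0', '.') pass emulates POSIX's zero-stripping '%-m'-style formats on platforms (Windows) that reject the '-' flag:

from time import gmtime, strftime

try:
    version = strftime("%Y.%-m.%-d.%-H.%-M.%-S", gmtime())  # POSIX strftime
except ValueError:  # Windows raises on the '-' flag
    version = strftime("%Y.%m.%d.%H.%M.%S", gmtime()).replace(".0", ".")
print(version)  # e.g. "2018.6.3.17.5.42"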
Samaretas/global-motion-estimation | [
"798b70ccc23ac6d6c9d25119db22d346c965faca"
] | [
"global_motion_estimation/test scripts/gradient descent tests/dummy.py"
] | [
"import numpy as np\nfrom scipy import optimize\n\n\ndef f(x, a): return x**3 - a\ndef fder(x, a): return 3 * x**2\n\n\nrng = np.random.default_rng()\nx = rng.standard_normal(100)\na = np.arange(-50, 50)\nvec_res = optimize.newton(f, x, fprime=fder, args=(a, ), maxiter=200)\nprint(vec_res)"
] | [
[
"numpy.arange",
"scipy.optimize.newton",
"numpy.random.default_rng"
]
] |
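Since f(x, a) = x**3 - a, each root returned by the vectorized optimize.newton call above should be the real cube root of the corresponding a; a hedged sanity check, using deterministic unit starting points instead of the random ones above so convergence is reliable:

import numpy as np
from scipy import optimize

a = np.arange(-50, 50)
x0 = np.ones_like(a, dtype=float)  # starts away from f'(0) = 0
roots = optimize.newton(lambda x, a: x**3 - a, x0,
                        fprime=lambda x, a: 3 * x**2, args=(a,), maxiter=200)
print(np.allclose(roots**3, a))  # True when every start converged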
AkihideHayashi/torchfes1 | [
"83f01525e6071ffd7a884c8e108f9c25ba2b009b"
] | [
"torchfes/colvar/fix.py"
] | [
"import math\nfrom typing import Dict, Union, List\nimport torch\nfrom torch import nn, Tensor\nfrom .. import properties as p\n\n\ndef fix_msk(mol: Dict[str, Tensor], idx: Tensor):\n _, atm, dim = mol[p.pos].size()\n msk = torch.zeros([atm, dim], dtype=torch.bool, device=idx.device)\n msk[idx, :] = True\n return msk\n\n\nclass Fix(nn.Module):\n idx: Tensor\n\n def __init__(self, idx: Union[Tensor, List[int]]):\n super().__init__()\n if isinstance(idx, list):\n idx = torch.tensor(idx)\n self.register_buffer('idx', idx)\n\n def forward(self, mol: Dict[str, Tensor]):\n out = mol.copy()\n msk = fix_msk(mol, self.idx)[None, :, :]\n if p.fix_msk not in out:\n out[p.fix_msk] = msk\n else:\n out[p.fix_msk] = out[p.fix_msk] | msk\n return out\n\n\nclass FixGen(nn.Module):\n pbc: Tensor\n idx: Tensor\n\n def __init__(self, idx: Union[Tensor, List[int]], num_dim: int):\n super().__init__()\n if isinstance(idx, list):\n idx = torch.tensor(idx, dtype=torch.long)\n n = idx.numel() * num_dim\n self.register_buffer('idx', idx)\n self.register_buffer('pbc', torch.ones(n) * math.inf)\n\n def forward(self, mol: Dict[str, Tensor]):\n msk = fix_msk(mol, self.idx)\n return mol[p.pos][:, msk]\n"
] | [
[
"torch.zeros",
"torch.ones",
"torch.tensor"
]
] |
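A hedged, self-contained illustration of what fix_msk computes (the mol[p.pos] tensor is batch x atoms x dims; the mask marks every coordinate of the selected atoms), rewritten here in plain torch without the properties module:

import torch

def fix_msk_demo(pos: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    # pos: [batch, atoms, dims]; returns an [atoms, dims] boolean mask.
    _, atm, dim = pos.size()
    msk = torch.zeros([atm, dim], dtype=torch.bool, device=idx.device)
    msk[idx, :] = True
    return msk

pos = torch.zeros(1, 4, 3)   # one molecule, four atoms, xyz coordinates
idx = torch.tensor([0, 2])   # freeze atoms 0 and 2
print(fix_msk_demo(pos, idx))
# tensor([[ True,  True,  True],
#         [False, False, False],
#         [ True,  True,  True],
#         [False, False, False]])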
akathpal/UMD-CMSC733-ComputerVision | [
"f5fa21a0ada8ab8ea08a6c558f6df9676570a2df"
] | [
"SfM/Traditional/ExtraCredit/ExtractCameraPose.py"
] | [
"import numpy as np\nimport sys\n\nsys.dont_write_bytecode = True\n\ndef ExtractCameraPose(E, K):\n\n U, S, V_T = np.linalg.svd(E)\n W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n\n # print(\"E svd U\", U)\n # print(\"E svd S\", S)\n # print(\"E svd U[:, 2]\", U[:, 2])\n R = []\n C = []\n R.append(np.dot(U, np.dot(W, V_T)))\n R.append(np.dot(U, np.dot(W, V_T)))\n R.append(np.dot(U, np.dot(W.T, V_T)))\n R.append(np.dot(U, np.dot(W.T, V_T)))\n C.append(U[:, 2])\n C.append(-U[:, 2])\n C.append(U[:, 2])\n C.append(-U[:, 2])\n\n for i in range(4):\n if (np.linalg.det(R[i]) < 0):\n R[i] = -R[i]\n C[i] = -C[i]\n\n return R, C\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.linalg.det",
"numpy.linalg.svd"
]
] |
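A hedged numerical check of the four-candidate decomposition above: construct E = [t]x R from a known pose, then confirm that every candidate rotation returned by ExtractCameraPose (the function defined in this row) is a proper rotation with det(R) = +1 after the sign fix; K is unused above, so an identity placeholder is passed:

import numpy as np

def skew(t):
    return np.array([[0, -t[2], t[1]],
                     [t[2], 0, -t[0]],
                     [-t[1], t[0], 0]])

theta = 0.3  # illustrative rotation about the z-axis
R_true = np.array([[np.cos(theta), -np.sin(theta), 0],
                   [np.sin(theta), np.cos(theta), 0],
                   [0, 0, 1]])
t_true = np.array([1.0, 0.0, 0.0])
E = skew(t_true) @ R_true  # essential matrix, defined up to scale

R_cands, C_cands = ExtractCameraPose(E, np.eye(3))
print([round(np.linalg.det(R), 6) for R in R_cands])  # [1.0, 1.0, 1.0, 1.0]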
RobinYaoWenbin/Python-CommonCode | [
"1ee714541f2fd9c8b96d018d3d4eb94f4edc812a"
] | [
"python业务代码/地图散点可视化/2/plot_city_machine.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 7 09:36:45 2019\r\n\r\n@author: MyPC\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib \r\nimport math\r\nimport pymssql\r\nimport numpy as np\r\nimport copy\r\nimport re\r\nfrom sklearn import preprocessing\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom pyecharts import Map, Geo , Timeline\r\n\r\ndef get_data_signalmachine():\r\n df = pd.read_excel('Data.xlsx' , sheet_name='example')\r\n # df.fillna(0 , inplace = True)\r\n # df.set_index('year' , inplace = True)\r\n df.drop(columns = ['NO' , '首次售出年份' , '总计'] , inplace = True)\r\n df.rename(columns = {'行标签':'city'} , inplace = True)\r\n df.set_index('city' , inplace = True)\r\n df = df.T\r\n df.rename(columns = {'合计' : 'total'} , inplace = True)\r\n # print(df)\r\n return df \r\n\r\ndef plot_map(df):\r\n # maptype='china' 只显示全国直辖市和省级\r\n # 数据只能是省名和直辖市的名称\r\n # province_distribution = {'青岛': 22, '龙口': 37.56, '河北': 21, '辽宁': 12, '江西': 6, '上海': 20, '安徽': 10, '江苏': 16, '湖南': 9, '浙江': 13, '海南': 2, '广东': 22, '湖北': 8, '黑龙江': 11, '澳门': 1, '陕西': 11, '四川': 7, '内蒙古': 3, '重庆': 3, '云南': 6, '贵州': 2, '吉林': 3, '山西': 12, '山东': 11, '福建': 4, '青海': 1, '舵主科技,质量保证': 1, '天津': 1, '其他': 1}\r\n # provice=list(province_distribution.keys())\r\n # values=list(province_distribution.values())\r\n years = list(df.index)\r\n geos = []\r\n timeline = Timeline(width=1700,height=900,is_auto_play=True, timeline_bottom=-10,timeline_symbol_size=20,timeline_play_interval=400,timeline_left=20,timeline_right=100 , \\\r\n is_timeline_show = False )\r\n for index in range(len(years)):\r\n cities = list(df.columns)\r\n cities.remove('total')\r\n values = list(df.loc[years[index] , :])\r\n total_num = values[-1]\r\n del(values[-1])\r\n # print(cities)\r\n # print(values)\r\n \r\n geos.append(Geo( str(int(total_num)), title_top=\"10%\" , title_text_size=50 , subtitle = years[index] +\" , subtitle\", \\\r\n subtitle_text_size = 23 , subtitle_color=\"white\", \\\r\n title_color=\"red\", title_pos=\"center\", width=1200, height=600, \\\r\n background_color='#404a59'))\r\n # type=\"effectScatter\", is_random=True, effect_scale=5 使点具有发散性\r\n geos[index].add(\"title level1\", cities, values, type=\"effectScatter\", maptype='china' , is_random=True, effect_scale=3, is_selected = True,is_toolbox_show = True ,is_more_utils =True,\\\r\n visual_text_color=\"#fff\", symbol_size=10, is_label_show = True , legend_orient = 'left' ,is_legend_show = False, legend_top = 'bottom' , label_formatter = '{b}' , \\\r\n is_visualmap=True, is_roam=True , label_text_color=\"#00FF00\" , is_piecewise=True, label_text_size = 7,visual_range=[1, 300] , \\\r\n geo_cities_coords = {'赣江': [115.934192 , 28.826235] , '红河州' : [103.381549,23.369996] , '蒙自' : [103.371546,23.40208] , '海安' : [120.469259,32.544553] , \\\r\n '济阳' : [117.023094,36.965519] , '库车' : [82.970183,41.733785] , '文山-砚山' : [104.334442,23.621612] , '文安':[116.455985,38.891083] , '罗平':[104.309188,24.890519] , \\\r\n '宣城' : [118.762662,30.957007] , '古田' : [118.747401,26.596702] , '泗阳':[118.699691,33.723524] , } , \\\r\n pieces=[\r\n {\"min\":0.1, \"max\": 50 , \"label\": \"0-50\"},\r\n {\"min\": 51, \"max\": 100 , \"label\": \"51-100\"},\r\n {\"min\": 101, \"max\": 200 , \"label\": \"101-200\"},\r\n {\"min\":201, \"max\": 500, \"label\": \"201-500\"},\r\n {\"min\":500, \"max\": 2900, \"label\": \">500\"}, ] )\r\n geos[index].show_config()\r\n geos[index].render(\"xxxx售出数量.html\")\r\n # 时间轴定义\r\n timeline.add(geos[index],years[index] )\r\n 
timeline.render('final_graph.html')\r\n \r\n\r\ndef main():\r\n df = get_data_signalmachine()\r\n # print(df)\r\n plot_map(df)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()"
] | [
[
"pandas.read_excel"
]
] |
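The pyecharts calls above follow the old 0.5.x API; the pandas reshaping step, however, is version-independent and can be sketched stand-alone (the Chinese column names are the ones in the row's Data.xlsx and are kept as data):

import pandas as pd

df = pd.read_excel("Data.xlsx", sheet_name="example")
df.drop(columns=["NO", "首次售出年份", "总计"], inplace=True)
df.rename(columns={"行标签": "city"}, inplace=True)
df.set_index("city", inplace=True)
df = df.T  # after transposing: one row per year, one column per city
df.rename(columns={"合计": "total"}, inplace=True)

# Per-year (cities, values) pairs for the map, excluding the running total.
for year, row in df.iterrows():
    cities = [c for c in df.columns if c != "total"]
    values = row[cities].tolist()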
jeremy43/autodp-1 | [
"0a3626f6e1baaefb46715396998d1e8029a659bb"
] | [
"autodp/rdp_acct.py"
] | [
"\"\"\"\nThis file contains the implementation of the main class object: anaRDPacct --- an analytical moment accountant\nthat keeps track the effects of a hetereogeneous sequence of randomized algorithms using the RDP technique.\n\nIn particular it supports amplification of RDP by subsampling without replacement and the amplification of RDP\nby poisson sampling, but unfortunately not (yet) together.\n\"\"\"\n\n\n\n\n\n\nimport numpy as np\nfrom scipy.optimize import minimize_scalar\nimport sys\nsys.path.append('..')\nimport autodp\n\nfrom autodp import utils, rdp_bank\nfrom autodp.privacy_calibrator import subsample_epsdelta\nimport scipy\nimport math\n\ndef general_upperbound(func, mm, prob):\n \"\"\"\n\n :param func:\n :param mm: alpha in RDP\n :param prob: sample probability\n :return: the upperbound in theorem 1 in 2019 ICML,could be applied for general case(including poisson distribution)\n k_approx = 100 k approximation is applied here\n \"\"\"\n def cgf(x):\n return (x - 1) * func(x)\n\n if np.isinf(func(mm)):\n return np.inf\n if mm == 1 or mm == 0:\n return 0\n\n cur_k = np.minimum(50, mm - 1) # choose small k-approx for general upperbound (here is 50) in case of scipy-accuracy\n log_term_1 = mm * np.log(1 - prob)\n #logBin = utils.get_binom_coeffs(mm)\n log_term_2 = np.log(3) - func(mm) + mm * utils.stable_logsumexp_two(np.log(1 - prob), np.log(prob) + func(mm))\n neg_term_3 = [np.log(scipy.special.comb(mm,l)) + np.log(3) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +\n utils.stable_log_diff_exp((l - 1) * func(mm), cgf(l))[1] for l in\n range(3, cur_k + 1)]\n neg_term_4 = np.log(mm*(mm - 1)/2) + 2 * np.log(prob) + (mm - 2) * np.log(\n 1 - prob) + utils.stable_log_diff_exp(np.log(3) + func(mm), func(2))[1]\n neg_term_5 = np.log(2) + np.log(prob) + np.log(mm) + (mm - 1) * np.log(1 - prob)\n neg_term_6 = mm * np.log(1 - prob) + np.log(3) - func(mm)\n pos_term = utils.stable_logsumexp([log_term_1, log_term_2])\n neg_term_3.append(neg_term_4)\n neg_term_3.append(neg_term_5)\n neg_term_3.append(neg_term_6)\n neg_term = utils.stable_logsumexp(neg_term_3)\n bound = utils.stable_log_diff_exp(pos_term, neg_term)[1]\n return bound\n\ndef fast_subsampled_cgf_upperbound(func, mm, prob, deltas_local):\n # evaulate the fast CGF bound for the subsampled mechanism\n # func evaluates the RDP of the base mechanism\n # mm is alpha. NOT lambda.\n return np.inf\n\n if np.isinf(func(mm)):\n return np.inf\n if mm == 1:\n return 0\n secondterm = np.minimum(np.minimum((2) * np.log(np.exp(func(np.inf)) - 1)\n + np.minimum(func(2), np.log(4)),\n np.log(2) + func(2)),\n np.log(4) + 0.5 * deltas_local[int(2 * np.floor(2 / 2.0)) - 1]\n + 0.5 * deltas_local[int(2 * np.ceil(2 / 2.0)) - 1]\n ) + 2 * np.log(prob) + np.log(mm) + np.log(mm - 1) - np.log(2)\n\n if mm == 2:\n return utils.stable_logsumexp([0, secondterm])\n\n # approximate the remaining terms using a geometric series\n logratio1 = np.log(prob) + np.log(mm) + func(mm)\n logratio2 = logratio1 + np.log(np.exp(func(np.inf)) - 1)\n logratio = np.minimum(logratio1, logratio2)\n if logratio1 > logratio2:\n coeff = 1\n else:\n coeff = 2\n\n\n if mm == 3:\n return utils.stable_logsumexp([0, secondterm, np.log(coeff) + 3 * logratio])\n\n # Calculate the sum of the geometric series starting from the third term. 
This is a total of mm-2 terms.\n if logratio < 0:\n geometric_series_bound = np.log(coeff) + 3 * logratio - np.log(1 - np.exp(logratio)) \\\n + np.log(1 - np.exp((mm - 2) * logratio))\n elif logratio > 0:\n geometric_series_bound = np.log(coeff) + 3 * logratio + (mm-2) * logratio - np.log(np.exp(logratio) - 1)\n else:\n geometric_series_bound = np.log(coeff) + np.log(mm - 2)\n\n # we will approximate using (1+h)^mm\n logh1 = np.log(prob) + func(mm - 1)\n\n logh2 = logh1 + np.log(np.exp(func(np.inf)) - 1)\n\n binomial_series_bound1 = np.log(2) + mm * utils.stable_logsumexp_two(0, logh1)\n binomial_series_bound2 = mm * utils.stable_logsumexp_two(0, logh2)\n\n tmpsign, binomial_series_bound1 \\\n = utils.stable_sum_signed(True, binomial_series_bound1, False, np.log(2)\n + utils.stable_logsumexp([0, logh1 + np.log(mm), 2 * logh1 + np.log(mm)\n + np.log(mm - 1) - np.log(2)]))\n tmpsign, binomial_series_bound2 \\\n = utils.stable_sum_signed(True, binomial_series_bound2, False,\n utils.stable_logsumexp([0, logh2 + np.log(mm), 2 * logh2 + np.log(mm)\n + np.log(mm - 1) - np.log(2)]))\n\n remainder = np.min([geometric_series_bound, binomial_series_bound1, binomial_series_bound2])\n\n return utils.stable_logsumexp([0, secondterm, remainder])\n\n\n\ndef fast_poission_subsampled_cgf_upperbound(func, mm, prob):\n # evaulate the fast CGF bound for the subsampled mechanism\n # func evaluates the RDP of the base mechanism\n # mm is alpha. NOT lambda.\n\n if np.isinf(func(mm)):\n return np.inf\n if mm == 1:\n return 0\n # Bound #1: log [ (1-\\gamma + \\gamma e^{func(mm)})^mm ]\n bound1 = mm * utils.stable_logsumexp_two(np.log(1-prob), np.log(prob) + func(mm))\n\n # Bound #2: log [ (1-gamma)^alpha E [ 1 + gamma/(1-gamma) E[p/q]]^mm ]\n # log[ (1-gamma)^\\alpha { 1 + alpha gamma / (1-gamma) + gamma^2 /(1-gamma)^2 * alpha(alpha-1) /2 e^eps(2))\n # + alpha \\choose 3 * gamma^3 / (1-gamma)^3 / e^(-2 eps(alpha)) * (1 + gamma /(1-gamma) e^{eps(alpha)}) ^ (alpha - 3) }\n # ]\n if mm >= 3:\n bound2 = utils.stable_logsumexp([mm * np.log(1-prob), (mm-1) * np.log(1-prob) + np.log(mm) + np.log(prob),\n (mm-2)*np.log(1-prob) + 2 * np.log(prob) + np.log(mm) + np.log(mm-1) + func(2),\n np.log(mm) + np.log(mm-1) + np.log(mm-2) - np.log(3*2) + 3 * np.log(prob)\n + (mm-3)*np.log(1-prob) + 2 * func(mm) +\n (mm-3) * utils.stable_logsumexp_two(0, np.log(prob) - np.log(1-prob) + func(mm))])\n else:\n bound2 = bound1\n\n #print('www={} func={} mm={}'.format(np.exp(func(mm))-1),func, mm)\n #print('bound1 ={} bound2 ={}'.format(bound1,bound2))\n return np.minimum(bound1,bound2)\n\ndef fast_k_subsample_upperbound(func, mm, prob, k):\n \"\"\"\n\n :param func:\n :param mm:\n :param prob: sample probability\n :param k: approximate term\n :return: k-term approximate upper bound in therorem 11 in ICML-19\n \"\"\"\n def cgf(x):\n return (x - 1) * func(x)\n\n if np.isinf(func(mm)):\n return np.inf\n if mm == 1:\n return 0\n #logBin = utils.get_binom_coeffs(mm)\n cur_k = np.minimum(k, mm - 1)\n if (2 * cur_k) >= mm:\n exact_term_1 = (mm - 1) * np.log(1 - prob) + np.log(mm * prob - prob + 1)\n exact_term_2 = [np.log(scipy.special.comb(mm,l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) + cgf(l) for l in\n range(2, mm + 1)]\n exact_term_2.append(exact_term_1)\n bound = utils.stable_logsumexp(exact_term_2)\n return bound\n\n s, mag1 = utils.stable_log_diff_exp(0, -func(mm - cur_k))\n new_log_term_1 = np.log(1 - prob) * mm + mag1\n new_log_term_2 = -func(mm - cur_k) + mm * utils.stable_logsumexp_two(np.log(1 - prob),\n np.log(prob) + func(mm 
- cur_k))\n new_log_term_3 = [np.log(scipy.special.comb(mm,l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +\n utils.stable_log_diff_exp((l - 1) * func(mm - cur_k), cgf(l))[1] for l in\n range(2, cur_k + 1)]\n if len(new_log_term_3) > 0:\n new_log_term_3 = utils.stable_logsumexp(new_log_term_3)\n else:\n return utils.stable_logsumexp_two(new_log_term_1, new_log_term_2)\n new_log_term_4 = [np.log(scipy.special.comb(mm,mm-l)) + (mm - l) * np.log(1 - prob) + l * np.log(prob) +\n utils.stable_log_diff_exp(cgf(l), (l - 1) * func(mm - cur_k))[1] for l in\n range(mm - cur_k + 1, mm + 1)]\n new_log_term_4.append(new_log_term_1)\n new_log_term_4.append(new_log_term_2)\n new_log_term_4 = utils.stable_logsumexp(new_log_term_4)\n s, new_log_term_5 = utils.stable_log_diff_exp(new_log_term_4, new_log_term_3)\n new_bound = new_log_term_5\n return new_bound\n\n\nclass anaRDPacct:\n \"\"\"A class that keeps track of the analytical expression of the RDP --- 1/(alpha-1)*CGF of the privacy loss R.V.\"\"\"\n def __init__(self, m=100, tol=0.1, m_max=500, m_lin_max=10000, approx = False, verbose=False):\n # m_max indicates the number that we calculate binomial coefficients exactly up to.\n # beyond that we use Stirling approximation.\n\n # ------ Class Attributes -----------\n self.m = m # default number of binomial coefficients to precompute\n self.m_max = m_max # An upper bound of the quadratic dependence\n self.m_lin_max = m_lin_max # An upper bound of the linear dependence.\n self.verbose = verbose\n self.approx = approx\n self.lambs = np.linspace(1, self.m, self.m).astype(int) # Corresponds to \\alpha = 2,3,4,5,.... for RDP\n\n self.alphas = np.linspace(1, self.m, self.m).astype(int)\n self.RDPs_int = np.zeros_like(self.alphas, float)\n\n self.n=0\n self.RDPs = [] # analytical CGFs\n self.coeffs = []\n self.RDP_inf = .0 # This is effectively for pure DP.\n self.logBinomC = utils.get_binom_coeffs(self.m + 1) # The logBinomC is only needed for subsampling mechanisms.\n self.idxhash = {} # save the index of previously used algorithms\n self.cache = {} # dictionary to save results from previously seen algorithms\n self.deltas_cache = {} # dictionary to save results of all discrete derivative path\n self.evalRDP = lambda x: 0\n self.flag = True # a flag indicating whether evalCGF is out of date\n self.flag_subsample = False # a flag to indicate whether we need to expand the logBinomC.\n self.tol = tol\n\n\n # ---------- Methods ------------\n def build_zeroth_oracle(self):\n self.evalRDP = lambda x: sum([c * item(x) for (c, item) in zip(self.coeffs, self.RDPs)])\n\n def plot_rdp(self):\n if not self.flag:\n self.build_zeroth_oracle()\n self.flag = True\n\n import matplotlib.pyplot as plt\n plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n x = range(0,self.m,1)\n y = [self.evalRDP(item) for item in x]\n plt.loglog(x, y)\n plt.show()\n\n\n def plot_cgf_int(self):\n import matplotlib.pyplot as plt\n plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n plt.plot(self.alphas, self.RDPs_int)\n plt.xlabel(r'$\\lambda$')\n plt.ylabel('CGF')\n plt.show()\n\n def plot_rdp_int(self):\n import matplotlib.pyplot as plt\n plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n plt.loglog(self.alphas, self.RDPs_int)\n if not self.flag:\n self.build_zeroth_oracle()\n self.flag = True\n x = range(1,self.m_lin_max,1)\n y = [self.evalRDP(item) for item in x]\n plt.loglog(x, y)\n plt.xlabel(r'$\\alpha$')\n plt.ylabel(r'RDP $\\epsilon$')\n 
plt.show()\n\n\n def get_rdp(self,alphas):\n # alphas is a numpy array or a list of numbers\n # we will return a numpy array of the corresponding RDP\n if not self.flag:\n self.build_zeroth_oracle()\n self.flag = True\n alphas = np.array(alphas)\n assert(np.all(alphas >= 1))\n rdp_list = []\n for alpha in alphas:\n rdp_list.append(self.evalRDP(alpha))\n\n return np.array(rdp_list)\n\n def get_eps(self, delta): # minimize over \\lambda\n if not self.flag:\n self.build_zeroth_oracle()\n self.flag = True\n\n if delta<0 or delta > 1:\n print(\"Error! delta is a probability and must be between 0 and 1\")\n if delta == 0:\n return self.RDP_inf\n else:\n def fun(x): # the input the RDP's \\alpha\n if x <= 1:\n return np.inf\n else:\n return np.log(1 / delta)/(x-1) + self.evalRDP(x)\n\n def fun_int(i): # the input is RDP's \\alpha in integer\n if i <= 1 | i >= len(self.RDPs_int):\n return np.inf\n else:\n return np.log(1 / delta) / (i-1) + self.RDPs_int[i - 1]\n\n\n # When do we have computational constraints?\n # Only when we have subsampled items.\n\n # First check if the forward difference is positive at self.m, or if it is infinite\n while (self.m<self.m_max) and (not np.isposinf(fun(self.m))) and (fun_int(self.m-1)-fun_int(self.m-2) < 0):\n # If so, double m, expand logBimomC until the forward difference is positive\n\n\n if self.flag_subsample:\n\n # The following line is m^2 time.\n self.logBinomC = utils.get_binom_coeffs(self.m*2+1)\n\n # Update deltas_caches\n for key, val in self.deltas_cache.items():\n if type(key) is tuple:\n func_tmp = key[0]\n else:\n func_tmp = key\n cgf = lambda x: x*func_tmp(x+1)\n deltas,signs_deltas = utils.get_forward_diffs(cgf,self.m*2)\n\n self.deltas_cache[key] = [deltas, signs_deltas]\n\n new_alphas = range(self.m + 1, self.m * 2 + 1, 1)\n self.alphas = np.concatenate((self.alphas, np.array(new_alphas))) # array of integers\n self.m = self.m * 2\n\n mm = np.max(self.alphas)\n\n rdp_int_new = np.zeros_like(self.alphas, float)\n\n for key,val in self.cache.items():\n idx = self.idxhash[key]\n rdp = self.RDPs[idx]\n newarray = np.zeros_like(self.alphas, float)\n for j in range(2,mm+1,1):\n newarray[j-1] = rdp(1.0*j)\n newarray[0]=newarray[1]\n coeff = self.coeffs[idx]\n rdp_int_new += newarray * coeff\n self.cache[key] = newarray\n\n self.RDPs_int = rdp_int_new\n\n # # update the integer CGF and the cache for each function\n # rdp_int_new = np.zeros_like(self.RDPs_int)\n # for key,val in self.cache.items():\n # idx = self.idxhash[key]\n # rdp = self.RDPs[idx]\n # newarray = np.zeros_like(self.RDPs_int)\n # for j in range(self.m):\n # newarray[j] = rdp(1.0*(j+self.m+1))\n #\n # coeff = self.coeffs[idx]\n # rdp_int_new += newarray * coeff\n # self.cache[key] = np.concatenate((val, newarray))\n #\n # # update the corresponding quantities\n # self.RDPs_int = np.concatenate((self.RDPs_int, rdp_int_new))\n\n #self.m = self.m*2\n\n bestint = np.argmin(np.log(1 / delta)/(self.alphas[1:]-1) + self.RDPs_int[1:]) + 1\n\n if bestint == self.m-1:\n if self.verbose:\n print('Warning: Reach quadratic upper bound: m_max.')\n # In this case, we matches the maximum qudaratic upper bound\n # Fix it by calling O(1) upper bounds and do logarithmic search\n cur = fun(bestint)\n while (not np.isposinf(cur)) and fun(bestint-1)-fun(bestint-2) < -1e-8:\n bestint = bestint*2\n cur = fun(bestint)\n if bestint > self.m_lin_max and self.approx ==True:\n print('Warning: Reach linear upper bound: m_lin_max.')\n return cur\n\n results = minimize_scalar(fun, method='Bounded', bounds=[self.m-1, 
bestint + 2],\n options={'disp': False})\n if results.success:\n return results.fun\n else:\n return None\n #return fun(bestint)\n\n if bestint == 0:\n if self.verbose:\n print('Warning: Smallest alpha = 1.')\n\n # find the best integer alpha.\n bestalpha = self.alphas[bestint]\n\n results = minimize_scalar(fun, method='Bounded',bounds=[bestalpha-1, bestalpha+1],\n options={'disp':False})\n # the while loop above ensures that bestint+2 is at most m, and also bestint is at least 0.\n if results.success:\n return results.fun\n else:\n # There are cases when certain \\delta is not feasible.\n # For example, let p and q be uniform the privacy R.V. is either 0 or \\infty and unless all \\infty\n # events are taken cared of by \\delta, \\epsilon cannot be < \\infty\n return -1\n\n def compose_mechanism(self, func, coeff=1.0):\n self.flag = False\n if func in self.idxhash:\n self.coeffs[self.idxhash[func]] += coeff\n # also update the integer CGFs\n self.RDPs_int += self.cache[func] * coeff\n\n else:\n # book keeping\n self.idxhash[func] = self.n\n self.n += 1\n self.coeffs.append(coeff)\n # update the analytical\n self.RDPs.append(func)\n\n # also update the integer results\n if func in self.cache:\n tmp = self.cache[func]\n else:\n tmp = np.zeros_like(self.RDPs_int, float)\n for i in range(self.m):\n tmp[i] = func(i+1)\n self.cache[func] = tmp # save in cache\n self.RDPs_int += tmp * coeff\n\n self.RDP_inf += func(np.inf) * coeff\n #795010\n #imple 100\n def compose_subsampled_mechanism(self, func, prob, coeff=1.0):\n # This function is for subsample without replacements.\n self.flag = False\n self.flag_subsample = True\n if (func, prob) in self.idxhash:\n idx = self.idxhash[(func, prob)]\n # update the coefficients of each function\n self.coeffs[idx] += coeff\n # also update the integer CGFs\n self.RDPs_int += self.cache[(func, prob)] * coeff\n else:\n\n def cgf(x):\n return x * func(x+1)\n # we need forward differences of thpe exp(cgf)\n # The following line is the numericall y stable way of implementing it.\n # The output is in polar form with logarithmic magnitude\n deltas, signs_deltas = utils.get_forward_diffs(cgf,self.m)\n\n #deltas1, signs_deltas1 = get_forward_diffs_direct(func, self.m)\n\n #tmp = deltas-deltas1\n\n self.deltas_cache[(func,prob)] = [deltas,signs_deltas]\n\n def subsample_func_int(x):\n # This function evaluates teh CGF at alpha = x, i.e., lamb = x- 1\n deltas_local, signs_deltas_local = self.deltas_cache[(func,prob)]\n if np.isinf(func(x)):\n return np.inf\n\n mm = int(x)\n\n fastupperbound = fast_subsampled_cgf_upperbound(func, mm, prob, deltas_local)\n fastupperbound2 = general_upperbound(func, mm, prob)\n if self.approx ==True:\n if fastupperbound2 <0:\n print('general rdp is negative',x)\n return fastupperbound2\n\n if mm <= self.alphas[-1]: # compute the bound exactly. Requires book keeping of O(x^2)\n\n moments = [ np.minimum(np.minimum((j)*np.log(np.exp(func(np.inf))-1) + np.minimum(cgf(j-1),np.log(4)),\n np.log(2) + cgf(j-1)),\n np.log(4) + 0.5*deltas_local[int(2*np.floor(j/2.0))-1]\n + 0.5*deltas_local[int(2*np.ceil(j/2.0))-1]) + j*np.log(prob)\n +self.logBinomC[int(mm), j] for j in range(2,int(mm+1),1)]\n\n return np.minimum(fastupperbound, utils.stable_logsumexp([0]+moments))\n elif mm <= self.m_lin_max: # compute the bound with stirling approximation. 
Everything is O(x) now.\n moment_bound = lambda j: np.minimum(j * np.log(np.exp(func(np.inf)) - 1)\n + np.minimum(cgf(j - 1), np.log(4)), np.log(2)\n + cgf(j - 1)) + j * np.log(prob) + utils.logcomb(mm, j)\n moments = [moment_bound(j) for j in range(2,mm+1,1)]\n return np.minimum(fastupperbound, utils.stable_logsumexp([0]+ moments))\n else: # Compute the O(1) upper bound\n return fastupperbound\n\n\n\n def subsample_func(x):\n # This function returns the RDP at alpha = x\n # RDP with the linear interpolation upper bound of the CGF\n\n epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)\n\n if np.isinf(x):\n return epsinf\n if prob == 1.0:\n return func(x)\n\n if (x >= 1.0) and (x <= 2.0):\n return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))\n if np.equal(np.mod(x, 1), 0):\n return np.minimum(epsinf, subsample_func_int(x) / (x-1) )\n xc = math.ceil(x)\n xf = math.floor(x)\n return np.minimum(\n epsinf,\n ((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)\n )\n\n\n # book keeping\n self.idxhash[(func, prob)] = self.n # save the index\n self.n += 1 # increment the number of unique mechanisms\n self.coeffs.append(coeff) # Update the coefficient\n self.RDPs.append(subsample_func) # update the analytical functions\n\n # also update the integer results up to m_max.\n if (func,prob) in self.cache:\n results = self.cache[(func,prob)]\n else:\n results = np.zeros_like(self.RDPs_int, float)\n # m = np.max(self.lambs)\n mm = np.max(self.alphas)\n for alpha in range(2, mm+1):\n results[alpha-1] = subsample_func(alpha)\n results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.\n self.cache[(func,prob)] = results # save in cache\n\n self.RDPs_int += results * coeff\n # update the pure DP\n eps, delta = subsample_epsdelta(func(np.inf), 0, prob)\n self.RDP_inf += eps * coeff\n\n\n # mm = np.max(self.alphas)\n #\n # jvec = np.arange(2, mm+1) #\n # logterm3plus = np.zeros_like(results)\n # for j in jvec:\n # logterm3plus[j-2] = (np.minimum(np.minimum(j * np.log(np.exp(func(np.inf)) - 1)\n # + np.minimum(np.log(4),cgf(j-1)), np.log(2) + cgf(j-1)),\n # np.log(4) + 0.5 * deltas[int(2 * np.floor(j / 2.0))-1]\n # + 0.5 * deltas[int(2 * np.ceil(j / 2.0))-1])\n # + j * np.log(prob))\n #\n # for alpha in range(2, mm+1):\n # if np.isinf(logterm3plus[alpha-1]):\n # results[alpha-1] = np.inf\n # else:\n # tmp = utils.stable_logsumexp(logterm3plus[0:alpha-1] + self.logBinomC[alpha, 2:(alpha+1)])\n # results[alpha-1] = utils.stable_logsumexp_two(0, tmp) / (1.0*alpha-1)\n #\n # results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.\n #\n # self.cache[(func,prob)] = results # save in cache\n # self.RDPs_int += results\n #\n # # For debugging: The following 'results1' should be the same as 'results' above.\n # # results1 = np.zeros_like(self.RDPs_int, float)\n # # for j in range(self.m):\n # # results1[j] = subsample_func(j+1)\n #\n # eps, delta = subsample_epsdelta(func(np.inf), 0, prob)\n # self.RDP_inf += eps\n\n\n def compose_poisson_subsampled_mechanisms(self, func, prob, coeff=1.0):\n # This function implements the lower bound for subsampled RDP.\n # It is also the exact formula of poission_subsampled RDP for many mechanisms including Gaussian mech.\n #\n # At the moment, we do not support mixing poisson subsampling and standard subsampling.\n # TODO: modify the caching identifies so that we can distinguish different types of subsampling\n #\n self.flag = False\n self.flag_subsample = True\n if 
(func, prob) in self.idxhash:\n idx = self.idxhash[(func, prob)] # TODO: this is really where it needs to be changed.\n # update the coefficients of each function\n self.coeffs[idx] += coeff\n # also update the integer CGFs\n self.RDPs_int += self.cache[(func, prob)] * coeff\n else: # compute an easy to compute upper bound of it.\n\n def cgf(x):\n return x * func(x+1)\n\n def subsample_func_int(x):\n # This function evaluates teh CGF at alpha = x, i.e., lamb = x- 1\n\n if np.isinf(func(x)):\n return np.inf\n\n mm = int(x)\n #\n fastbound = fast_poission_subsampled_cgf_upperbound(func, mm, prob)\n\n k = self.alphas[-1]\n fastbound_k = fast_k_subsample_upperbound(func, mm, prob,k)\n if self.approx == True:\n return fastbound_k\n #fastbound = min(fastbound, fastbound_k)\n if x <= self.alphas[-1]: # compute the bound exactly.\n moments = [cgf(j-1) +j*np.log(prob) + (mm-j) * np.log(1-prob)\n + self.logBinomC[mm, j] for j in range(2,mm+1,1)]\n\n return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)\n elif mm <= self.m_lin_max:\n moments = [cgf(j-1) +j*np.log(prob) + (mm-j) * np.log(1-prob)\n + utils.logcomb(mm,j) for j in range(2,mm+1,1)]\n return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)] + moments)\n else:\n return fastbound\n\n def subsample_func(x): # linear interpolation upper bound\n # This function implements the RDP at alpha = x\n\n if np.isinf(func(x)):\n return np.inf\n if prob == 1.0:\n return func(x)\n\n epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)\n\n if np.isinf(x):\n return epsinf\n if (x >= 1.0) and (x <= 2.0):\n return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))\n if np.equal(np.mod(x, 1), 0):\n return np.minimum(epsinf, subsample_func_int(x) / (x-1) )\n xc = math.ceil(x)\n xf = math.floor(x)\n return np.minimum(\n epsinf,\n ((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)\n )\n\n # book keeping\n self.idxhash[(func, prob)] = self.n # save the index\n self.n += 1 # increment the number of unique mechanisms\n self.coeffs.append(coeff) # Update the coefficient\n self.RDPs.append(subsample_func) # update the analytical functions\n\n # also update the integer results, with a vectorized computation.\n # TODO: pre-computing subsampled RDP for integers is error-prone (implement the same thing twice)\n # TODO: and its benefits are not clear. 
We should consider removing it and simply call the lambda function.\n #\n if (func,prob) in self.cache:\n results = self.cache[(func,prob)]\n else:\n results = np.zeros_like(self.RDPs_int, float)\n mm = np.max(self.alphas) # evaluate the RDP up to order mm\n jvec = np.arange(2, mm + 1)\n logterm3plus = np.zeros_like(results) # This saves everything from j=2 to j = m+1\n for j in jvec:\n logterm3plus[j-2] = cgf(j-1) + j * np.log(prob) #- np.log(1-prob))\n\n for alpha in range(2, mm+1):\n if np.isinf(logterm3plus[alpha-1]):\n results[alpha-1] = np.inf\n else:\n tmp = utils.stable_logsumexp(logterm3plus[0:alpha-1] + self.logBinomC[alpha , 2:(alpha + 1)]\n + (alpha+1-jvec[0:alpha-1])*np.log(1-prob))\n results[alpha-1] = utils.stable_logsumexp_two((alpha-1)*np.log(1-prob)\n + np.log(1+(alpha-1)*prob), tmp) / (1.0*alpha-1)\n\n results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.\n self.cache[(func,prob)] = results # save in cache\n self.RDPs_int += results * coeff\n # update the pure DP tracker\n eps, delta = subsample_epsdelta(func(np.inf), 0, prob)\n self.RDP_inf += eps * coeff\n\n\n def compose_poisson_subsampled_mechanisms1(self, func, prob, coeff=1.0):\n # This function implements the general amplification bounds for Poisson sampling.\n # No additional assumptions are needed.\n\n # At the moment, we do not support mixing poisson subsampling and standard subsampling.\n #\n self.flag = False\n self.flag_subsample = True\n if (func, prob) in self.idxhash:\n idx = self.idxhash[(func, prob)]\n # update the coefficients of each function\n self.coeffs[idx] += coeff\n # also update the integer CGFs\n self.RDPs_int += self.cache[(func, prob)] * coeff\n else: # compute an easy to compute upper bound of it.\n\n cgf = lambda x: x*func(x+1)\n\n def subsample_func_int(x):\n # This function evaluates the CGF at alpha = x, i.e., lamb = x- 1\n if np.isinf(func(x)):\n return np.inf\n if prob == 1.0:\n return func(x)\n\n mm = int(x)\n\n fastbound = fast_poission_subsampled_cgf_upperbound(func, mm, prob)\n\n if x <= self.alphas[-1]: # compute the bound exactly.\n moments = [cgf(1) + 2*np.log(prob) + (mm-2) * np.log(1 - prob) + self.logBinomC[mm, 2]]\n moments = moments + [cgf(j-1+1) +j*np.log(prob) + (mm-j) * np.log(1 - prob)\n + self.logBinomC[mm, j] for j in range(3,mm+1,1)]\n\n return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)\n elif mm <= self.m_lin_max:\n moments = [cgf(1) + 2*np.log(prob) + (mm-2) * np.log(1 - prob) + utils.logcomb(mm, 2)]\n moments = moments + [cgf(j-1+1) +j*np.log(prob) + (mm-j) * np.log(1 - prob)\n + utils.logcomb(mm, j) for j in range(3,mm+1,1)]\n return utils.stable_logsumexp([(mm-1)*np.log(1-prob)+np.log(1+(mm-1)*prob)]+moments)\n else:\n return fastbound\n\n\n def subsample_func(x): # linear interpolation upper bound\n epsinf, tmp = subsample_epsdelta(func(np.inf),0,prob)\n\n if np.isinf(x):\n return epsinf\n if (x >= 1.0) and (x <= 2.0):\n return np.minimum(epsinf, subsample_func_int(2.0) / (2.0-1))\n if np.equal(np.mod(x, 1), 0):\n return np.minimum(epsinf, subsample_func_int(x) / (x-1) )\n xc = math.ceil(x)\n xf = math.floor(x)\n return np.minimum(\n epsinf,\n ((x-xf)*subsample_func_int(xc) + (1-(x-xf))*subsample_func_int(xf)) / (x-1)\n )\n\n # book keeping\n self.idxhash[(func, prob)] = self.n # save the index\n self.n += 1 # increment the number of unique mechanisms\n self.coeffs.append(coeff) # Update the coefficient\n self.RDPs.append(subsample_func) # update the analytical functions\n\n # also 
update the integer results\n if (func,prob) in self.cache:\n results = self.cache[(func,prob)]\n else:\n results = np.zeros_like(self.RDPs_int, float)\n mm = np.max(self.alphas) # evaluate the RDP up to order mm\n\n for alpha in range(2, mm+1):\n results[alpha-1] = subsample_func_int(alpha)\n results[0] = results[1] # Provide the trivial upper bound of RDP at alpha = 1 --- the KL privacy.\n self.cache[(func,prob)] = results # save in cache\n self.RDPs_int += results * coeff\n # update the pure DP tracker\n eps, delta = subsample_epsdelta(func(np.inf), 0, prob)\n self.RDP_inf += eps * coeff\n\n\n# TODO: 1. Modularize the several Poission sampling versions. 2. Support both sampling schemes together.\n"
] | [
[
"numpy.log",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.loglog",
"numpy.linspace",
"numpy.minimum",
"numpy.ceil",
"numpy.isposinf",
"numpy.mod",
"numpy.arange",
"numpy.all",
"numpy.max",
"numpy.min",
"scipy.special.comb",
"numpy.zeros_like",
"scipy.optimize.minimize_scalar",
"numpy.isinf",
"numpy.floor",
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
deslay1/CAVE | [
"afcbecd0b9cb97276625c16a89cb6df141e6f6f2"
] | [
"test/test_utils/test_statistical_tests.py"
] | [
"import logging\nimport unittest\n\nimport numpy as np\n\nfrom cave.utils.statistical_tests import paired_permutation, paired_t_student\n\n\nclass TestStatisticalTests(unittest.TestCase):\n\n def setUp(self):\n self.logger = logging.getLogger(\"TestStatisticalTests\")\n\n def test_paired_permutation(self):\n \"\"\" Testing paired permutation test. \"\"\"\n rng = np.random.RandomState(42)\n a, b = rng.normal(loc=0, size=100), rng.normal(loc=0, size=100)\n result = paired_permutation(a, a, rng, 100, self.logger)\n self.assertGreater(result, 0.9999)\n result = paired_permutation(a, b, rng, 100, self.logger)\n self.assertGreater(result, 0.3)\n a, b = rng.normal(loc=-1, size=100), rng.normal(loc=1, size=100)\n result = paired_permutation(a, b, rng, 1000, self.logger)\n self.assertLess(result, 0.001)\n\n def test_t_student(self):\n \"\"\" Testing paired t-test. \"\"\"\n rng = np.random.RandomState(42)\n a, b = rng.normal(loc=0, size=100), rng.normal(loc=0, size=100)\n result = paired_t_student(a, b, self.logger)\n self.assertGreater(result, 0.3)\n a, b = rng.normal(loc=-1, size=100), rng.normal(loc=1, size=100)\n result = paired_t_student(a, b, self.logger)\n self.assertLess(result, 0.001)\n"
] | [
[
"numpy.random.RandomState"
]
] |
zuston/elasticdl | [
"601609fd44f826a2f5ea209443124b2c9a2f9ccb"
] | [
"model_zoo/mnist/mnist_functional_api.py"
] | [
"# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport PIL.Image\nimport tensorflow as tf\n\nfrom elasticdl.python.common.constants import Mode\n\n\ndef custom_model():\n inputs = tf.keras.Input(shape=(28, 28), name=\"image\")\n x = tf.keras.layers.Reshape((28, 28, 1))(inputs)\n x = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\")(x)\n x = tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\")(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)\n x = tf.keras.layers.Dropout(0.25)(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10)(x)\n\n return tf.keras.Model(inputs=inputs, outputs=outputs, name=\"mnist_model\")\n\n\ndef prepare_data_for_a_single_file(file_object, filename):\n \"\"\"\n :param filename: training data file name\n :param file_object: a file object associated with filename\n \"\"\"\n label = int(filename.split(\"/\")[-2])\n image = PIL.Image.open(file_object)\n numpy_image = np.array(image)\n example_dict = {\n \"image\": tf.train.Feature(\n float_list=tf.train.FloatList(value=numpy_image.flatten())\n ),\n \"label\": tf.train.Feature(\n int64_list=tf.train.Int64List(value=[label])\n ),\n }\n example = tf.train.Example(\n features=tf.train.Features(feature=example_dict)\n )\n return example.SerializeToString()\n\n\ndef loss(labels, predictions):\n labels = tf.reshape(labels, [-1])\n return tf.reduce_mean(\n input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=predictions, labels=labels\n )\n )\n\n\ndef optimizer(lr=0.01):\n return tf.optimizers.SGD(lr)\n\n\ndef feed(dataset, mode, _):\n def _parse_data(record):\n if mode == Mode.PREDICTION:\n feature_description = {\n \"image\": tf.io.FixedLenFeature([28, 28], tf.float32)\n }\n else:\n feature_description = {\n \"image\": tf.io.FixedLenFeature([28, 28], tf.float32),\n \"label\": tf.io.FixedLenFeature([1], tf.int64),\n }\n r = tf.io.parse_single_example(record, feature_description)\n features = {\n \"image\": tf.math.divide(tf.cast(r[\"image\"], tf.float32), 255.0)\n }\n if mode == Mode.PREDICTION:\n return features\n else:\n return features, tf.cast(r[\"label\"], tf.int32)\n\n dataset = dataset.map(_parse_data)\n\n if mode == Mode.TRAINING:\n dataset = dataset.shuffle(buffer_size=1024)\n return dataset\n\n\ndef eval_metrics_fn():\n return {\n \"accuracy\": lambda labels, predictions: tf.equal(\n tf.argmax(predictions, 1, output_type=tf.int32),\n tf.cast(tf.reshape(labels, [-1]), tf.int32),\n )\n }\n"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.reshape",
"tensorflow.train.Int64List",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.keras.layers.Conv2D",
"tensorflow.io.parse_single_example",
"tensorflow.train.Features",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.io.FixedLenFeature",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.cast",
"tensorflow.keras.Model",
"tensorflow.argmax",
"numpy.array",
"tensorflow.optimizers.SGD",
"tensorflow.keras.layers.Reshape"
]
] |
andrewdownie/BlendAway | [
"daf73b22c29dfa905fbe8e838188d4df5861ae5d"
] | [
"blend.py"
] | [
"import os\nimport sys\nimport numpy as np\nimport cv2\nimport statistics\nimport datetime\n\ndef getMedian(arr, x, y):\n values = []\n for a in arr:\n values.append(a[x][y])\n return statistics.median_grouped(values)\n\ndef getMean(arr, x, y):\n values = []\n for a in arr:\n values.append(a[x][y])\n return statistics.mean(values)\n\ndef getMode(arr, x, y):\n values = []\n for a in arr:\n values.append(a[x][y])\n try:\n mode = statistics.mode(values)\n return mode\n except statistics.StatisticsError: # all values are the same\n return getMedian(arr,x,y)\n\nmethod = sys.argv[1]\n\nimgs = [\"1.png\",\"2.png\", \"3.png\", \"4.png\", \"5.png\"] # image\n#direct = os.getcwd() + \"/images/\" # where to get test images\n#saved = os.getcwd() + \"/saved/\" # where to get test images\ndirect = \"/var/www/html/\" # where to get test images\nsaved = \"/var/www/html/\" # where to get test images\ni=0\nimages = []\n\nfor img in imgs:\n image = cv2.imread(direct + img) # open template image\n images.append(image)\n (height, width) = image.shape[:2] # get dimensions\n\n\n\nred = []\ngreen = []\nblue = []\n\nfor image in images:\n\n redMatrix = [[0 for x in range(width)] for y in range(height)]\n greenMatrix = [[0 for x in range(width)] for y in range(height)]\n blueMatrix = [[0 for x in range(width)] for y in range(height)]\n\n for x in range(height):\n for y in range(width):\n redMatrix[x][y] = image[x,y,0]\n greenMatrix[x][y] = image[x,y,1]\n blueMatrix[x][y] = image[x,y,2]\n red.append(redMatrix)\n green.append(greenMatrix)\n blue.append(blueMatrix)\n\nnewImage = np.zeros((height,width,3), np.uint8)\n\nfor x in range(height):\n for y in range(width):\n\n rgb = []\n\n if(method == \"median\"):\n redMedian = getMedian(red,x,y)\n greenMedian = getMedian(green,x,y)\n blueMedian = getMedian(blue,x,y)\n\n if(method == \"mean\"):\n redMedian = getMean(red,x,y)\n greenMedian = getMean(green,x,y)\n blueMedian = getMean(blue,x,y)\n\n if(method == \"mode\"):\n redMedian = getMode(red,x,y)\n greenMedian = getMode(green,x,y)\n blueMedian = getMode(blue,x,y)\n\n\n rgb.append(redMedian)\n rgb.append(greenMedian)\n rgb.append(blueMedian)\n\n newImage[x][y] = rgb\n\ncv2.imwrite(saved + \"results.jpg\", newImage) # save image\n"
] | [
[
"numpy.zeros"
]
] |
UrusuLambda/pix2pix-tensorflow | [
"ba40020706ad3a1fbefa1da7bc7a05b7b031fb9e"
] | [
"model.py"
] | [
"from __future__ import division\nimport os\nimport time\nfrom glob import glob\nimport tensorflow as tf\nimport numpy as np\nfrom six.moves import xrange\n\nfrom ops import *\nfrom utils import *\n\nclass pix2pix(object):\n def __init__(self, sess, image_size=256,\n batch_size=1, sample_size=1, output_size=256,\n gf_dim=64, df_dim=64, L1_lambda=100,\n input_c_dim=3, output_c_dim=3, dataset_name='facades',\n checkpoint_dir=None, sample_dir=None):\n \"\"\"\n\n Args:\n sess: TensorFlow session\n batch_size: The size of batch. Should be specified before training.\n output_size: (optional) The resolution in pixels of the images. [256]\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\n input_c_dim: (optional) Dimension of input image color. For grayscale input, set to 1. [3]\n output_c_dim: (optional) Dimension of output image color. For grayscale input, set to 1. [3]\n \"\"\"\n self.sess = sess\n self.is_grayscale = (input_c_dim == 1)\n self.batch_size = batch_size\n self.image_size = image_size\n self.sample_size = sample_size\n self.output_size = output_size\n\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n\n self.input_c_dim = input_c_dim\n self.output_c_dim = output_c_dim\n\n self.L1_lambda = L1_lambda\n\n # batch normalization : deals with poor initialization helps gradient flow\n self.d_bn1 = batch_norm(name='d_bn1')\n self.d_bn2 = batch_norm(name='d_bn2')\n self.d_bn3 = batch_norm(name='d_bn3')\n\n self.g_bn_e2 = batch_norm(name='g_bn_e2')\n self.g_bn_e3 = batch_norm(name='g_bn_e3')\n self.g_bn_e4 = batch_norm(name='g_bn_e4')\n self.g_bn_e5 = batch_norm(name='g_bn_e5')\n self.g_bn_e6 = batch_norm(name='g_bn_e6')\n self.g_bn_e7 = batch_norm(name='g_bn_e7')\n self.g_bn_e8 = batch_norm(name='g_bn_e8')\n\n self.g_bn_d1 = batch_norm(name='g_bn_d1')\n self.g_bn_d2 = batch_norm(name='g_bn_d2')\n self.g_bn_d3 = batch_norm(name='g_bn_d3')\n self.g_bn_d4 = batch_norm(name='g_bn_d4')\n self.g_bn_d5 = batch_norm(name='g_bn_d5')\n self.g_bn_d6 = batch_norm(name='g_bn_d6')\n self.g_bn_d7 = batch_norm(name='g_bn_d7')\n\n self.dataset_name = dataset_name\n self.checkpoint_dir = checkpoint_dir\n self.build_model()\n\n def build_model(self):\n self.real_data = tf.placeholder(tf.float32,\n [self.batch_size, self.image_size, self.image_size,\n self.input_c_dim + self.output_c_dim],\n name='real_A_and_B_images')\n\n self.real_B = self.real_data[:, :, :, :self.input_c_dim]\n self.real_A = self.real_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]\n\n self.fake_B = self.generator(self.real_A)\n\n self.real_AB = tf.concat([self.real_A, self.real_B], 3)\n self.fake_AB = tf.concat([self.real_A, self.fake_B], 3)\n self.D, self.D_logits = self.discriminator(self.real_AB, reuse=False)\n self.D_, self.D_logits_ = self.discriminator(self.fake_AB, reuse=True)\n\n self.fake_B_sample = self.sampler(self.real_A)\n\n self.d_sum = tf.summary.histogram(\"d\", self.D)\n self.d__sum = tf.summary.histogram(\"d_\", self.D_)\n self.fake_B_sum = tf.summary.image(\"fake_B\", self.fake_B)\n\n self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, labels=tf.ones_like(self.D)))\n self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.zeros_like(self.D_)))\n self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.ones_like(self.D_))) \\\n + self.L1_lambda * 
tf.reduce_mean(tf.abs(self.real_B - self.fake_B))\n\n self.d_loss_real_sum = tf.summary.scalar(\"d_loss_real\", self.d_loss_real)\n self.d_loss_fake_sum = tf.summary.scalar(\"d_loss_fake\", self.d_loss_fake)\n\n self.d_loss = self.d_loss_real + self.d_loss_fake\n\n self.g_loss_sum = tf.summary.scalar(\"g_loss\", self.g_loss)\n self.d_loss_sum = tf.summary.scalar(\"d_loss\", self.d_loss)\n\n t_vars = tf.trainable_variables()\n\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\n\n self.saver = tf.train.Saver()\n\n\n def load_random_samples(self):\n data = np.random.choice(glob('./datasets/{}/val/*.jpg'.format(self.dataset_name)), self.batch_size)\n sample = [load_data(sample_file) for sample_file in data]\n\n if (self.is_grayscale):\n sample_images = np.array(sample).astype(np.float32)[:, :, :, None]\n else:\n sample_images = np.array(sample).astype(np.float32)\n return sample_images\n\n def sample_model(self, sample_dir, epoch, idx):\n sample_images = self.load_random_samples()\n samples, d_loss, g_loss = self.sess.run(\n [self.fake_B_sample, self.d_loss, self.g_loss],\n feed_dict={self.real_data: sample_images}\n )\n save_images(samples, [self.batch_size, 1],\n './{}/train_{:02d}_{:04d}.png'.format(sample_dir, epoch, idx))\n print(\"[Sample] d_loss: {:.8f}, g_loss: {:.8f}\".format(d_loss, g_loss))\n\n def train(self, args):\n \"\"\"Train pix2pix\"\"\"\n d_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \\\n .minimize(self.d_loss, var_list=self.d_vars)\n g_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \\\n .minimize(self.g_loss, var_list=self.g_vars)\n\n init_op = tf.global_variables_initializer()\n self.sess.run(init_op)\n\n self.g_sum = tf.summary.merge([self.d__sum,\n self.fake_B_sum, self.d_loss_fake_sum, self.g_loss_sum])\n self.d_sum = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum])\n self.writer = tf.summary.FileWriter(\"./logs\", self.sess.graph)\n\n counter = 1\n start_time = time.time()\n\n if self.load(self.checkpoint_dir):\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] 
Load failed...\")\n\n for epoch in xrange(args.epoch):\n data = glob('./datasets/{}/train/*.jpg'.format(self.dataset_name))\n #np.random.shuffle(data)\n batch_idxs = min(len(data), args.train_size) // self.batch_size\n\n for idx in xrange(0, batch_idxs):\n batch_files = data[idx*self.batch_size:(idx+1)*self.batch_size]\n batch = [load_data(batch_file) for batch_file in batch_files]\n if (self.is_grayscale):\n batch_images = np.array(batch).astype(np.float32)[:, :, :, None]\n else:\n batch_images = np.array(batch).astype(np.float32)\n\n # Update D network\n _, summary_str = self.sess.run([d_optim, self.d_sum],\n feed_dict={ self.real_data: batch_images })\n self.writer.add_summary(summary_str, counter)\n\n # Update G network\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.real_data: batch_images })\n self.writer.add_summary(summary_str, counter)\n\n # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)\n _, summary_str = self.sess.run([g_optim, self.g_sum],\n feed_dict={ self.real_data: batch_images })\n self.writer.add_summary(summary_str, counter)\n\n errD_fake = self.d_loss_fake.eval({self.real_data: batch_images})\n errD_real = self.d_loss_real.eval({self.real_data: batch_images})\n errG = self.g_loss.eval({self.real_data: batch_images})\n\n counter += 1\n print(\"Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f\" \\\n % (epoch, idx, batch_idxs,\n time.time() - start_time, errD_fake+errD_real, errG))\n\n if np.mod(counter, 100) == 1:\n self.sample_model(args.sample_dir, epoch, idx)\n\n if np.mod(counter, 500) == 2:\n self.save(args.checkpoint_dir, counter)\n\n def discriminator(self, image, y=None, reuse=False):\n\n with tf.variable_scope(\"discriminator\") as scope:\n\n # image is 256 x 256 x (input_c_dim + output_c_dim)\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse == False\n\n h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))\n # h0 is (128 x 128 x self.df_dim)\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))\n # h1 is (64 x 64 x self.df_dim*2)\n h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))\n # h2 is (32x 32 x self.df_dim*4)\n h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, d_h=1, d_w=1, name='d_h3_conv')))\n # h3 is (16 x 16 x self.df_dim*8)\n h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')\n\n return tf.nn.sigmoid(h4), h4\n\n def generator(self, image, y=None):\n with tf.variable_scope(\"generator\") as scope:\n\n s = self.output_size\n s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)\n\n # image is (256 x 256 x input_c_dim)\n e1 = conv2d(image, self.gf_dim, name='g_e1_conv')\n # e1 is (128 x 128 x self.gf_dim)\n e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv'))\n # e2 is (64 x 64 x self.gf_dim*2)\n e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv'))\n # e3 is (32 x 32 x self.gf_dim*4)\n e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv'))\n # e4 is (16 x 16 x self.gf_dim*8)\n e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv'))\n # e5 is (8 x 8 x self.gf_dim*8)\n e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name='g_e6_conv'))\n # e6 is (4 x 4 x self.gf_dim*8)\n e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name='g_e7_conv'))\n # e7 is (2 x 2 x self.gf_dim*8)\n e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name='g_e8_conv'))\n # e8 is (1 x 
1 x self.gf_dim*8)\n\n self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8),\n [self.batch_size, s128, s128, self.gf_dim*8], name='g_d1', with_w=True)\n d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)\n d1 = tf.concat([d1, e7], 3)\n # d1 is (2 x 2 x self.gf_dim*8*2)\n\n self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1),\n [self.batch_size, s64, s64, self.gf_dim*8], name='g_d2', with_w=True)\n d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)\n d2 = tf.concat([d2, e6], 3)\n # d2 is (4 x 4 x self.gf_dim*8*2)\n\n self.d3, self.d3_w, self.d3_b = deconv2d(tf.nn.relu(d2),\n [self.batch_size, s32, s32, self.gf_dim*8], name='g_d3', with_w=True)\n d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)\n d3 = tf.concat([d3, e5], 3)\n # d3 is (8 x 8 x self.gf_dim*8*2)\n\n self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3),\n [self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True)\n d4 = self.g_bn_d4(self.d4)\n d4 = tf.concat([d4, e4], 3)\n # d4 is (16 x 16 x self.gf_dim*8*2)\n\n self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4),\n [self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True)\n d5 = self.g_bn_d5(self.d5)\n d5 = tf.concat([d5, e3], 3)\n # d5 is (32 x 32 x self.gf_dim*4*2)\n\n self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5),\n [self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True)\n d6 = self.g_bn_d6(self.d6)\n d6 = tf.concat([d6, e2], 3)\n # d6 is (64 x 64 x self.gf_dim*2*2)\n\n self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6),\n [self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True)\n d7 = self.g_bn_d7(self.d7)\n d7 = tf.concat([d7, e1], 3)\n # d7 is (128 x 128 x self.gf_dim*1*2)\n\n self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7),\n [self.batch_size, s, s, self.output_c_dim], name='g_d8', with_w=True)\n # d8 is (256 x 256 x output_c_dim)\n\n return tf.nn.tanh(self.d8)\n\n def sampler(self, image, y=None):\n\n with tf.variable_scope(\"generator\") as scope:\n scope.reuse_variables()\n\n s = self.output_size\n s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128)\n\n # image is (256 x 256 x input_c_dim)\n e1 = conv2d(image, self.gf_dim, name='g_e1_conv')\n # e1 is (128 x 128 x self.gf_dim)\n e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv'))\n # e2 is (64 x 64 x self.gf_dim*2)\n e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv'))\n # e3 is (32 x 32 x self.gf_dim*4)\n e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv'))\n # e4 is (16 x 16 x self.gf_dim*8)\n e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv'))\n # e5 is (8 x 8 x self.gf_dim*8)\n e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name='g_e6_conv'))\n # e6 is (4 x 4 x self.gf_dim*8)\n e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name='g_e7_conv'))\n # e7 is (2 x 2 x self.gf_dim*8)\n e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name='g_e8_conv'))\n # e8 is (1 x 1 x self.gf_dim*8)\n\n self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8),\n [self.batch_size, s128, s128, self.gf_dim*8], name='g_d1', with_w=True)\n d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)\n d1 = tf.concat([d1, e7], 3)\n # d1 is (2 x 2 x self.gf_dim*8*2)\n\n self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1),\n [self.batch_size, s64, s64, self.gf_dim*8], name='g_d2', with_w=True)\n d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)\n d2 = tf.concat([d2, e6], 3)\n # d2 is (4 x 4 x self.gf_dim*8*2)\n\n self.d3, self.d3_w, self.d3_b = 
deconv2d(tf.nn.relu(d2),\n [self.batch_size, s32, s32, self.gf_dim*8], name='g_d3', with_w=True)\n d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)\n d3 = tf.concat([d3, e5], 3)\n # d3 is (8 x 8 x self.gf_dim*8*2)\n\n self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3),\n [self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True)\n d4 = self.g_bn_d4(self.d4)\n d4 = tf.concat([d4, e4], 3)\n # d4 is (16 x 16 x self.gf_dim*8*2)\n\n self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4),\n [self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True)\n d5 = self.g_bn_d5(self.d5)\n d5 = tf.concat([d5, e3], 3)\n # d5 is (32 x 32 x self.gf_dim*4*2)\n\n self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5),\n [self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True)\n d6 = self.g_bn_d6(self.d6)\n d6 = tf.concat([d6, e2], 3)\n # d6 is (64 x 64 x self.gf_dim*2*2)\n\n self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6),\n [self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True)\n d7 = self.g_bn_d7(self.d7)\n d7 = tf.concat([d7, e1], 3)\n # d7 is (128 x 128 x self.gf_dim*1*2)\n\n self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7),\n [self.batch_size, s, s, self.output_c_dim], name='g_d8', with_w=True)\n # d8 is (256 x 256 x output_c_dim)\n\n return tf.nn.tanh(self.d8)\n\n def save(self, checkpoint_dir, step):\n model_name = \"pix2pix.model\"\n model_dir = \"%s_%s_%s\" % (self.dataset_name, self.batch_size, self.output_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)\n\n def load(self, checkpoint_dir):\n print(\" [*] Reading checkpoint...\")\n\n model_dir = \"%s_%s_%s\" % (self.dataset_name, self.batch_size, self.output_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n return True\n else:\n return False\n\n def test(self, args):\n \"\"\"Test pix2pix\"\"\"\n init_op = tf.global_variables_initializer()\n self.sess.run(init_op)\n\n sample_files = glob('./datasets/{}/val/*.jpg'.format(self.dataset_name))\n\n # sort testing input\n n = [int(i) for i in map(lambda x: x.split('/')[-1].split('.jpg')[0], sample_files)]\n sample_files = [x for (y, x) in sorted(zip(n, sample_files))]\n\n # load testing input\n print(\"Loading testing images ...\")\n sample = [load_data(sample_file, is_test=True) for sample_file in sample_files]\n\n if (self.is_grayscale):\n sample_images = np.array(sample).astype(np.float32)[:, :, :, None]\n else:\n sample_images = np.array(sample).astype(np.float32)\n\n sample_images = [sample_images[i:i+self.batch_size]\n for i in xrange(0, len(sample_images), self.batch_size)]\n sample_images = np.array(sample_images)\n print(sample_images.shape)\n\n start_time = time.time()\n if self.load(self.checkpoint_dir):\n print(\" [*] Load SUCCESS\")\n else:\n print(\" [!] Load failed...\")\n\n for i, sample_image in enumerate(sample_images):\n idx = i+1\n print(\"sampling image \", idx)\n samples = self.sess.run(\n self.fake_B_sample,\n feed_dict={self.real_data: sample_image}\n )\n save_images(samples, [self.batch_size, 1],\n './{}/test_{:04d}.png'.format(args.test_dir, idx))\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.nn.tanh",
"tensorflow.summary.image",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.abs",
"tensorflow.concat",
"tensorflow.get_variable_scope",
"tensorflow.summary.merge",
"tensorflow.summary.FileWriter",
"tensorflow.summary.histogram",
"tensorflow.global_variables_initializer",
"tensorflow.ones_like",
"tensorflow.zeros_like",
"numpy.mod",
"tensorflow.train.Saver",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.AdamOptimizer",
"tensorflow.trainable_variables",
"numpy.array",
"tensorflow.nn.relu"
]
] |
songwanguw/pytorch-lightning | [
"64da9c9d87ac1c106d94310c4d90668fbafbb2cf"
] | [
"pytorch_lightning/trainer/training_loop.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport torch\nimport torch.distributed as torch_distrib\n\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.step_result import EvalResult, Result\nfrom pytorch_lightning.trainer.states import TrainerState\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum, Accumulator\nfrom pytorch_lightning.utilities import parsing, AMPType\nfrom pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import recursive_detach\nfrom pytorch_lightning.utilities.model_utils import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warning_utils import WarningCache\n\n\nclass TrainLoop:\n def __init__(self, trainer):\n self.trainer = trainer\n self.early_stopping_accumulator = None\n self.checkpoint_accumulator = None\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n\n def on_trainer_init(\n self, max_epochs, min_epochs, max_steps, min_steps, num_sanity_val_steps, automatic_optimization\n ):\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.interrupted = False\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n self.automatic_optimization = automatic_optimization\n\n self.trainer.max_epochs = max_epochs\n self.trainer.min_epochs = min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n if self.trainer.current_epoch >= self.trainer.max_epochs:\n return True\n\n if self.trainer.limit_train_batches == 0:\n return True\n\n return False\n\n def on_train_start(self):\n # clear cache before training\n if self.trainer.on_gpu and self.trainer.root_gpu is not None:\n # use context because of:\n # https://discuss.pytorch.org/t/out-of-memory-when-i-use-torch-cuda-empty-cache/57898\n with torch.cuda.device(f\"cuda:{self.trainer.root_gpu}\"):\n torch.cuda.empty_cache()\n\n # hook\n 
self.trainer.call_hook(\"on_train_start\")\n\n def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):\n # bind logger and other properties\n self.trainer.model_connector.copy_trainer_model_properties(model)\n\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n def setup_training(self, model: LightningModule):\n \"\"\"Sanity check a few things before starting actual training.\n\n Args:\n model: The model to run sanity test on.\n \"\"\"\n # --------------------------\n # Setup??\n # --------------------------\n ref_model = model\n if self.trainer.data_parallel:\n ref_model = model.module\n\n # set the ranks and devices\n self.trainer.accelerator_backend.dist.rank = self.trainer.global_rank\n self.trainer.accelerator_backend.dist.device = ref_model.device\n\n # give model convenience properties\n ref_model.trainer = self.trainer\n\n # set local properties on the model\n self.trainer.model_connector.copy_trainer_model_properties(ref_model)\n\n # init amp. Must be done here instead of __init__ to allow ddp to work\n if self.trainer.amp_backend == AMPType.NATIVE and self.trainer.precision == 16 and not self.trainer.use_tpu:\n self.trainer.scaler = torch.cuda.amp.GradScaler()\n\n # log hyper-parameters\n if self.trainer.logger is not None:\n # save exp to get started (this is where the first experiment logs are written)\n self.trainer.logger.log_hyperparams(ref_model.hparams_initial)\n self.trainer.logger.log_graph(ref_model)\n self.trainer.logger.save()\n\n # wait for all to join if on distributed\n self.trainer.accelerator_backend.barrier(\"setup_training\")\n\n # register auto-resubmit when on SLURM\n self.trainer.slurm_connector.register_slurm_signal_handlers()\n\n # --------------------------\n # Pre-train\n # --------------------------\n # on pretrain routine start\n self.trainer.on_pretrain_routine_start(ref_model)\n if self.trainer.is_function_implemented(\"on_pretrain_routine_start\"):\n ref_model.on_pretrain_routine_start()\n\n # print model summary\n if self.trainer.is_global_zero and self.trainer.weights_summary is not None and not self.trainer.testing:\n if self.trainer.weights_summary in ModelSummary.MODES:\n ref_model.summarize(mode=self.trainer.weights_summary)\n else:\n raise MisconfigurationException(\"weights_summary can be None, \" + \", \".join(ModelSummary.MODES))\n\n # track model now.\n # if cluster resets state, the model will update with the saved weights\n self.trainer.model = model\n\n # restore training and model before hpc is called\n self.trainer.checkpoint_connector.restore_weights(model)\n\n # on pretrain routine end\n self.trainer.on_pretrain_routine_end(ref_model)\n if self.trainer.is_function_implemented(\"on_pretrain_routine_end\"):\n ref_model.on_pretrain_routine_end()\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n\n self._teardown_already_run = True\n\n # trigger checkpoint check. 
need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_save=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # kill loggers\n if self.trainer.logger is not None:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n if self.trainer.global_rank == 0:\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator_backend.on_train_end()\n\n # clear mem\n if self.trainer.on_gpu:\n model = self.trainer.get_model()\n model.cpu()\n torch.cuda.empty_cache()\n\n def check_checkpoint_callback(self, should_save, is_last=False):\n # TODO bake this logic into the checkpoint callback\n if should_save and self.trainer.checkpoint_connector.has_trained:\n checkpoint_callbacks = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]\n if is_last and any(c.save_last for c in checkpoint_callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n model = self.trainer.get_model()\n [c.on_validation_end(self.trainer, model) for c in checkpoint_callbacks]\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.get_model()\n\n # reset train dataloader\n if self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # set seed for distributed sampler (enables shuffling for each epoch)\n try:\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n except Exception:\n pass\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # structured result accumulators for callbacks\n self.early_stopping_accumulator = Accumulator()\n self.checkpoint_accumulator = Accumulator()\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx):\n # hook\n self.trainer.call_hook('on_batch_end')\n self.trainer.call_hook('on_train_batch_end', epoch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, epoch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, epoch_end_outputs):\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(epoch_end_outputs):\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not 
self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n is_result_obj = isinstance(training_step_output, Result)\n\n if is_result_obj:\n training_step_output.detach()\n else:\n training_step_output.batch_loss = training_step_output.batch_loss.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.get_model()\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n training_step_output = self.trainer.accelerator_backend.training_step(args)\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n is_result_obj = isinstance(training_step_output, Result)\n\n if training_step_output_for_epoch_end is None:\n return None\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.train_loop.automatic_optimization:\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n if is_result_obj:\n closure_loss = training_step_output.minimize\n else:\n closure_loss = training_step_output.batch_loss\n\n closure_loss = closure_loss / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. 
avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n hiddens=training_step_output.hiddens,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n # -----------------------------------------\n # process result return (DEPRECATE in 1.0)\n # -----------------------------------------\n if isinstance(training_step_output, Result):\n training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)\n return training_step_output_for_epoch_end, training_step_output\n\n # -----------------------------------------\n # process hybrid (1.0)\n # -----------------------------------------\n # no need for these checks in 1.0.0\n # TODO: remove checks in 1.0.0\n is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)\n is_1_0_output = is_tensor or (\"log\" not in training_step_output and \"progress_bar\" not in training_step_output)\n if is_1_0_output:\n return self._process_training_step_output_1_0(training_step_output, split_batch)\n\n # -----------------------------------------\n # process old dict (deprecate 1.0)\n # -----------------------------------------\n training_step_output = self.trainer.process_dict_result(training_step_output, train=True)\n\n training_step_output = AttributeDict(\n batch_loss=training_step_output[0],\n pbar_on_batch_end=training_step_output[1],\n log_metrics=training_step_output[2],\n callback_metrics=training_step_output[3],\n hiddens=training_step_output[4],\n )\n # if the user decides to finally reduce things in epoch_end, save raw output without graphs\n if isinstance(training_step_output_for_epoch_end, torch.Tensor):\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n else:\n training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_training_step_output_1_0(self, training_step_output, split_batch):\n result = self.trainer.get_model()._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n result[\"extra\"] = {}\n\n # map to results under the hood\n result.minimize = loss\n result.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end.cpu()\n\n # what flows back into the system\n training_step_output = result\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_result(self, training_step_output, split_batch):\n training_step_output.track_batch_size(len(split_batch))\n m = \"\"\"\n TrainResult and EvalResult 
were deprecated in 0.9.1 and support will drop in 1.0.0.\n Use self.log and .write from the LightningModule to log metrics and write predictions.\n training_step can now only return a scalar (for the loss) or a dictionary with anything you want.\n\n Option 1:\n return loss\n\n Option 2:\n return {'loss': loss, 'anything_else': ...}\n\n Option 3:\n return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}\n \"\"\"\n rank_zero_warn(m)\n\n # don't allow EvalResult in the training_step\n if isinstance(training_step_output, EvalResult):\n raise MisconfigurationException(\n \"training_step cannot return EvalResult, \" \"use a dict or TrainResult instead\"\n )\n\n training_step_output_for_epoch_end = copy(training_step_output)\n training_step_output_for_epoch_end.detach()\n\n return training_step_output_for_epoch_end\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n with self.trainer.profiler.profile(\"optimizer_step\"):\n # optimizer step lightningModule hook\n self.trainer.accelerator_backend.optimizer_step(\n optimizer, batch_idx, opt_idx, train_step_and_backward_closure\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator_backend.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator_backend.clip_gradients(optimizer)\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.get_model()\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def process_hiddens(self, opt_closure_result):\n hiddens = opt_closure_result.hiddens\n if isinstance(opt_closure_result.training_step_output, Result):\n opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()\n return hiddens\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.get_model()\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n\n # get model\n model = self.trainer.get_model()\n\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n # enable profiling for the dataloader\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n should_check_val = False\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n # only track outputs when user implements training_epoch_end\n # otherwise we will build up unnecessary memory\n epoch_end_outputs = 
self.process_train_step_outputs(\n batch_output.training_step_output_for_epoch_end,\n self.early_stopping_accumulator,\n self.checkpoint_accumulator,\n )\n\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.run_evaluation(test_mode=False)\n # reset stage to train\n self.trainer.logger_connector.set_stage(\"train\")\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1:\n accumulation_done = self._accumulated_batches_reached()\n # Ensure accumulation across batches has completed before breaking loop\n if accumulation_done:\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if (batch_idx + 1) >= self.trainer.num_training_batches:\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # epoch end hook\n self.run_on_epoch_end_hook(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(\n epoch_output,\n self.checkpoint_accumulator,\n self.early_stopping_accumulator,\n self.num_optimizers\n )\n\n # when no val loop is present or fast-dev-run still need to call checkpoints\n self.check_checkpoint_callback(not (should_check_val or is_overridden('validation_step', model)))\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # bookkeeping\n using_results_obj = False\n self.trainer.hiddens = None\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in self.prepare_optimizers():\n\n # toggle model 
params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # perform dpp sync only when performing optimizer_step\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens)\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch,\n batch_idx,\n opt_idx,\n optimizer,\n self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch,\n batch_idx,\n opt_idx,\n self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n self.zero_grad_handler(batch_idx, optimizer, opt_idx)\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm accros opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # hook + clear gradients\n self.zero_grad_handler(batch_idx, optimizer, opt_idx)\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self):\n if isinstance(self.trainer.model, torch.nn.parallel.DistributedDataParallel):\n yield self.trainer.model.no_sync()\n else:\n yield\n\n def _process_closure_result(\n self, batch_outputs: list, opt_idx: int\n ) -> list:\n opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # track hiddens\n self.trainer.hiddens = self.process_hiddens(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if result is None:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n return None\n\n if self.trainer.train_loop.automatic_optimization:\n # backward pass\n with 
self.trainer.profiler.profile(\"model_backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(result.loss)\n\n return result\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator_backend.backward(\n result.closure_loss, optimizer, opt_idx, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def run_on_epoch_end_hook(self, epoch_output):\n self.trainer.call_hook('on_epoch_end')\n self.trainer.call_hook('on_train_epoch_end', epoch_output)\n\n self.trainer.logger_connector.on_train_epoch_end()\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.trainer.global_step += 1\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n should_check_val = is_val_check_batch or self.trainer.should_stop\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n should_check_val = can_check_val and (should_check_val or is_last_batch_for_infinite_dataset)\n\n return should_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n args.append(opt_idx)\n else:\n num_opts = len(self.trainer.optimizers)\n raise ValueError(\n f\"Your LightningModule defines {num_opts} optimizers but \"\n f'training_step is missing 
the \"optimizer_idx\" argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs or self.trainer.fast_dev_run:\n if self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):\n \"\"\"\n Figure out what needs to be tracked/logged at the end of the epoch\n \"\"\"\n\n # the training step outputs a list per optimizer. The list contains the outputs at each time step\n # when no TBPTT is used, then the list has 1 item per batch\n # when TBPTT IS used, then the list has n items (1 per time step)\n epoch_end_outputs = []\n for optimizer_idx_outputs in all_train_step_outputs:\n # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer\n if len(optimizer_idx_outputs) == 0:\n continue\n\n sample_output = optimizer_idx_outputs[-1]\n\n # pull out callback info if available (ie: Results object)\n if isinstance(sample_output, dict) and \"early_stop_on\" in sample_output:\n early_stopping_accumulator.accumulate(sample_output[\"early_stop_on\"])\n\n if isinstance(sample_output, dict) and \"checkpoint_on\" in sample_output:\n checkpoint_accumulator.accumulate(sample_output[\"checkpoint_on\"])\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if is_overridden(\"training_epoch_end\", model=self.trainer.get_model()) or auto_reduce_tng_result:\n epoch_end_outputs.append(optimizer_idx_outputs)\n\n return epoch_end_outputs\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.get_model()\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n\n def zero_grad_handler(self, batch_idx, optimizer, opt_idx):\n if self.automatic_optimization:\n # hook\n self.on_before_zero_grad(optimizer)\n optimizers = enumerate([optimizer])\n else:\n optimizers = self.get_optimizers_iterable()\n\n for idx, optimizer in optimizers:\n self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n"
] | [
[
"torch.cuda.empty_cache",
"torch.cuda.amp.GradScaler",
"numpy.cumsum",
"numpy.argmax",
"torch.cuda.device"
]
] |
hnkulkarni/cs231aApproachingOdt | [
"07c68d787442243d653ae72a7e9473b4c3c5c6b4"
] | [
"tracking.py"
] | [
"# This file will track detections\nimport tqdm\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import NullLocator\nfrom cs231aApproachingOdt import utils as myutils\nfrom PIL import Image\nimport os\n\nimport torch\nimport torchvision.ops.boxes as bops\n\ndef match_detections(prev_path, prev_detection, new_path, new_detection, size=(640, 480)):\n prev_range = [*range(len(prev_detection))]\n new_range = [*range(len(new_detection))]\n\n permutations = myutils.unique_permutations(prev_range, new_range)\n\n fig, ax = plt.subplots(1, 2)\n prev_img = myutils.load_resize(prev_path, size)\n new_img = myutils.load_resize(new_path, size)\n\n matching_pairs = []\n for old, new in permutations:\n [a.cla() for a in ax]\n draw_detection(prev_img, prev_detection[old], ax[0])\n ax[0].set_title(f\"{os.path.basename(prev_path)}\")\n\n draw_detection(new_img, new_detection[new], ax[1])\n ax[1].set_title(f\"{os.path.basename(new_path)}\")\n #plt.pause(0.1)\n iou = get_iou(prev_detection[old], new_detection[new])\n\n if iou < 0.7:\n continue\n prev_crop = crop_detection(prev_img, prev_detection[old])\n new_crop = crop_detection(new_img, new_detection[new])\n #keypoint_matching(prev_crop, new_crop)\n\n methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n\n is_match = template_matching(new_crop, prev_crop, methods[3])\n\n if is_match == True:\n matching_pairs.append((old, new))\n\n\n plt.close(fig)\n return matching_pairs\n\ndef get_iou(prev_detection, new_detection):\n box1 = new_detection[:4].reshape((1, 4))\n box2 = prev_detection[:4].reshape((1, 4))\n iou = bops.box_iou(box1, box2)\n return iou\n\ndef template_matching(img1, template, method):\n fig_template, ax = plt.subplots()\n\n template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)\n img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img = img1_gray.copy()\n w_t, h_t = template_gray.shape[::-1]\n w_i, h_i = img1_gray.shape[::-1]\n\n if (w_t > w_i) or (h_t > h_i):\n return False\n\n method = eval(method)\n # Apply template Matching\n res = cv2.matchTemplate(img1_gray, template_gray, method)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n #print(f\"\\n{min_val}, {max_val}, {min_loc}, {max_loc}\")\n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n # bottom_right = (top_left[0] + w, top_left[1] + h)\n # cv2.rectangle(img, top_left, bottom_right, 255, 2)\n\n # plt.subplot(121), plt.imshow(res, cmap='gray')\n # plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n # plt.subplot(122), plt.imshow(img, cmap='gray')\n # plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n # plt.suptitle(method)\n # plt.show()\n # plt.close(fig_template)\n\n if max_val > 0.9:\n return True\n else:\n return False\n\ndef keypoint_matching(img1, img2):\n # Source: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html\n\n img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n myutils.show(img1_gray)\n orb = cv2.ORB_create()\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1_gray, None)\n kp2, des2 = orb.detectAndCompute(img2_gray, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1, des2)\n # Sort them in the 
order of their distance.\n matches = sorted(matches, key=lambda x: x.distance)\n # Draw first 10 matches.\n img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n fig_match, ax_match = plt.subplot()\n plt.imshow(img3)\n plt.show()\n plt.close(fig_match)\n\ndef crop_detection(img, detection):\n x1, y1, x2, y2, conf, cls_conf, cls_pred = detection\n crop = img[int(y1):int(y2), int(x1):int(x2)]\n return crop\n\ndef draw_detection(img, detection, ax):\n ax.imshow(myutils.bgr2rgb(img))\n x1, y1, x2, y2, conf, cls_conf, cls_pred = detection\n box_w = x2 - x1\n box_h = y2 - y1\n # Create a Rectangle patch\n bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=\"red\", facecolor=\"none\")\n # Add the bbox to the plot\n ax.add_patch(bbox)\n ax.set_xticks([])\n ax.set_yticks([])\n\ndef tracking_by_detection(img_folder, image_paths, img_detections, size=(640, 480)):\n # Iterate through images and save plot of detections\n print(\"In Tracking By Detection\")\n path_detections_zip = zip(image_paths, img_detections)\n num_images = len(image_paths)\n tqdm_pbar = tqdm.tqdm(path_detections_zip, total=num_images)\n\n tracks_dict = dict()\n for img_i, (path, detections) in enumerate(tqdm_pbar):\n tqdm_pbar.set_postfix({\"Processing \": path})\n if img_i == 0:\n print(\"Initialize Detections\")\n continue\n\n matching_pairs = match_detections(prev_path=image_paths[img_i - 1], prev_detection=img_detections[img_i - 1],\n new_path=path, new_detection=detections, size=size)\n print(matching_pairs)\n tracks_dict[path] = matching_pairs\n\n myutils.pickle_save(os.path.join(img_folder, \"output/tracks.pickle\"), (tracks_dict, img_detections))\n return tracks_dict"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.close"
]
] |
Captainr22/SAE | [
"f3e370604978a273eb1e1ffdbd342dee3de431c9"
] | [
"hotpotqa_utils_joint.py"
] | [
"import torch\nimport numpy as np\nimport json, sys, re, string\nimport collections\nfrom collections import Counter\nfrom collections import OrderedDict\n\n\ndef get_sp_pred(pred_sp_idx, data):\n \"\"\"get the prediction of supporting facts in original format\n \n Arguments:\n pred_sp_idx {[type]} -- [description]\n data {[type]} -- [description]\n \"\"\"\n pred = []\n for p in pred_sp_idx:\n if p < len(data):\n pred.append([data[p].doc_title[0], data[p].sent_id])\n\n return pred\n\ndef process_logit(batch_index, batch_logits, predict_features, predict_examples, max_answer_length):\n \"\"\"get predictions for each sample in the batch\n \n Arguments:\n batch_index {[type]} -- [description]\n batch_logits {[type]} -- 0: supporting facts logits, 1: answer span logits, 2: answer type logits 3: gold doc logits\n batch_size {[type]} -- [description]\n predict_file {[type]} -- [description]\n \"\"\"\n \n sp_logits_np = torch.sigmoid(batch_logits[0]).detach().cpu().numpy()\n ans_type_logits_np = batch_logits[1].detach().cpu().numpy()\n\n batch_index = batch_index.numpy().tolist()\n\n sp_pred, span_pred, ans_type_pred = [], [], []\n\n for idx, data in enumerate(batch_index):\n\n # supporting facts prediction\n pred_sp_idx = [ x[0] for x in enumerate(sp_logits_np[idx,:].tolist()) if x[1] > 0.5 ]\n print(pred_sp_idx)\n if len(pred_sp_idx) != 0:\n sp_pred.append(get_sp_pred(pred_sp_idx, predict_examples[data]))\n else:\n sp_pred.append([])\n\n # answer type prediction, for debug purpose\n ans_type_pred.append(np.argmax(ans_type_logits_np[idx,:]))\n\n # answer span prediction\n if ans_type_pred[-1] == 0:\n span_pred.append(\"no\")\n elif ans_type_pred[-1] == 1:\n span_pred.append(\"yes\")\n else:\n span_pred.append(\"\")\n \n return sp_pred, span_pred, ans_type_pred\n\n\n# def evaluate(eval_file, answer_dict):\n# f1 = exact_match = total = 0\n# for key, value in enumerate(answer_dict):\n# total += 1\n# ground_truths = eval_file[key][\"answer\"]\n# prediction = value\n# cur_EM = exact_match_score(prediction, ground_truths)\n# cur_f1, _, _ = f1_score(prediction, ground_truths)\n# exact_match += cur_EM\n# f1 += cur_f1\n\n# exact_match = 100.0 * exact_match / total\n# f1 = 100.0 * f1 / total\n\n# return {'exact_match': exact_match, 'f1': f1}\n\ndef normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef f1_score(prediction, ground_truth):\n normalized_prediction = normalize_answer(prediction)\n normalized_ground_truth = normalize_answer(ground_truth)\n\n ZERO_METRIC = (0, 0, 0)\n\n if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:\n return ZERO_METRIC\n if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:\n return ZERO_METRIC\n\n prediction_tokens = normalized_prediction.split()\n ground_truth_tokens = normalized_ground_truth.split()\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return ZERO_METRIC\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1, 
precision, recall\n\n\ndef exact_match_score(prediction, ground_truth):\n return (normalize_answer(prediction) == normalize_answer(ground_truth))\n\n\ndef write_prediction(sp_preds, answer_preds, orig_data, predict_file, output_dir):\n \"\"\"write predictions to json file\n \n Arguments:\n sp_preds {[type]} -- [description]\n answer_preds {[type]} -- [description]\n orig_data {[type]} -- [description]\n predict_file {[type]} -- [description]\n output_dir {[type]} -- [description]\n \"\"\"\n if len(answer_preds) == 0:\n answer_preds = [\"place_holder\"] * len(orig_data)\n all_pred = {}\n all_pred['answer'] = OrderedDict()\n all_pred['sp'] = OrderedDict()\n for idx, data in enumerate(orig_data):\n all_pred['answer'][data['_id']] = answer_preds[idx]\n all_pred['sp'][data['_id']] = sp_preds[idx]\n\n with open(output_dir, 'w') as fid:\n json.dump(all_pred, fid)\n\n"
] | [
[
"torch.sigmoid",
"numpy.argmax"
]
] |
bettybhzhou/EasyMarkit_AI | [
"028824a0af246d232013246bf1784013921beec3"
] | [
"sigma_script.py"
] | [
"\"\"\"\n Winning Python script for EasyMarkit Hackathon by Team Sigma\n\"\"\"\n\n##Team Sigma - Members: Betty Zhou, Bailey Lei, Alex Pak\n\n# Usage: python sigma_script.py data/train.csv data/test.csv\n\n\n# import any necessary packages here\n#loading libraries\nimport argparse\nimport os\nimport pandas as pd\nimport numpy as np\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n# read in command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"train_file_path\") #path of training set\nparser.add_argument(\"test_file_path\") #path of test set\nargs = parser.parse_args()\n\ndef onehot_drop(df, column_name):\n for index in column_name:\n one_hot = pd.get_dummies(df[index], prefix = index)\n df = df.drop(index,axis = 1)\n df = df.join(one_hot)\n return df\n\ndef fit_train(df):\n train_df = df\n train_clean = onehot_drop(train_df, ['type', 'province'])\n train_clean['cli_area'] = train_clean['cli_area'].map({'Urban':1, 'Rural':0})\n train_clean['pat_area'] = train_clean['pat_area'].map({'Urban':1, 'Rural':0})\n train_clean['gender'] = train_clean['gender'].map({'M':1, 'F':0})\n\n # convert to datetime\n train_clean['apt_date'] = pd.to_datetime(train_df.apt_date,format='%Y-%m-%d %H:%M:%S', utc =True)\n train_clean['sent_time'] = pd.to_datetime(train_df.sent_time,format='%Y-%m-%d %H:%M', utc =True)\n train_clean['send_time'] = pd.to_datetime(train_df.send_time, format='%H:%M:%S', utc =True).dt.time\n\n # find time between reminder and appointment\n train_clean['sent_to_apt'] = (train_clean['apt_date'] - train_clean['sent_time']).dt.total_seconds()/3600\n\n # attributes\n train_clean['apt_month'] = train_clean['apt_date'].dt.month\n train_clean['sent_day_of_week'] = train_clean['sent_time'].dt.day_name()\n\n # one-hot encoding\n train_clean = onehot_drop(train_clean, ['sent_day_of_week'])\n\n X = train_clean.iloc[:, 2:]\n y = train_clean.iloc[:,1]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=1)\n\n X_train_drop = X_train.drop([\"apt_type\", \"apt_date\", \"sent_time\", \"send_time\", \"city\", \"cli_zip\", 'pat_id', 'family_id','clinic'], axis = 1)\n X_test_drop = X_test.drop([\"apt_type\", \"apt_date\", \"sent_time\", \"send_time\", \"city\", \"cli_zip\", 'pat_id', 'family_id','clinic'], axis = 1)\n\n print(\"Number of training examples:\", len(y_train))\n print(\"Number of test examples:\", len(y_test))\n\n lg = lgb.LGBMClassifier(silent=False, n_estimators = 2000, max_depth=100)\n\n lg_model = lg.fit(X_train_drop, y_train)\n\n print(\"train accuracy: \", lg.score(X_train_drop, y_train))\n print(\"test accuracy: \", lg.score(X_test_drop, y_test))\n\n return lg_model\n\ndef predict_test(test_df, lg_model):\n test_clean = onehot_drop(test_df, ['type', 'province'])\n test_clean['cli_area'] = test_clean['cli_area'].map({'Urban':1, 'Rural':0})\n test_clean['pat_area'] = test_clean['pat_area'].map({'Urban':1, 'Rural':0})\n test_clean['gender'] = test_clean['gender'].map({'M':1, 'F':0})\n\n # convert to datetime\n test_clean['apt_date'] = pd.to_datetime(test_df.apt_date,format='%Y-%m-%d %H:%M:%S', utc =True)\n test_clean['sent_time'] = pd.to_datetime(test_df.sent_time,format='%Y-%m-%d %H:%M', utc =True)\n test_clean['send_time'] = pd.to_datetime(test_df.send_time, format='%H:%M:%S', utc =True).dt.time\n\n # find time between reminder and appointment\n test_clean['sent_to_apt'] = (test_clean['apt_date'] - test_clean['sent_time']).dt.total_seconds()/3600\n\n # attributes\n 
test_clean['apt_month'] = test_clean['apt_date'].dt.month\n test_clean['sent_day_of_week'] = test_clean['sent_time'].dt.day_name()\n\n # one-hot encoding\n test_clean = onehot_drop(test_clean, ['sent_day_of_week'])\n test_clean_month = onehot_drop(test_clean, ['apt_month'])\n\n test_final = test_clean.iloc[:, 1:]\n test_final = test_final.drop([\"apt_type\", \"apt_date\", \"sent_time\", \"send_time\", \"city\", \"cli_zip\", 'pat_id', 'family_id','clinic'], axis = 1)\n\n print(\"Number of test examples:\", len(test_df))\n print(\"Number of final cleaned test examples:\", len(test_final))\n print(\"test data shape: \", test_final.shape)\n\n test_clean[\"response\"] = lg_model.predict(test_final)\n df = test_clean[[\"ReminderId\",\"response\"]]\n return df\n\ndef write_to_csv(df):\n group_name = \"sigma\"\n df.to_csv(group_name + \"_output.csv\", index=False)\n print(group_name + \"_output.csv output successful\")\n\ndef main():\n # loading train and test data\n train_df = pd.read_csv(args.train_file_path)\n test_df = pd.read_csv(args.test_file_path)\n\n # pre-processing input train and test data for training model\n lg_model = fit_train(train_df)\n\n #predict and write to new CSV for submission\n df = predict_test(test_df, lg_model)\n write_to_csv(df)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies"
]
] |
khanfarhan10/PINTO_model_zoo | [
"4cad2e506d8c0fb604aa7b5f84115a840ab59ba1"
] | [
"10_mobilenetv3/06_mobilenet_v3_large_224_dm07/01_float32/02_weight_quantization.py"
] | [
"import tensorflow as tf\n\ntf.compat.v1.enable_eager_execution()\n\n# Weight Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')\nconverter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\ntflite_quant_model = converter.convert()\nwith open('./mobilenet_v3_large_224_dm07_weight_quant.tflite', 'wb') as w:\n w.write(tflite_quant_model)\nprint(\"Weight Quantization complete! - mobilenet_v3_large_224_dm07_weight_quant.tflite\")\n\n"
] | [
[
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.lite.TFLiteConverter.from_saved_model"
]
] |
TheSDK-blocks/f2_testbench | [
"6a263dbab6de1da980619c0ecf6d181342c7592b"
] | [
"f2_testbench/analyzers_mixin.py"
] | [
"#This is a mixin class for signal analyzers to be used by f2_system class\n#Todo: Analyzers should be a independent class\n#Last modification by Marko Kosunen, [email protected], 30.07.2018 18:09\nimport numpy as np\nimport scipy.signal as sig\nimport matplotlib as mpl \nmpl.use('Agg') #To enble plotting without X\nimport matplotlib.pyplot as plt\n\nclass analyzers_mixin:\n#Define signal analyzer methods \n def oscilloscope(self,argdict):\n ymax=argdict['ymax']\n ymin=argdict['ymin']\n timex=argdict['timex']\n sigin=argdict['sigin']\n tstr=argdict['tstr']\n printstr=argdict['printstr']\n msg=\"Generating %s\" %(printstr)\n self.print_log(type='I', msg=msg) \n\n figure=plt.figure()\n h=plt.subplot();\n hfont = {'fontname':'Sans'}\n plt.plot(timex, sigin, linewidth=2)\n plt.ylim((ymin, ymax));\n plt.xlim((np.amin(timex), np.amax(timex)));\n #plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);\n #plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);\n tstr=argdict['tstr']\n plt.suptitle(tstr,fontsize=20);\n plt.ylabel('Out', **hfont,fontsize=18);\n plt.xlabel('Sample (n)', **hfont,fontsize=18);\n h.tick_params(labelsize=14)\n plt.grid(True);\n printstr=argdict['printstr']\n figure.savefig(printstr, format='eps', dpi=300);\n plt.close(\"all\")\n\n def constellation(self,argdict):\n ymax=argdict['ymax']\n ymin=argdict['ymin']\n I=argdict['I']\n Q=argdict['Q']\n tstr=argdict['tstr']\n printstr=argdict['printstr']\n msg=\"Generating %s\" %(printstr)\n self.print_log(type='I', msg=msg) \n\n figure=plt.figure()\n h=plt.subplot();\n hfont = {'fontname':'Sans'}\n plt.plot(I, Q, linestyle='None', marker='x')\n plt.ylim((ymin, ymax));\n plt.ylim((1.1*np.amin(Q), 1.1*np.amax(Q)));\n plt.xlim((1.1*np.amin(I), 1.1*np.amax(I)));\n #plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);\n #plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);\n tstr=argdict['tstr']\n plt.suptitle(tstr,fontsize=20);\n plt.ylabel('Q', **hfont,fontsize=18);\n plt.xlabel('I', **hfont,fontsize=18);\n h.tick_params(labelsize=14)\n plt.grid(True);\n printstr=argdict['printstr']\n figure.savefig(printstr, format='eps', dpi=300);\n plt.close(\"all\")\n\n\n def spectrum_analyzer(self, **kwargs):\n #Example argdict\n #argdict={'sig':self.signal_gen._Z.Data[i,:,0],'ymax':3, 'ymin':spectrumfloorideal,'nperseg':1024, \n # 'tstr' : \"Tx, User:%i\" %(i),'printstr':\"%s/F2_system_Tx_antennas_Spectrum_Rs_%i_k:%i.eps\" %(self.picpath, self.Rs, i)} \n ymax=kwargs.get('ymax',3)\n ymin=kwargs.get('ymin',-80)\n nperseg=kwargs.get('nperseg',1024) #Samples for the Welch periodogram seqment\n fs=kwargs.get('Rs',self.Rs)\n freqx=np.arange(nperseg)/nperseg*fs/1e6\n freqx.shape=(-1,1)\n sigin=kwargs['sigin']\n sigin.shape=(-1,1)\n tstr=kwargs['tstr']\n printstr=kwargs['printstr']\n msg=\"Generating %s\" %(printstr)\n self.print_log(type='I', msg=msg) \n figure=plt.figure()\n h=plt.subplot();\n hfont = {'fontname':'Sans'}\n fs, spe=sig.welch(sigin,fs=self.Rs,nperseg=nperseg,return_onesided=False,scaling='spectrum',axis=0)\n spelog=10*np.log10(np.abs(spe)/np.amax(np.abs(spe)))\n plt.plot(freqx,spelog, linewidth=2 )\n #plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)\n #plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)\n #plt.ylim((np.amin([self.a,self.b]), np.amax([self.a,self.b])));\n plt.ylim((ymin, ymax));\n plt.xlim((np.amin(freqx), np.amax(freqx)));\n #plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);\n #plt.plot(self.x,self.b,label='Blowing in the 
wind',linewidth=2);\n plt.suptitle(tstr,fontsize=20);\n plt.ylabel('Normalized Spectrum', **hfont,fontsize=18);\n plt.xlabel('Frequency (MHz)', **hfont,fontsize=18);\n h.tick_params(labelsize=14)\n #for axis in ['top','bottom','left','right']:\n #h.spines[axis].set_linewidth(2)\n #lgd=plt.legend(loc='upper right', fontsize=14);\n ##lgd.set_fontsize(12);\n plt.grid(True);\n figure.savefig(printstr, format='eps', dpi=300);\n plt.close(\"all\")\n\n def logic_analyzer(self,argdict):\n ymax=argdict['ymax']\n ymin=argdict['ymin']\n timex=argdict['timex']\n sigin=argdict['sigin']\n tstr = argdict['tstr']\n printstr=argdict['printstr']\n msg=\"Generating %s\" %(printstr)\n self.print_log(type='I', msg=msg) \n \n figure=plt.figure()\n h=plt.subplot();\n hfont = {'fontname':'Sans'}\n markerline, stemlines, baseline = plt.stem(timex, sigin, '-.')\n plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)\n plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)\n plt.ylim((ymin, ymax));\n plt.xlim((np.amin(timex), np.amax(timex)));\n plt.suptitle(tstr,fontsize=20);\n plt.ylabel('Out', **hfont,fontsize=18);\n plt.xlabel('Sample (n)', **hfont,fontsize=18);\n h.tick_params(labelsize=14)\n plt.grid(True);\n figure.savefig(printstr, format='eps', dpi=300);\n\n def evm_calculator(self,argdict):\n reference=argdict['ref']\n received=argdict['signal']\n \n #Shape the vectors: time is row observation is colum\n #if received.shape[0]<received.shape[1]:\n # received=np.transpose(received)\n reference.shape=(-1,1)\n received.shape=(-1,1)\n\n #RMS for Scaling\n rmsref=np.std(reference)\n rmsreceived=np.std(received)\n EVM=10*np.log10(np.mean(np.mean(np.abs(received/rmsreceived*rmsref-reference)**2,axis=0)/np.mean(np.abs(reference)**2,axis=0)))\n self.print_log(type='I', msg=\"Estimated EVM is %0.2f dB\" %(EVM))\n return EVM\n\n def ber_calculator(self,argdict):\n reference=argdict['ref']\n received=argdict['signal']\n \n #Shape the vectors: time is row observation is colum\n #if received.shape[0]<received.shape[1]:\n # received=np.transpose(received)\n\n #reference.shape=received.shape\n reference.shape=(-1,1)\n received.shape=(-1,1)\n \n #Discard samples rounded away in transmission\n #if received.shape[1] < reference.shape[1]:\n # reference=reference[:,0:received.shape[1]]\n\n errors=np.sum(np.sum(np.abs(received-reference),axis=0))/(received.shape[0]*received.shape[1])\n errors=np.sum(np.sum(np.abs(received-reference),axis=0))\n bits=(received.shape[0]*received.shape[1])\n self.print_log(type='I', msg=\"Received %i errors in %i bits\" %(int(errors), int(bits)))\n BER=errors/bits\n self.print_log(type='I', msg=\"Resulting BER is %0.3g\" %(BER))\n return BER\n\n#From Kosta. \ndef plot_generic(x, y_list, title_str, legend_list, xlabel_str, ylabel_str, xscale, yscale, plot_style_str='o-', xlim=[], ylim=[]):\n if (xscale, yscale) == ('linear', 'linear'):\n plot_type_str = 'plot'\n elif (xscale, yscale) == ('log', 'linear'):\n plot_type_str = 'semilogx'\n elif (xscale, yscale) == ('linear', 'log'):\n plot_type_str = 'semilogy'\n elif (xscale, yscale) == ('log', 'log'):\n plot_type_str = 'loglog'\n else:\n raise Exception('xscale = %s, yscale = %s, both should be linear or log!!' % (xscale, yscale))\n\n fig, ax = plt.subplots() # default is 1,1,1\n if (isinstance(x[0], list)) and (len(x) == len(y_list)): # several plots with different x values\n for x, y in zip(x, y_list):\n exec('ax.' 
+ plot_type_str + '(x, y, plot_style_str, linewidth=linewidth)')\n else:\n if (isinstance(y_list[0], list)): # several plots with the same x values\n for y in y_list:\n exec('ax.' + plot_type_str + '(x, y, plot_style_str, linewidth=linewidth)')\n else: # single plot only\n exec('ax.' + plot_type_str + '(x, y_list, plot_style_str, linewidth=linewidth)')\n if xlim != []:\n plt.xlim(xlim)\n if ylim != []:\n plt.ylim(ylim)\n ax.set_xlabel(xlabel_str, fontsize=fontsize)\n plt.ylabel(ylabel_str, fontsize=fontsize)\n if title_str == []:\n loc_y = 1.05\n else:\n plt.title(title_str, fontsize=fontsize)\n loc_y = 1\n if legend_list != []:\n plt.legend(legend_list, loc=(0, loc_y))\n plt.grid(True, which='both')\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n plt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.stem",
"matplotlib.pyplot.ylabel",
"scipy.signal.welch",
"numpy.amax",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.abs",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"numpy.std",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.amin",
"matplotlib.pyplot.xlabel"
]
] |
multichannelsystems/McsPyDataTools | [
"45777d5955043cc6849ea2f01ea442aa19141edd"
] | [
"McsPyDataTools/McsPy/McsCMOS.py"
] | [
"\"\"\"\r\n McsCMOS\r\n ~~~~~~~\r\n\r\n Wrapper and Helper to access MCS CMOS Data within H5 Files \r\n \r\n :copyright: (c) 2018 by Multi Channel Systems MCS GmbH\r\n :license: see LICENSE for more details\r\n\"\"\"\r\n\r\nimport h5py\r\nimport numpy as np\r\n\r\nclass CMOSData(h5py.File):\r\n \"\"\"\r\n Wrapper for a HDF5 File containing CMOS Data\r\n \"\"\"\r\n def __init__(self, path):\r\n \"\"\"\r\n Creates a CMOSData file and links it to a H5 File\r\n :param path: Path to a H5 File containing CMOS Data\r\n :type path: string \r\n \"\"\"\r\n super(CMOSData, self).__init__(path, mode='r')\r\n \r\n # -- map raw data --\r\n self.raw_data= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/FrameData']\r\n self.conv_factors= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/ConversionFactors']\r\n\r\n # - map proxy data -\r\n self.conv_data = CMOSConvProxy(self)\r\n\r\n # -- map meta --\r\n self.meta={}\r\n \r\n # - from InfoFrame\r\n info_frame= self['/Data/Recording_0/FrameStream/Stream_0/InfoFrame']\r\n \r\n for key in info_frame.dtype.names:\r\n if hasattr(info_frame[key][0], \"decode\"):\r\n self.meta[key]=info_frame[key][0].decode('utf-8')\r\n else:\r\n self.meta[key]=info_frame[key][0]\r\n\r\n if(\"Tick\" in self.meta):\r\n self.meta[\"FrameRate\"] = 10.0**6/self.meta[\"Tick\"]\r\n \r\n # - from File\r\n for key, value in self.attrs.items():\r\n if hasattr(value, \"decode\"):\r\n self.meta[key]= value.decode('utf-8')\r\n else:\r\n self.meta[key]= value\r\n\r\n # - from Data Group\r\n for key, value in self['/Data'].attrs.items():\r\n if hasattr(value, \"decode\"):\r\n self.meta[key]= value.decode('utf-8')\r\n else:\r\n self.meta[key]= value\r\n\r\n # -- map events --\r\n if(\"EventStream\" in self[\"Data/Recording_0/\"].keys()):\r\n event_group = self[\"Data/Recording_0/EventStream/Stream_0/\"]\r\n event_info = self[\"Data/Recording_0/EventStream/Stream_0/InfoEvent\"]\r\n\r\n self.events={}\r\n self.event_frames={}\r\n \r\n for key in event_group.keys():\r\n if \"EventEntity\" in key:\r\n info = event_info[\"Label\"][event_info[\"EventID\"]==int(key.split(\"_\")[1])][0]\r\n self.events[info] = event_group[key][0, 0]\r\n self.event_frames[info] = event_group[key][0, 0]/self.meta[\"Tick\"]\r\n\r\n\r\n\r\nclass CMOSConvProxy:\r\n \"\"\"\r\n Private Class, should be embedded within a CMOSData Object.\r\n A proxy that transparently converts raw data to calibrated data. 
\r\n \"\"\"\r\n\r\n def __init__(self, parent):\r\n \"\"\"\r\n Creates a new CMOSConvProxy\r\n :param parent: Object that can provide raw_data and conv_factors\r\n :type parent: CMOSData\r\n \"\"\"\r\n self._parent = parent\r\n self.dtype = np.int32\r\n\r\n def __getitem__(self, slices):\r\n \"\"\"\r\n Sliced access to converted data\r\n :param slices: Data-slices to retrieve\r\n :returns: converted data\r\n \"\"\"\r\n raw_data = self._parent.raw_data.__getitem__(slices)\r\n conv_fasctors = self._parent.conv_factors.__getitem__((slices[0], slices[1]))\r\n return (raw_data*conv_fasctors).astype(self.dtype)\r\n\r\n @property\r\n def shape(self):\r\n \"\"\"\r\n Shape of the data\r\n \"\"\"\r\n return self._parent.raw_data.shape\r\n\r\n\r\nclass CMOSSpikes(h5py.File):\r\n \"\"\"\r\n Wrapper for a HDF5 File containing CMOS Spike Data.\r\n Spike Information is accessible through the .spike Member,\r\n Waveform Information (if available) through the .waveforms Member.\r\n \"\"\"\r\n def __init__(self, path):\r\n super(CMOSSpikes, self).__init__(path)\r\n\r\n # -- Check for right structure --\r\n if(\"data\" in self.keys() and \"spikes\" in self['data'].keys()):\r\n \r\n # -- Map Spike-Data to RecordArray\r\n self.spikes = np.core.records.fromarrays(self['data/spikes'][:,:], \r\n names='time, col, row',\r\n formats = 'int64, int64, int64')\r\n # -- Map Waveforms to Array\r\n if(\"waveforms\" in self['data'].keys()):\r\n self.waveforms = self['data/waveforms'][:,:].transpose()\r\n \r\n else:\r\n raise IOError(path+ \" has no valid CMOSSpikeFile Structure\")"
] | [
[
"numpy.core.records.fromarrays"
]
] |
EXYNOS-999/AWS_JPL_DRL | [
"ea9df7f293058b0ca2dc63753e68182fcc5380f5"
] | [
"DRL/log_analysis/numpy2png.py"
] | [
"import numpy as np\nfrom PIL import Image\nimport sys\n\ndef numpy_to_png(source, dest):\n image = Image.fromarray(np.load(source))\n image.save(dest,\"PNG\")\n\nif __name__ == \"__main__\":\n source = sys.argv[1]\n dest = source.split('.npy')[0] + '.png'\n print(source, \" to \", dest)\n numpy_to_png(source, dest)\n\n"
] | [
[
"numpy.load"
]
] |
okdefinet/oknodes | [
"c74797f7d3b6a9ec690f0129ea5313d1afc2ae66"
] | [
"app.py"
] | [
"\"\"\"\nOpen Nodes web server\nCopyright (c) 2018 Opennodes / Blake Bjorn Anderson\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport gzip\nimport json\nimport os\nimport sys\nfrom io import BytesIO\n\nfrom flask import Flask, render_template, request, redirect, flash, Response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom geoip2.errors import AddressNotFoundError\nfrom sqlalchemy import and_\n\nfrom config import load_config, DefaultFlaskConfig\nfrom crawler import init_geoip, connect\nfrom models import *\nimport pandas as pd\nfrom autodoc import Autodoc\n\n\napp = Flask(__name__)\nauto = Autodoc(app)\napp.config.from_object(DefaultFlaskConfig())\napp.config.from_object('flask_config')\ndb = SQLAlchemy(app)\n\nCONF = load_config()\nCOUNTRY, CITY, ASN = init_geoip()\n\[email protected]('/')\[email protected]('/networks/<network_name>', methods=['GET'])\ndef network_dashboard(network_name=None):\n if not network_name in (\"okcash\", \"testnet\", None):\n flash(\"Invalid network\")\n return redirect(\"/\")\n\n with open(\"static/network_summaries.json\", 'r') as f:\n summaries = json.load(f)\n\n if network_name:\n age_min = summaries[network_name]['age_min']\n age_max = summaries[network_name]['age_max']\n else:\n age_min = min((summaries[network]['age_min'] for network in CONF['networks']))\n age_max = max((summaries[network]['age_max'] for network in CONF['networks']))\n\n return render_template(\"network_dashboard.html\",\n network=network_name,\n include_client=False if network_name is not None else False,\n include_user_agent=True if network_name is not None else False,\n include_network=True if network_name is None else False,\n include_version=True if network_name is not None else False,\n include_active=True if CONF['export_inactive_nodes'] else False,\n age_min=age_min * 1000.0,\n age_max=age_max * 1000.0)\n\n\ndef gzip_response(input_str, pre_compressed):\n response = Response()\n if not pre_compressed:\n buffer = BytesIO()\n gzip_file = gzip.GzipFile(mode='wb', fileobj=buffer)\n gzip_file.write(input_str if isinstance(input_str, bytes) else input_str.encode())\n gzip_file.close()\n response.data = buffer.getvalue()\n else:\n response.data = input_str\n response.headers['Content-Encoding'] = 'gzip'\n response.headers['Vary'] = 'Accept-Encoding'\n response.headers['Content-Length'] = len(response.data)\n return response\n\n\[email protected]('/api/get_networks', methods=['POST'])\[email protected]()\ndef get_networks():\n \"\"\"\n Returns a list of all available network names\n 
:return: JSON string, ex. \"['okcash','testnet']\"\n \"\"\"\n return json.dumps([x[0] for x in db.session.query(Node.network).distinct().all()])\n\n\[email protected]('/api/gzip_file/<filename>', methods=['GET'])\[email protected]()\ndef gzip_static_file(filename):\n \"\"\"\n Returns a crawl result as a gzipped response\n :param filename: file_network.ext - file is 'data' or 'history', ext is either .json, .csv, .txt (data.ext returns data for all crawled networks)\n :return: gzip encoded html response\n \"\"\"\n valid_files = [\"custom.geo.json\"]\n for coin in (\"\", \"_groestlcoin\", \"_testnet\"):\n for suff in (\"\", \"_unique\"):\n for ext in (\".csv\", \".json\", \".txt\"):\n valid_files.append(\"data\" + coin + suff + ext)\n valid_files.append(\"history\" + coin + '.json')\n if filename not in valid_files:\n return redirect(\"/\", code=404)\n with open(os.path.join(\"static\", filename), \"r\") as f:\n return gzip_response(f.read(), False)\n\ndef deconstruct_address_string(inp):\n assert isinstance(inp, str)\n\n resp = {}\n aliases = {'ok': 'okcash',\n 'tok': 'testnet'}\n\n inp = inp.lower()\n network = inp.split(\":\")[0]\n if network:\n inp = \":\".join(inp.split(\":\")[1:])\n network = aliases[network] if network in aliases else network\n network = network if network in CONF['networks'] else None\n if not network:\n network = \"okcash\"\n resp['warning'] = \"Network not recognized, using OK\"\n\n if \":\" in inp:\n port = inp.split(\":\")[-1]\n try:\n port = int(port)\n inp = \":\".join(inp.split(\":\")[:-1])\n except ValueError:\n resp['warning'] = \"port not recognized, using default\"\n port = int(CONF['networks'][network]['port'])\n else:\n port = int(CONF['networks'][network]['port'])\n\n return network, inp, port, resp\n\n\[email protected]('/api/check_node', methods=['POST'])\[email protected]()\ndef check_node():\n \"\"\"\n Checks the current status of a node. This is a live result, so response times will be longer - to view a saved\n result see /api/check_historic_node.\n :param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default\n :param to_services (integer, optional): outgoing services to broadcast, default=0\n :param from_services (integer, optional): outgoing services to broadcast, default=0\n :param version (integer, optional): version code to broadcast, default varies by network\n :param user_agent (string, optional): user agent to broadcast, default=\"/oknodes:0.1/\"\n :param height (integer, optional): block height to broadcast during handshake. default=network median\n :param p2p_nodes (bool, optional): issues a getaddr call and list of connected nodes, default=False\n :return: json dict {\"result\":{\"user_agent\":\"/oktoshi:5.0.0.2/\", \"version\":\" .... 
}, \"nodes\":[[\"127.0.0.1:6970, 157532132191], ...]}\n \"\"\"\n\n dat = request.form\n node = dat.get(\"node\")\n network, address, port, resp = deconstruct_address_string(node)\n\n network_data = CONF['networks'][network]\n if dat.get(\"height\"):\n network_data['height'] = dat.get(\"height\")\n else:\n with open(\"static/network_summaries.json\", 'r') as f:\n network_data['height'] = int(json.load(f)[network]['med'])\n\n network_data['protocol_version'] = dat.get(\"version\") or network_data['protocol_version']\n result = connect(network, address, port,\n to_services=dat.get(\"to_services\") or network_data['services'],\n network_data=network_data,\n user_agent=dat.get(\"user_agent\") or None,\n p2p_nodes=False,\n explicit_p2p=dat.get(\"p2p_nodes\") or False,\n from_services=dat.get('from_services') or None,\n keepalive=False)\n\n resp['result'] = result[0]\n resp['nodes'] = result[1]\n\n resp['result'] = geocode(resp['result'])\n return to_json(resp)\n\n\[email protected]('/api/check_historic_node', methods=['POST', 'GET'])\[email protected]()\ndef check_historic_node():\n \"\"\"\n Checks the status of a node based on the last crawl\n result see /api/check_historical_node\n :param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default\n :return: json dict {\"result\":{\"user_agent\":\"/oktoshi:5.0.0.2/\", \"version\":\" .... }}\n \"\"\"\n\n if request.method == \"POST\":\n dat = request.form\n else:\n dat = request.args\n node = dat.get(\"node\")\n\n network, address, port, resp = deconstruct_address_string(node)\n\n if network not in CONF['networks']:\n return json.dumps({'error': \"network not recognized\"})\n\n result = db.session.query(Node).get((network, address, port))\n resp['result'] = \"None\" if result is None else result.to_dict()\n\n return to_json(resp)\n\n\[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\[email protected](\"/api_docs\")\ndef api_docs():\n return auto.html()\n\n\[email protected]('/api/get_nodes', methods=['POST'])\[email protected]()\ndef get_node_list():\n \"\"\"\n Gets a list of all nodes visible during the past 30 days\n :param network (optional): Filters the result set based on the given network\n :return: json array [{\"address\":\"127.0.0.1\" ... }, {\"address\":\"0.0.0.0\", \"port:6970}]\n \"\"\"\n\n q = db.session.query(Node.network, Node.address, Node.port, Node.user_agent, Node.version, Node.first_seen,\n Node.last_seen, Node.last_checked, Node.country, Node.city, Node.asn, Node.aso).filter(\n Node.seen)\n if request.args.get(\"network\") is not None:\n network = request.args.get(\"network\")\n if network not in CONF['networks']:\n return {\"error\": \"network must be one of \" + \", \".join(CONF['networks'])}\n q = q.filter(Node.network == network)\n return pd.read_sql(q.statement, q.session.bind).to_json(orient='records')\n\[email protected]('/api/node_history', methods=['POST'])\[email protected]()\ndef get_node_history():\n \"\"\"\n Returns the data associated with a node, and all crawler visitations on record\n :param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default.\n :return: json dict {\"node\":{\"user_agent\":\"/oktoshi/\", \"last_seen\": ... 
}, \"history\":{\"timestamp\":157032190321,\"height\":56000, \"success\":1 ...}}\n \"\"\"\n\n\n node = request.form.get(\"node\")\n\n network, address, port, resp = deconstruct_address_string(node)\n\n if network not in CONF['networks']:\n return json.dumps({'error': \"network not recognized\"})\n\n default_port = int(CONF['networks'][network]['port'])\n\n resp = {}\n\n try:\n port = int(port) if port is not None else default_port\n except ValueError:\n resp['warning'] = \"port not recognized, using default\"\n port = default_port\n\n n = db.session.query(Node.network, Node.address, Node.port, Node.user_agent, Node.version, Node.first_seen,\n Node.last_seen, Node.last_checked, Node.country, Node.city, Node.asn, Node.aso) \\\n .filter(and_(Node.network == network, Node.address == address, Node.port == port)).one()\n\n q = db.session.query(NodeVisitation.timestamp, NodeVisitation.height, NodeVisitation.success) \\\n .join(Node, and_(Node.network == NodeVisitation.network, Node.address == NodeVisitation.address,\n Node.port == NodeVisitation.port)) \\\n .filter(and_(Node.network == network, Node.address == address, Node.port == port)) \\\n .order_by(NodeVisitation.timestamp.desc())\n\n df = pd.read_sql(q.statement, q.session.bind)\n df['timestamp'] = df['timestamp'].astype(pd.np.int64) // 10 ** 9\n\n resp.update({\"node\": {\"network\": n.network, 'address': n.address, \"port\": n.port, \"user_agent\": n.user_agent,\n \"version\": n.version,\n \"first_seen\": n.first_seen,\n \"last_seen\": n.last_seen,\n \"last_checked\": n.last_checked,\n \"country\": n.country, \"city\": n.city, \"asn\": n.asn, \"aso\": n.aso},\n \"history\": df.to_dict(orient='records')})\n return to_json(resp)\n\n\ndef geocode(result):\n if result and result['address'].endswith('.onion'):\n aso, asn, country, city = \"Anonymous\", \"Anonymous\", \"Anonymous\", \"Anonymous\"\n elif result:\n try:\n aso = ASN.asn(result['address']).autonomous_system_organization\n asn = ASN.asn(result['address']).autonomous_system_number\n except AddressNotFoundError:\n aso = None\n asn = None\n\n try:\n country = COUNTRY.country(result['address']).country.name\n except AddressNotFoundError:\n country = None\n\n try:\n city = CITY.city(result['address']).city.name\n except AddressNotFoundError:\n city = None\n else:\n return result\n\n result['aso'] = aso\n result['asn'] = asn\n result['country'] = country\n result['city'] = city\n return result\n\n\ndef clean_dates(d):\n for i in d:\n if isinstance(d[i], datetime.datetime):\n d[i] = d[i].timestamp()\n if isinstance(d[i], dict):\n d[i] = clean_dates(d[i])\n return d\n\n\ndef to_json(d):\n \"\"\"\n Sanitizes a dictionary - converts datetime.datetime instances to timestamps\n :param d: dictionary\n :return: json string\n \"\"\"\n d = clean_dates(d)\n return json.dumps(d)\n\n\n\n\n\ndef main():\n app.run(\"0.0.0.0\", debug=False if \"--prod\" in sys.argv else True, port=8888 if \"--prod\" in sys.argv else 5000)\n # app.run(\"0.0.0.0\", debug=False if \"--prod\" in sys.argv else True, port=443 if \"--prod\" in sys.argv else 5000, ssl_context=('/etc/letsencrypt/live/nodes.okcash.org/fullchain.pem', '/etc/letsencrypt/live/nodes.okcash.org/privkey.pem'))\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_sql"
]
] |
Derollez/PyAbel | [
"c8be4ed7e8e08ee026634b9e856fb473e58d7330"
] | [
"abel/tools/polar.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom scipy.ndimage import map_coordinates\nfrom scipy.ndimage.interpolation import shift\nfrom scipy.optimize import curve_fit, minimize\n\n\ndef reproject_image_into_polar(data, origin=None, Jacobian=False,\n dr=1, dt=None):\n \"\"\"\n Reprojects a 2D numpy array (``data``) into a polar coordinate system.\n \"origin\" is a tuple of (x0, y0) relative to the bottom-left image corner,\n and defaults to the center of the image.\n\n Parameters\n ----------\n data : 2D np.array\n origin : tuple\n The coordinate of the image center, relative to bottom-left\n Jacobian : boolean\n Include ``r`` intensity scaling in the coordinate transform. \n This should be included to account for the changing pixel size that \n occurs during the transform.\n dr : float\n Radial coordinate spacing for the grid interpolation\n tests show that there is not much point in going below 0.5\n dt : float\n Angular coordinate spacing (in radians)\n if ``dt=None``, dt will be set such that the number of theta values\n is equal to the maximum value between the height or the width of \n the image.\n\n Returns\n -------\n output : 2D np.array\n The polar image (r, theta)\n r_grid : 2D np.array\n meshgrid of radial coordinates\n theta_grid : 2D np.array\n meshgrid of theta coordinates\n \n Notes\n -----\n Adapted from: \n http://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system\n \n \"\"\"\n # bottom-left coordinate system requires numpy image to be np.flipud\n data = np.flipud(data)\n\n ny, nx = data.shape[:2]\n if origin is None:\n origin = (nx//2, ny//2) \n\n # Determine that the min and max r and theta coords will be...\n x, y = index_coords(data, origin=origin) # (x,y) coordinates of each pixel\n r, theta = cart2polar(x, y) # convert (x,y) -> (r,θ), note θ=0 is vertical\n\n nr = np.int(np.ceil((r.max()-r.min())/dr))\n\n if dt is None:\n nt = max(nx, ny)\n else:\n # dt in radians\n nt = np.int(np.ceil((theta.max()-theta.min())/dt))\n\n # Make a regular (in polar space) grid based on the min and max r & theta\n r_i = np.linspace(r.min(), r.max(), nr, endpoint=False)\n theta_i = np.linspace(theta.min(), theta.max(), nt, endpoint=False)\n theta_grid, r_grid = np.meshgrid(theta_i, r_i)\n\n # Project the r and theta grid back into pixel coordinates\n X, Y = polar2cart(r_grid, theta_grid)\n\n X += origin[0] # We need to shift the origin\n Y += origin[1] # back to the bottom-left corner...\n xi, yi = X.flatten(), Y.flatten()\n coords = np.vstack((yi, xi)) # (map_coordinates requires a 2xn array)\n\n zi = map_coordinates(data, coords)\n output = zi.reshape((nr, nt))\n\n if Jacobian:\n output = output*r_i[:, np.newaxis]\n\n return output, r_grid, theta_grid\n\n\ndef index_coords(data, origin=None):\n \"\"\"\n Creates x & y coords for the indicies in a numpy array\n \n Parameters\n ----------\n data : numpy array\n 2D data\n origin : (x,y) tuple\n defaults to the center of the image. 
Specify origin=(0,0)\n to set the origin to the *bottom-left* corner of the image.\n \n Returns\n -------\n x, y : arrays\n \"\"\"\n ny, nx = data.shape[:2]\n if origin is None:\n origin_x, origin_y = nx//2, ny//2\n else:\n origin_x, origin_y = origin\n\n x, y = np.meshgrid(np.arange(float(nx)), np.arange(float(ny)))\n\n x -= origin_x\n y -= origin_y\n return x, y\n\n\ndef cart2polar(x, y):\n \"\"\"\n Transform Cartesian coordinates to polar\n \n Parameters\n ----------\n x, y : floats or arrays\n Cartesian coordinates\n \n Returns\n -------\n r, theta : floats or arrays\n Polar coordinates\n \n \"\"\"\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(x, y) # θ referenced to vertical\n return r, theta\n\n\ndef polar2cart(r, theta):\n \"\"\"\n Transform polar coordinates to Cartesian\n \n Parameters\n -------\n r, theta : floats or arrays\n Polar coordinates\n \n Returns\n ----------\n x, y : floats or arrays\n Cartesian coordinates\n \"\"\"\n y = r * np.cos(theta) # θ referenced to vertical\n x = r * np.sin(theta)\n return x, y\n"
] | [
[
"numpy.vstack",
"numpy.arctan2",
"numpy.flipud",
"numpy.cos",
"scipy.ndimage.map_coordinates",
"numpy.sqrt",
"numpy.sin",
"numpy.meshgrid"
]
] |
siddheshmhatre/cuml | [
"ed0e58c6b3ebfc17b944cdad7c04cd4af8860736"
] | [
"python/cuml/test/test_umap.py"
] | [
"# Copyright (c) 2019-2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n# Please install UMAP before running the code\n# use 'conda install -c conda-forge umap-learn' command to install it\n\nimport numpy as np\nimport pytest\nimport umap\nimport copy\n\nimport cupyx\nimport scipy.sparse\n\nfrom cuml.manifold.umap import UMAP as cuUMAP\nfrom cuml.test.utils import array_equal, unit_param, \\\n quality_param, stress_param\nfrom sklearn.neighbors import NearestNeighbors\n\nimport joblib\n\nfrom cuml.common import logger\n\nfrom sklearn import datasets\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\nfrom sklearn.manifold import trustworthiness\nfrom sklearn.metrics import adjusted_rand_score\n\ndataset_names = ['iris', 'digits', 'wine', 'blobs']\n\n\[email protected]('nrows', [unit_param(500), quality_param(5000),\n stress_param(500000)])\[email protected]('n_feats', [unit_param(20), quality_param(100),\n stress_param(1000)])\ndef test_blobs_cluster(nrows, n_feats):\n\n data, labels = datasets.make_blobs(\n n_samples=nrows, n_features=n_feats, centers=5, random_state=0)\n embedding = cuUMAP().fit_transform(data, convert_dtype=True)\n\n if nrows < 500000:\n score = adjusted_rand_score(labels,\n KMeans(5).fit_predict(embedding))\n assert score == 1.0\n\n\[email protected]('nrows', [unit_param(500), quality_param(5000),\n stress_param(500000)])\[email protected]('n_feats', [unit_param(10), quality_param(100),\n stress_param(1000)])\ndef test_umap_fit_transform_score(nrows, n_feats):\n\n n_samples = nrows\n n_features = n_feats\n\n data, labels = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=10, random_state=42)\n\n model = umap.UMAP(n_neighbors=10, min_dist=0.1)\n cuml_model = cuUMAP(n_neighbors=10, min_dist=0.01)\n\n embedding = model.fit_transform(data)\n cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)\n\n assert not np.isnan(embedding).any()\n assert not np.isnan(cuml_embedding).any()\n\n if nrows < 500000:\n cuml_score = adjusted_rand_score(labels,\n KMeans(10).fit_predict(\n cuml_embedding))\n score = adjusted_rand_score(labels,\n KMeans(10).fit_predict(embedding))\n\n assert array_equal(score, cuml_score, 1e-2, with_sign=True)\n\n\ndef test_supervised_umap_trustworthiness_on_iris():\n iris = datasets.load_iris()\n data = iris.data\n embedding = cuUMAP(n_neighbors=10, random_state=0,\n min_dist=0.01).fit_transform(\n data, iris.target, convert_dtype=True)\n trust = trustworthiness(iris.data, embedding, n_neighbors=10)\n assert trust >= 0.97\n\n\ndef test_semisupervised_umap_trustworthiness_on_iris():\n iris = datasets.load_iris()\n data = iris.data\n target = iris.target.copy()\n target[25:75] = -1\n embedding = cuUMAP(n_neighbors=10, random_state=0,\n min_dist=0.01).fit_transform(\n data, target, convert_dtype=True)\n\n trust = trustworthiness(iris.data, embedding, n_neighbors=10)\n assert trust >= 0.97\n\n\ndef test_umap_trustworthiness_on_iris():\n iris = datasets.load_iris()\n data = 
iris.data\n embedding = cuUMAP(n_neighbors=10, min_dist=0.01,\n random_state=0).fit_transform(\n data, convert_dtype=True)\n trust = trustworthiness(iris.data, embedding, n_neighbors=10)\n assert trust >= 0.97\n\n\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\ndef test_umap_transform_on_iris(target_metric):\n\n iris = datasets.load_iris()\n\n iris_selection = np.random.RandomState(42).choice(\n [True, False], 150, replace=True, p=[0.75, 0.25])\n data = iris.data[iris_selection]\n\n fitter = cuUMAP(n_neighbors=10, init=\"random\", n_epochs=800, min_dist=0.01,\n random_state=42, target_metric=target_metric)\n fitter.fit(data, convert_dtype=True)\n new_data = iris.data[~iris_selection]\n embedding = fitter.transform(new_data, convert_dtype=True)\n\n assert not np.isnan(embedding).any()\n\n trust = trustworthiness(new_data, embedding, n_neighbors=10)\n assert trust >= 0.85\n\n\[email protected]('input_type', ['cupy', 'scipy'])\[email protected]('xform_method', ['fit', 'fit_transform'])\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\ndef test_umap_transform_on_digits_sparse(target_metric, input_type,\n xform_method):\n\n digits = datasets.load_digits()\n\n digits_selection = np.random.RandomState(42).choice(\n [True, False], 1797, replace=True, p=[0.75, 0.25])\n\n if input_type == 'cupy':\n sp_prefix = cupyx.scipy.sparse\n else:\n sp_prefix = scipy.sparse\n\n data = sp_prefix.csr_matrix(\n scipy.sparse.csr_matrix(digits.data[digits_selection]))\n\n fitter = cuUMAP(n_neighbors=15,\n verbose=logger.level_info,\n init=\"random\",\n n_epochs=0,\n min_dist=0.01,\n random_state=42,\n target_metric=target_metric)\n\n new_data = sp_prefix.csr_matrix(\n scipy.sparse.csr_matrix(digits.data[~digits_selection]))\n\n if xform_method == 'fit':\n fitter.fit(data, convert_dtype=True)\n embedding = fitter.transform(new_data, convert_dtype=True)\n else:\n embedding = fitter.fit_transform(new_data, convert_dtype=True)\n\n if input_type == 'cupy':\n embedding = embedding.get()\n\n trust = trustworthiness(digits.data[~digits_selection], embedding,\n n_neighbors=15)\n assert trust >= 0.96\n\n\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\ndef test_umap_transform_on_digits(target_metric):\n\n digits = datasets.load_digits()\n\n digits_selection = np.random.RandomState(42).choice(\n [True, False], 1797, replace=True, p=[0.75, 0.25])\n data = digits.data[digits_selection]\n\n fitter = cuUMAP(n_neighbors=15,\n verbose=logger.level_debug,\n init=\"random\",\n n_epochs=0,\n min_dist=0.01,\n random_state=42,\n target_metric=target_metric)\n fitter.fit(data, convert_dtype=True)\n\n new_data = digits.data[~digits_selection]\n\n embedding = fitter.transform(new_data, convert_dtype=True)\n trust = trustworthiness(digits.data[~digits_selection], embedding,\n n_neighbors=15)\n assert trust >= 0.96\n\n\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\[email protected]('name', dataset_names)\ndef test_umap_fit_transform_trust(name, target_metric):\n\n if name == 'iris':\n iris = datasets.load_iris()\n data = iris.data\n labels = iris.target\n\n elif name == 'digits':\n digits = datasets.load_digits(n_class=5)\n data = digits.data\n labels = digits.target\n\n elif name == 'wine':\n wine = datasets.load_wine()\n data = wine.data\n labels = wine.target\n else:\n data, labels = make_blobs(n_samples=500, n_features=10,\n centers=10, random_state=42)\n\n model = umap.UMAP(n_neighbors=10, min_dist=0.01,\n target_metric=target_metric)\n cuml_model = 
cuUMAP(n_neighbors=10, min_dist=0.01,\n target_metric=target_metric)\n embedding = model.fit_transform(data)\n cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)\n\n trust = trustworthiness(data, embedding, n_neighbors=10)\n cuml_trust = trustworthiness(data, cuml_embedding, n_neighbors=10)\n\n assert array_equal(trust, cuml_trust, 1e-1, with_sign=True)\n\n\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\[email protected]('name', [unit_param('digits')])\[email protected]('nrows', [quality_param(5000),\n stress_param(500000)])\[email protected]('n_feats', [quality_param(100),\n stress_param(1000)])\[email protected]('should_downcast', [True])\[email protected]('input_type', ['dataframe', 'ndarray'])\ndef test_umap_data_formats(input_type, should_downcast,\n nrows, n_feats, name, target_metric):\n\n dtype = np.float32 if not should_downcast else np.float64\n n_samples = nrows\n n_feats = n_feats\n\n if name == 'digits':\n # use the digits dataset for unit test\n digits = datasets.load_digits(n_class=9)\n X = digits[\"data\"].astype(dtype)\n\n else:\n X, y = datasets.make_blobs(n_samples=n_samples,\n n_features=n_feats, random_state=0)\n\n umap = cuUMAP(n_neighbors=3, n_components=2, target_metric=target_metric)\n\n embeds = umap.fit_transform(X)\n assert type(embeds) == np.ndarray\n\n\[email protected]('target_metric', [\"categorical\", \"euclidean\"])\[email protected](\"ignore:(.*)connected(.*):UserWarning:sklearn[.*]\")\ndef test_umap_fit_transform_score_default(target_metric):\n\n n_samples = 500\n n_features = 20\n\n data, labels = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=10, random_state=42)\n\n model = umap.UMAP(target_metric=target_metric)\n cuml_model = cuUMAP(target_metric=target_metric)\n\n embedding = model.fit_transform(data)\n cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)\n\n cuml_score = adjusted_rand_score(labels,\n KMeans(10).fit_predict(\n cuml_embedding))\n score = adjusted_rand_score(labels,\n KMeans(10).fit_predict(embedding))\n\n assert array_equal(score, cuml_score, 1e-2, with_sign=True)\n\n\ndef test_umap_fit_transform_against_fit_and_transform():\n\n n_samples = 500\n n_features = 20\n\n data, labels = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=10, random_state=42)\n\n \"\"\"\n First test the default option does not hash the input\n \"\"\"\n\n cuml_model = cuUMAP()\n\n ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)\n fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)\n\n assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_same_input)\n\n \"\"\"\n Next, test explicitly enabling feature hashes the input\n \"\"\"\n\n cuml_model = cuUMAP(hash_input=True)\n\n ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)\n fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)\n\n assert joblib.hash(ft_embedding) == joblib.hash(fit_embedding_same_input)\n\n fit_embedding_diff_input = cuml_model.transform(data[1:],\n convert_dtype=True)\n assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_diff_input)\n\n\[email protected]('n_components,random_state',\n [unit_param(2, None),\n unit_param(2, 8),\n unit_param(2, np.random.RandomState(42)),\n unit_param(21, None),\n unit_param(21, np.random.RandomState(42)),\n unit_param(25, 8),\n unit_param(50, None),\n stress_param(50, 8)])\ndef test_umap_fit_transform_reproducibility(n_components, random_state):\n\n n_samples = 8000\n n_features = 
200\n\n if random_state is None:\n n_components *= 2\n\n data, labels = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=10, random_state=42)\n\n def get_embedding(n_components, random_state):\n reducer = cuUMAP(init=\"random\",\n n_components=n_components,\n random_state=random_state)\n return reducer.fit_transform(data, convert_dtype=True)\n\n state = copy.copy(random_state)\n cuml_embedding1 = get_embedding(n_components, state)\n state = copy.copy(random_state)\n cuml_embedding2 = get_embedding(n_components, state)\n\n assert not np.isnan(cuml_embedding1).any()\n assert not np.isnan(cuml_embedding2).any()\n\n # Reproducibility threshold raised until intermittent failure is fixed\n # Ref: https://github.com/rapidsai/cuml/issues/1903\n mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))\n if random_state is not None:\n assert mean_diff == 0.0\n else:\n assert mean_diff > 0.5\n\n\[email protected]('n_components,random_state',\n [unit_param(2, None),\n unit_param(2, 8),\n unit_param(2, np.random.RandomState(42)),\n unit_param(21, None),\n unit_param(25, 8),\n unit_param(25, np.random.RandomState(42)),\n unit_param(50, None),\n stress_param(50, 8)])\ndef test_umap_transform_reproducibility(n_components, random_state):\n\n n_samples = 5000\n n_features = 200\n\n if random_state is None:\n n_components *= 2\n\n data, labels = make_blobs(n_samples=n_samples, n_features=n_features,\n centers=10, random_state=42)\n\n selection = np.random.RandomState(42).choice(\n [True, False], n_samples, replace=True, p=[0.5, 0.5])\n fit_data = data[selection]\n transform_data = data[~selection]\n\n def get_embedding(n_components, random_state):\n reducer = cuUMAP(init=\"random\",\n n_components=n_components,\n random_state=random_state)\n reducer.fit(fit_data, convert_dtype=True)\n return reducer.transform(transform_data, convert_dtype=True)\n\n state = copy.copy(random_state)\n cuml_embedding1 = get_embedding(n_components, state)\n state = copy.copy(random_state)\n cuml_embedding2 = get_embedding(n_components, state)\n\n assert not np.isnan(cuml_embedding1).any()\n assert not np.isnan(cuml_embedding2).any()\n\n # Reproducibility threshold raised until intermittent failure is fixed\n # Ref: https://github.com/rapidsai/cuml/issues/1903\n mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))\n if random_state is not None:\n assert mean_diff == 0.0\n else:\n assert mean_diff > 0.5\n\n\ndef test_umap_fit_transform_trustworthiness_with_consistency_enabled():\n iris = datasets.load_iris()\n data = iris.data\n algo = cuUMAP(n_neighbors=10, min_dist=0.01, init=\"random\",\n random_state=42)\n embedding = algo.fit_transform(data, convert_dtype=True)\n trust = trustworthiness(iris.data, embedding, n_neighbors=10)\n assert trust >= 0.97\n\n\ndef test_umap_transform_trustworthiness_with_consistency_enabled():\n iris = datasets.load_iris()\n data = iris.data\n selection = np.random.RandomState(42).choice(\n [True, False], data.shape[0], replace=True, p=[0.5, 0.5])\n fit_data = data[selection]\n transform_data = data[~selection]\n model = cuUMAP(n_neighbors=10, min_dist=0.01, init=\"random\",\n random_state=42)\n model.fit(fit_data, convert_dtype=True)\n embedding = model.transform(transform_data, convert_dtype=True)\n trust = trustworthiness(transform_data, embedding, n_neighbors=10)\n assert trust >= 0.92\n\n\[email protected](\"ignore:(.*)zero(.*)::scipy[.*]|umap[.*]\")\ndef test_exp_decay_params():\n def compare_exp_decay_params(a=None, b=None, min_dist=0.1, spread=1.0):\n cuml_model 
= cuUMAP(a=a, b=b, min_dist=min_dist, spread=spread)\n state = cuml_model.__getstate__()\n cuml_a, cuml_b = state['a'], state['b']\n skl_model = umap.UMAP(a=a, b=b, min_dist=min_dist, spread=spread)\n skl_model.fit(np.zeros((1, 1)))\n sklearn_a, sklearn_b = skl_model._a, skl_model._b\n\n assert abs(cuml_a) - abs(sklearn_a) < 1e-6\n assert abs(cuml_b) - abs(sklearn_b) < 1e-6\n\n compare_exp_decay_params(min_dist=0.1, spread=1.0)\n compare_exp_decay_params(a=0.5, b=2.0)\n compare_exp_decay_params(a=0.5)\n compare_exp_decay_params(b=0.5)\n compare_exp_decay_params(min_dist=0.1, spread=10.0)\n\n\[email protected]('n_neighbors', [5, 15])\ndef test_umap_knn_parameters(n_neighbors):\n data, labels = datasets.make_blobs(\n n_samples=2000, n_features=10, centers=5, random_state=0)\n data = data.astype(np.float32)\n\n def fit_transform_embed(knn_graph=None):\n model = cuUMAP(random_state=42,\n init='random',\n n_neighbors=n_neighbors)\n return model.fit_transform(data, knn_graph=knn_graph,\n convert_dtype=True)\n\n def transform_embed(knn_graph=None):\n model = cuUMAP(random_state=42,\n init='random',\n n_neighbors=n_neighbors)\n model.fit(data, knn_graph=knn_graph, convert_dtype=True)\n return model.transform(data, knn_graph=knn_graph,\n convert_dtype=True)\n\n def test_trustworthiness(embedding):\n trust = trustworthiness(data, embedding, n_neighbors=n_neighbors)\n assert trust >= 0.92\n\n def test_equality(e1, e2):\n mean_diff = np.mean(np.abs(e1 - e2))\n print(\"mean diff: %s\" % mean_diff)\n assert mean_diff < 1.0\n\n neigh = NearestNeighbors(n_neighbors=n_neighbors)\n neigh.fit(data)\n knn_graph = neigh.kneighbors_graph(data, mode=\"distance\")\n\n embedding1 = fit_transform_embed(None)\n embedding2 = fit_transform_embed(knn_graph.tocsr())\n embedding3 = fit_transform_embed(knn_graph.tocoo())\n embedding4 = fit_transform_embed(knn_graph.tocsc())\n embedding5 = transform_embed(knn_graph.tocsr())\n embedding6 = transform_embed(knn_graph.tocoo())\n embedding7 = transform_embed(knn_graph.tocsc())\n\n test_trustworthiness(embedding1)\n test_trustworthiness(embedding2)\n test_trustworthiness(embedding3)\n test_trustworthiness(embedding4)\n test_trustworthiness(embedding5)\n test_trustworthiness(embedding6)\n test_trustworthiness(embedding7)\n\n test_equality(embedding2, embedding3)\n test_equality(embedding3, embedding4)\n test_equality(embedding5, embedding6)\n test_equality(embedding6, embedding7)\n"
] | [
[
"sklearn.neighbors.NearestNeighbors",
"numpy.zeros",
"sklearn.manifold.trustworthiness",
"numpy.abs",
"sklearn.datasets.load_wine",
"numpy.random.RandomState",
"sklearn.cluster.KMeans",
"numpy.isnan",
"sklearn.datasets.load_digits",
"sklearn.datasets.make_blobs",
"sklearn.datasets.load_iris"
]
] |
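The tests in the row above all score an embedding with `sklearn.manifold.trustworthiness`. A minimal CPU-only sketch of that pattern is below; it substitutes the reference `umap-learn` package for cuml's `cuUMAP` (an assumption for illustration, since cuml requires a CUDA GPU), and the threshold of 0.97 simply mirrors the assertion in the row.

```python
# Minimal sketch: embed iris and score the embedding with trustworthiness,
# mirroring the test pattern above. umap-learn stands in for cuml's cuUMAP.
from sklearn import datasets
from sklearn.manifold import trustworthiness
import umap  # pip install umap-learn

iris = datasets.load_iris()
embedding = umap.UMAP(n_neighbors=10, min_dist=0.01,
                      random_state=0).fit_transform(iris.data)

# trustworthiness is in [0, 1]; values near 1 mean local neighborhoods
# in the original space are preserved in the embedding.
print(trustworthiness(iris.data, embedding, n_neighbors=10))
```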
kaylani2/machineLearning | [
"692623abf6fe02bde6c7da6c2f8c0ec526a3e8f8"
] | [
"src/specific_models/federated/single_machine_simulation_flower/single_machine_simulation.py"
] | [
"import os\nimport time\nfrom multiprocessing import Process\nfrom typing import Tuple\n\nimport flwr as fl\nimport numpy as np\nimport tensorflow as tf\nfrom flwr.server.strategy import FedAvg\n\nimport dataset\n\n# generate random integer values\nfrom random import seed\nfrom random import randint\n\n# Make TensorFlow log less verbose\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n# K: Prevent TF from using GPU (not enough memory)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\nDATASET = Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]\n\n\ndef start_server(num_rounds: int, num_clients: int, fraction_fit: float):\n \"\"\"Start the server with a slightly adjusted FedAvg strategy.\"\"\"\n strategy = FedAvg(min_available_clients=num_clients, fraction_fit=fraction_fit)\n # Exposes the server by default on port 8080\n fl.server.start_server(strategy=strategy, config={\"num_rounds\": num_rounds})\n\n\ndef start_client(dataset: DATASET) -> None:\n \"\"\"Start a single client with the provided dataset.\"\"\"\n\n # Load and compile a Keras model for CIFAR-10\n #model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)\n model = tf.keras.Sequential(\n [\n tf.keras.Input(shape=(32, 32, 3)),\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.MeanSquaredError()])\n ### @TODO: check if \"accuracy\" and tf.keras.metrics.CategoricalAccuracy() return the same results\n\n # Unpack the CIFAR-10 dataset partition\n (x_train, y_train), (x_test, y_test) = dataset\n\n # Define a Flower client\n class CifarClient(fl.client.NumPyClient):\n def get_parameters(self):\n \"\"\"Return current weights.\"\"\"\n return model.get_weights()\n\n def fit(self, parameters, config):\n \"\"\"Fit model and return new weights as well as number of training\n examples.\"\"\"\n model.set_weights(parameters)\n # Remove steps_per_epoch if you want to train over the full dataset\n # https://keras.io/api/models/model_training_apis/#fit-method\n #nap_time = randint (0, 5)\n #time.sleep (nap_time)\n #print (\"Slept for\", nap_time, \"seconds.\")\n model.fit(x_train, y_train, epochs=10, batch_size=256, steps_per_epoch=10)\n return model.get_weights(), len(x_train), {}\n\n def evaluate(self, parameters, config):\n \"\"\"Evaluate using provided parameters.\"\"\"\n model.set_weights(parameters)\n loss, accuracy, mse = model.evaluate(x_test, y_test)\n print ('\"Loss:', loss, \". Accuracy:\", accuracy, \". 
MSE:\", mse, \".\")\n return loss, len(x_test), {\"accuracy\": accuracy}\n\n # Start Flower client\n fl.client.start_numpy_client(\"0.0.0.0:8080\", client=CifarClient())\n\n\ndef run_simulation(num_rounds: int, num_clients: int, fraction_fit: float):\n \"\"\"Start a FL simulation.\"\"\"\n\n # This will hold all the processes which we are going to create\n processes = []\n\n # Start the server\n server_process = Process(\n target=start_server, args=(num_rounds, num_clients, fraction_fit)\n )\n server_process.start()\n processes.append(server_process)\n\n # Optionally block the script here for a second or two so the server has time to start\n time.sleep(2)\n\n # Load the dataset partitions\n partitions = dataset.load(num_partitions=num_clients)\n\n # Start all the clients\n for partition in partitions:\n client_process = Process(target=start_client, args=(partition,))\n client_process.start()\n processes.append(client_process)\n\n # Block until all processes are finished\n for p in processes:\n p.join()\n\n\nif __name__ == \"__main__\":\n run_simulation(num_rounds=100, num_clients=5, fraction_fit=0.5)\n"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.keras.metrics.MeanSquaredError",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Input"
]
] |
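The client model in the row above can be built and compiled standalone, without Flower. A minimal sketch follows; the metric choice is an assumption that addresses the `@TODO` in the row: with integer labels and `sparse_categorical_crossentropy`, the matching metric is `sparse_categorical_accuracy`, whereas `CategoricalAccuracy` expects one-hot labels.

```python
# Minimal sketch of the federated client's CNN: a small Keras model for
# 32x32x3 inputs (CIFAR-10 shaped) with 10 softmax outputs.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(32, 32, 3)),
    tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation="softmax"),
])
# sparse_categorical_accuracy pairs with integer labels; CategoricalAccuracy
# (as used in the row above) assumes one-hot labels instead.
model.compile("adam", "sparse_categorical_crossentropy",
              metrics=["sparse_categorical_accuracy"])
model.summary()
```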
gajen105/video-servillance-analysis-sysytem | [
"c79f9010aa7c2fe00297fb0370b0635603faf135"
] | [
"classify.py"
] | [
"import glob\nimport datetime\nimport inference\nimport numpy as np\nflist = []\ndef run_classifier():\n flist = []\n list1 = glob.glob(\"./images/*.jpg\")\n list1.sort()\n print(\"Printing the time of Interesting Events.....\\n\\n\")\n temp = str(inference.run_inference_on_image())\n for i in range(len(list1) - 1):\n inference.imagePath = list1[i]\n temp2 = str(inference.run_inference_on_image2())\n inference.imagePath = list1[i+1]\n temp = str(inference.run_inference_on_image2())\n if temp2 != temp:\n print(\"Time : \" + str(datetime.timedelta(seconds=(i))))\n flist.extend([i])\n else:\n print(\".\" ,)\n d = np.array(flist)\n d.sort()\n\n diff = [y - x for x, y in zip(*[iter(d)] * 2)]\n avg = sum(diff) / len(diff)\n\n m = [[d[0]]]\n\n for x in d[1:]:\n if x - m[-1][0] < avg:\n m[-1].append(x)\n else:\n m.append([x])\n\n\n# print(m)\n# print(type(m))\n with open('list.txt' , 'w') as f:\n print(\"Writing to file\\n\")\n for i in range(0 , (len(m))):\n with open('list.txt' , 'a') as f:\n print(\"\\n\", file=f)\n print(\"start time : \" + str(datetime.timedelta(seconds = int((m[i][0])) )) , file=f)\n print(\"end time : \" + str(datetime.timedelta(seconds = int((m[i][len(m[i]) - 1])) )) , file=f )\n print(\"\\n\\nFinished Analysis\\n\\n\")\n print(\"The timestanps of all interesting events is stored in a File named list.txt \")\nif __name__ == '__main__':\n run_classifier()\n\n"
] | [
[
"numpy.array"
]
] |
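The non-obvious part of `classify.py` above is its event grouping: consecutive-pair gaps via `zip(*[iter(d)] * 2)` set an average threshold, and indices closer than that to a cluster's start are merged. A minimal self-contained sketch with hypothetical frame indices:

```python
# Minimal sketch of the event-grouping idiom from classify.py above: cluster
# sorted frame indices into events using the mean pairwise gap as a threshold.
import datetime
import numpy as np

d = np.array([3, 4, 5, 40, 41, 90, 91, 92])        # hypothetical event frames
diff = [y - x for x, y in zip(*[iter(d)] * 2)]     # gaps between index pairs
avg = sum(diff) / len(diff)

m = [[d[0]]]
for x in d[1:]:
    if x - m[-1][0] < avg:
        m[-1].append(x)                            # extend the current event
    else:
        m.append([x])                              # start a new event

for group in m:
    print("start:", datetime.timedelta(seconds=int(group[0])),
          "end:", datetime.timedelta(seconds=int(group[-1])))
```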
oscarkey/multitask-learning | [
"c4503c044ca7a29bebd4e70e9e030524654e5d00"
] | [
"multitask-learning/cityscapestask/decoders.py"
] | [
"\"\"\"Decoder portion of the model.\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\ndef _build_base_decoder():\n \"\"\"Builds the base decoder shared by all three decoder types.\"\"\"\n return nn.Sequential(nn.Conv2d(in_channels=1280, out_channels=256, kernel_size=(3, 3), stride=1, padding=1),\n nn.BatchNorm2d(num_features=256), nn.ReLU())\n\n\nclass Decoders(nn.Module):\n \"\"\"Module which contains all three decoders.\"\"\"\n\n def __init__(self, num_classes: int, enabled_tasks: (bool, bool, bool), output_size=(128, 256)):\n super().__init__()\n self._output_size = output_size\n self._num_classes = num_classes\n self._enabled_tasks = enabled_tasks\n\n self._base_semseg = _build_base_decoder()\n self._base_insseg = _build_base_decoder()\n self._base_depth = _build_base_decoder()\n\n kernel_size = (1, 1)\n self._semsegcls = nn.Conv2d(256, self._num_classes, kernel_size)\n self._inssegcls = nn.Conv2d(256, 2, kernel_size)\n self._depthcls = nn.Conv2d(256, 1, kernel_size)\n\n def set_output_size(self, size):\n self._output_size = size\n\n def forward(self, x):\n \"\"\"Returns (sem seg, instance seg, depth).\"\"\"\n # x: [batch x 1280 x H/8 x W/8]\n\n sem_seg_enabled, inst_seg_enabled, depth_enabled = self._enabled_tasks\n\n if sem_seg_enabled:\n x1 = self._base_semseg(x)\n x1 = self._semsegcls(x1)\n x1 = F.interpolate(x1, size=self._output_size, mode='bilinear', align_corners=True)\n else:\n x1 = None\n\n if inst_seg_enabled:\n x2 = self._base_insseg(x)\n x2 = self._inssegcls(x2)\n x2 = F.interpolate(x2, size=self._output_size, mode='bilinear', align_corners=True)\n else:\n x2 = None\n\n if depth_enabled:\n x3 = self._base_depth(x)\n x3 = self._depthcls(x3)\n x3 = F.interpolate(x3, size=self._output_size, mode='bilinear', align_corners=True)\n else:\n x3 = None\n\n return x1, x2, x3\n\n\nif __name__ == '__main__':\n # ### Shape test\n output_size = (123, 432)\n model = Decoders(num_classes=20, output_size=output_size)\n test = torch.zeros(size=(2, 1280, 256, 256))\n result = model.forward(test)\n assert result[0].shape == (2, 20, *output_size), \"output shape is {}\".format(result[0].shape)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.functional.interpolate"
]
] |
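Each decoder head above follows the same shape contract: a shared 3x3 conv base, a 1x1 classifier conv, then bilinear upsampling back to the output resolution. A minimal sketch of one head, using the 20-class shape test from the module's `__main__` block as the assumed configuration:

```python
# Minimal sketch of one decoder head: base conv block, 1x1 classifier,
# then F.interpolate back to the target (H, W).
import torch
import torch.nn.functional as F
from torch import nn

head = nn.Sequential(
    nn.Conv2d(in_channels=1280, out_channels=256, kernel_size=(3, 3), stride=1, padding=1),
    nn.BatchNorm2d(num_features=256),
    nn.ReLU(),
    nn.Conv2d(256, 20, kernel_size=(1, 1)),  # 20 classes, per the shape test
)

x = torch.zeros(2, 1280, 32, 64)             # [batch, C, H/8, W/8]
out = F.interpolate(head(x), size=(128, 256), mode='bilinear', align_corners=True)
assert out.shape == (2, 20, 128, 256)
```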
PatrickPrakash/lipreading | [
"6380508ba3ffad64fc01ce10a5f43e4da0f652fd"
] | [
"src/decoder/decoder.py"
] | [
"import numpy as np\nimport editdistance\n\n\nclass Decoder():\n def __init__(self, vocab):\n self.vocab_list = [char for char in vocab]\n\n def predict(self, batch_size, logits, y, lengths, y_lengths, n_show=5):\n decoded = self.decode(logits, lengths)\n\n cursor = 0\n gt = []\n n = min(n_show, logits.size(1))\n samples = []\n for b in range(batch_size):\n y_str = ''.join([self.vocab_list[ch] for ch in y[cursor: cursor + y_lengths[b]]])\n gt.append(y_str)\n cursor += y_lengths[b]\n if b < n:\n samples.append([y_str, decoded[b]])\n\n return decoded, gt, samples\n\n def decode(self, logits, seq_lens):\n raise NotImplementedError\n\n def wer(self, s1, s2):\n s1_words, s2_words = s1.split(), s2.split()\n distance = editdistance.eval(s1_words, s2_words)\n return distance / max(len(s1_words), len(s2_words))\n\n def cer(self, s1, s2):\n s1, s2 = s1.replace(' ', ''), s2.replace(' ', '')\n distance = editdistance.eval(s1, s2)\n return distance / max(len(s1), len(s2))\n\n def cer_batch(self, decoded, gt):\n return self.compare_batch(decoded, gt, self.cer)\n\n def wer_batch(self, decoded, gt):\n return self.compare_batch(decoded, gt, self.wer)\n\n def compare_batch(self, decoded, gt, func):\n assert len(decoded) == len(gt), f'batch size mismatch: {len(decoded)}!={len(gt)}'\n\n results = []\n for i, batch in enumerate(decoded):\n for sentence in range(len(batch)):\n error = func(decoded[i][sentence], gt[i])\n results.append(error)\n\n return np.mean(results)\n"
] | [
[
"numpy.mean"
]
] |
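The `Decoder` above normalizes `editdistance.eval` by sequence length to get word and character error rates. A minimal sketch of just those two metrics:

```python
# Minimal sketch of the WER/CER metrics used by the Decoder above,
# built on editdistance.eval (pip install editdistance).
import editdistance

def wer(s1: str, s2: str) -> float:
    w1, w2 = s1.split(), s2.split()
    return editdistance.eval(w1, w2) / max(len(w1), len(w2))

def cer(s1: str, s2: str) -> float:
    s1, s2 = s1.replace(' ', ''), s2.replace(' ', '')
    return editdistance.eval(s1, s2) / max(len(s1), len(s2))

print(wer("place blue at f two now", "place blue at f nine now"))  # ~0.167
print(cer("hello", "helo"))                                        # 0.2
```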
luksfarris/pydeeprecsys | [
"0409ca220a235bb65ccf72d9077aaecf108722bb"
] | [
"pydeeprecsys/rl/agents/rainbow.py"
] | [
"from numpy.random import RandomState\nfrom typing import Any, Optional, List\nfrom numpy import arange\nfrom copy import deepcopy\nfrom pydeeprecsys.rl.neural_networks.dueling import DuelingDDQN\nfrom pydeeprecsys.rl.experience_replay.priority_replay_buffer import (\n PrioritizedExperienceReplayBuffer,\n)\nfrom pydeeprecsys.rl.experience_replay.buffer_parameters import (\n PERBufferParameters,\n ExperienceReplayBufferParameters,\n)\nfrom pydeeprecsys.rl.agents.agent import ReinforcementLearning\nfrom pydeeprecsys.rl.learning_statistics import LearningStatistics\n\n\nclass RainbowDQNAgent(ReinforcementLearning):\n\n \"\"\"Instead of sampling randomly from the buffer we prioritize experiences with PER\n Instead of epsilon-greedy we use gaussian noisy layers for exploration\n Instead of the Q value we calculate Value and Advantage (Dueling DQN).\n This implementation does not include the Categorical DQN part (yet).\"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n network_update_frequency: int = 5,\n network_sync_frequency: int = 200,\n priority_importance: float = 0.6,\n priority_weigth_growth: float = 0.001,\n buffer_size: int = 10000,\n buffer_burn_in: int = 1000,\n batch_size: int = 32,\n noise_sigma: float = 0.017,\n discount_factor: float = 0.99,\n learning_rate: float = 0.0001,\n hidden_layers: List[int] = None,\n random_state: RandomState = RandomState(),\n statistics: Optional[LearningStatistics] = None,\n ):\n\n self.network = DuelingDDQN(\n n_input=input_size,\n n_output=output_size,\n learning_rate=learning_rate,\n noise_sigma=noise_sigma,\n discount_factor=discount_factor,\n statistics=statistics,\n hidden_layers=hidden_layers,\n )\n self.target_network = deepcopy(self.network)\n\n self.buffer = PrioritizedExperienceReplayBuffer(\n ExperienceReplayBufferParameters(\n max_experiences=buffer_size,\n minimum_experiences_to_start_predicting=buffer_burn_in,\n batch_size=batch_size,\n random_state=random_state,\n ),\n PERBufferParameters(\n alpha=priority_importance,\n beta_growth=priority_weigth_growth,\n ),\n )\n self.step_count = 0\n self.network_update_frequency = network_update_frequency\n self.network_sync_frequency = network_sync_frequency\n self.actions = arange(output_size)\n self.random_state = random_state\n\n def _check_update_network(self):\n # we only start training the network once the buffer is ready\n # (the burn in is filled)\n if self.buffer.ready_to_predict():\n self.step_count += 1\n if self.step_count % self.network_update_frequency == 0:\n # we train at every K steps\n self.network.learn_with(self.buffer, self.target_network)\n if self.step_count % self.network_sync_frequency == 0:\n # at every N steps replaces the target network with the main network\n self.target_network.load_state_dict(self.network.state_dict())\n\n def top_k_actions_for_state(self, state: Any, k: int = 1) -> Any:\n state_flat = state.flatten()\n if self.buffer.ready_to_predict():\n actions = self.target_network.top_k_actions_for_state(state_flat, k=k)\n else:\n actions = self.random_state.choice(self.actions, size=k)\n self._check_update_network()\n return actions\n\n def action_for_state(self, state: Any) -> Any:\n return self.top_k_actions_for_state(state, k=1)[0]\n\n def store_experience(\n self, state: Any, action: Any, reward: float, done: bool, new_state: Any\n ):\n state_flat = state.flatten()\n new_state_flat = new_state.flatten()\n self.buffer.store_experience(state_flat, action, reward, done, new_state_flat)\n"
] | [
[
"numpy.random.RandomState",
"numpy.arange"
]
] |
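One detail worth isolating from `RainbowDQNAgent` above is its warm-up behaviour: until the replay buffer's burn-in is filled, actions are drawn uniformly from `arange(output_size)` with a seedable `RandomState`, which keeps runs reproducible. A minimal sketch with a hypothetical action-space size:

```python
# Minimal sketch of the burn-in action selection in the agent above:
# uniform random draws from the action set via a seeded RandomState.
from numpy import arange
from numpy.random import RandomState

n_actions = 4                       # hypothetical action-space size
actions = arange(n_actions)
rng = RandomState(42)

top_k = rng.choice(actions, size=2)  # k random actions while buffer fills
print(top_k, top_k[0])               # action_for_state takes the first one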
MuAuan/cheating_DL | [
"e8c543d83c304ca072b479cf34fe0a07b58ec6e3"
] | [
"grad-cam_5category.py"
] | [
"#grad_cam\n#[keras-grad-cam/grad-cam.py](https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py)\n\nfrom keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions)\nfrom keras.models import Model\nfrom keras.preprocessing import image\nfrom keras.layers.core import Lambda\nfrom keras.models import Sequential\nfrom tensorflow.python.framework import ops\nimport keras.backend as K\nimport tensorflow as tf\nimport numpy as np\nimport keras\nimport sys\nimport cv2\n#from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\n#from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions\n#from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n\ndef target_category_loss(x, category_index, nb_classes):\n return tf.multiply(x, K.one_hot([category_index], nb_classes))\n\ndef target_category_loss_output_shape(input_shape):\n return input_shape\n\ndef normalize(x):\n # utility function to normalize a tensor by its L2 norm\n return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)\n\ndef load_image(path):\n img_path = sys.argv[1]\n img = image.load_img(img_path, target_size=(224,224)) #299,299)) #224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return x\n\ndef register_gradient():\n if \"GuidedBackProp\" not in ops._gradient_registry._registry:\n @ops.RegisterGradient(\"GuidedBackProp\")\n def _GuidedBackProp(op, grad):\n dtype = op.inputs[0].dtype\n return grad * tf.cast(grad > 0., dtype) * \\\n tf.cast(op.inputs[0] > 0., dtype)\n\ndef compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98\n input_img = model.input\n layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])\n #print(layer_dict)\n layer_output = layer_dict[activation_layer].output\n max_output = K.max(layer_output, axis=3)\n saliency = K.gradients(K.sum(max_output), input_img)[0]\n return K.function([input_img, K.learning_phase()], [saliency])\n\ndef modify_backprop(model, name):\n g = tf.get_default_graph()\n with g.gradient_override_map({'Relu': name}):\n\n # get layers that have an activation\n layer_dict = [layer for layer in model.layers[1:]\n if hasattr(layer, 'activation')]\n\n # replace relu activation\n for layer in layer_dict:\n if layer.activation == keras.activations.relu:\n layer.activation = tf.nn.relu\n\n # re-instanciate a new model\n new_model = VGG16(weights='imagenet')\n #new_model = ResNet50(weights='imagenet')\n new_model.summary()\n return new_model\n\ndef deprocess_image(x):\n '''\n Same normalization as in:\n https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py\n '''\n if np.ndim(x) > 3:\n x = np.squeeze(x)\n # normalize tensor: center on 0., ensure std is 0.1\n x -= x.mean()\n x /= (x.std() + 1e-5)\n x *= 0.1\n\n # clip to [0, 1]\n x += 0.5\n x = np.clip(x, 0, 1)\n\n # convert to RGB array\n x *= 255\n if K.image_dim_ordering() == 'th':\n x = x.transpose((1, 2, 0))\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\ndef _compute_gradients(tensor, var_list):\n grads = tf.gradients(tensor, var_list)\n return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]\n\ndef grad_cam(input_model, image, category_index, layer_name):\n nb_classes = 1000\n target_layer = lambda x: target_category_loss(x, category_index, nb_classes)\n x = Lambda(target_layer, output_shape = 
target_category_loss_output_shape)(input_model.output)\n model = Model(inputs=input_model.input, outputs=x)\n #model.summary()\n loss = K.sum(model.output)\n conv_output = [l for l in model.layers if l.name == layer_name][0].output #is\n grads = normalize(_compute_gradients(loss, [conv_output])[0])\n gradient_function = K.function([model.input], [conv_output, grads])\n\n output, grads_val = gradient_function([image])\n output, grads_val = output[0, :], grads_val[0, :, :, :]\n\n weights = np.mean(grads_val, axis = (0, 1))\n cam = np.ones(output.shape[0 : 2], dtype = np.float32)\n\n for i, w in enumerate(weights):\n cam += w * output[:, :, i]\n\n cam = cv2.resize(cam, (224,224)) #299,299)) #224, 224)) \n cam = np.maximum(cam, 0)\n heatmap = cam / np.max(cam)\n\n #Return to BGR [0..255] from the preprocessed image\n image = image[0, :]\n image -= np.min(image)\n image = np.minimum(image, 255)\n\n cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)\n cam = np.float32(cam) + np.float32(image)\n cam = 255 * cam / np.max(cam)\n return np.uint8(cam), heatmap\n\npreprocessed_input = load_image(sys.argv[1])\nmodel = VGG16(weights='imagenet')\n#model = VGG19(weights='imagenet')\n#model = InceptionV3(weights='imagenet')\n#model = ResNet50(weights = 'imagenet')\n#model.summary()\ntarget_layer = 'block5_conv3' #'activation_49' add_16 \"block5_conv3\"\n\npredictions = model.predict(preprocessed_input)\nregister_gradient()\nguided_model = modify_backprop(model, 'GuidedBackProp')\nguided_model.summary()\nfor i in range(5):\n top_1 = decode_predictions(predictions)[0][i]\n print(predictions.argsort()[0][::-1][i])\n print('Predicted class:')\n print('%s (%s) with probability %.2f' % (top_1[1], top_1[0], top_1[2]))\n predicted_class = predictions.argsort()[0][::-1][i] #np.argmax(predictions)\n cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, target_layer)\n cv2.imwrite(\"gradcam\"+str(top_1[1])+\".jpg\", cam)\n saliency_fn = compile_saliency_function(guided_model)\n saliency = saliency_fn([preprocessed_input, 0])\n gradcam = saliency[0] * heatmap[..., np.newaxis]\n cv2.imwrite(\"guided_gradcam\"+str(top_1[1])+\".jpg\", deprocess_image(gradcam))\n"
] | [
[
"numpy.ones",
"tensorflow.python.framework.ops.RegisterGradient",
"numpy.squeeze",
"numpy.float32",
"tensorflow.zeros_like",
"numpy.uint8",
"tensorflow.cast",
"tensorflow.gradients",
"numpy.expand_dims",
"numpy.clip",
"tensorflow.get_default_graph",
"numpy.min",
"numpy.maximum",
"numpy.ndim",
"numpy.max",
"numpy.mean",
"numpy.minimum"
]
] |
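The tail of `grad_cam()` above reduces to a small amount of OpenCV arithmetic: clamp and normalize the class activation map, colorize it with the JET map, and blend it with the input image. A minimal sketch with random stand-in data (the real script uses a preprocessed VGG16 input and a gradient-weighted CAM):

```python
# Minimal sketch of the CAM overlay math at the end of grad_cam() above.
import cv2
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)  # stand-in image (BGR)
cam = np.random.rand(224, 224).astype(np.float32)                    # stand-in raw CAM

cam = np.maximum(cam, 0)                                   # ReLU: keep positive evidence
heatmap = cam / np.max(cam)                                # scale to [0, 1]
colored = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
overlay = np.float32(colored) + image                      # blend heatmap onto image
overlay = np.uint8(255 * overlay / np.max(overlay))        # rescale to [0, 255]
cv2.imwrite("gradcam_demo.jpg", overlay)
```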
tjd2002/spikeforest2 | [
"2e393564b858b2995aa2ccccd9bd73065681b5de",
"2e393564b858b2995aa2ccccd9bd73065681b5de"
] | [
"gui/sfbrowser/sfbrowser.py",
"spikeforest_analysis/compare_sortings_with_truth.py"
] | [
"import vdomr as vd\nimport spikeforest as sf\nfrom cairio import client as ca\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass AccuracyPlot(vd.components.Pyplot):\n def __init__(self, snrs, accuracies):\n vd.components.Pyplot.__init__(self)\n self._snrs = snrs\n self._accuracies = accuracies\n\n def plot(self):\n plt.scatter(self._snrs, self._accuracies)\n\n\nclass StudySorterFigure(vd.Component):\n def __init__(self, sfdata):\n vd.Component.__init__(self)\n self._plot = None\n self._SF_data = sfdata\n self._study = None\n self._sorter = None\n\n def setStudySorter(self, *, study, sorter):\n self._study = study\n self._sorter = sorter\n self._update_plot()\n\n def _update_plot(self):\n SF = self._SF_data\n study = SF.study(self._study)\n b = _get_study_sorting_results(study)\n a = b[self._sorter]\n snrs = a['true_unit_snrs']\n accuracies = a['num_matches'] / \\\n (a['num_matches']+a['num_false_positives']+a['num_false_negatives'])\n\n self._plot = AccuracyPlot(snrs, accuracies)\n self.refresh()\n\n def render(self):\n if self._plot is None:\n return vd.div('Nothing')\n return vd.div(\n vd.div('test '+self._study+' '+self._sorter),\n self._plot\n )\n\n\nclass SFBrowser(vd.Component):\n def __init__(self, output_id):\n vd.Component.__init__(self)\n\n self._output_id = output_id\n\n a = ca.loadObject(\n key=dict(name='spikeforest_results'),\n subkey=output_id\n )\n if not a:\n print('ERROR: unable to open results: '+output_id)\n return\n\n if ('recordings' not in a) or ('studies' not in a) or ('sorting_results' not in a):\n print('ERROR: problem with output: '+output_id)\n return\n\n studies = a['studies']\n recordings = a['recordings']\n sorting_results = a['sorting_results']\n\n SF = sf.SFData()\n SF.loadStudies(studies)\n SF.loadRecordings2(recordings)\n SF.loadSortingResults(sorting_results)\n\n # sorter_names=[]\n # for SR in sorting_results:\n # sorter_names.append(SR['sorter']['name'])\n # sorter_names=list(set(sorter_names))\n # sorter_names.sort()\n\n self._SF_data = SF\n\n self._accuracy_threshold_input = vd.components.LineEdit(\n value=0.8, dtype=float, style=dict(width='70px'))\n self._update_button = vd.components.Button(\n onclick=self._on_update, class_='button', label='Update')\n self._study_sorter_fig = StudySorterFigure(SF)\n self._study_sorter_table = vd.div() # dummy\n\n vd.devel.loadBootstrap()\n\n self._update_accuracy_table()\n\n def _on_update(self):\n self._update_accuracy_table()\n\n def _update_accuracy_table(self):\n accuracy_threshold = self._accuracy_threshold_input.value()\n self._accuracy_table_data, self._sorters = self._get_accuracy_table_data(\n accuracy_threshold=accuracy_threshold)\n self._accuracy_table = self._to_table(\n self._accuracy_table_data, ['study']+self._sorters)\n print(self._accuracy_table_data)\n self.refresh()\n\n def _open_study_sorter_fig(self, *, sorter, study):\n self._study_sorter_fig.setStudySorter(study=study, sorter=sorter)\n\n def _get_accuracy_table_data(self, *, accuracy_threshold):\n SF = self._SF_data\n accuracy_table = []\n sorters = set()\n for sname in SF.studyNames():\n print('STUDY: '+sname)\n study = SF.study(sname)\n b = _get_study_sorting_results(study)\n tmp = dict(\n study=dict( # first column\n text=sname\n )\n )\n for sorter in b:\n sorters.add(sorter)\n a = b[sorter]\n accuracies = a['num_matches'] / \\\n (a['num_matches']+a['num_false_positives'] +\n a['num_false_negatives'])\n tmp[sorter] = dict(\n text=str(np.count_nonzero(\n accuracies >= accuracy_threshold)),\n 
callback=lambda sorter=sorter, study=sname: self._open_study_sorter_fig(\n sorter=sorter, study=study)\n )\n accuracy_table.append(tmp)\n\n sorters = list(sorters)\n sorters.sort()\n return accuracy_table, sorters\n\n def _to_table(self, X, column_names):\n rows = []\n rows.append(vd.tr([vd.th(cname) for cname in column_names]))\n for x in X:\n elmts = []\n for cname in column_names:\n tmp = x.get(cname)\n if tmp:\n if 'callback' in tmp:\n elmt = vd.a(tmp['text'], onclick=tmp['callback'])\n else:\n elmt = vd.span(tmp['text'])\n else:\n elmt = vd.span('N/A')\n elmts.append(elmt)\n rows.append(vd.tr([vd.td(elmt) for elmt in elmts]))\n return vd.table(rows, class_='table')\n\n def render(self):\n return vd.div(\n vd.table(\n vd.tr(\n vd.td('Accuracy threshold:'),\n vd.td(self._accuracy_threshold_input),\n vd.td(self._update_button)\n ),\n class_='table',\n style={'max-width': '200px'}\n ),\n vd.components.ScrollArea(\n self._accuracy_table,\n height=500\n ),\n self._study_sorter_fig,\n style=dict(padding='15px')\n )\n\n\ndef _get_study_sorting_results(study):\n results = []\n for rname in study.recordingNames():\n rec = study.recording(rname)\n true_units_info = rec.trueUnitsInfo(format='json')\n true_units_info_by_id = dict()\n for true_unit in true_units_info:\n true_units_info_by_id[true_unit['unit_id']] = true_unit\n for srname in rec.sortingResultNames():\n a = rec.sortingResult(srname)\n res0 = dict(sorter=srname, recording=rname, study=study.name())\n tmp = a.comparisonWithTruth(format='json')\n for i in tmp:\n tmp[i]['true_unit_info'] = true_units_info_by_id[tmp[i]['unit_id']]\n res0['comparison_with_truth'] = tmp\n results.append(res0)\n\n sorters = list(set([a['sorter'] for a in results]))\n sorters.sort()\n\n units_by_sorter = dict()\n for sorter in sorters:\n units_by_sorter[sorter] = []\n\n for obj in results:\n sorter0 = obj['sorter']\n units = [obj['comparison_with_truth'][i]\n for i in obj['comparison_with_truth']]\n units_by_sorter[sorter0] = units_by_sorter[sorter0]+units\n\n ret = dict()\n for sorter in sorters:\n units = units_by_sorter[sorter]\n try:\n ret[sorter] = dict(\n true_unit_ids=[unit['unit_id'] for unit in units],\n true_unit_snrs=np.array(\n [unit['true_unit_info']['snr'] for unit in units]),\n true_unit_firing_rates=np.array(\n [unit['true_unit_info']['firing_rate'] for unit in units]),\n num_matches=np.array([unit['num_matches'] for unit in units]),\n num_false_positives=np.array(\n [unit['num_false_positives'] for unit in units]),\n num_false_negatives=np.array(\n [unit['num_false_negatives'] for unit in units])\n )\n except:\n print('WARNING: Problem loading results for sorter: '+sorter)\n ret[sorter] = dict(\n true_unit_ids=[],\n true_unit_snrs=np.array([]),\n true_unit_firing_rates=np.array([]),\n num_matches=np.array([]),\n num_false_positives=np.array([]),\n num_false_negatives=np.array([])\n )\n\n return ret\n",
"import spikeextractors as si\n#import spikewidgets as sw\nimport spiketoolkit as st\nimport mlprocessors as mlpr\nimport json\nfrom cairio import client as ca\nimport numpy as np\nfrom copy import deepcopy\n\ndef compare_sortings_with_truth(sortings,compute_resource,num_workers=None):\n print('>>>>>> compare sortings with truth')\n container='sha1://3b26155930cc4a4745c67b702ce297c9c968ac94/02-12-2019/mountaintools_basic.simg'\n jobs_gen_table=[]\n for sorting in sortings:\n units_true=sorting.get('units_true',[])\n firings=sorting['firings']\n firings_true=sorting['firings_true']\n units_true=units_true\n job=GenSortingComparisonTable.createJob(\n firings=firings,\n firings_true=firings_true,\n units_true=units_true,\n json_out={'ext':'.json','upload':True},\n html_out={'ext':'.html','upload':True},\n _container=container\n )\n jobs_gen_table.append(job)\n \n all_jobs=jobs_gen_table\n label='Compare sortings with truth'\n mlpr.executeBatch(jobs=all_jobs,label=label,num_workers=num_workers,compute_resource=compute_resource)\n \n sortings_out=[]\n for i,sorting in enumerate(sortings):\n comparison_with_truth=dict()\n comparison_with_truth['json']=jobs_gen_table[i]['result']['outputs']['json_out']\n comparison_with_truth['html']=jobs_gen_table[i]['result']['outputs']['html_out']\n sorting2=deepcopy(sorting)\n sorting2['comparison_with_truth']=comparison_with_truth\n sortings_out.append(sorting2)\n\n return sortings_out\n\nclass GenSortingComparisonTable(mlpr.Processor):\n VERSION='0.2.0'\n firings=mlpr.Input('Firings file (sorting)')\n firings_true=mlpr.Input('True firings file')\n units_true=mlpr.IntegerListParameter('List of true units to consider')\n json_out=mlpr.Output('Table as .json file produced from pandas dataframe')\n html_out=mlpr.Output('Table as .html file produced from pandas dataframe')\n \n def run(self):\n sorting=si.MdaSortingExtractor(firings_file=self.firings)\n sorting_true=si.MdaSortingExtractor(firings_file=self.firings_true)\n if (self.units_true is not None) and (len(self.units_true)>0):\n sorting_true=si.SubSortingExtractor(parent_sorting=sorting_true,unit_ids=self.units_true)\n SC=st.comparison.SortingComparison(sorting_true,sorting)\n df=get_comparison_data_frame(comparison=SC)\n #sw.SortingComparisonTable(comparison=SC).getDataframe()\n json=df.transpose().to_dict()\n html=df.to_html(index=False)\n _write_json_file(json,self.json_out)\n _write_json_file(html,self.html_out)\n\ndef get_comparison_data_frame(*,comparison):\n import pandas as pd\n SC=comparison\n\n unit_properties=[] #snr, etc? 
these would need to be properties in the sortings of the comparison\n\n # Compute events counts\n sorting1=SC.getSorting1()\n sorting2=SC.getSorting2()\n unit1_ids = sorting1.getUnitIds()\n unit2_ids = sorting2.getUnitIds()\n N1 = len(unit1_ids)\n N2 = len(unit2_ids)\n event_counts1 = dict()\n for i1, u1 in enumerate(unit1_ids):\n times1 = sorting1.getUnitSpikeTrain(u1)\n event_counts1[u1] = len(times1)\n event_counts2 = dict()\n for i2, u2 in enumerate(unit2_ids):\n times2 = sorting2.getUnitSpikeTrain(u2)\n event_counts2[u2] = len(times2)\n\n rows = []\n for u_1, unit1 in enumerate(unit1_ids):\n unit2 = SC.getBestUnitMatch1(unit1)\n if unit2>=0:\n num_matches=SC.getMatchingEventCount(unit1, unit2)\n num_false_negatives=event_counts1[unit1]-num_matches\n num_false_positives=event_counts2[unit2]-num_matches\n else:\n num_matches=0\n num_false_negatives=event_counts1[unit1]\n num_false_positives=0\n row0 = {\n 'unit_id': unit1,\n 'accuracy': _safe_frac(num_matches,num_false_positives+num_false_negatives+num_matches),\n 'best_unit': unit2,\n 'matched_unit': SC.getMappedSorting1().getMappedUnitIds(unit1),\n 'num_matches': num_matches,\n 'num_false_negatives': num_false_negatives,\n 'num_false_positives': num_false_positives,\n 'f_n': _safe_frac(num_false_negatives,num_false_negatives+num_matches),\n 'f_p': _safe_frac(num_false_positives,num_false_positives+num_matches)\n }\n for prop in unit_properties:\n pname = prop['name']\n row0[pname] = SC.getSorting1().getUnitProperty(unit_id=int(unit1), property_name=pname)\n rows.append(row0)\n\n df = pd.DataFrame(rows)\n fields = ['unit_id']\n fields = fields + ['accuracy', 'best_unit', 'matched_unit', 'num_matches', 'num_false_negatives', 'num_false_positives', 'f_n', 'f_p']\n for prop in unit_properties:\n pname = prop['name']\n fields.append(pname)\n df = df[fields]\n df['accuracy'] = df['accuracy'].map('{:,.4f}'.format)\n # df['Best match'] = df['Accuracy'].map('{:,.2f}'.format)\n df['f_n'] = df['f_n'].map('{:,.4f}'.format)\n df['f_p'] = df['f_p'].map('{:,.4f}'.format)\n return df\n\ndef _safe_frac(numer, denom):\n if denom == 0:\n return 0\n return float(numer) / denom\n\ndef _write_json_file(obj,path):\n with open(path,'w') as f:\n return json.dump(obj,f)\n\n"
] | [
[
"numpy.array",
"matplotlib.pyplot.scatter",
"numpy.count_nonzero"
],
[
"pandas.DataFrame"
]
] |
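Both files in the row above share one piece of bookkeeping: per-unit accuracy is matches divided by matches plus false positives plus false negatives, and the browser counts units clearing a threshold with `np.count_nonzero`. A minimal sketch with made-up counts:

```python
# Minimal sketch of the accuracy formula and threshold count used above.
import numpy as np
import pandas as pd

num_matches = np.array([90, 40, 70])
num_false_positives = np.array([5, 30, 10])
num_false_negatives = np.array([5, 30, 20])

accuracies = num_matches / (num_matches + num_false_positives + num_false_negatives)
df = pd.DataFrame({'unit_id': [1, 2, 3], 'accuracy': accuracies})
print(df)
print('units above 0.8:', np.count_nonzero(accuracies >= 0.8))  # -> 1
```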
mspp-data-studio-2021/aptitude-analysis | [
"90a7fc8655650f8166d530d325b963b93a42f311"
] | [
"code/plots/plots_exploratory.py"
] | [
"\"\"\"This script creates some informative graphs on subgroups of income quartile, gender, and race.\"\"\"\n# %%\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pathlib import Path\n\n\n# %%\n# Set up folder path\ncode_folder = Path(os.path.abspath(''))\nprint(code_folder)\nproject_dir = os.path.dirname(code_folder)\nos.chdir(project_dir)\nprint(project_dir)\n\n\n# %%\nfrom setup_fin_dataset import get_dataset\n\n\n# %%\nos.chdir(code_folder)\nprint(code_folder)\n\n\n\n# %%\n'''Plot scores by income quartile\n'''\ndf = get_dataset()\n\n#%%\ndf.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)\n\n\n# %%\nax = plt.figure().add_subplot(111)\nfor group in ['first quartile', 'second quartile', 'third quartile', 'fourth quartile']:\n cond = df['FAMILY_INCOME_QUARTILE'] == group\n dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()\n sns.distplot(dat, label=group.capitalize())\n\ncsfont = {'fontname':'Times New Roman'}\nax.yaxis.get_major_ticks()[0].set_visible(False)\nax.set_xlabel('AFQT Scores', **csfont)\nax.set_xlim([0, 120])\nax.legend()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nplt.savefig('fig-inc-quartile-afqt.png')\n\n\nfor score in ['ROTTER', 'ROSENBERG']:\n\n ax = plt.figure().add_subplot(111)\n for group in ['first quartile', 'second quartile', 'third quartile', 'fourth quartile']:\n label = score + '_SCORE'\n cond = df['FAMILY_INCOME_QUARTILE'] == group\n dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()\n sns.distplot(dat, label=group)\n\n ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)\n if score == 'ROTTER':\n plt.gca().invert_xaxis()\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.legend()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.savefig('fig-inc-quartile-' + score.lower() + '.png')\n\n\n\n# %%\n'''Plot scores by gender\n'''\ndf = get_dataset()\n\n#%%\ndf.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)\n\n\nax = plt.figure().add_subplot(111)\nfor group in [1, 2]:\n cond = df['GENDER'] == group\n dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()\n sns.distplot(dat, label=group)\n\ncsfont = {'fontname':'Times New Roman'}\nax.yaxis.get_major_ticks()[0].set_visible(False)\nax.set_xlabel('AFQT Scores', **csfont)\nax.set_xlim([0, 120])\nax.legend()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nplt.savefig('fig-aptitude-gender.png')\n\nfor score in ['ROTTER', 'ROSENBERG']:\n\n ax = plt.figure().add_subplot(111)\n for group in [1, 2]:\n label = score + '_SCORE'\n cond = df['GENDER'] == group\n dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()\n sns.distplot(dat, label=group)\n\n ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)\n if score == 'ROTTER':\n plt.gca().invert_xaxis()\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.legend()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.savefig('fig-attitude-gender-' + score.lower() + '.png')\n\n\n\n\n# %%\n'''Plot scores by race\n'''\ndf = get_dataset()\n\n#%%\ndf.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)\n\n\nax = plt.figure().add_subplot(111)\nfor group in [1, 2, 3]:\n cond = df['RACE'] == group\n dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()\n sns.distplot(dat, label=group)\n\ncsfont = 
{'fontname':'Times New Roman'}\n\nax.yaxis.get_major_ticks()[0].set_visible(False)\nax.set_xlabel('AFQT Scores', **csfont)\nax.set_xlim([0, 120])\nax.legend()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nplt.savefig('fig-aptitude-race.png')\n\nfor score in ['ROTTER', 'ROSENBERG']:\n\n ax = plt.figure().add_subplot(111)\n for group in [1, 2, 3]:\n label = score + '_SCORE'\n cond = df['RACE'] == group\n dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()\n sns.distplot(dat, label=group)\n\n ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)\n if score == 'ROTTER':\n plt.gca().invert_xaxis()\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.legend()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.savefig('fig-attitude-race-' + score.lower() + '.png')\n\n\n\n\n# %%\n'''Plot by parental educational attainment, mother\n'''\ndf = get_dataset()\n\n#%%\ndf.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)\n\n# %%\ndf['MOTHER_EDU'].nunique()\n\n# %%\ndf['FATHER_EDU'].nunique()\n\n\n# %%\ndf_mother = df.groupby('MOTHER_EDU')['IDENTIFIER'].nunique().sort_values(ascending=False)\ndf_mother\n\n# %%\ndf_father = df.groupby('FATHER_EDU')['IDENTIFIER'].nunique().sort_values(ascending=False)\ndf_father\n\n\n\n# %%\n\nax = plt.figure().add_subplot(111)\nfor group in ['Less than HS', 'HS or more']:\n cond = df['MOTHER_EDU'] == group\n dat = df['AFQT_1'].loc[cond].dropna()\n sns.distplot(dat, label=group)\n\ncsfont = {'fontname':'Times New Roman'}\nax.yaxis.get_major_ticks()[0].set_visible(False)\nax.set_xlabel('AFQT Scores', **csfont)\nax.set_xlim([0, 120])\nax.legend()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nplt.savefig('fig-aptitude-mother-edu.png')\n\nfor score in ['ROTTER', 'ROSENBERG']:\n\n ax = plt.figure().add_subplot(111)\n for group in ['Less than HS', 'HS or more']:\n label = score + '_SCORE'\n cond = df['MOTHER_EDU'] == group\n dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()\n sns.distplot(dat, label=group)\n\n ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)\n if score == 'ROTTER':\n plt.gca().invert_xaxis()\n ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.legend()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.savefig('fig-attitude-mother-edu-' + score.lower() + '.png')\n\n\n# %%\n'''Plot by parental educational attainment, father\n'''\n# %%\ndf = get_dataset()\n\n#%%\ndf.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)\n\n\nax = plt.figure().add_subplot(111)\nfor group in ['Less than HS', 'HS or more']:\n cond = df['FATHER_EDU'] == group\n dat = df['AFQT_1'].loc[cond].dropna()\n sns.distplot(dat, label=group)\n\ncsfont = {'fontname':'Times New Roman'}\nax.yaxis.get_major_ticks()[0].set_visible(False)\nax.set_xlabel('AFQT Scores', **csfont)\nax.set_xlim([0, 120])\nax.legend()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nplt.savefig('fig-aptitude-father-edu.png')\n\nfor score in ['ROTTER', 'ROSENBERG']:\n\n ax = plt.figure().add_subplot(111)\n for group in ['Less than HS', 'HS or more']:\n label = score + '_SCORE'\n cond = df['FATHER_EDU'] == group\n dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()\n sns.distplot(dat, label=group)\n\n ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)\n if score == 'ROTTER':\n plt.gca().invert_xaxis()\n 
ax.yaxis.get_major_ticks()[0].set_visible(False)\n ax.legend()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.savefig('fig-attitude-father-edu-' + score.lower() + '.png')\n# %%\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure"
]
] |
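The script above repeats one plotting pattern per subgroup: one distribution curve per group on a shared axis, despined, then saved. A minimal sketch with synthetic data follows; note that `sns.distplot`, used throughout the row, is deprecated in seaborn 0.11+, so the sketch uses `sns.kdeplot` as the modern substitute.

```python
# Minimal sketch of the per-group distribution plot used throughout the
# script above, with synthetic stand-in scores.
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

rng = np.random.default_rng(0)
groups = {'first quartile': rng.normal(40, 10, 500),    # hypothetical scores
          'fourth quartile': rng.normal(70, 10, 500)}

ax = plt.figure().add_subplot(111)
for label, scores in groups.items():
    sns.kdeplot(scores, ax=ax, label=label.capitalize())

ax.set_xlabel('AFQT Scores')
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-demo-afqt.png')
```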
shromonag/active_testing | [
"ca9c8f909f6b0f4e7b1affda6f9333e0d0b6c04b"
] | [
"adversarial_testing/test_module.py"
] | [
"'''\nThis file defines the testing module. This needs the following:\n1. The system under test\n2. The specification or the function which we are trying to minimize\n3. Domains of the uncertainities\n'''\n\nfrom .optimizers import *\nfrom .func_tree import *\nfrom .utils import *\nfrom sklearn.decomposition import KernelPCA\n\nimport copy\nimport GPy\n\nclass test_module:\n def __init__(self, sut, bounds, spec=None,f_tree=None, optimizer=None,\n normalizer=False,seed=None, **kwargs):\n self.system_under_test = sut\n\n # Choosing the optimizer function\n if spec is None:\n self.f_acqu = f_tree\n else:\n self.spec = spec\n # To implement parser to convert from specification to the function f\n\n self.bounds = bounds\n self.normalizer=normalizer\n self.seed=seed\n\n if 'cost_model' in kwargs:\n self.cost_model = kwargs['cost_model']\n else:\n self.cost_model = lambda x: 1\n\n # Choosing the optimizers\n if 'opt_name' in kwargs:\n self.optimizer = select_opt(kwargs[opt_name])(bounds, **kwargs)\n elif optimizer is None:\n self.optimizer = sample_opt(bounds=bounds, cost=self.cost_model)\n else:\n self.optimizer = optimizer\n\n # Number of samples for initializing GPs\n if 'init_sample' in kwargs:\n self.init_sample = kwargs['init_sample']\n else:\n self.init_sample = 2*len(bounds)\n\n # Model GPs for the smooth functions\n if 'with_smooth' in kwargs:\n self.with_smooth = kwargs['with_smooth']\n else:\n self.with_smooth = True\n\n # Model GPs for the top level requirement, potentially modeling\n # non-smooth function\n if 'with_ns' in kwargs:\n self.with_ns = kwargs['with_ns']\n else:\n self.with_ns = False\n\n # Random sampling\n if 'with_random' in kwargs:\n self.with_random = kwargs['with_random']\n else:\n self.with_random = False\n\n # Exploration weight for GP-LCB\n if 'exp_weight' in kwargs:\n self.k = kwargs['exp_weight']\n else:\n self.k = 10\n\n # Optimize retsrats for hyper parameter optimization for GPs\n if 'optimize_restarts' in kwargs:\n self.optimize_restarts = kwargs['optimize_restarts']\n else:\n self.optimize_restarts = 1\n\n\n # Search in lower dimension\n if 'low_dim' in kwargs:\n self.using_kpca=True\n self.low_dim = kwargs['low_dim']\n if 'kernel_type' in kwargs:\n self.kernel = kwargs['kernel_type'](self.low_dim)\n elif 'kernel' in kwargs:\n self.kernel = kwargs['kernel']\n self.using_kpca = True\n self.low_dim = self.kernel.input_dim\n else:\n self.using_kpca=False\n if 'kernel_type' in kwargs:\n self.kernel = kwargs['kernel_type'](len(bounds))\n else:\n self.kernel = GPy.kern.Matern32(len(bounds), ARD=True)\n\n if self.using_kpca:\n if isinstance(self.optimizer, lbfgs_opt) or \\\n isinstance(self.optimizer, direct_opt):\n print('Can use only sample_opt or delta_opt!')\n print('Changing optimizer to sample_opt!')\n self.optimizer = sample_opt(bounds, **kwargs)\n\n # Sending in pre sampled data\n if 'X' in kwargs:\n self.X = kwargs['X']\n else:\n self.X = []\n\n\n def initialize(self):\n if len(self.X) == 0:\n X = sample_from(self.init_sample, self.bounds)\n self.X = X\n\n trajs = []\n for x in self.X:\n trajs.append(self.system_under_test(x))\n Y = self.f_acqu.eval_robustness(trajs)\n if self.with_smooth:\n self.smooth_X = copy.deepcopy(self.X)\n if self.using_kpca:\n self.kpca_s = KernelPCA(kernel='rbf', fit_inverse_transform=True,\n copy_X=True, n_components=self.low_dim)\n X_s = self.kpca_s.fit_transform(self.smooth_X)\n else:\n X_s = self.smooth_X\n self.f_acqu.init_GPs(X_s, trajs,\n kernel=copy.deepcopy(self.kernel),\n optimize_restarts=self.optimize_restarts,\n 
normalizer=self.normalizer)\n\n if self.with_ns:\n self.ns_X = copy.deepcopy(self.X)\n if self.using_kpca:\n self.kpca_ns = KernelPCA(kernel='rbf', fit_inverse_transform=True,\n copy_X=True, n_components=self.low_dim)\n X_ns = self.kpca_ns.fit_transform(self.ns_X)\n else:\n X_ns = copy.deepcopy(self.ns_X)\n self.ns_GP = GPy.models.GPRegression(X_ns, Y,\n kernel=copy.deepcopy(self.kernel),\n normalizer=self.normalizer)\n self.ns_GP.optimize_restarts(self.optimize_restarts)\n\n\n if self.with_random:\n self.random_X = copy.deepcopy(self.X)\n self.random_Y = Y\n\n\n def run_BO(self, iters_BO):\n for ib in range(iters_BO):\n print('BO iteration:', ib)\n if self.with_smooth:\n def f(x):\n if self.using_kpca:\n x_s = self.kpca_s.transform(x)\n else:\n x_s = x\n if isinstance(self.optimizer, lbfgs_opt):\n df = self.f_acqu.eval_df(x_s, k = self.k)\n else:\n df=None\n return self.f_acqu.evaluate(x_s, k=self.k), df\n x,f= self.optimizer.optimize(f=lambda x:f(x)[0],\n df = lambda x:f(x)[1])\n self.smooth_X = np.vstack((self.smooth_X, np.atleast_2d(x)))\n trajs = [self.system_under_test(x_i) for x_i in x]\n if self.using_kpca:\n X_s = self.kpca_s.fit_transform(self.smooth_X)\n else:\n X_s = self.smooth_X\n self.f_acqu.update_GPs(X_s, trajs,\n optimize_restarts=self.optimize_restarts)\n if self.with_ns:\n def f(X):\n if self.using_kpca:\n X_ns = self.kpca_ns.transform(X)\n else:\n X_ns = X\n m,v = self.ns_GP.predict(X_ns)\n if isinstance(self.optimizer, lbfgs_opt):\n dm,dv = self.ns_GP.predictive_gradients(X_ns)\n dm = dm[:,:,0]\n df = dm - (self.k/2)*(dv/np.sqrt(v))\n else:\n df =None\n return m - self.k*np.sqrt(v), df\n x,f = self.optimizer.optimize(f=lambda x: f(x)[0],\n df = lambda x:f(x)[1])\n trajs = [self.system_under_test(x_i) for x_i in x]\n f_x = self.f_acqu.eval_robustness(trajs)\n self.ns_X = np.vstack((self.ns_X, np.atleast_2d(x)))\n if self.using_kpca:\n X_ns = self.kpca_ns.fit_transform(self.ns_X)\n else:\n X_ns = self.ns_X\n self.ns_GP.set_XY(X_ns,\n np.vstack((self.ns_GP.Y, np.atleast_2d(f_x))))\n self.ns_GP.optimize_restarts(self.optimize_restarts)\n if self.with_random:\n if self.seed is not None:\n np.random.seed(self.seed)\n sample_from(self.init_sample, self.bounds)\n rand_x = sample_from(iters_BO, self.bounds)\n trajs = []\n for x in rand_x:\n trajs.append(self.system_under_test(x))\n self.random_X = np.vstack((self.random_X, rand_x))\n rand_y = self.f_acqu.eval_robustness(trajs)\n self.random_Y = np.vstack((self.random_Y, rand_y))\n\n if self.with_smooth:\n vals = self.f_acqu.find_GP_func()\n\n self.smooth_min_val = np.array(vals).min()\n self.smooth_min_loc = np.array(vals).argmin()\n self.smooth_min_x = self.smooth_X[self.smooth_min_loc]\n\n self.smooth_count = np.sum(np.array(vals) < 0)\n self.smooth_ce = np.flatnonzero(np.array(vals) < 0)\n\n if self.with_ns:\n self.ns_min_val = self.ns_GP.Y.min()\n self.ns_min_loc = self.ns_GP.Y.argmin()\n self.ns_min_x = self.ns_GP.X[self.ns_min_loc]\n\n self.ns_count = np.sum(self.ns_GP.Y < 0)\n self.ns_ce = np.flatnonzero(self.ns_GP.Y < 0)\n\n if self.with_random:\n self.rand_min_val = self.random_Y.min()\n self.rand_min_loc = self.random_Y.argmin()\n self.rand_min_x = self.random_X[self.rand_min_loc]\n\n self.rand_count = np.sum(self.random_Y < 0)\n self.rand_ce = np.flatnonzero(self.random_Y < 0)\n\n\n"
] | [
[
"sklearn.decomposition.KernelPCA"
]
] |
SonicZedt/ColorCount | [
"55fee92a7858c504b5a135b007f2065c2ec0a1be"
] | [
"image_data.py"
] | [
"import requests\nimport numpy as np\nimport collections\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom PIL import Image\nfrom io import BytesIO\n\nclass Image_Data:\n image = None\n\n @property\n def Array(self) -> np.ndarray:\n \"\"\"\n Return image array (RGB)\n \"\"\"\n return self.image\n\n @property\n def Color_Hex(self) -> list:\n hex = []\n\n def convert_RGB2HEX(color):\n return \"#{:02x}{:02x}{:02x}\".format(int(color[0]), int(color[1]), int(color[2]))\n\n image = self.image \n image_height = len(image)\n for y in range(image_height):\n for x in image[y]:\n hex.append(convert_RGB2HEX(x))\n\n return hex\n\n def __init__(self, image_path: str):\n if 'http' in image_path:\n # Online image\n image_req = requests.get(image_path, stream=True)\n if image_req.status_code == 200:\n self.image = np.array(Image.open(BytesIO(image_req.content)))\n\n else:\n # Local image\n self.image = np.array(Image.open(image_path))\n\n def show(self):\n Image.fromarray(self.image, 'RGB').show()\n\nclass Color:\n color = []\n\n @property\n def Total(self) -> int:\n return len(self.color)\n\n @property\n def Count(self) -> dict:\n \"\"\"\n Return total unique color\n \"\"\"\n color_count = dict(collections.Counter(self.color))\n \n # Sort dict by highest value\n color_count = {\n key: value for key, value in sorted(color_count.items(), key=lambda x: x[1], reverse=True)\n }\n\n return color_count\n\n @property\n def Listed_Count(self) -> list[dict]:\n \"\"\"\n Return total unique color in list of dictionary\n \"\"\"\n list_colors = []\n colors = self.Count.items()\n \n # List each dict item\n for key, val in colors:\n item = \"{'%(key)s' : %(val)s}\" % {'key': key, 'val': val}\n list_colors.append(eval(item))\n\n return list_colors\n\n def __init__(self, color: list):\n self.color = color\n\n def plot(self, min_value = 1):\n \"\"\"\n Plot color data with value more than min_value\n \"\"\"\n color_count = self.Count\n color_count = {key : value for key, value in color_count.items() if value >= min_value}\n \n color = list(color_count.keys())\n count = list(color_count.values())\n bar_colors = color\n\n # Draw plot\n #fig_width = len(color)\n #fig_height\n figure = plt.figure('Color Distribution', tight_layout=True)\n\n plt.barh(color, count, color=bar_colors, edgecolor='#aaaaaa')\n plt.title('Color Distribution')\n plt.ylabel('Color')\n plt.xlabel('Count')\n plt.show()\n\n # Render figure\n canvas = FigureCanvas(figure)\n canvas.draw()\n\n width, height = figure.get_size_inches() * figure.get_dpi()\n image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)\n\n return image"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"matplotlib.pyplot.xlabel"
]
] |
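The counting-and-plotting core of the `Color` class above is separable from the image handling: tally hex strings with `collections.Counter`, sort by frequency, and draw a horizontal bar chart whose bars are painted the color they count. A minimal sketch with a hypothetical pixel list:

```python
# Minimal sketch of Color.Count plus Color.plot from the row above.
import collections
import matplotlib.pyplot as plt

pixels = ['#ff0000', '#ff0000', '#00ff00', '#0000ff', '#ff0000', '#00ff00']
color_count = dict(sorted(collections.Counter(pixels).items(),
                          key=lambda kv: kv[1], reverse=True))

colors = list(color_count.keys())
counts = list(color_count.values())

plt.figure('Color Distribution', tight_layout=True)
plt.barh(colors, counts, color=colors, edgecolor='#aaaaaa')  # bars in their own color
plt.title('Color Distribution')
plt.xlabel('Count')
plt.ylabel('Color')
plt.show()
```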