repo_name: string (lengths 6 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
rohitvk1/pymindaffectBCI
[ "0119145a8b280c776f4c4e6cd776fed0f0156404" ]
[ "mindaffectBCI/decoder/decoder.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) 2019 MindAffect B.V. \n# Author: Jason Farquhar <[email protected]>\n# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.\n#\n# pymindaffectBCI is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# pymindaffectBCI is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>\n\nimport numpy as np\nfrom mindaffectBCI.decoder.UtopiaDataInterface import UtopiaDataInterface, butterfilt_and_downsample\nfrom mindaffectBCI.utopiaclient import NewTarget, Selection, ModeChange, PredictedTargetDist, PredictedTargetProb\nfrom mindaffectBCI.decoder.devent2stimsequence import devent2stimSequence, upsample_stimseq\nfrom mindaffectBCI.decoder.model_fitting import BaseSequence2Sequence, MultiCCA\nfrom mindaffectBCI.decoder.decodingSupervised import decodingSupervised\nfrom mindaffectBCI.decoder.decodingCurveSupervised import decodingCurveSupervised, plot_decoding_curve\nfrom mindaffectBCI.decoder.scoreOutput import dedupY0\nfrom mindaffectBCI.decoder.updateSummaryStatistics import updateSummaryStatistics, plot_summary_statistics, plot_erp\nfrom mindaffectBCI.decoder.utils import search_directories_for_file\nfrom mindaffectBCI.decoder.normalizeOutputScores import normalizeOutputScores\nfrom mindaffectBCI.decoder.zscore2Ptgt_softmax import softmax\nimport os\nimport traceback\n\nPYDIR = os.path.dirname(os.path.abspath(__file__))\nLOGDIR = os.path.join(PYDIR,'../../logs/')\n\nPREDICTIONPLOTS = False\nCALIBRATIONPLOTS = False\ntry :\n import matplotlib\n import matplotlib.pyplot as plt\n guiplots=True\n for be in matplotlib.rcsetup.all_backends: \n try:\n matplotlib.use(be)\n print(be)\n except: pass\n print(\"Initial backend: {}\".format(matplotlib.get_backend()))\n try:\n # backends to try: \"TkAgg\" \"WX\" \"WXagg\"\n matplotlib.use('TkAgg')\n except:\n print(\"couldn't change backend\")\n #plt.ion()\n print(\"Using backend: {}\".format(matplotlib.get_backend()))\nexcept:\n guiplots=False\n\ndef redraw_plots():\n if guiplots and not matplotlib.is_interactive():\n for i in plt.get_fignums():\n if plt.figure(i).get_visible():\n #plt.figure(i).canvas.draw_idle() # v.v.v. slow\n plt.gcf().canvas.flush_events()\n #plt.show(block=False)\n\n\ndef get_trial_start_end(msgs, start_ts=None):\n \"\"\"\n get the start+end times of the trials in a utopia message stream\n\n Args:\n msgs ([mindaffectBCI.UtopiaMessage]): list of messages recenty recieved\n start_ts (float, optional): time-stamp for start of *current* trial. Defaults to None.\n\n Returns:\n (list (start_ts,end_ts)): list of completed trial (start_ts,end_ts) time-stamp tuples\n (float): start_ts for trial started but not finished\n (list UtopiaMessage): list of unprocessed messages\n \"\"\" \n \n trials = []\n keeplast = False\n for mi, m in enumerate(msgs):\n #print(\"msg={}\".format(m))\n # process begin trail messages, N.B. 
after end-trial!\n if m.msgID == NewTarget.msgID:\n if start_ts is None:\n start_ts = m.timestamp\n print(\"NT: tr-bgn={}\".format(start_ts))\n else: # treat as end of sequence + start next sequence\n trials.append((start_ts, m.timestamp))\n start_ts = m.timestamp\n print(\"NT: tr-end={}\".format(m.timestamp))\n print(\"NT: tr-bgn={}\".format(m.timestamp))\n \n # process the end-trial messages\n if m.msgID == Selection.msgID:\n if start_ts is not None:\n trials.append((start_ts, m.timestamp))\n start_ts = m.timestamp\n print(\"SL: tr-end={}\".format(m.timestamp))\n else:\n print(\"Selection without start\")\n \n if m.msgID == ModeChange.msgID:\n if start_ts is not None:\n trials.append((start_ts,m.timestamp))\n start_ts=m.timestamp\n print(\"MC: tr-end={}\".format(m.timestamp))\n print(\"mod-chg={}\".format(m))\n # mark this message a *not* processed so it gets to the main loop\n keeplast=True\n break\n # make list un-processed messages\n if msgs and keeplast:\n msgs = msgs[mi:]\n #print(\"unproc messages: {}\".format(msgs))\n # return trial start/end + non-processed messages\n # N.B. start_ts is not None if trail start without end..\n return (trials, start_ts, msgs)\n\ndef getCalibration_dataset(ui:UtopiaDataInterface):\n \"\"\"\n extract a labelled dataset from the utopiaInterface, which are trials between modechange messages\n\n Args:\n ui (UtopiaDataInterface): the data interface object\n\n Returns:\n (list (data,stimulus)): list of pairs of time-stamped data and stimulus information as 2d (time,ch) (or (time,output)) numpy arrays\n \"\"\"\n \n # run until we get a mode change gathering training data in trials\n dataset = []\n start_ts = None # assume we have just started the first trial?\n isCalibrating = True\n while isCalibrating:\n\n # get new messages from utopia-hub\n newmsgs, _, _ = ui.update()\n # print(\"Extact_msgs:\"); print(\"{}\".format(newmsgs))\n\n # incremental extract trial limits\n trials, start_ts, newmsgs = get_trial_start_end(newmsgs, start_ts)\n\n # extract any complete trials data/msgs\n for (bgn_ts, end_ts) in trials:\n # N.B. 
be sure to make a copy so isn't changed outside us..\n data = ui.extract_data_segment(bgn_ts, end_ts)\n stimulus = ui.extract_stimulus_segment(bgn_ts, end_ts)\n print(\"Extract trl: {}->{}: data={} stim={}\".format(bgn_ts, end_ts, data.shape, stimulus.shape))\n dataset.append((data, stimulus))\n\n # check for end-calibration messages\n for i, m in enumerate(newmsgs):\n if m.msgID == ModeChange.msgID:\n isCalibrating = False\n # return unprocessed messages including the mode change\n # print('cal pushback: {}'.format(newmsgs[i:]))\n ui.push_back_newmsgs(newmsgs[i:])\n break\n\n return dataset\n\ndef dataset_to_XY_ndarrays(dataset):\n \"\"\"convert a dataset, consisting of a list of pairs of time-stamped data and stimulus events, to 3-d matrices of X=(trials,samples,channels) and Y=(trials,samples,outputs)\n\n Args:\n dataset ([type]): list of pairs of time-stamped data and stimulus events\n\n Returns:\n X (tr,samp,d): the per-trial data\n Y (tr,samp,nY): the per-trial stimulus, with sample rate matched to X\n X_ts (tr,samp): the time-stamps for the smaples in X\n Y_ts (tr,samp): the time-stamps for the stimuli in Y\n \"\"\" \n if dataset is None or not hasattr(dataset, '__iter__'):\n print(\"Warning: empty dataset input!\")\n return None, None, None, None\n # get length of each trial\n trlen = [trl[0].shape[0] for trl in dataset]\n trstim = [trl[1].shape[0] for trl in dataset]\n print(\"Trlen: {}\".format(trlen))\n print(\"Trstim: {}\".format(trstim))\n # set array trial length to 90th percential length\n trlen = int(np.percentile(trlen, 75))\n trstim = max(20, int(np.percentile(trstim, 75)))\n # filter the trials to only be the ones long enough to be worth processing\n dataset = [d for d in dataset if d[0].shape[0] > trlen//2 and d[1].shape[0] > trstim//2]\n if trlen == 0 or len(dataset) == 0:\n return None, None, None, None\n\n # map to single fixed size matrix + upsample stimulus to he EEG sample rate\n Y = np.zeros((len(dataset), trlen, 256), dtype=dataset[0][1].dtype)\n X = np.zeros((len(dataset), trlen, dataset[0][0].shape[-1]-1), dtype=dataset[0][0].dtype) # zero-padded data, w/o time-stamps\n X_ts = np.zeros((len(dataset),trlen),dtype=int)\n Y_ts = np.zeros((len(dataset),trlen),dtype=int)\n for ti, (data, stimulus) in enumerate(dataset):\n # extract the data & remove the timestamp channel and insert into the ndarray\n # guard for slightly different sizes..\n if X.shape[1] <= data.shape[0]:\n X[ti, :, :] = data[:X.shape[1], :-1]\n X_ts[ti, :] = data[:X.shape[1], -1]\n else: # pad end with final value\n X[ti, :data.shape[0], :] = data[:, :-1]\n X[ti, data.shape[0]:, :] = data[-1, :-1]\n X_ts[ti, :data.shape[0]] = data[:, -1]\n\n # upsample stimulus to the data-sample rate and insert into ndarray\n data_ts = data[:, -1] # data timestamp per sample\n stimulus_ts = stimulus[:, -1] # stimulus timestamp per stimulus event\n stimulus, data_i = upsample_stimseq(data_ts, stimulus[:, :-1], stimulus_ts)\n # store -- compensating for any variable trial lengths.\n if Y.shape[1] < stimulus.shape[0]: # long trial\n Y[ti, :, :] = stimulus[:Y.shape[1], :]\n else: # short trial\n Y[ti, :stimulus.shape[0], :] = stimulus\n # record stim-ts @ this data_ts\n tmp = data_i < Y.shape[1]\n Y_ts[ti,data_i[tmp]] = stimulus_ts[tmp] \n\n return X, Y, X_ts, Y_ts\n\n\ndef strip_unused(Y):\n \"\"\"\n strip unused outputs from the stimulus info in Y\n\n Args:\n Y (np.ndarray (time,outputs)): the full stimulus information, potentionally with many unused outputs\n\n Returns:\n (np.ndarray (time,used-outputs)): Y with 
unused outputs removed\n \"\"\" \n \n used_y = np.any(Y.reshape((-1, Y.shape[-1])), 0)\n used_y[0] = True # ensure objID=0 is always used..\n Y = Y[..., used_y]\n return Y, used_y\n\ndef load_previous_dataset(f:str):\n \"\"\"\n search standard directory locations and load a previously saved (pickled) calibration dataset\n\n Args:\n f (str, file-like): buffered interface to the data and stimulus streams\n\n Returns:\n (list of (data,stimulus)): list of stimulus,data pairs for each trial\n \"\"\"\n import pickle\n import glob\n if isinstance(f,str): # filename to load from\n # search in likely dataset locations for the file to load\n f = search_directories_for_file(f,\n PYDIR,\n os.path.join(PYDIR,'..','..'),\n LOGDIR)\n # pick the most recent if multiple files match\n f = max(glob.glob(f), key=os.path.getctime)\n if f:\n with open(f,'rb') as file:\n dataset = pickle.load(file)\n else: # is it a byte-stream to load from?\n dataset = pickle.load(f)\n if isinstance(dataset,dict):\n dataset=dataset['dataset']\n return dataset\n\n\ndef doCalibrationSupervised(ui: UtopiaDataInterface, clsfr: BaseSequence2Sequence, **kwargs):\n \"\"\"\n do a calibration phase = basically just extract the training data and train a classifier from the utopiaInterface\n\n Args:\n ui (UtopiaDataInterface): buffered interface to the data and stimulus streams\n clsfr (BaseSequence2Sequence): the classifier to use to fit a model to the calibration data\n cv (int, optional): the number of cross-validation folds to use for model generalization performance estimation. Defaults to 2.\n prior_dataset ([type], optional): data-set from a previous calibration run, used to accumulate data over subsequent calibrations. Defaults to None.\n ranks (tuple, optional): a list of model ranks to optimize as hyperparameters. Defaults to (1,2,3,5).\n\n Returns:\n dataset [type]: the gathered calibration data\n X : the calibration data as a 3-d array (tr,samp,d)\n Y : the calibration stimulus as a 3-d array (tr,samp,num-outputs)\n \"\"\" \n X = None\n Y = None\n\n # get the calibration data on-line\n dataset = getCalibration_dataset(ui)\n\n # fit the model to this data\n perr, dataset, X, Y = doModelFitting(clsfr,dataset, fs=ui.fs, **kwargs)\n\n # send message with calibration performance score, if we got one\n if perr is not None:\n ui.sendMessage(PredictedTargetProb(ui.stimulus_timestamp, 0, perr))\n \n return dataset, X, Y\n\n\ndef doModelFitting(clsfr: BaseSequence2Sequence, dataset,\n cv:int=2, prior_dataset=None, ranks=(1,2,3,5), fs:float=None, n_ch:int=None, **kwargs):\n \"\"\"\n fit a model given a dataset \n\n Args:\n clsfr (BaseSequence2Sequence): the classifier to use to fit a model to the calibration data\n cv (int, optional): the number of cross-validation folds to use for model generalization performance estimation. Defaults to 2.\n prior_dataset ([type], optional): data-set from a previous calibration run, used to accumulate data over subsequent calibrations. Defaults to None.\n ranks (tuple, optional): a list of model ranks to optimize as hyperparameters. 
Defaults to (1,2,3,5).\n\n Returns:\n perr (float): the estimated model generalization performance on the training data.\n dataset [type]: the gathered calibration data\n X : the calibration data as a 3-d array (tr,samp,d)\n Y : the calibration stimulus as a 3-d array (tr,samp,num-outputs)\n \"\"\" \n global UNAME\n perr = None \n X = None\n Y = None\n\n if isinstance(prior_dataset,str): # filename to load the data from?\n try:\n prior_dataset = load_previous_dataset(prior_dataset)\n except:\n # soft-fail if load failed\n print(\"Warning: couldn't load / user prior_dataset: {}\".format(prior_dataset))\n prior_dataset = None\n if prior_dataset is not None: # combine with the old calibration data\n p_n_ch = [ x.shape[-1] for (x,_) in prior_dataset ]\n p_n_ch = max(p_n_ch) if len(p_n_ch)>0 else -1\n if dataset is not None:\n # validate the 2 datasets are compatiable -> same number channels in X\n d_n_ch = [ x.shape[-1] for (x,_) in dataset ]\n d_n_ch = max(d_n_ch) if len(d_n_ch)>0 else -1\n if d_n_ch == p_n_ch and d_n_ch > 0: # match the max channels info\n dataset.extend(prior_dataset)\n else:\n print(\"Warning: prior dataset ({}ch) not compatiable with current {}ch. Ignored!\".format(p_n_ch,d_n_ch))\n else:\n if n_ch is None or n_ch == p_n_ch:\n dataset = prior_dataset\n else:\n print(\"Warning: prior dataset ({}ch) not compatiable with current {} channels. Ignored!\".format(p_n_ch,n_ch))\n\n if dataset:\n try:\n import pickle\n fn = os.path.join(LOGDIR,'calibration_dataset_{}.pk'.format(UNAME))\n print('Saving calibration data to {}'.format(fn))\n pickle.dump(dict(dataset=dataset), open(fn,'wb'))\n except:\n print('Error saving cal data')\n\n # convert msgs -> to nd-arrays\n X, Y, X_ts, Y_ts = dataset_to_XY_ndarrays(dataset)\n\n # guard against empty training dataset\n if X is None or Y is None :\n return None, None, None, None\n Y, used_idx = strip_unused(Y)\n \n # now call the clsfr fit method, on the true-target info\n try:\n print(\"Training dataset = ({},{})\".format(X.shape, Y.shape))\n cvscores = clsfr.cv_fit(X, Y, cv=cv, ranks=ranks, **kwargs)\n score = np.mean(cvscores['test_score'])\n print(\"clsfr={} => {}\".format(clsfr, score))\n except:\n traceback.print_exc()\n return None, None, None, None\n\n decoding_curve = decodingCurveSupervised(cvscores['estimator'], nInt=(10, 10),\n priorsigma=(clsfr.sigma0_, clsfr.priorweight),\n softmaxscale=clsfr.softmaxscale_, \n marginalizedecis=True, minDecisLen=clsfr.minDecisLen, \n bwdAccumulate=clsfr.bwdAccumulate, \n nEpochCorrection=clsfr.startup_correction)\n # extract the final estimated performance\n #print(\"decoding curve {}\".format(decoding_curve[1]))\n #print(\"score {}\".format(score))\n perr = decoding_curve[1][-1] if len(decoding_curve)>1 else 1-score\n if CALIBRATIONPLOTS:\n try:\n #if True:\n import matplotlib.pyplot as plt\n plt.figure(1)\n clsfr.plot_model(fs=fs, ncol=3) # use 3 cols, so have: spatial, temporal, decoding-curve\n plt.subplot(1,3,3) # put decoding curve in last sub-plot\n plot_decoding_curve(*decoding_curve)\n plt.suptitle(\"Model + Decoding Performance\")\n # from analyse_datasets import debug_test_dataset\n # debug_test_dataset(X,Y,None,fs=fs)\n plt.figure(3) # plot the CCA info\n Y_true = clsfr.stim2event(Y)\n Y_true = Y_true[...,0:1,:]\n Cxx, Cxy, Cyy = updateSummaryStatistics(X,Y_true,tau=clsfr.tau,offset=clsfr.offset)\n plot_summary_statistics(Cxx,Cxy,Cyy,clsfr.evtlabs,fs=fs)\n plt.suptitle(\"Summary Statistics\")\n try:\n import pickle\n fn = 
os.path.join(LOGDIR,'summary_statistics_{}.pk'.format(UNAME))\n print('Saving SS to {}'.format(fn))\n pickle.dump(dict(Cxx=Cxx, Cxy=Cxy, Cyy=Cyy, evtlabs=clsfr.evtlabs, fs=fs),\n open(fn,'wb'))\n except:\n print('Error saving cal data')\n plt.figure(4)\n plot_erp(Cxy,evtlabs=clsfr.evtlabs,fs=fs)\n plt.suptitle(\"Event Related Potential (ERP)\")\n plt.show(block=False)\n # save figures\n plt.figure(1)\n plt.savefig(os.path.join(LOGDIR,'model_{}.png'.format(UNAME)))\n #plt.figure(2)\n #plt.savefig(os.path.join(LOGDIR,'decoding_curve_{}.png'.format(UNAME)))\n plt.figure(3)\n plt.savefig(os.path.join(LOGDIR,'summary_statistics_{}.png'.format(UNAME)))\n plt.figure(4)\n plt.savefig(os.path.join(LOGDIR,'erp_{}.png'.format(UNAME)))\n except:\n traceback.print_exc()\n pass\n\n return perr, dataset, X, Y\n\n\ndef doPrediction(clsfr: BaseSequence2Sequence, data, stimulus, prev_stimulus=None):\n \"\"\"\n given the current trial's data, apply the classifier and decoder to make target predictions\n\n Args:\n clsfr (BaseSequence2Sequence): the trained classifier to apply to the data\n data (np.ndarray (time,channels)): the pre-processed EEG data\n stimulus (np.ndarray (time,outputs)): the raw stimulus information\n prev_stimulus (np.ndarray, optional): previous stimulus before stimulus -- possibly needed for correct event coding. Defaults to None.\n\n Returns:\n (np.ndarray (time,outputs)): Fy scores for each output at each time-point\n \"\"\" \n \n X = data[:, :-1]\n X_ts = data[:, -1]\n Y = stimulus[:, :-1]\n Y_ts = stimulus[:, -1]\n if X_ts.size == 0 or Y_ts.size == 0: # fast path empty inputs\n return None\n # strip outputs that we don't use, to save compute time\n Y, used_idx = strip_unused(Y)\n # strip the true target info if it's a copy, so it doesn't mess up Py computation\n #Y = dedupY0(Y, zerodup=False, yfeatdim=False)\n # up-sample Y to match the rate of X\n # TODO[]: should this happen in the data-interface?\n Y, _ = upsample_stimseq(X_ts, Y, Y_ts)\n # predict on X,Y without the time-stamp info\n Fy_1 = clsfr.predict(X, Y, prevY=prev_stimulus, dedup0=-1) # predict, removing objID==0\n # map-back to 256\n Fy = np.zeros(Fy_1.shape[:-1]+(256,),dtype=Fy_1.dtype)\n Fy[..., used_idx] = Fy_1\n return Fy\n\n\ndef combine_Ptgt(pvals_objIDs):\n \"\"\"combine target probabilities in a correct way\n\n Args:\n pvals_objIDs (list (pval,objId)): list of Ptgt,objID pairs for outputs at different time points.\n\n Returns:\n (np.ndarray (outputs,)) : target probabilities\n (np.ndarray (outputs,)) : object IDs for the targets\n \"\"\" \n pvals = [p[0] for p in pvals_objIDs] \n objIDs = [p[1] for p in pvals_objIDs]\n if not all(np.array_equal(objIDs[0], oi) for oi in objIDs):\n print(\"Warning: combination only supported for fixed output set currently!\")\n return pvals[-1], objIDs[-1]\n pvals = np.hstack(pvals) # (nBlk,nObj)\n # corrected combination\n Ptgt = softmax(np.sum(np.log(pvals))/np.sqrt(pvals.shape[0]))\n return Ptgt, objIDs\n\n\ndef send_prediction(ui: UtopiaDataInterface, Ptgt, used_idx=None, timestamp:int=-1):\n \"\"\"Send the current prediction information to the utopia-hub\n\n Args:\n ui (UtopiaDataInterface): the interface to the data-hub\n Ptgt (np.ndarray (outputs,)): the current distribution of target probabilities over outputs\n used_idx (np.ndarray, optional): a set of output indices currently used. Defaults to None.\n timestamp (int, optional): time stamp for which this prediction applies. Defaults to -1.\n \"\"\" \n if Ptgt is None or len(Ptgt)==0 :\n return \n #print(\" Pred= used_idx:{} ptgt:{}\".format(used_idx,Ptgt))\n # N.B. for network efficiency, only send for non-zero probability outputs\n nonzero_idx = np.flatnonzero(Ptgt)\n # print(\"{}={}\".format(Ptgt,nonzero_idx))\n # ensure at least one entry\n if nonzero_idx.size == 0: \n nonzero_idx = [0]\n Ptgt = Ptgt[nonzero_idx]\n if used_idx is None:\n used_idx = nonzero_idx\n else:\n if np.issubdtype(used_idx.dtype, np.bool_): # logical->index\n used_idx = np.flatnonzero(used_idx)\n used_idx = used_idx[nonzero_idx]\n # print(\" Pred= used_idx:{} ptgt:{}\".format(used_idx,Ptgt))\n # send the prediction messages, PredictedTargetProb, PredictedTargetDist\n y_est_idx = np.argmax(Ptgt, axis=-1)\n # most likely target and the chance that it is wrong\n if Ptgt[y_est_idx] == 1.0 :\n print(\"P==1?\") \n else:\n ptp = PredictedTargetProb(timestamp, used_idx[y_est_idx], 1-Ptgt[y_est_idx])\n print(\" Pred= {}\".format(ptp))\n ui.sendMessage(ptp)\n # distribution over all *non-zero* targets\n ui.sendMessage(PredictedTargetDist(timestamp, used_idx, Ptgt))\n \n\ndef doPredictionStatic(ui: UtopiaDataInterface, clsfr: BaseSequence2Sequence, model_apply_type:str='trial', timeout_ms:float=None, block_step_ms:float=100, maxDecisLen_ms:float=8000):\n \"\"\" \n do the prediction stage = basically extract data/msgs from trial start and generate a prediction from them\n\n Args:\n ui (UtopiaDataInterface): buffered interface to the data and stimulus streams\n clsfr (BaseSequence2Sequence): the trained classification model\n maxDecisLen_ms (float, optional): the maximum amount of data to use to make a prediction, i.e. prediction sliding window size. Defaults to 8000\n\n \"\"\"\n if not clsfr.is_fitted():\n print(\"Warning: trying to predict without training classifier!\")\n return\n\n if PREDICTIONPLOTS and guiplots:\n plt.close('all')\n\n # TODO []: Block based prediction is slightly slower? Why?\n if timeout_ms is None:\n timeout_ms = block_step_ms\n \n # start of the data block to apply the model to\n block_start_ts = ui.data_timestamp\n overlap_samp = clsfr.tau\n overlap_ms = overlap_samp * 1000 / ui.fs\n maxDecisLen_samp = int(maxDecisLen_ms * ui.fs / 1000)\n Fy = None # (1,nSamp,nY):float score for each output for each sample\n trial_start_ts = None\n isPredicting = True\n # run until we get a mode change gathering training data in trials\n while isPredicting:\n # get new messages from utopia-hub\n newmsgs, ndata, nstim = ui.update(timeout_ms=timeout_ms,mintime_ms=timeout_ms//2)\n\n # TODO[]: Fix to not re-process the same data if no new stim to be processed..\n if len(newmsgs) == 0 and nstim == 0 and ndata == 0:\n continue\n if ui.data_timestamp is None or ui.stimulus_timestamp is None:\n continue\n\n # get the timestamp for the last data which it is valid to apply the model to,\n # that is, where we have enough data to include a complete response for this stimulus\n # Note: can't just use last data, in case stimuli are lagged w.r.t. data\n # also, prevents processing data for which there are no stimulus events to compare with\n valid_end_ts = min(ui.stimulus_timestamp + overlap_ms, ui.data_timestamp)\n \n # incremental extract trial limits\n otrial_start_ts = trial_start_ts\n trials, trial_start_ts, newmsgs = get_trial_start_end(newmsgs, trial_start_ts)\n\n # change in trial-start -> end-of-trial / start new trial detected\n if trial_start_ts != otrial_start_ts:\n print(\"New trial! 
tr_start={}\".format(trial_start_ts))\n\n Fy = None\n block_start_ts = trial_start_ts\n\n # compute the start/end of the segement to apply the model to\n if model_apply_type == 'trial':\n # apply the model to all available data from trial start\n block_start_ts = trial_start_ts\n if block_start_ts is not None and block_start_ts + block_step_ms + overlap_ms < valid_end_ts:\n block_end_ts = valid_end_ts\n else:\n block_end_ts = None\n # limit the trial size and hence computational cost!\n if block_start_ts is not None:\n block_start_ts = max( block_start_ts, valid_end_ts - maxDecisLen_ms)\n else:\n # check if enough data to apply the model\n if block_start_ts is not None and block_start_ts + block_step_ms + overlap_ms < valid_end_ts:\n # got enough data to process this block\n block_end_ts = valid_end_ts\n else:\n # not enough yet -> clear the end to indicate dont apply the model\n block_end_ts = None\n\n # if we have a valid block to apply the model do\n if block_start_ts is not None and block_end_ts is not None:\n\n # extract and apply to this block\n print(\"Extract block: {}->{} = {}ms\".format(block_start_ts, block_end_ts, block_end_ts-block_start_ts))\n data = ui.extract_data_segment(block_start_ts, block_end_ts)\n stimulus = ui.extract_stimulus_segment(block_start_ts, block_end_ts)\n # skip if no data/stimulus to process\n if data.size == 0 or stimulus.size == 0:\n continue\n\n print('got: data {}->{} ({}) stimulus {}->{} ({}>0)'.format(data[0, -1], data[-1, -1], data.shape[0],\n stimulus[0, -1], stimulus[-1, -1], np.sum(stimulus[:,0])))\n if model_apply_type == 'block':\n # update the start point for the next block\n # start next block at overlap before the end of this blocks data\n # so have sample accurate predictions, with no missing data, and no overlaps\n block_start_ts = data[-overlap_samp+1, -1] # ~= block_end_ts - overlap_ms +1-sample\n bend = block_start_ts + block_step_ms + overlap_ms\n print(\"next block {}->{}: in {}ms\".format(block_start_ts, bend, bend - ui.data_timestamp))\n\n # get predictions for this data block\n block_Fy = doPrediction(clsfr, data, stimulus)\n # strip predictions from the overlap period\n block_Fy = block_Fy[..., :-overlap_samp, :]\n\n # if got valid predictions...\n if block_Fy is not None:\n # accumulate or store the predictions\n if model_apply_type == 'trial':\n Fy = block_Fy\n elif model_apply_type == 'block': # accumulate blocks in the trial\n if Fy is None: # restart accumulation\n Fy = block_Fy\n else:\n Fy = np.append(Fy, block_Fy, -2)\n # limit the trial length\n if maxDecisLen_ms > 0 and Fy.shape[-2] > maxDecisLen_samp:\n print(\"limit trial length {} -> {}\".format(Fy.shape[-2], maxDecisLen_samp))\n Fy = Fy[..., -maxDecisLen_samp:, :]\n\n # send prediction event\n # only process the used-subset\n used_idx = np.any(Fy.reshape((-1, Fy.shape[-1])), 0)\n used_idx[0] = True # force include 0\n # map to probabilities, including the prior over sigma! as the clsfr is configured\n Ptgt = clsfr.decode_proba(Fy[...,used_idx], marginalizedecis=True, marginalizemodels=True,\n minDecisLen=clsfr.minDecisLen, bwdAccumulate=clsfr.bwdAccumulate)\n # BODGE: only use the last (most data?) 
prediction...\n Ptgt = Ptgt[-1, -1, :] if Ptgt.ndim==3 else Ptgt[0,-1,-1,:]\n if PREDICTIONPLOTS and guiplots and len(Ptgt)>1:\n # bar plot of current Ptgt info\n #try:\n ssFy, _, _, _, _ = normalizeOutputScores(Fy[...,used_idx], minDecisLen=-10, marginalizemodels=True, \n nEpochCorrection=clsfr.startup_correction, priorsigma=(clsfr.sigma0_,clsfr.priorweight))\n Py = clsfr.decode_proba(Fy[...,used_idx], marginalizemodels=True, minDecisLen=-10, bwdAccumulate=False)\n plot_trial_summary(Ptgt,ssFy,Py,fs=ui.fs/10)\n #except:\n # pass\n\n # send prediction with last recieved stimulus_event timestamp\n print(\"Fy={} Yest={} Perr={}\".format(Fy.shape, np.argmax(Ptgt), 1-np.max(Ptgt)))\n\n send_prediction(ui, Ptgt, used_idx=used_idx)\n\n if PREDICTIONPLOTS:\n redraw_plots()\n \n # check for end-prediction messages\n for i,m in enumerate(newmsgs):\n if m.msgID == ModeChange.msgID:\n isPredicting = False\n # return unprocessed messages to stack. Q: why i+1?\n ui.push_back_newmsgs(newmsgs[i:])\n\naxPtgt, axFy, axPy = (None, None, None)\ndef plot_trial_summary(Ptgt, Fy=None, Py=None, fs:float=None):\n \"\"\"Plot a summary of the trial decoding information\n\n Args:\n Ptgt (np.ndarray): the current output probabilities\n Fy (np.ndarray): the raw output scores over time\n Py (np.ndarray): the raw probabilities for each target over time\n fs (float, optional): the data sample rate. Defaults to None.\n \"\"\" \n global axFy, axPy, axPtgt\n\n if axFy is None or not plt.fignum_exists(10):\n # init the fig\n fig = plt.figure(10)\n plt.clf()\n axPtgt = fig.add_axes((.45,.1,.50,.85))\n axPy = fig.add_axes((.1,.1,.25,.35))\n axFy = fig.add_axes((.1,.55,.25,.35),sharex=axPy)\n axFy.tick_params(labelbottom=False)\n plt.tight_layout()\n\n if Fy is not None and axFy is not None:\n axFy.cla()\n axFy.set_ylabel('Fy')\n axFy.set_title(\"Trial Summary\")\n axFy.grid(True)\n if Fy.ndim>3 : # sum out model dim \n Fy=np.mean(Fy,-4)\n times = np.arange(-Fy.shape[-2],0)\n t_unit = 'samples'\n if fs is not None:\n times = times / fs\n t_unit = 's'\n axFy.plot(times,Fy[0,:,:])\n axPy.cla()\n axPy.set_ylabel('Py')\n axPy.set_ylim((0,1))\n axPy.set_xlabel(\"time ({})\".format(t_unit))\n axPy.grid(True)\n axPy.plot(times,Py[0,:,:])\n\n if Ptgt is not None and axPtgt is not None:\n # init the fig\n axPtgt.cla()\n axPtgt.set_title(\"Current: P_target\")\n axPtgt.set_ylabel(\"P_target\")\n axPtgt.set_xlabel('Output (objID)')\n axPtgt.set_ylim((0,1))\n axPtgt.grid(True)\n axPtgt.bar(range(len(Ptgt)),Ptgt)\n #plt.xticklabel(np.flatnonzero(used_idx))\n plt.show(block=False)\n # fig.canvas.draw()\n\ndef run(ui: UtopiaDataInterface=None, clsfr: BaseSequence2Sequence=None, msg_timeout_ms: float=100, \n host:str=None, prior_dataset:str=None,\n tau_ms:float=450, offset_ms:float=0, out_fs:float=100, evtlabs=None, \n stopband=((45,65),(5.5,25,'bandpass')), ftype='butter', order:int=6, cv:int=5,\n prediction_offsets=None, logdir=None,\n calplots:bool=False, predplots:bool=False, label:str=None, **kwargs):\n \"\"\" run the main decoder processing loop\n\n Args:\n ui (UtopiaDataInterface, optional): The utopia data interface class. Defaults to None.\n clsfr (BaseSequence2Sequence, optional): the classifer to use when model fitting. Defaults to None.\n msg_timeout_ms (float, optional): timeout for getting new messages from the data-interface. Defaults to 100.\n host (str, optional): hostname for the utopia hub. Defaults to None.\n tau_ms (float, optional): length of the stimulus response. 
Defaults to 450.\n offset_ms (float, optional): offset in ms to shift the analysis window. Used to compensate for response lag. Defaults to 0.\n stopband (tuple, optional): temporal filter specification for `UtopiaDataInterface.butterfilt_and_downsample`. Defaults to ((45,65),(5.5,25,'bandpass'))\n ftype (str, optional): type of temporal filter to use. Defaults to 'butter'.\n logdir (str, optional): location to save output files. Defaults to None.\n order (int, optional): order of temporal filter to use. Defaults to 6.\n out_fs (float, optional): sample rate after the pre-processor. Defaults to 100.\n evtlabs (tuple, optional): the brain event coding to use. Defaults to None.\n calplots (bool, optional): flag if we make plots after calibration. Defaults to False.\n predplots (bool, optional): flag if we make plots after each prediction trial. Defaults to False.\n prior_dataset ([str,(dataset)]): calibration data from a previous run of the system. Used to pre-seed the model. Defaults to None.\n prediction_offsets ([ListInt], optional): a list of stimulus offsets to try at prediction time to cope with stimulus timing jitter. Defaults to None.\n \"\"\"\n global CALIBRATIONPLOTS, PREDICTIONPLOTS, UNAME, LOGDIR\n CALIBRATIONPLOTS = calplots\n PREDICTIONPLOTS = predplots\n\n\n # setup the saving label\n from datetime import datetime \n UNAME = datetime.now().strftime(\"%y%m%d_%H%M\")\n if label is not None: # include label as prefix\n UNAME = \"{}_{}\".format(label,UNAME)\n # setup saving location\n if logdir:\n LOGDIR=os.path.expanduser(logdir)\n if not os.path.exists(logdir):\n try:\n os.makedirs(logdir)\n except:\n print(\"Error making the log directory.... ignoring\")\n\n print(\"LOGDIR={}\".format(LOGDIR))\n\n # create data interface with bandpass and downsampling pre-processor, running about 10hz updates\n if ui is None:\n try:\n from scipy.signal import butter\n ppfn = butterfilt_and_downsample(order=order, stopband=stopband, fs_out=out_fs, ftype=ftype)\n except: # load filter from file\n print(\"Warning: stopband specification *ignored*, using sos_filter_coeff.pk file...\")\n ppfn = butterfilt_and_downsample(stopband='sos_filter_coeff.pk', fs_out=out_fs)\n #ppfn = None\n ui = UtopiaDataInterface(data_preprocessor=ppfn,\n stimulus_preprocessor=None,\n timeout_ms=100, mintime_ms=55, clientid='decoder') # 20hz updates\n ui.connect(host=host, queryifhostnotfound=False)\n ui.update()\n \n # use a multi-cca for the model-fitting\n if clsfr is None:\n if isinstance(evtlabs,str): # decode string coded spec\n evtlabs = evtlabs.split(',')\n clsfr = MultiCCA(tau=int(out_fs*tau_ms/1000), evtlabs=evtlabs, offset=int(out_fs*offset_ms/1000), prediction_offsets=prediction_offsets)\n print('clsfr={}'.format(clsfr))\n\n # pre-train the model if the prior_dataset is given\n if prior_dataset is not None:\n doModelFitting(clsfr, None, cv=cv, prior_dataset=prior_dataset, fs=ui.fs, n_ch=ui.data_ringbuffer.shape[-1])\n\n current_mode = \"idle\"\n # clean shutdown when told shutdown\n while current_mode.lower() != \"shutdown\":\n\n if current_mode.lower() in (\"calibration.supervised\",\"calibrate.supervised\"):\n prior_dataset, _, _ = doCalibrationSupervised(ui, clsfr, cv=cv, prior_dataset=prior_dataset)\n \n elif current_mode.lower() in (\"prediction.static\",\"predict.static\"):\n if not clsfr.is_fitted() and prior_dataset is not None:\n doModelFitting(clsfr, None, cv=cv, prior_dataset=prior_dataset, fs=ui.fs, n_ch=ui.data_ringbuffer.shape[-1])\n\n doPredictionStatic(ui, clsfr)\n\n elif current_mode.lower() == \"reset\":\n prior_dataset = None\n clsfr.clear()\n\n # check for new mode-messages\n newmsgs, nsamp, nstim = ui.update()\n\n # update the system mode\n current_mode = \"idle\"\n for i, m in enumerate(newmsgs):\n if m.msgID == ModeChange.msgID:\n current_mode = m.newmode\n print(\"\\nNew Mode: {}\".format(current_mode))\n ui.push_back_newmsgs(newmsgs[i+1:])\n # stop processing messages\n break\n \n # BODGE: re-draw plots so they are interactive.\n redraw_plots()\n\ndef parse_args():\n import argparse\n import json\n parser = argparse.ArgumentParser()\n parser.add_argument('--host',type=str, help='address (IP) of the utopia-hub', default=None)\n parser.add_argument('--out_fs',type=int, help='output sample rate', default=100)\n parser.add_argument('--tau_ms',type=float, help='length of the stimulus response in ms', default=450)\n parser.add_argument('--evtlabs', type=str, help='comma separated list of stimulus event types to use', default='re,fe')\n parser.add_argument('--stopband',type=json.loads, help='set of notch filters to apply to the data before analysis', default=((45,65),(5.5,25,'bandpass')))\n parser.add_argument('--cv',type=int, help='number cross validation folds', default=5)\n parser.add_argument('--predplots', action='store_true', help='flag to make decoding plots at prediction time')\n parser.add_argument('--calplots', action='store_false', help='turn OFF model and decoding plots after calibration')\n parser.add_argument('--savefile', type=str, help='run decoder using this file as the proxy data source', default=None)\n parser.add_argument('--savefile_fs', type=float, help='effective sample rate for the save file', default=None)\n parser.add_argument('--logdir', type=str, help='directory to save log/data files', default='~/Desktop/logs')\n parser.add_argument('--prior_dataset', type=str, help='prior dataset to fit initial model to', default='~/Desktop/logs/calibration_dataset*.pk')\n\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.savefile is not None:\n #savefile=\"~/utopia/java/messagelib/UtopiaMessages_.log\"\n #savefile=\"~/utopia/java/utopia2ft/UtopiaMessages_*1700.log\"\n #savefile=\"~/Downloads/jason/UtopiaMessages_200923_1749_*.log\"\n savefile='~/Desktop/mark/mindaffectBCI*.txt'\n savefile=args.logdir + \"/mindaffectBCI*.txt\"\n setattr(args,'savefile',savefile)\n #setattr(args,'out_fs',100)\n #setattr(args,'savefile_fs',200)\n #setattr(args,'cv',5)\n setattr(args,'predplots',True) # prediction plots -- useful for prediction perf debugging\n setattr(args,'prior_dataset',None)\n from mindaffectBCI.decoder.FileProxyHub import FileProxyHub\n U = FileProxyHub(args.savefile,use_server_ts=True)\n ppfn = butterfilt_and_downsample(order=6, stopband=args.stopband, fs_out=args.out_fs, ftype='butter')\n ui = UtopiaDataInterface(data_preprocessor=ppfn,\n stimulus_preprocessor=None,\n timeout_ms=100, mintime_ms=0, U=U, fs=args.savefile_fs, clientid='decoder') # 20hz updates\n # add the file-proxy ui as input argument\n setattr(args,'ui',ui)\n\n\n # # HACK: set debug attrs....\n #setattr(args,'prior_dataset','calibration_dataset_debug.pk')\n \n # hack testing arguments!\n #setattr(args,'prediction_offsets',(-1,0,1))\n\n running=True\n nCrash = 0\n while running and nCrash < 10:\n try:\n run(**vars(args))\n # stop restarting if normal terminate\n running=False\n except KeyboardInterrupt:\n # stop running if keyboard interrupt\n running=False\n except Exception as ex:\n print(\"Error 
running mainloop\"+ str(ex))\n nCrash = nCrash + 1\n pass\n" ]
[ [ "numpy.sqrt", "numpy.issubdtype", "numpy.max", "numpy.mean", "numpy.hstack", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.flatnonzero", "matplotlib.pyplot.gcf", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.fignum_exists", "numpy.log", "matplotlib.is_interactive", "matplotlib.get_backend", "numpy.append", "matplotlib.pyplot.get_fignums", "matplotlib.pyplot.show", "matplotlib.pyplot.suptitle", "numpy.sum", "numpy.isequal", "matplotlib.use", "numpy.percentile", "matplotlib.pyplot.clf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csrunner/new_feat
[ "5174312634c696b022f624a047d1dcb7435dfeba" ]
[ "socket_cv_client.py" ]
[ "# -*- coding:utf-8 -*-\n__author__ = 'shichao'\n\n# 客户端\n\n\nimport socket\nimport cv2\nimport threading\nimport struct\nimport numpy\n\n\nclass Camera_Connect_Object:\n def __init__(self, D_addr_port=[\"\", 8880]):\n self.resolution = [640, 480]\n self.addr_port = D_addr_port\n self.src = 888 + 15 # 双方确定传输帧数,(888)为校验值\n self.interval = 0 # 图片播放时间间隔\n self.img_fps = 15 # 每秒传输多少帧数\n\n def Set_socket(self):\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def Socket_Connect(self):\n self.Set_socket()\n self.client.connect(self.addr_port)\n print(\"IP is %s:%d\" % (self.addr_port[0], self.addr_port[1]))\n\n def RT_Image(self):\n # 按照格式打包发送帧数和分辨率\n self.name = self.addr_port[0] + \" Camera\"\n self.client.send(struct.pack(\"lhh\", self.src, self.resolution[0], self.resolution[1]))\n while (1):\n info = struct.unpack(\"lhh\", self.client.recv(12))\n buf_size = info[0] # 获取读的图片总长度\n if buf_size:\n try:\n self.buf = b\"\" # 代表bytes类型\n temp_buf = self.buf\n while (buf_size): # 读取每一张图片的长度\n temp_buf = self.client.recv(buf_size)\n buf_size -= len(temp_buf)\n self.buf += temp_buf # 获取图片\n data = numpy.fromstring(self.buf, dtype='uint8') # 按uint8转换为图像矩阵\n self.image = cv2.imdecode(data, 1) # 图像解码\n cv2.imshow(self.name, self.image) # 展示图片\n except:\n pass;\n finally:\n if (cv2.waitKey(10) == 27): # 每10ms刷新一次图片,按‘ESC’(27)退出\n self.client.close()\n cv2.destroyAllWindows()\n break\n\n def Get_Data(self, interval):\n showThread = threading.Thread(target=self.RT_Image)\n showThread.start()\n\n\nif __name__ == '__main__':\n camera = Camera_Connect_Object()\n camera.addr_port[0] = \"127.0.0.1\"\n camera.addr_port = tuple(camera.addr_port)\n camera.Socket_Connect()\n camera.Get_Data(camera.interval)" ]
[ [ "numpy.fromstring" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
starsky/pymlearn
[ "f9687882aef401a7ecceab4537bdc15a44cf1356" ]
[ "tests/test_theano_loss_functios.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom context import _loss_func_semi_vectorized\nfrom context import _loss_func_theano\nimport unittest\nimport sklearn.preprocessing\nimport theano\nimport numpy as np\n\n\nclass TheanoLossFunctionsTestSuite(unittest.TestCase):\n \"\"\"Advanced test cases.\"\"\"\n\n def test_hinge_loss(self):\n W = np.random.random((10, 1000)).astype(theano.config.floatX)\n X = np.random.random((10000, 1000)).astype(theano.config.floatX)\n Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]\n to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()\n Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T\n\n reference_loss = _loss_func_semi_vectorized.hinge_loss(W, X, Y)\n reference_gradient = _loss_func_semi_vectorized.hinge_loss_derivatives(W, X, Y)\n\n hinge_loss, hinge_loss_derivatives = _loss_func_theano._compile_hinge_loss_func(compile_=True)\n\n loss = hinge_loss(W, X, Y)\n gradient = hinge_loss_derivatives(W, X, Y)\n\n np.testing.assert_almost_equal(reference_loss, loss)\n np.testing.assert_array_almost_equal(reference_gradient, gradient)\n\n def test_softmax_loss(self):\n W = np.random.random((10, 1000)).astype(theano.config.floatX)\n X = np.random.random((10000, 1000)).astype(theano.config.floatX)\n Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]\n to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()\n Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T\n\n reference_loss = _loss_func_semi_vectorized.softmax_loss(W, X, Y)\n reference_gradient = _loss_func_semi_vectorized.softmax_loss_derivatives(W, X, Y)\n\n softmax_loss, softmax_loss_derivatives = _loss_func_theano._compile_softmax_loss_func(compile_=True)\n loss = softmax_loss(W, X, Y)\n gradient = softmax_loss_derivatives(W, X, Y)\n\n np.testing.assert_almost_equal(reference_loss, loss)\n np.testing.assert_array_almost_equal(reference_gradient, gradient)\n\n def test_l1_penalty(self):\n W = np.random.random((10, 1000))\n\n reference_loss = _loss_func_semi_vectorized.l1_penalty(W)\n reference_gradient = _loss_func_semi_vectorized.l1_penalty_der(W)\n\n l1_penalty, l1_penalty_der = _loss_func_theano._compile_l1_penalty_func(compile_=True)\n loss = l1_penalty(W)\n gradient = l1_penalty_der(W)\n\n np.testing.assert_almost_equal(reference_loss, loss)\n np.testing.assert_array_almost_equal(reference_gradient, gradient)\n\n def test_l2_penalty(self):\n W = np.random.random((10, 1000))\n\n reference_loss = _loss_func_semi_vectorized.l2_penalty(W)\n reference_gradient = _loss_func_semi_vectorized.l2_penalty_der(W)\n loss_func, gradient_func = _loss_func_theano._compile_l2_penalty_func(compile_=True)\n loss = loss_func(W)\n gradient = gradient_func(W)\n\n np.testing.assert_almost_equal(reference_loss, loss)\n np.testing.assert_array_almost_equal(reference_gradient, gradient)\n\n\n def test_get_loss_function(self):\n W = np.random.random((10, 1000)).astype(theano.config.floatX).ravel()\n X = np.random.random((10000, 1000)).astype(theano.config.floatX)\n Y = np.random.randint(0, 10, 10000).astype(np.int32)[:, np.newaxis]\n to_binary_label = sklearn.preprocessing.MultiLabelBinarizer()\n Y = to_binary_label.fit_transform(Y).astype(theano.config.floatX).T\n reg_values = [0, 0.5]\n loss_values = ['softmax', 'hinge']\n penalty_values = ['L1', 'L2']\n\n for reg in reg_values:\n for loss_fn_name in loss_values:\n for penalty in penalty_values:\n loss_ref_fun, loss_der_ref_fun = _loss_func_semi_vectorized.get_loss_function(loss_fn_name, penalty)\n 
reference_loss = loss_ref_fun(W, X, Y, reg)\n reference_gradient = loss_der_ref_fun(W, X, Y, reg)\n\n loss_fun, loss_der_fun = _loss_func_theano.get_loss_function(loss_fn_name, penalty)\n loss = loss_fun(W, X, Y, reg)\n gradient = loss_der_fun(W, X, Y, reg)\n np.testing.assert_almost_equal(reference_loss, loss)\n np.testing.assert_array_almost_equal(reference_gradient, gradient)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.randint", "numpy.testing.assert_almost_equal", "numpy.random.random", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mmore500/dishtiny
[ "9fcb52c4e56c74a4e17f7d577143ed40c158c92e" ]
[ "microbenchmarks/script/consolidate.py" ]
[ "from collections import defaultdict\nfrom frozendict import frozendict\nimport json\nfrom keyname import keyname as kn\nimport pandas as pd\nimport sys\n\ndef load_json(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data\n\nres = defaultdict(list)\n\nfor filename, entry in [\n (filename, load_json(filename))\n for filename in sys.argv[1:]\n ]:\n for benchmark in entry['benchmarks']:\n res[frozendict({\n 'run_type' : benchmark['run_type'],\n })].append({\n 'Task' : kn.unpack(filename)['task'],\n 'Statistic' : (\n benchmark['aggregate_name']\n if 'aggregate_name' in benchmark\n else 'measurement'\n ),\n 'Wall Nanoseconds' : benchmark['real_time'],\n 'CPU Nanoseconds' : benchmark['cpu_time'],\n 'Num Cells' : int(benchmark['Num Cells']),\n 'Num Threads' : int(benchmark['Num Threads']),\n 'Cell-Updates Executed' : int(benchmark['Cell-Updates Executed']),\n 'CPU Core-Nanoseconds per Cell Update' : (\n benchmark['cpu_time'] * benchmark['iterations']\n / ( benchmark['Cell-Updates Executed'] or 1 )\n ) ,\n 'Wall Core-Nanoseconds per Cell Update' : (\n benchmark['Num Threads']\n * benchmark['real_time'] * benchmark['iterations']\n / ( benchmark['Cell-Updates Executed'] or 1 )\n ) ,\n })\n\nfor run_specs, rows in res.items():\n pd.DataFrame(rows).to_csv(\n kn.pack({\n 'run_type' : run_specs['run_type'],\n 'ext' : '.csv',\n }),\n index=False,\n )\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
vanyaland/deep-learning-foundation
[ "05a0df56c8223547bd7e8b62653a67f265c8e5ca" ]
[ "1-neural-networks/gradient-descent/multiple/gradient_descent.py" ]
[ "import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\n# Weight update can be calculated as:\n# Δwi = ηδxi\n#\n# with the error term δ as:\n# δ = (y − y^) f′(h) = (y − y^)f′(∑wixi)\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n# TODO: We haven't provided the sigmoid_prime function like we did in\n# the previous lesson to encourage you to come up with a more\n# efficient solution. If you need a hint, check out the comments\n# in solution.py from the previous lecture.\n\n# Use to same seed to make debugging easier\nnp.random.seed(42)\n\nn_records, n_features = features.shape\nlast_loss = None\n\n# Initialize weights\nweights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n\n# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\n\nfor e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features.values, targets):\n # Loop through all records, x is the input, y is the target\n\n # Activation of the output unit\n # Notice we multiply the inputs and the weights here \n # rather than storing h as a separate variable \n output = sigmoid(np.dot(x, weights))\n\n # The error, the target minus the network output\n error = y - output\n\n # The error term\n # Notice we calulate f'(h) here instead of defining a separate\n # sigmoid_prime function. This just makes it faster because we\n # can re-use the result of the sigmoid function stored in\n # the output variable\n error_term = error * output * (1 - output)\n\n # The gradient descent step, the error times the gradient times the inputs\n del_w += error_term * x\n\n # Update the weights here. The learning rate times the \n # change in weights, divided by the number of records to average\n weights += learnrate * del_w / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n out = sigmoid(np.dot(features, weights))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n\n# Calculate accuracy on test data\ntes_out = sigmoid(np.dot(features_test, weights))\npredictions = tes_out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))\n" ]
[ [ "numpy.dot", "numpy.random.seed", "numpy.random.normal", "numpy.mean", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iSiddharth20/Covid-19-Hotspot-Detection
[ "89b7757bae248e749b7c04cc0ed83f93c7dd7a5d" ]
[ "Python_Codes/DataSource.py" ]
[ "# Importing Libraries\nimport requests\nimport pandas as pd \n\n# Data Source\nsource_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vSz8Qs1gE_IYpzlkFkCXGcL_BqR8hZieWVi-rphN1gfrO3H4lDtVZs4kd0C3P8Y9lhsT1rhoB-Q_cP4/pub?output=xlsx'\n\n# Getting Data from Source\nr = requests.get(source_url, allow_redirects=True)\n\n# Writing Data to File for easy Access and Security\nopen('dataset.xlsx', 'wb').write(r.content)\n\n# Getting Data from File to Pandas Data Frame\nraw_data = pd.read_excel('dataset.xlsx',sheet_name='Raw_Data')\n\n# Cleaning and Obtaining just the required columns of the data\ndata = raw_data[['Detected State','Detected City']]\n\n# Removing all columns with missing values\ndata = data.dropna()\n\n# Resetting index to avoid error\ndata.reset_index(inplace = True)\n\n# Removing the extra column created by resetting index\ndata.drop(columns = ['index'],inplace = True)\n\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
neighthan/pytorch-lightning
[ "03a73b37bc25d66ff78fe3e71e64e2173a04b429" ]
[ "pytorch_lightning/trainer/training_loop.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import TrainerState\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing\nfrom pytorch_lightning.utilities.distributed import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.finite_checks import detect_nan_parameters\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode: str):\n self.trainer = trainer\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n\n def on_trainer_init(\n self,\n max_epochs: Optional[int],\n min_epochs: Optional[int],\n max_steps: Optional[int],\n min_steps: Optional[int],\n num_sanity_val_steps: int,\n ) -> None:\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = 
self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n def setup_fit(self, model, train_dataloader=None, val_dataloaders=None, datamodule=None):\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n # attach model log function to callback\n self.trainer.callback_connector.attach_model_logging_functions(model)\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n # reset bookkeeping\n self.trainer._running_stage = None\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last and cb.verbose for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.lightning_module\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def check_early_stopping_callback(self, should_update):\n # TODO bake this logic into the EarlyStopping callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]\n model = self.trainer.lightning_module\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.lightning_module\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, 
batch_end_outputs, batch, batch_idx, dataloader_idx):\n batch_end_outputs = [opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out)]\n\n processed_batch_end_outputs = TrainLoop._prepare_outputs(batch_end_outputs, batch_mode=True)\n\n # hook\n self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n hook_overridden = (\n is_overridden(\"training_epoch_end\", model=self.trainer.lightning_module)\n or is_overridden(\"on_train_epoch_end\", model=self.trainer.lightning_module)\n )\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n training_step_output.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.lightning_module\n\n with 
self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator.training_step(args)\n self.trainer.accelerator.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n if training_step_output_for_epoch_end is None:\n return\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.automatic_optimization:\n # accumulate loss. if accumulate_grad_batches==1, no effect\n closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n result = self.trainer.lightning_module._results\n\n loss = None\n hiddens = None\n result[\"extra\"] = {}\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n if hiddens is not None:\n hiddens = hiddens.detach()\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n # map to results under the hood\n result.minimize = loss\n self.trainer.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu()\n\n return training_step_output_for_epoch_end, result\n\n @staticmethod\n def _prepare_outputs(\n outputs: List[List[List[Result]]],\n batch_mode: bool,\n ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]:\n \"\"\"\n Extract required information from batch or epoch end results.\n\n Args:\n outputs: A 3-dimensional list of ``Result`` objects with dimensions:\n [optimizer outs][batch outs][tbptt steps].\n\n batch_mode: If True, ignore the batch output dimension.\n\n Returns:\n The cleaned outputs with ``Result`` objects converted to dictionaries. 
All list dimensions of size one will\n be collapsed.\n \"\"\"\n processed_outputs = []\n for opt_outputs in outputs:\n # handle an edge case where an optimizer output is the empty list\n if len(opt_outputs) == 0:\n continue\n\n processed_batch_outputs = []\n\n if batch_mode:\n opt_outputs = [opt_outputs]\n\n for batch_outputs in opt_outputs:\n processed_tbptt_outputs = []\n\n for tbptt_output in batch_outputs:\n out = tbptt_output.extra\n out['loss'] = tbptt_output.minimize\n processed_tbptt_outputs.append(out)\n\n # if there was only one tbptt step then we can collapse that dimension\n if len(processed_tbptt_outputs) == 1:\n processed_tbptt_outputs = processed_tbptt_outputs[0]\n processed_batch_outputs.append(processed_tbptt_outputs)\n\n # batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer\n if batch_mode:\n processed_batch_outputs = processed_batch_outputs[0]\n processed_outputs.append(processed_batch_outputs)\n\n # if there is only one optimiser then we collapse that dimension\n if len(processed_outputs) == 1:\n processed_outputs = processed_outputs[0]\n return processed_outputs\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.lightning_module\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator.clip_gradients(\n optimizer, self.trainer.gradient_clip_val,\n gradient_clip_algorithm=self.trainer.gradient_clip_algorithm\n )\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.lightning_module\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.lightning_module\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in 
range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n val_loop_called = False\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(\n epoch_output,\n batch_output.training_step_output_for_epoch_end,\n batch,\n batch_idx,\n dataloader_idx,\n )\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.validating = True\n self.trainer.run_evaluation()\n self.trainer.training = True\n val_loop_called = True\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # handle epoch_output on epoch end\n self.on_train_epoch_end(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output)\n\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n # update epoch level lr_schedulers if no val loop outside train loop is triggered\n if (val_loop_called and not should_check_val) or should_train_only:\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n\n if should_train_only:\n self.check_checkpoint_callback(True)\n self.check_early_stopping_callback(True)\n\n if should_check_val:\n self.trainer.validating = True\n self.trainer.run_evaluation(on_epoch=True)\n self.trainer.training = True\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None:\n # inform logger the batch 
loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n # prepare epoch output\n processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False)\n\n # get the model and call model.training_epoch_end\n model = self.trainer.lightning_module\n\n if is_overridden('training_epoch_end', model=model):\n # run training_epoch_end\n # refresh the result for custom logging at the epoch level\n model._current_fx_name = 'training_epoch_end'\n\n # lightningmodule hook\n training_epoch_end_output = model.training_epoch_end(processed_epoch_output)\n\n if training_epoch_end_output is not None:\n raise MisconfigurationException(\n 'training_epoch_end expects a return of None. '\n 'HINT: remove the return statement in training_epoch_end'\n )\n\n # capture logging\n self.trainer.logger_connector.cache_logged_metrics()\n\n # call train epoch end hooks\n self.trainer.call_hook('on_train_epoch_end', processed_epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # bookkeeping\n self.trainer.hiddens = None\n\n optimizers = self.prepare_optimizers()\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in range(len(optimizers))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in optimizers:\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform dpp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm accros opt_idx and 
split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if not self._skip_backward and self.automatic_optimization:\n is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0\n\n if is_first_batch_to_accumulate:\n self.on_before_zero_grad(optimizer)\n self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)\n\n # backward pass\n if result is not None:\n with self.trainer.profiler.profile(\"backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self._check_finite(result.loss)\n\n else:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.lightning_module.untoggle_optimizer(opt_idx)\n\n return result\n\n def _check_finite(self, loss: torch.Tensor) -> None:\n if not torch.isfinite(loss).all():\n raise ValueError(f'The loss returned in `training_step` is {loss}.')\n model = self.trainer.lightning_module\n detect_nan_parameters(model)\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n 
self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or num_training_batches_reached:\n self.trainer.global_step = self.trainer.accelerator.update_global_step(\n self.trainer.total_batch_idx, self.trainer.global_step\n )\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0\n\n should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop\n or is_last_batch_for_infinite_dataset\n ) if on_epoch else (is_val_check_batch and not epoch_end_val_check)\n\n return should_check_val and can_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n if not self.automatic_optimization:\n self.warning_cache.warn(\n \"`training_step` hook signature has changed in v1.3.\"\n \" `optimizer_idx` argument has been removed in case of manual optimization. 
Support for\"\n \" the old signature will be removed in v1.5\", DeprecationWarning\n )\n args.append(opt_idx)\n elif not self.trainer.has_arg(\"training_step\", \"optimizer_idx\") and self.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but\"\n ' `training_step` is missing the `optimizer_idx` argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.lightning_module\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n" ]
[ [ "torch.isfinite", "numpy.argmax", "numpy.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
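Note: the cumulative-frequency lookup in `get_optimizers_iterable` in the record above is easy to check in isolation. Below is a minimal, self-contained restatement of that trick; the helper name `pick_optimizer_idx` is ours, not Lightning's.

import numpy as np

def pick_optimizer_idx(optimizer_frequencies, total_batch_idx):
    # position of this batch inside one full cycle of the frequency schedule
    freq_cumsum = np.cumsum(optimizer_frequencies)
    place = total_batch_idx % freq_cumsum[-1]
    # index of the first cumulative count strictly greater than the current place
    return int(np.argmax(freq_cumsum > place))

# frequencies [2, 1]: optimizer 0 runs for two batches, optimizer 1 for one, repeating
assert [pick_optimizer_idx([2, 1], b) for b in range(6)] == [0, 0, 1, 0, 0, 1]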
IbrahimEl-Shal/Digital_Video_Processing
[ "ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f" ]
[ "3. Motion Estimation/Assignment_3_ME.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 4 00:32:16 2019\n\n@author: Ibrahim El-Shal\nAssignment 3:\n- Develop a Motion Estimation algorithm using Window Search to encode two images in YUV format. \n- Use 8x8 block.\n- Reconstruct the images using Motion Compensation. \n- Compute PSNR between the Source Image and the Reconstructed Images.\n- Compare between the two Algorithms\n- Bonus : you may choose more than one matching criteria\n- Bonus : you may choose more than these two algorithms \n\"\"\"\n# In[1]: Import Packages\n\nimport os\nimport sys\nimport cv2\nimport math\nimport time\nimport numpy as np\n\n# In[1-2]:\n \nGRID_SIZE = 8\nOVERLAPPED_WIDTH = 10\nOVERLAPPED_HEIGHT = 10\n\n# In[2]: Functions of Image\n\ndef ReadFrames(FrameNumber):\n return(cv2.imread(\"frames/frame%d.jpg\"%FrameNumber))\n \ndef RGB2YUV(RGBImage):\n return(cv2.cvtColor(RGBImage, cv2.COLOR_BGR2YUV)) \n \ndef YUV2RGB(YUVImage):\n return(cv2.cvtColor(YUVImage,cv2.COLOR_YUV2BGR)) \n \ndef Split_Channels(img):\n return (cv2.split((img))) \n \ndef Create_Dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef Save_Image(Name,Image):\n return(cv2.imwrite(Image, Name))\n \ndef Get_PSNR(arr):\n mse = (arr ** 2).mean()\n psnr = 10 * math.log10((255 ** 2) / mse)\n return psnr\n\ndef psnr(orignal_picture,compressed_picture):\n# peak signal-to-noise ratio\n mse =0\n #mean squared error\n for i in range(len(orignal_picture)):\n for j in range(len(orignal_picture[i])):\n mse=mse+(orignal_picture[i][j]-compressed_picture[i][j])*(orignal_picture[i][j]-compressed_picture[i][j])\n mse=mse/(len(orignal_picture)*len(orignal_picture[i]))\n mx_value=0\n for lst in orignal_picture:\n value=max(lst)\n if value > mx_value:\n mx_value=value\n psnr_=10*math.log( mx_value*mx_value/ mse, 10)\n return psnr_\n \n# In[3]: Convert Video Into frames\n \ndef Video2Frames(VideoName):\n \n cap = cv2.VideoCapture(VideoName)\n Create_Dir('./frames/') \n frame_counter = 0\n \n if not cap.isOpened():\n print('{} not opened'.format(VideoName))\n sys.exit(1)\n\n while(1):\n return_flag, frame = cap.read()\n if not return_flag:\n print('Video Reach End')\n break\n #Start\n cv2.imwrite('./frames/' + 'frame%d.jpg' % frame_counter, frame)\n frame_counter += 1\n #End\n cap.release()\n return(1)\n\n# In[4]: Get the needed search area \n \ndef Search_Range(w_min, h_min, w_max, h_max, curr_w, curr_h, w_size, h_size):\n \n start_w = curr_w - w_size if curr_w - w_size > w_min else w_min\n end_w = curr_w + w_size if curr_w + w_size < w_max else curr_w\n start_h = curr_h - h_size if curr_h - h_size > h_min else h_min\n end_h = curr_h + h_size if curr_h + h_size < h_max else curr_h\n return (start_w, start_h, end_w, end_h)\n\n# In[5]: Get Needed blocked 8x8\n \ndef Needed_Blocks(Frame):\n \n img_blocks = []\n img_blocks_idx = [] \n\n # shape of image\n height, width = Frame.shape\n\n for h_idx in range(0, height, GRID_SIZE):\n micro_block_per_row = []\n micro_block_idx_per_row = []\n for w_idx in range(0, width, GRID_SIZE):\n micro_block_per_row.append(Frame[h_idx: h_idx + GRID_SIZE, w_idx: w_idx + GRID_SIZE])\n micro_block_idx_per_row.append((w_idx, h_idx))\n img_blocks_idx.append(micro_block_idx_per_row)\n img_blocks.append(micro_block_per_row)\n\n return(img_blocks_idx, img_blocks)\n \n# In[6]:Get the Movtion Vector of each picked Block to comparison with others \n \ndef MotionVector(Current_Block, Next_Frame, x_micro, y_micro, search_area):\n \n mv = (0, 0)\n min_value = np.inf\n \n start_w, start_h, end_w, end_h = 
search_area\n \n for y in range(start_h, end_h + 1):\n for x in range(start_w, end_w + 1):\n # search range \n window_block = Next_Frame[y:y + GRID_SIZE, x:x + GRID_SIZE]\n value = np.sum(np.abs(Current_Block - window_block))\n\n if value < min_value:\n mv = (x - x_micro, y - y_micro)\n min_value = value \n return(mv)\n \n# In[7]: \n\ndef Block_Matching(curr_frame, next_frame):\n\n height, width = curr_frame.shape\n block_idx_list, block_list = Needed_Blocks(curr_frame)\n\n frame_motion_vector = [[0 for j in range(len(block_idx_list[0]))] for i in range(len(block_list))]\n \n for h in range(len(block_idx_list)):\n for w in range(len(block_list[0])):\n # search range \n micro_x, micro_y = block_idx_list[h][w]\n Grid_Block = block_list[h][w]\n\n search_range = Search_Range(0, 0, width, height, micro_x, micro_y, GRID_SIZE, GRID_SIZE)\n \n frame_motion_vector[h][w] = MotionVector(Grid_Block,next_frame,\n micro_x, micro_y, search_range)\n\n return frame_motion_vector\n\n# In[8]: \n \ndef TSS_Block_Matching(curr_frame, next_frame):\n\n TSS_GRID_SIZE = GRID_SIZE\n height, width = curr_frame.shape\n block_idx_list, block_list = Needed_Blocks(curr_frame)\n\n frame_motion_vector = [[(0,0) for j in range(len(block_idx_list[0]))] for i in range(len(block_list))]\n \n for h in range(len(block_idx_list)-1): \n for w in range(len(block_list[0])-1): \n # search range \n micro_x, micro_y = block_idx_list[h][w]\n Grid_Block = block_list[h][w] \n TSS_GRID_SIZE = GRID_SIZE\n \n for i in range(3):\n TSS_GRID_SIZE = TSS_GRID_SIZE // 2\n search_range = Search_Range(0, 0, width, height, micro_x, micro_y,\n TSS_GRID_SIZE, TSS_GRID_SIZE)\n \n frame_motion_vector[h][w] = MotionVector(Grid_Block,next_frame,\n micro_x, micro_y, search_range)\n \n micro_x, micro_y = frame_motion_vector[h][w]\n \n return frame_motion_vector\n\n# In[8]:\n\ndef Overlapped_Motion_Vector(Current_frame, motion_vector):\n \n height, width = Current_frame.shape\n Current_frame = Current_frame.astype(np.uint32)\n \n overlapped_range = [[[] for j in range(len(motion_vector[i]))] for i in range(len(motion_vector))]\n overlapped_width = int((OVERLAPPED_WIDTH - GRID_SIZE) / 2)\n overlapped_height = int((OVERLAPPED_HEIGHT - GRID_SIZE) / 2)\n\n overlapped_motion_vector = [[[] for j in range(width)] for i in range(height)]\n\n for h in range(0, int(height / GRID_SIZE)):\n for w in range(0, int(width / GRID_SIZE)):\n temp_w = w * GRID_SIZE\n temp_h = h * GRID_SIZE\n s_x = temp_w - overlapped_width if temp_w - overlapped_width >= 0 else temp_w\n s_y = temp_h - overlapped_height if temp_h - overlapped_height >= 0 else temp_h\n e_x = (w + 1) * GRID_SIZE\n e_x = e_x + overlapped_width if e_x + overlapped_width < width else e_x\n e_y = (h + 1) * GRID_SIZE\n e_y = e_y + overlapped_height if e_y + overlapped_height < height else e_y\n overlapped_range[h][w] = (motion_vector[h][w], [[s_x, s_y], [e_x, e_y]])\n for y in range(s_y, e_y):\n for x in range(s_x, e_x):\n overlapped_motion_vector[y][x].append(motion_vector[h][w])\n \n return(overlapped_motion_vector)\n \n# In[9]:\n \n#Function to reconstruct a frame from a reference frame given the motion vectors in a macroblock \n#Inputs: Reference Frame, Macroblocks containing motion vectors\n#Outputs:reconstructed_frame \n\ndef Create_Compressed_Image(Curr_frame, Post_frame, overlapped_MV):\n \n height, width = Curr_frame.shape\n Post_frame = Post_frame.astype(np.uint32)\n interpolated_frame = [[0 for j in range(width)] for i in range(height)]\n\n for y in range(height):\n for x in range(width):\n sum = 0\n for mv 
in overlapped_MV[y][x]:\n                \n                prev_y = y + mv[1]\n                if prev_y >= height or prev_y < 0:\n                    prev_y = 0 if prev_y < 0 else height - 1\n\n                prev_x = x + mv[0]\n                if prev_x >= width or prev_x < 0:\n                    prev_x = 0 if prev_x < 0 else width - 1\n\n                next_y = y - mv[1]\n                if next_y >= height or next_y < 0:\n                    next_y = 0 if next_y < 0 else height - 1\n\n                next_x = x - mv[0]\n                if next_x >= width or next_x < 0:\n                    next_x = 0 if next_x < 0 else width - 1\n\n                sum += Curr_frame[prev_y][prev_x] + Post_frame[next_y, next_x]\n\n            l = len(overlapped_MV[y][x]) * 2\n            res = sum / l\n            res = np.array(res).T\n            \n            interpolated_frame[y][x] = res.astype(np.uint8)\n\n    Final_Image = np.array(interpolated_frame)\n    return(Final_Image)\n\n# In[10]:\n\ndef Window_Full_Search(): \n\n    current_frame = ReadFrames(0)\n    next_frame = ReadFrames(1)\n\n    #Convert to YUV Image\n    current_yuv = RGB2YUV(current_frame)\n    next_yuv = RGB2YUV(next_frame)\n\n    ###Get Channels\n    curr_Y, curr_U, curr_V = Split_Channels(current_yuv)\n    next_Y, next_U, next_V = Split_Channels(next_yuv)\n\n    Mv = Block_Matching(curr_Y,next_Y)\n    Overlapped_Mv = Overlapped_Motion_Vector(curr_Y, Mv)\n    Img = Create_Compressed_Image(curr_Y, next_Y, Overlapped_Mv) \n    \n    return(Img)\n\n\ndef TSS_Search(): \n\n    current_frame = ReadFrames(0)\n    next_frame = ReadFrames(1)\n\n    #Convert to YUV Image\n    current_yuv = RGB2YUV(current_frame)\n    next_yuv = RGB2YUV(next_frame)\n\n    ###Get Channels\n    curr_Y, curr_U, curr_V = Split_Channels(current_yuv)\n    next_Y, next_U, next_V = Split_Channels(next_yuv)\n\n    Save_Image(curr_Y,\"Original Img.jpg\")\n    Mv = TSS_Block_Matching(curr_Y,next_Y)\n    Overlapped_Mv = Overlapped_Motion_Vector(curr_Y, Mv)\n    Img = Create_Compressed_Image(curr_Y, next_Y, Overlapped_Mv) \n    \n    return(Img)\n\n# In[11]:\n    \ndef main():\n    \n    #Video2Frames('./video.mp4')\n    \n    start = time.time()\n    WinImg = Window_Full_Search()\n    end = time.time()\n    res_psnr = Get_PSNR(WinImg)\n    print('PSNR at Window Matching:',res_psnr)\n    print('Window Matching Running Time:',(end - start)) \n\n    start = time.time()\n    TssImg = TSS_Search()\n    end = time.time()\n    res_psnr = Get_PSNR(TssImg)\n    print('\\nPSNR at TSS:',res_psnr)\n    print('TSS Running Time:',(end - start)) \n    \n    return(WinImg,TssImg)\n\n# In[12]:\n\n## call the main function\nif __name__ == '__main__':\n    WinImg,TssImg = main()\n\nSave_Image(WinImg,\"Img of Window.jpg\")\nSave_Image(TssImg,\"Img of Three Step.jpg\")" ]
[ [ "numpy.array", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
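Note: two details of the motion-estimation record above are worth flagging. `Get_PSNR(arr)` measures the reconstruction against an all-zero reference rather than against the source frame the assignment asks for, and `np.abs(Current_Block - window_block)` runs on uint8 arrays, where subtraction wraps around. A minimal sketch of both computations done in signed/float arithmetic (function names here are illustrative, not from the repo):

import numpy as np

def psnr(reference, reconstructed, peak=255.0):
    # compute the error in float so uint8 frames cannot wrap around
    err = reference.astype(np.float64) - reconstructed.astype(np.float64)
    mse = np.mean(err ** 2)
    return float('inf') if mse == 0 else 10.0 * np.log10(peak ** 2 / mse)

def sad(block_a, block_b):
    # sum of absolute differences, the matching criterion used by MotionVector
    return np.sum(np.abs(block_a.astype(np.int32) - block_b.astype(np.int32)))

a = np.array([[10, 250]], dtype=np.uint8)
b = np.array([[12, 0]], dtype=np.uint8)
assert sad(a, b) == 252  # direct uint8 subtraction would wrap to 254 + 250 = 504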
woosal1337/cv2
[ "cae4ad1e3ba4259507acde4db74559a726b09281" ]
[ "drawing-shapes/shifting-parameters-animations.py" ]
[ "import cv2\nimport numpy as np\nimport random\n\nimage_path = \"../assets/img.png\"\n\nimage = cv2.imread(image_path)\n# image.shape is (height, width, channels); cv2.resize expects (width, height)\nimage = cv2.resize(image, (int(image.shape[1] * 0.25), int(image.shape[0] * 0.25)))\n\nx1, x2, x3, x4, x5, x6, y1, y2, y3, y4, y5, y6 = 25, 25, 110, 200, 200, 110, 70, 160, 200, 160, 70, 20\ncolors = [(0, 255, 0), (255, 0, 0), (0, 0, 255), (122, 59, 199), (222, 222, 222), (254, 90, 199)]\n\nfor i in range(100):\n    color = random.choice(colors)\n\n    # translate the polygon right while shifting half the vertices up and half down\n    x1 += 4\n    x2 += 4\n    x3 += 4\n    x4 += 4\n    x5 += 4\n    x6 += 4\n\n    y1 -= 4\n    y2 -= 4\n    y3 -= 4\n    y4 += 4\n    y5 += 4\n    y6 += 4\n\n    pts = np.array([[x1, y1], [x2, y2],\n                    [x3, y3], [x4, y4],\n                    [x5, y5], [x6, y6]],\n                   np.int32)\n\n    image = cv2.polylines(image, [pts],\n                          isClosed=True, color=color, thickness=2)\n\n    if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n        break\n\n    cv2.imshow(\"Reading Image\", image)\n\n    cv2.waitKey(50)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
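Note: a stripped-down sketch of the `cv2.polylines` call used in the record above, drawn on a blank canvas so it runs without the `../assets/img.png` input; the output filename is ours.

import numpy as np
import cv2

canvas = np.zeros((240, 320, 3), dtype=np.uint8)
# polylines wants int32 vertices; a list of (N, 2) arrays draws one polygon each
pts = np.array([[60, 50], [160, 40], [220, 120], [120, 200]], dtype=np.int32)
cv2.polylines(canvas, [pts], isClosed=True, color=(0, 255, 0), thickness=2)
cv2.imwrite("polygon.png", canvas)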
Zeng-Lecheng/Music-Genre-Classification
[ "6d489c777431af94f5167808ac4487957743f04b" ]
[ "main/rnn.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, random_split\nimport torch.optim as optim\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\nfrom util import WavData\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n# fixed : pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113\n# use cpu by default, change to cuda if you want and change it back before committing\n# we only debug and ensure everything works well on cpu\ndevice = 'cuda'\n\n# uncomment to run with limited cores\n# torch.set_num_threads(1)\ntorch.manual_seed(0)\n\n\nclass RNNet(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.lstm_1 = nn.LSTM(1, 16, 1, batch_first=True)\n self.lstm_2 = nn.LSTM(32, 12, 1, batch_first=True)\n self.lstm_3 = nn.LSTM(32, 32, 1, batch_first=True)\n self.rnn_1 = nn.RNN(1, 64, 1, batch_first=True)\n self.rnn_2 = nn.RNN(64, 32, 1, batch_first=True)\n self.rnn_3 = nn.RNN(32, 32, 1, batch_first=True)\n self.rnn_4 = nn.RNN(32, 32, 1, batch_first=True)\n self.drop_1 = nn.Dropout(0.5)\n self.drop_2 = nn.Dropout(0.3)\n self.fc_1 = nn.Linear(32, 20)\n self.fc_1_conv = nn.Linear(1776, 20)\n self.fc_2 = nn.Linear(20, 16)\n self.fc_3 = nn.Linear(16, 10)\n\n self.conv_1 = nn.Conv1d(32, 16, 5)\n self.conv_2 = nn.Conv1d(16, 8, 5)\n\n def forward(self, x):\n # ref: https://www.diva-portal.org/smash/get/diva2:1354738/FULLTEXT01.pdf\n x, hc = self.lstm_1(x)\n # x, hc = self.lstm_2(x)\n # out, hc = self.lstm_2(self.drop_1(out))\n # out, hc = self.lstm_3(self.drop_2(out))\n # h_0 = torch.relu(hc[0][0])\n hidden_state = torch.relu(hc[0][0])\n\n # ave_out = torch.sum(x, dim=1) / x.shape[1]\n # x = torch.swapaxes(x, 1, 2)\n # x = torch.relu(torch.max_pool1d(self.conv_1(x), 2))\n # x = torch.relu(torch.max_pool1d(self.conv_2(x), 2))\n # x = torch.flatten(hidden_state, 1)\n # x = torch.relu(self.fc_1_conv(x))\n # x = torch.relu(self.fc_1(hidden_state))\n # x = torch.relu(self.fc_2(x))\n x = torch.sigmoid(self.fc_3(hidden_state))\n return x\n\n\ndef model_train(train_set, test_set,\n epochs: int,\n learning_rate: float,\n batch_size: int,\n weight_decay: float,\n verbose: bool = False,\n test_while_train: bool = True) -> list[float]:\n\n train_dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n net = RNNet().to(device)\n # net.load_state_dict(torch.load('../saved_models/rnn_with_cov_final.pt'))\n optimizer = optim.Adam(net.parameters(), lr=learning_rate, weight_decay=weight_decay)\n criterion = nn.CrossEntropyLoss().to(device)\n\n acc = []\n for epoch in tqdm(range(1, epochs + 1)):\n epoch_loss = 0\n for x_train, y_train in train_dataloader:\n optimizer.zero_grad()\n pred_train = net(x_train)\n batch_loss = criterion(pred_train, y_train)\n batch_loss.backward()\n optimizer.step()\n epoch_loss += batch_loss.item() * len(x_train)\n\n epoch_loss = epoch_loss / len(train_set)\n if test_while_train:\n test_acc = model_test(test_set, net)\n train_acc = model_test(train_set, net)\n acc.append(test_acc)\n writer.add_scalar('Accuracy/test', test_acc, epoch)\n writer.add_scalar('Accuracy/train', train_acc, epoch)\n if verbose:\n print(f'Epoch: {epoch} Loss: {epoch_loss} Accuracy: {test_acc}')\n elif verbose:\n print(f'Epoch: {epoch} Loss: {epoch_loss}')\n\n writer.add_scalar('Loss/train', epoch_loss, epoch)\n\n # Uncomment this if you want to save the trained model.\n # torch.save(net.state_dict(), f'../saved_models/rnn_{datetime.now().strftime(\"%Y%m%d_%H%M%S\")}.pt')\n 
# torch.save(net.state_dict(), '../saved_models/rnn_with_cov_final.pt')\n if not test_while_train:\n acc = [model_test(test_set, net)]\n return acc\n\n\ndef model_test(test_set, net) -> float:\n with torch.no_grad():\n correct_count = 0\n count = 0\n x_test, y_test = next(iter(DataLoader(test_set, batch_size=20, shuffle=True)))\n pred_test = net(x_test)\n\n for i in range(len(pred_test)):\n pred_label = torch.argmax(pred_test[i])\n true_label = torch.argmax(y_test[i])\n if pred_label.item() == true_label.item():\n correct_count += 1\n count += 1\n return correct_count / count\n\n\ndef get_data():\n dataset = WavData('../data/genres_original', device=device)\n train_size = int(len(dataset) * 0.8)\n test_size = len(dataset) - train_size\n train_set, test_set = random_split(dataset, [train_size, test_size])\n return train_set, test_set\n\n\ndef hyperparameter_test():\n train_set, test_set = get_data()\n learning_rate_list = [0.001, 0.0005, 0.0003, 0.0001, 0.00005]\n batch_size_list = [50, 100, 200]\n epochs = 3\n weight_decay = 0.002\n for b in batch_size_list:\n for lr in learning_rate_list:\n acc = model_train(train_set, test_set, epochs=epochs, learning_rate=lr, weight_decay=weight_decay, batch_size=b, verbose=False,\n test_while_train=True)\n plt.plot(range(1, epochs + 1), acc, label=f'batches: {b}, lr: {lr}')\n\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Percent Accuracy on test set\")\n plt.title('Percent Accuracy over Epochs')\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n epochs = 1000\n lr = .0005\n batch = 60\n weight_decay = 0\n train_set, test_set = get_data()\n writer = SummaryWriter(comment=f'lr{lr}_batch{batch}_l2{weight_decay}')\n acc = model_train(train_set, test_set, epochs=epochs, learning_rate=lr, batch_size=batch, weight_decay=weight_decay, verbose=False,\n test_while_train=True)\n" ]
[ [ "matplotlib.pyplot.legend", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "matplotlib.pyplot.title", "torch.nn.LSTM", "torch.manual_seed", "torch.nn.RNN", "torch.utils.data.DataLoader", "torch.argmax", "torch.nn.Linear", "torch.utils.data.random_split", "torch.relu", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.nn.Conv1d", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
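Note: `model_test` in the record above scores a batch by comparing argmax class indices one sample at a time; the same computation vectorizes to a couple of lines. A minimal sketch (the helper name is ours):

import torch

def batch_accuracy(pred, target):
    # pred and target are (batch, n_classes); compare argmax class indices
    pred_labels = torch.argmax(pred, dim=1)
    true_labels = torch.argmax(target, dim=1)
    return (pred_labels == true_labels).float().mean().item()

# two of three predictions correct -> 0.666...
p = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
t = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
assert abs(batch_accuracy(p, t) - 2 / 3) < 1e-6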
Moctader/Master_thesis
[ "1852b05e413d3ab3108d4018d86e35b10497f622" ]
[ "scripts/experiments_runner.py" ]
[ "# Refactored idea from https://dmitryulyanov.github.io/if-you-are-lazy-to-install-slurm/\nfrom joblib import Parallel, delayed\nfrom queue import Queue\nimport os\nimport torch\nfrom functools import partial\nimport argparse\nimport glob\nimport time\nimport subprocess\n\n\ndef experiment_worker(exp_name, queue_obj, data_root, workdir, script, n_threads, log_dir):\n gpu = queue_obj.get()\n exp_fname = exp_name.split('/')[-1].split('.yml')[0]\n print(f'Working on {exp_fname} | GPU {gpu}')\n\n cmd = f\"python {script}\"\n cmd += f\" --n_threads {n_threads}\"\n cmd += f\" --dataset_root {data_root}\"\n cmd += f\" --workdir {workdir}\"\n cmd += f\" --experiment {exp_name}\"\n\n my_env = os.environ.copy()\n my_env['CUDA_VISIBLE_DEVICES'] = f\"{gpu}\"\n\n time.sleep(gpu)\n with open(f'{log_dir}/{exp_fname}.log', 'w') as f_log:\n subprocess.call(cmd.split(), stdout=f_log, stderr=f_log, env=my_env)\n\n queue_obj.put(gpu)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--n_threads_per_task', type=int, default=6)\n parser.add_argument('--data_root', default='')\n parser.add_argument('--workdir', default='')\n parser.add_argument('--script_path', default='')\n parser.add_argument('--log_dir', default='')\n parser.add_argument('--experiment_dir', default='')\n args = parser.parse_args()\n\n n_gpus = torch.cuda.device_count()\n q = Queue(maxsize=n_gpus)\n for i in range(n_gpus):\n q.put(i)\n\n worker = partial(experiment_worker,\n queue_obj=q,\n data_root=args.data_root,\n workdir=args.workdir,\n script=args.script_path,\n n_threads=args.n_threads_per_task,\n log_dir=args.log_dir)\n\n experiments = glob.glob(os.path.join(args.experiment_dir, '*.yml'))\n os.makedirs(args.log_dir, exist_ok=True)\n Parallel(n_jobs=n_gpus, backend=\"threading\")(delayed(worker)(exp) for exp in experiments)\n" ]
[ [ "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
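Note: the scheduling idea in `experiment_worker` above is a token queue: each thread blocks on `Queue.get()` until a GPU index is free and hands it back with `put()` when done. A minimal sketch of the same pattern, with a `try/finally` added so the token is returned even when the job raises (the original returns it only on success):

from queue import Queue
from joblib import Parallel, delayed

def run_on_free_gpu(task_id, gpu_queue):
    gpu = gpu_queue.get()  # block until a GPU token is free
    try:
        print(f"task {task_id} -> GPU {gpu}")  # launch the real job here
    finally:
        gpu_queue.put(gpu)  # return the token even if the job fails

q = Queue()
for g in range(2):  # pretend we have two GPUs
    q.put(g)
Parallel(n_jobs=2, backend="threading")(delayed(run_on_free_gpu)(i, q) for i in range(6))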
zgsbughammer/leaf
[ "7ef36df20d492c558829259611708f413ed80487" ]
[ "models/model.py" ]
[ "\"\"\"Interfaces for ClientModel and ServerModel.\"\"\"\n\nfrom abc import ABC, abstractmethod\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom baseline_constants import ACCURACY_KEY\n\nfrom utils.model_utils import batch_data\nfrom utils.tf_utils import graph_size\n\n\nclass Model(ABC):\n\n\n def __init__(self, seed, lr, optimizer=None, gpu_fraction=0.2):\n\n self.lr = lr\n self._optimizer = optimizer\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.set_random_seed(123 + seed)\n self.features, self.labels, self.train_op, self.eval_metric_ops, self.loss = self.create_model()\n self.saver = tf.train.Saver()\n config=tf.ConfigProto(log_device_placement=False)\n # config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.sess = tf.Session(graph=self.graph, config=config)\n\n self.size = graph_size(self.graph)\n\n with self.graph.as_default():\n self.sess.run(tf.global_variables_initializer())\n\n metadata = tf.RunMetadata()\n opts = tf.profiler.ProfileOptionBuilder.float_operation()\n self.flops = tf.profiler.profile(self.graph, run_meta=metadata, cmd='scope', options=opts).total_float_ops\n\n def set_params(self, model_params):\n with self.graph.as_default():\n all_vars = tf.trainable_variables()\n for variable, value in zip(all_vars, model_params):\n variable.load(value, self.sess)\n\n def get_params(self):\n with self.graph.as_default():\n model_params = self.sess.run(tf.trainable_variables())\n return model_params\n\n @property\n def optimizer(self):\n \"\"\"Optimizer to be used by the model.\"\"\"\n if self._optimizer is None:\n self._optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n\n return self._optimizer\n\n @abstractmethod\n def create_model(self):\n \"\"\"Creates the model for the task.\n\n Returns:\n A 4-tuple consisting of:\n features: A placeholder for the samples' features.\n labels: A placeholder for the samples' labels.\n train_op: A Tensorflow operation that, when run with the features and\n the labels, trains the model.\n eval_metric_ops: A Tensorflow operation that, when run with features and labels,\n returns the accuracy of the model.\n \"\"\"\n return None, None, None, None, None\n\n def train(self, data, num_epochs=1, batch_size=10):\n \"\"\"\n Trains the client model.\n\n Args:\n data: Dict of the form {'x': [list], 'y': [list]}.\n num_epochs: Number of epochs to train.\n batch_size: Size of training batches.\n Return:\n comp: Number of FLOPs computed while training given data\n update: List of np.ndarray weights, with each weight array\n corresponding to a variable in the resulting graph\n \"\"\"\n for _ in range(num_epochs):\n self.run_epoch(data, batch_size)\n\n update = self.get_params()\n comp = num_epochs * (len(data['y'])//batch_size) * batch_size * self.flops\n return comp, update\n\n def run_epoch(self, data, batch_size):\n for batched_x, batched_y in batch_data(data, batch_size):\n \n input_data = self.process_x(batched_x)\n target_data = self.process_y(batched_y)\n \n with self.graph.as_default():\n self.sess.run(self.train_op,\n feed_dict={\n self.features: input_data,\n self.labels: target_data\n })\n\n def test(self, data):\n \"\"\"\n Tests the current model on the given data.\n\n Args:\n data: dict of the form {'x': [list], 'y': [list]}\n Return:\n dict of metrics that will be recorded by the simulation.\n \"\"\"\n x_vecs = self.process_x(data['x'])\n labels = self.process_y(data['y'])\n with self.graph.as_default():\n tot_acc, loss = self.sess.run(\n 
[self.eval_metric_ops, self.loss],\n feed_dict={self.features: x_vecs, self.labels: labels}\n )\n acc = float(tot_acc) / x_vecs.shape[0]\n return {ACCURACY_KEY: acc, 'loss': loss}\n\n def close(self):\n self.sess.close()\n\n @abstractmethod\n def process_x(self, raw_x_batch):\n \"\"\"Pre-processes each batch of features before being fed to the model.\"\"\"\n pass\n\n @abstractmethod\n def process_y(self, raw_y_batch):\n \"\"\"Pre-processes each batch of labels before being fed to the model.\"\"\"\n pass\n\n\nclass ServerModel:\n def __init__(self, model):\n self.model = model\n\n @property\n def size(self):\n return self.model.size\n\n @property\n def cur_model(self):\n return self.model\n\n def send_to(self, clients):\n \"\"\"Copies server model variables to each of the given clients\n\n Args:\n clients: list of Client objects\n \"\"\"\n var_vals = {}\n with self.model.graph.as_default():\n all_vars = tf.trainable_variables()\n for v in all_vars:\n val = self.model.sess.run(v)\n var_vals[v.name] = val\n for c in clients:\n with c.model.graph.as_default():\n all_vars = tf.trainable_variables()\n for v in all_vars:\n v.load(var_vals[v.name], c.model.sess)\n\n def save(self, path='checkpoints/model.ckpt'):\n return self.model.saver.save(self.model.sess, path)\n\n def close(self):\n self.model.close()\n" ]
[ [ "tensorflow.Graph", "tensorflow.RunMetadata", "tensorflow.trainable_variables", "tensorflow.set_random_seed", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.profiler.profile", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.profiler.ProfileOptionBuilder.float_operation" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
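Note: in `Model.train` above, `comp` counts training compute as epochs x number of full batches x batch size x per-pass graph FLOPs; because of the `//`, samples in a trailing partial batch are not counted. A worked example with made-up numbers:

# hypothetical numbers, just to make the unit of `comp` concrete
num_epochs, n_samples, batch_size, flops_per_pass = 5, 1000, 10, 2_000_000
comp = num_epochs * (n_samples // batch_size) * batch_size * flops_per_pass
assert comp == 10_000_000_000  # 1e10 FLOPs of training compute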
leeshinyook/RectangleCropper
[ "c83cbef3d8c0550fd5642f7c2aed759a13fbf131" ]
[ "rectanglecropper/crop.py" ]
[ "from PIL import Image\nfrom collections import deque\nimport numpy as np\nimport warnings\n\n\nclass ImageThresholdUtil:\n \"\"\"\n It has to do with the image threshold. Threshold here means filtering,\n which finds the pixels to be filtered out. So this class has static methods\n related to the threshold and cooperates in context.\n \"\"\"\n\n @staticmethod\n def get_threshold_pixels(row, col, image, rep_count, target_rate) -> list:\n \"\"\"\n It does the job of finding a threshold in the image.\n It has an algorithm that reads an image and calculates\n which pixels are used the most and what their proportions are.\n\n :param row: int\n image's row size\n :param col: int\n image's column size\n :param image: ndarray\n image loaded by Pillow Image module and change to numpy array\n :param rep_count: int\n Indicates the number of candidates. The candidate group is a group of pixels\n that can become threshold points, and is an argument for\n how much this candidate group will be accommodated.\n :param target_rate: int\n Check how many pixels in the image are being used.\n Then divide this number of used pixels by total image and multiply by\n 100 to calculate the percentage. In other words, the ratio of pixels\n is calculated, and it is a setting argument for which ratio to set as a valid value.\n\n :return: ndarray\n Output array which has threshold points\n \"\"\"\n area = row * col\n rep_count = rep_count * -1\n _, counts = np.unique(image, return_counts=True)\n parted_ind = np.argpartition(counts, rep_count)[rep_count:]\n ratio_counts = counts[parted_ind] / area * 100\n threshold_pixels = (ratio_counts > target_rate).nonzero()[0]\n if threshold_pixels is None:\n return []\n return parted_ind[threshold_pixels]\n\n\nclass RectangleImageCrop:\n \"\"\"\n This class was created to crop an image into a rectangle.\n After opening the image, it automatically detects the background, then detects the effective area and crops it.\n Background detection works based on rules. Right now it's based on how many coherent pixels are found.\n Then, the image is converted to a NumPy array rather than a list,\n and the average of the RGB values is calculated and the pixels are merged into one.\n \"\"\"\n\n __pix, __img = None, None\n __row, __col = None, None\n __img_field = None, None\n __min_width, __min_height = None, None\n __direction_row = [0, 1, 0, -1]\n __direction_col = [1, 0, -1, 0]\n __pixel_sensitivity = 5\n __occupancy_rate = 10\n __candidate_threshold_group = 3\n __threshold_pixels = None\n __save_points = []\n\n def open(self, image_path, auto_threshold_detection=True) -> None:\n \"\"\"\n The image will open. The way to open it is using the PIL library.\n And this image will be converted to RGB option.\n\n :param image_path:\n The path where the image file exists.\n :param auto_threshold_detection:\n Option to automatically detect thresholds. When the value of this option is True,\n the threshold is selected based on the rules. In case of False, the function has not been added yet.\n I plan to add an option to let users enter their own thresholds.\n :return: None\n There is no return value. It is assigned to a class internal variable.\n \"\"\"\n self.__img = Image.open(image_path).convert('RGB')\n self.__row, self.__col = self.__img.size\n self.__numpy_mean_variant()\n\n def __numpy_mean_variant(self) -> None:\n \"\"\"\n Reads an image file and converts it to a NumPy array. 
During the conversion process,\n only one value is extracted as the average value of the RGB channels.\n\n :return: None\n There is no return value. It is assigned to a class internal variable.\n \"\"\"\n self.__img_field = np.zeros((self.__col, self.__row))\n average_pixel_values = np.mean(np.array(self.__img), axis=2, keepdims=True)\n self.__pix = np.rint(np.concatenate([average_pixel_values], axis=2))\n\n def __validate_image_size(self, upper_left_point, bottom_right_point) -> bool:\n \"\"\"\n It reads the width and height of the extracted image and verifies\n whether the image meets the conditions.\n\n :return: bool\n It returns True or False according to the verification result.\n \"\"\"\n x1, y1 = upper_left_point\n x2, y2 = bottom_right_point\n height = y2 - y1\n width = x2 - x1\n return width > self.__min_width and height > self.__min_height\n\n def __validate_image_pixel(self, row, col):\n \"\"\"\n Algorithm for traversing image pixels. It checks the validity of image pixels by mixing\n an algorithm called breadth-first search and a rule-based algorithm.\n\n :param row: int\n image's row size\n :param col: int\n image's column size\n :return: list, bool\n Returns a list containing the coordinate values of the image, or False.\n \"\"\"\n self.__img_field[row][col] = 1\n temp_upper_bottom_points = []\n point = [row, col]\n q = deque()\n q.append((row, col))\n temp_upper_bottom_points.append(point)\n while q:\n row_x, row_y = q.popleft()\n point = [row_x, row_y]\n for i in range(4):\n mx = row_x + self.__direction_row[i]\n my = row_y + self.__direction_col[i]\n if 0 <= mx < self.__col and 0 <= my < self.__row:\n if self.__img_field[mx][my] == 0 and self.__validate_pixel(mx, my):\n self.__img_field[mx][my] = 1\n q.append((mx, my))\n temp_upper_bottom_points.append(point)\n if self.__validate_image_size(temp_upper_bottom_points[0], temp_upper_bottom_points[1]):\n return temp_upper_bottom_points\n else:\n return False\n\n def __validate_pixel(self, row, col) -> bool:\n \"\"\"\n Check the validity of the pixel values along with the threshold.\n Pixel sensitivity relates to how much blurring can be achieved at\n a threshold point, and acts as a masking agent.\n\n :param row: int\n image's row size\n :param col: int\n image's column size\n :return: bool\n Returns the validation value as a bool type.\n \"\"\"\n for pixel in self.__threshold_pixels:\n if pixel - self.__pixel_sensitivity <= self.__pix[row, col] <= pixel + self.__pixel_sensitivity:\n return False\n return True\n\n def __iterate_pixel(self) -> None:\n \"\"\"\n A double loop for traversing the pixels. A check is being made on the pixels that have been traversed.\n\n :return: None\n Save the upper and lower pixel positions of the image in the member variable save_points.\n It doesn't return any value. Save the upper and lower pixel positions of the image\n If you want to save the image based on the extracted crop points values, call the 'save' method.\n If you want to receive only the crop points value, call 'get_crop_points'.\n \"\"\"\n for j in range(self.__row):\n for i in range(self.__col):\n if self.__img_field[i][j] == 0 and self.__validate_pixel(i, j) is True:\n result = self.__validate_image_pixel(i, j)\n if result:\n self.__save_points.append(result)\n\n def crop(self, min_crop_width=100, min_crop_height=100, occupancy_rate=10, candidate_threshold_group=3, pixel_sensitivity=10):\n \"\"\"\n This method cuts the image into a rectangular shape. 
A separate function call is required to save the image\n\n :param min_crop_width: int\n The minimum width of the image you want to crop. Values below this are ignored.\n :param min_crop_height: int\n The minimum height of the image you want to crop. Values below this are ignored.\n :param occupancy_rate: int\n When choosing a threshold, it is affected by the frequency of pixels across the image.\n It indicates how much of a single or multiple background pixels that are not\n in the image to be cropped out of the overall image. For example, if the percentage of white background\n in the entire image is more than 10%, this number can be regarded as 10.\n :param candidate_threshold_group: int\n It is an option to decide how many threshold candidates to extract when calculating\n the threshold along with the above occupancy_rate.\n For example, if this value is 5, up to 5 threshold pixels are extracted.\n :param pixel_sensitivity: int\n While traversing the pixels, the verification process for the threshold is included,\n and it is an option for how much blur to apply this threshold.\n :exception TypeError: If the arguments are not int.\n :exception ValueError: If the arguments are not positive, or If the argument values are too large or too small\n :exception RuntimeError: If threshold points are emtpy.\n :return: None\n Save the upper and lower pixel positions of the image in the member variable save_points.\n \"\"\"\n\n if not isinstance(min_crop_width, int):\n raise TypeError(\"min_width must be a int\")\n if min_crop_width <= 0:\n raise ValueError(\"min_width must be a positive. bigger than 0\")\n\n if not isinstance(min_crop_height, int):\n raise TypeError(\"min_width must be a int\")\n if min_crop_height <= 0:\n raise ValueError(\"min_width must be a positive. bigger than 0\")\n\n if not isinstance(occupancy_rate, int):\n raise TypeError(\"occupancy_rate must be a int\")\n if occupancy_rate <= 0:\n raise ValueError(\"occupancy_rate must be a positive. bigger than 0\")\n\n if not isinstance(candidate_threshold_group, int):\n raise TypeError(\"candidate_threshold_group must be a int\")\n if candidate_threshold_group <= 0:\n raise ValueError(\"candidate_threshold_group must be a positive. bigger than 0\")\n\n if not isinstance(pixel_sensitivity, int):\n raise TypeError(\"candidate_threshold_group must be a int\")\n if pixel_sensitivity <= 0:\n raise ValueError(\"pixel_sensitivity must be a positive. bigger than 0\")\n\n if self.__occupancy_rate < 7:\n warnings.warn(\n \"occupancy_rate is too low. Performance degradation is possible. \"\n \"It is recommended to have a value of 10 or more.\",)\n if self.__candidate_threshold_group > 5:\n warnings.warn(\n \"candidate_threshold_group is too large. Performance degradation is possible. \"\n \"It is recommended to have a value of 5 or smaller.\",\n )\n\n if self.__pixel_sensitivity > 10:\n warnings.warn(\n \"pixel_sensitivity is too large. Performance degradation is possible. \"\n \"effective range too large. The drop in accuracy can be large. \"\n \"It is recommended to have a value of between 5 and 10\")\n\n if self.__pixel_sensitivity < 5:\n warnings.warn(\n \"pixel_sensitivity is too small. Performance degradation is possible. \"\n \"effective range too large. The drop in accuracy can be large. 
\"\n \"It is recommended to have a value of between 5 and 10\")\n\n self.__min_width = min_crop_height\n self.__min_height = min_crop_width\n self.__occupancy_rate = occupancy_rate\n self.__candidate_threshold_group = candidate_threshold_group\n self.__pixel_sensitivity = pixel_sensitivity\n self.__threshold_pixels = ImageThresholdUtil.get_threshold_pixels(self.__row, self.__col, self.__pix, self.__candidate_threshold_group, self.__occupancy_rate)\n if not self.__threshold_pixels:\n raise RuntimeError(\"threshold pixels are empty. No threshold pixels were detected. \"\n \"drop a occupancy_rate value\")\n self.__iterate_pixel()\n\n def save(self, saved_path, filename, saved_format) -> None:\n \"\"\"\n This method saves the image. Since there can be many images,\n they are saved with an incrementing number after the file name.\n For example, crop_1.jpeg, crop_2.jpeg\n\n :param saved_path: str\n This is the path to the file you want to save.\n :param filename: str\n File name to save\n :param saved_format: format\n File format to save\n :return: None\n \"\"\"\n for idx, point in enumerate(self.__save_points):\n box = (point[0][1], point[0][0], point[1][1], point[1][0])\n crop_img = self.__img.crop(box)\n saved_filename = filename + '_' + str(idx + 1) + '.' + saved_format.lower()\n crop_img.save(saved_path + '/' + saved_filename, format=saved_format)\n\n def get_threshold_pixels(self) -> list:\n \"\"\"\n Get the extracted threshold.\n\n :exception RuntimeError: If threshold points are emtpy.\n :return: list\n extracted threshold points\n \"\"\"\n if self.__threshold_pixels is None:\n raise RuntimeError(\"threshold pixels are None. crop method must be done before this method.\")\n return self.__threshold_pixels\n\n def get_crop_points(self) -> list:\n \"\"\"\n Get the crop points\n\n :return: list\n extracted crop points\n \"\"\"\n points = []\n for point in self.__save_points:\n points.append((point[0][1], point[0][0], point[1][1], point[1][0]))\n return points" ]
[ [ "numpy.unique", "numpy.concatenate", "numpy.argpartition", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
selenecodes/MLWorkshop-Answers
[ "c2b6b56f5b5a5c2b289eefb397692a7aa971e816" ]
[ "MonteCarlo/main.py" ]
[ "import datetime as dt\n\nimport matplotlib.pylab as plt\nimport numpy as np\nimport pandas_datareader.data as web\nimport seaborn as sns\n\nsns.set(context='notebook')\n\n\ndef get_stocks_2019(stock):\n \"\"\"\n Bron: https://www.youtube.com/watch?v=_T0l015ecK4\n \"\"\"\n start = dt.datetime(2019, 1, 1)\n end = dt.datetime(2019, 12, 31)\n\n stocks = web.DataReader(stock, 'yahoo', start, end)['Close']\n\n # Returns last known price & st. dev. of procentual change\n return stocks[-1], stocks.pct_change().std()\n\n\ndef calc_price(start_price=25, days=100, EPS=0.05):\n prices = np.zeros([days + 1])\n prices[0] = start_price\n\n for day in range(days):\n prices[day + 1] = prices[day] * (1 + np.random.normal(0, EPS))\n\n return prices\n\n\ndef calc_all_prices(n=100, start_price=25, days=100, EPS=0.05):\n all_prices = []\n for _ in range(n):\n prices = calc_price(days=days, start_price=start_price, EPS=EPS)\n all_prices.append(prices)\n return all_prices\n\n\ndef plot(data):\n days = len(data[0])\n start_price = data[0][0]\n time = list(np.arange(0, days))\n\n all_prices = data\n\n for price in all_prices:\n sns.lineplot(time, price)\n\n plt.axhline(y=start_price, color='black', linestyle='--', label=\"Startprijs\")\n plt.xlim(0, days)\n plt.ylim(bottom=0)\n plt.title(\"Monte Carlo Methods\")\n plt.xlabel(\"Tijd [dagen]\")\n plt.ylabel(\"Prijs [€]\")\n plt.legend(loc=\"upper left\")\n plt.tight_layout()\n plt.show()\n\n\ndef plot_norm_dist(data):\n latest_prices = []\n\n for price in data:\n latest_prices.append(price[-1])\n\n sns.distplot(latest_prices, rug=True)\n plt.title(f\"Gemiddelde: {np.mean(latest_prices):.2f}, Std.dev.: {np.std(latest_prices):.2f}\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n last_price, std = get_stocks_2019(\"IBM\")\n\n all_prices = calc_all_prices(n=100, days=100, start_price=last_price, EPS=std)\n\n # plot(all_prices)\n # plot_norm_dist(all_prices)\n" ]
[ [ "matplotlib.pylab.tight_layout", "matplotlib.pylab.show", "matplotlib.pylab.xlim", "matplotlib.pylab.legend", "numpy.arange", "matplotlib.pylab.title", "numpy.random.normal", "numpy.std", "numpy.mean", "matplotlib.pylab.ylabel", "matplotlib.pylab.ylim", "matplotlib.pylab.xlabel", "numpy.zeros", "matplotlib.pylab.axhline" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BambooPalace/Federated-Learning-PyTorch
[ "9dbf5ebd65bd4d03a50f42f2d380e5faa8378778" ]
[ "classification/update.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\n\n# from train import criterion\n\n\nclass DatasetSplit(Dataset):\n \"\"\"An abstract Dataset class wrapped around Pytorch Dataset class.\n \"\"\"\n\n def __init__(self, dataset, idxs):\n self.dataset = dataset\n self.idxs = [int(i) for i in idxs]\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, item):\n image, label = self.dataset[self.idxs[item]]\n # return torch.tensor(image), torch.tensor(label)\n # pytorch warning and suggest below \n return image.clone().detach(), torch.tensor(label)\n\n\nclass LocalUpdate(object):\n def __init__(self, args, dataset, idxs, logger):\n self.args = args\n self.logger = logger\n self.trainloader, self.validloader, self.testloader = self.train_val_test(\n dataset, list(idxs))\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n # Default criterion set to NLL loss function\n self.criterion = nn.NLLLoss().to(self.device)\n\n def train_val_test(self, dataset, idxs):\n \"\"\"\n Returns train, validation and test dataloaders for a given dataset\n and user indexes.\n \"\"\"\n # split indexes for train, validation, and test (80, 10, 10)\n idxs_train = idxs[:int(0.8*len(idxs))]\n idxs_val = idxs[int(0.8*len(idxs)):int(0.9*len(idxs))]\n idxs_test = idxs[int(0.9*len(idxs)):]\n\n # mod1: add num_workers, to see if can speed up training. ANS is no for cifar\n trainloader = DataLoader(DatasetSplit(dataset, idxs_train),\n batch_size=self.args.local_bs, num_workers=self.args.num_workers, shuffle=True)\n validloader = DataLoader(DatasetSplit(dataset, idxs_val),\n batch_size=max(len(idxs_val)//10,1), num_workers=self.args.num_workers, shuffle=False) # mod2: minsize 1 if bs ~0\n testloader = DataLoader(DatasetSplit(dataset, idxs_test),\n batch_size=max(len(idxs_test)//10,1), num_workers=self.args.num_workers, shuffle=False)\n return trainloader, validloader, testloader\n\n def update_weights(self, model, global_round):\n # Set mode to train model\n model.train()\n epoch_loss = []\n\n # Set optimizer for the local updates\n if self.args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr,\n momentum=0.5)\n elif self.args.optimizer == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr,\n weight_decay=1e-4)\n\n for iter in range(self.args.local_ep):\n batch_loss = []\n for batch_idx, (images, labels) in enumerate(self.trainloader):\n images, labels = images.to(self.device), labels.to(self.device)\n\n model.zero_grad()\n log_probs = model(images)\n loss = self.criterion(log_probs, labels)\n loss.backward()\n optimizer.step()\n\n\n self.logger.add_scalar('loss', loss.item())\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss)/len(batch_loss))\n if self.args.verbose:\n print('| Global Round : {} | Local Epoch : {} | {} images\\tLoss: {:.6f}'.format(\n global_round, iter,\n len(self.trainloader.dataset),loss.item()))\n print('| Global Round : {} | Local Epochs : {} | {} images\\tLoss: {:.6f}'.format(\n global_round, self.args.local_ep,\n len(self.trainloader.dataset), loss.item()))\n return model.state_dict(), sum(epoch_loss) / len(epoch_loss)\n\n\n def inference(self, model):\n \"\"\" Returns the inference accuracy and loss.\n \"\"\"\n\n model.eval()\n loss, total, correct = 0.0, 0.0, 0.0\n\n for batch_idx, (images, labels) in enumerate(self.testloader):\n images, labels = images.to(self.device), 
labels.to(self.device)\n\n # Inference\n outputs = model(images)\n batch_loss = self.criterion(outputs, labels)\n loss += batch_loss.item()\n\n # Prediction\n _, pred_labels = torch.max(outputs, 1)\n pred_labels = pred_labels.view(-1)\n correct += torch.sum(torch.eq(pred_labels, labels)).item()\n total += len(labels)\n\n accuracy = correct/total\n return accuracy, loss\n\n\ndef test_inference(args, model, test_dataset):\n \"\"\" Returns the test accuracy and loss.\n \"\"\"\n\n model.eval()\n loss, total, correct = 0.0, 0.0, 0.0\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n testloader = DataLoader(test_dataset, batch_size=128,\n shuffle=False)\n criterion = nn.NLLLoss()\n for batch_idx, (images, labels) in enumerate(testloader):\n images, labels = images.to(device), labels.to(device)\n\n # Inference\n outputs = model(images)\n batch_loss = criterion(outputs, labels)\n loss += batch_loss.item()\n\n # Prediction\n _, pred_labels = torch.max(outputs, 1)\n pred_labels = pred_labels.view(-1)\n correct += torch.sum(torch.eq(pred_labels, labels)).item()\n total += len(labels)\n\n accuracy = correct/total\n return accuracy, loss\n" ]
[ [ "torch.nn.NLLLoss", "torch.max", "torch.eq", "torch.utils.data.DataLoader", "torch.tensor", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangweichen2006/SRDAN_Open
[ "47c1bd9d2369d8e486b18a7aea220af7324c9011" ]
[ "pcdet/models/dense_heads/anchor_head_fuse_fpn_combine_cross_scale.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport scipy\nfrom scipy.spatial.distance import cdist\n\nfrom .anchor_head_template import AnchorHeadTemplate\n\nclass Self_Attn(nn.Module):\n \"\"\" Self attention Layer\"\"\"\n def __init__(self,in_dim,activation):\n super(Self_Attn,self).__init__()\n self.chanel_in = in_dim\n self.activation = activation\n\n self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)\n self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)\n self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.softmax = nn.Softmax(dim=-1) #\n def forward(self,x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n \"\"\"\n m_batchsize,C,width ,height = x.size()\n proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B X CX(N)\n proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B X C x (*W*H)\n energy = torch.bmm(proj_query,proj_key) # transpose check\n attention = self.softmax(energy) # BX (N) X (N)\n proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N\n\n out = torch.bmm(proj_value,attention.permute(0,2,1) )\n out = out.view(m_batchsize,C,width,height)\n\n out = self.gamma*out + x\n return out,attention\n\n\nclass GradReverse(torch.autograd.Function):\n def __init__(self, lambd):\n self.lambd = lambd\n\n def forward(self, x):\n return x.view_as(x)\n\n def backward(self, grad_output):\n return (grad_output * self.lambd)\n\ndef grad_reverse(x, lambd):\n return GradReverse(lambd)(x)\n\n\nclass LocationAttentionLayer(nn.Module):\n\n def __init__(self, num_channels, kernel_size, kernel_size2=0, no_sigmoid=False, detach=False):\n super(LocationAttentionLayer, self).__init__()\n if kernel_size2 == 0:\n kernel_size2 = kernel_size\n\n self.kernel_size = kernel_size\n self.kernel_size2 = kernel_size2\n self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size2), requires_grad=True)\n # self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size) n, 126, 126\n self.sigmoid = nn.Sigmoid()\n self.no_sigmoid = no_sigmoid\n self.detach = detach\n\n def forward(self, input_tensor):\n #2, 512, 126, 126\n\n # print(\"kernel_size\", self.kernel_size, self.kernel_size2)\n # print(\"input_tensor\", input_tensor.shape)\n bt, c, h, w = input_tensor.size()\n\n # print(\"bt, c, h, w\", bt, c, h, w)\n # print(\"input_tensor\", input_tensor.shape)\n # patch_tensor = self.patch_conv(input_tensor)\n # print(\"patch_tensor\", patch_tensor.shape)\n # print(\"self.patch_matrix.repeat(bt*c, 1, 1)\", self.patch_matrix.repeat(bt*c, 1, 1).shape)\n if self.no_sigmoid:\n input_tensor = input_tensor.contiguous().view(-1, h, w) #\n input_tensor = input_tensor * self.patch_matrix.repeat(bt*c, 1, 1)\n input_tensor = input_tensor.view(bt, c, h, w)\n else:\n input_tensor = input_tensor.view(-1, h, w) #\n att_matrix = self.patch_matrix.repeat(bt*c, 1, 1)\n # if self.detach:\n # att_matrix = att_matrix.detach()\n input_tensor = input_tensor * att_matrix\n\n # z = x * att_matrix.detach()\n # z = x.detach() * att_matrix\n input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)\n\n return input_tensor\n\nclass LocationAttentionDoubleLayer(nn.Module):\n\n def __init__(self, num_channels, kernel_size, 
kernel_size2=0, no_sigmoid=False):\n super(LocationAttentionDoubleLayer, self).__init__()\n if kernel_size2 == 0:\n kernel_size2 = kernel_size\n self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size2), requires_grad=True)\n # self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size) n, 126, 126\n self.sigmoid = nn.Sigmoid()\n self.no_sigmoid = no_sigmoid\n\n def forward(self, input_tensor, dom_atten):\n #2, 512, 126, 126\n # print(\"dom_atten\", dom_atten.shape) # 3, 514, 128, 128\n # print(\"input_tensor\", input_tensor.shape) # , , 128, 128\n bt, c, h, w = input_tensor.size()\n\n # print(\"bt, c, h, w\", bt, c, h, w)\n # print(\"input_tensor\", input_tensor.shape)\n # patch_tensor = self.patch_conv(input_tensor)\n # print(\"patch_tensor\", patch_tensor.shape)\n if self.no_sigmoid:\n input_tensor = input_tensor.contiguous().view(-1, h, w) #\n dom_atten = dom_atten.contiguous().view(-1, h, w)\n max_att = torch.max(dom_atten, self.patch_matrix.repeat(bt*c, 1, 1))\n input_tensor = input_tensor * max_att\n input_tensor = input_tensor.view(bt, c, h, w)\n else:\n input_tensor = input_tensor.view(-1, h, w) #\n dom_atten = dom_atten.view(-1, h, w) #\n max_att = torch.max(dom_atten, self.patch_matrix.repeat(bt*c, 1, 1))\n input_tensor = input_tensor * max_att\n input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)\n\n return input_tensor\n\nclass SpatialSELayer(nn.Module):\n \"\"\"\n Re-implementation of SE block -- squeezing spatially and exciting channel-wise described in:\n *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*\n \"\"\"\n\n def __init__(self, num_channels):\n \"\"\"\n :param num_channels: No of input channels\n \"\"\"\n super(SpatialSELayer, self).__init__()\n self.conv = nn.Conv2d(num_channels, 1, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input_tensor, weights=None):\n \"\"\"\n :param weights: weights for few shot learning\n :param input_tensor: X, shape = (batch_size, num_channels, H, W)\n :return: output_tensor\n \"\"\"\n # spatial squeeze\n batch_size, channel, a, b = input_tensor.size()\n\n # print(\"input_tensor.size()\", input_tensor.size()) #2, 512, 126, 126\n\n if weights is not None:\n weights = torch.mean(weights, dim=0)\n weights = weights.view(1, channel, 1, 1)\n out = F.conv2d(input_tensor, weights)\n else:\n out = self.conv(input_tensor)\n # print(\"out.size()\", out.size()) #2, 1, 126, 126\n\n squeeze_tensor = self.sigmoid(out)\n # print(\"squeeze_tensor.size()\", squeeze_tensor.size()) # 2, 1, 126, 126\n\n # spatial excitation\n squeeze_tensor = squeeze_tensor.view(batch_size, 1, a, b)\n # print(\"squeeze_tensor 2.size()\", squeeze_tensor.size()) # 2, 1, 126, 126\n output_tensor = torch.mul(input_tensor, squeeze_tensor)\n # print(\"output_tensor 2.size()\", output_tensor.size()) #2, 512, 126, 126\n #output_tensor = torch.mul(input_tensor, squeeze_tensor)\n return output_tensor\n\nclass ChannelSELayer(nn.Module):\n \"\"\"\n Re-implementation of Squeeze-and-Excitation (SE) block described in:\n *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*\n \"\"\"\n\n def __init__(self, num_channels, reduction_ratio=2):\n \"\"\"\n :param num_channels: No of input channels\n :param reduction_ratio: By how much should the num_channels should be reduced\n \"\"\"\n super(ChannelSELayer, self).__init__()\n num_channels_reduced = num_channels // reduction_ratio\n self.reduction_ratio = reduction_ratio\n self.fc1 = nn.Linear(num_channels, num_channels_reduced, 
bias=True)\n self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input_tensor):\n \"\"\"\n :param input_tensor: X, shape = (batch_size, num_channels, H, W)\n :return: output tensor\n \"\"\"\n batch_size, num_channels, H, W = input_tensor.size() #2, 512, 126, 126\n # Average along each channel\n squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(dim=2) #2, 512, 126*126(1)\n\n # channel excitation\n fc_out_1 = self.relu(self.fc1(squeeze_tensor))\n fc_out_2 = self.sigmoid(self.fc2(fc_out_1))\n\n a, b = squeeze_tensor.size()\n output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))\n return output_tensor\n\nclass LocalDomainClassifier(nn.Module):\n def __init__(self, input_channels=256, context=False):\n super(LocalDomainClassifier, self).__init__()\n self.conv1 = nn.Conv2d(input_channels, 256, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.context = context\n # print(\"sef context\", self.context)\n self._init_weights()\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n #m.bias.data.zero_()\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.conv2, 0, 0.01)\n normal_init(self.conv3, 0, 0.01)\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n if self.context:\n feat = F.avg_pool2d(x, (x.size(2), x.size(3)))\n x = self.conv3(x)\n return F.sigmoid(x),feat\n else:\n x = self.conv3(x)\n return F.sigmoid(x)\n\nclass AnchorHeadFuseFPNCombineCrossScale(AnchorHeadTemplate):\n def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,\n predict_boxes_when_training=True, nusc=False, input_channels_fpn=None, num_fpn_up=0, num_fpn_down=0, num_fpn_downup=0, fpn_layers=[], voxel_size=[0.1, 0.1, 0.2], **kwargs):\n super().__init__(\n model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,\n predict_boxes_when_training=predict_boxes_when_training, nusc=nusc,\n num_fpn_up=num_fpn_up, num_fpn_down=num_fpn_down, num_fpn_downup=num_fpn_downup, fpn_layers=fpn_layers, voxel_size=voxel_size, **kwargs\n )\n\n self.num_anchors_per_location = sum(self.num_anchors_per_location)\n\n #####################################\n\n self.voxel_det_seconv_attention = self.model_cfg.get('VOXEL_DET_SECONV_ATTENTION', False)\n self.voxel_det_se_attention = self.model_cfg.get('VOXEL_DET_SE_ATTENTION', False)\n self.voxel_det_patch_attention = self.model_cfg.get('VOXEL_DET_PATCH_ATTENTION', False)\n self.voxel_dom_seconv_attention = self.model_cfg.get('VOXEL_DOM_SECONV_ATTENTION', False)\n self.voxel_dom_se_attention = self.model_cfg.get('VOXEL_DOM_SE_ATTENTION', False)\n self.voxel_dom_patch_attention = self.model_cfg.get('VOXEL_DOM_PATCH_ATTENTION', False)\n self.joint_attention = self.model_cfg.get('VOXEL_DETDOM_JOINT_ATTENTION', False)\n self.dom_patch_first = self.model_cfg.get('DOM_PATCH_FIRST', False)\n self.no_sigmoid = self.model_cfg.get('NO_SIGMOID', False)\n\n if self.sep_two_dom or (self.double_pma and not 
self.joint_pma):\n self.input_channels_dom_sep = input_channels\n\n if self.range_guidance:\n if self.range_guidance_dom_only:\n self.input_channels = input_channels\n if self.range_guidance_dist:\n self.input_channels_dom = input_channels + 1\n else:\n self.input_channels_dom = input_channels + 2\n else:\n if self.range_guidance_dist:\n self.input_channels = input_channels + 1\n else:\n self.input_channels = input_channels + 2\n self.input_channels_dom = self.input_channels\n else:\n self.input_channels = input_channels\n self.input_channels_dom = input_channels\n\n if self.joint_two_dom:\n if self.dom_patch_first or self.patch_unplug_context:\n self.input_channels_dom_joint = input_channels\n else:\n if self.range_guidance_dist:\n self.input_channels_dom_joint = input_channels + 1\n else:\n self.input_channels_dom_joint = input_channels + 2\n\n if self.joint_pma:\n self.input_channels_dom_joint = input_channels + 2\n\n self.num_keypoints_range = self.model_cfg.get('NUM_KEYPOINTS_RANGE', {})\n self.range_keys = self.num_keypoints_range.keys()\n\n self.point_fc_range = nn.ModuleDict()\n # self.domain_classifier_range = nn.ModuleDict()\n\n for i in self.range_keys:\n self.point_fc_range[i] = nn.Sequential(nn.Linear(self.num_keypoints_range[i], input_channels), nn.ReLU(True), nn.Dropout())\n\n self.input_channels_fpn = input_channels_fpn\n self.input_channels_dom_fpn = {}\n if self.sep_two_dom or (self.double_pma and not self.joint_pma):\n self.input_channels_dom_sep_fpn = {}\n if self.joint_two_dom or self.joint_pma:\n self.input_channels_dom_joint_fpn = {}\n\n for layer in self.fpn_layers:\n\n if self.sep_two_dom or (self.double_pma and not self.joint_pma):\n self.input_channels_dom_sep_fpn[layer] = input_channels_fpn[layer]\n\n if self.range_guidance:\n if self.range_guidance_dom_only:\n if self.range_guidance_dist:\n self.input_channels_dom_fpn[layer] = input_channels_fpn[layer] + 1\n else:\n self.input_channels_dom_fpn[layer] = input_channels_fpn[layer] + 2\n else:\n if self.range_guidance_dist:\n self.input_channels_fpn[layer] = input_channels_fpn[layer] + 1\n else:\n self.input_channels_fpn[layer] = input_channels_fpn[layer] + 2\n\n self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer]\n else:\n self.input_channels_dom_fpn[layer] = input_channels_fpn[layer]\n\n if self.joint_two_dom:\n if self.dom_patch_first or self.patch_unplug_context:\n self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer]\n else:\n if self.range_guidance_dist:\n self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 1\n else:\n self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 2\n\n if self.joint_pma:\n self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 2\n ######### DOM CONTEXT ######\n\n if self.dom_context:\n dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 256])\n else:\n dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 1024])\n\n if self.dom_context:\n self.context_num = 1\n\n if not self.sep_fpn_dom_context:\n self.context_num += self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup\n\n if self.num_fpn_downup == 1:\n self.context_num += 1\n #256 512\n\n if self.point_feat_in_voxel_dom:\n self.context_num += 2 # point context 256*2=512\n\n self.input_channels += self.context_num*dom_fc2\n\n for layer in self.fpn_layers:\n self.input_channels_fpn[layer] += self.context_num*dom_fc2\n # print('self.input_channels_fpn[layer] ini', layer, self.input_channels_fpn[layer])\n\n if 
self.range_guidance_new_conv_dom_context:\n self.context_num = 1\n\n if not self.sep_fpn_dom_context:\n self.context_num += self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup\n\n if self.num_fpn_downup == 1:\n self.context_num += 1\n #256 512\n\n if self.point_feat_in_voxel_dom:\n self.context_num += 2 # point context 256*2=512\n\n self.input_channels += self.context_num*128\n\n for layer in self.fpn_layers:\n self.input_channels_fpn[layer] += self.context_num*128\n # print(\" self.point_features_dim\", self.point_features_dim)\n #############\n if self.point_interpolation:\n self.input_channels = self.input_channels + self.point_features_dim\n self.input_channels_dom = self.input_channels_dom + self.point_features_dim\n\n for layer in self.fpn_layers:\n if self.point_interpolation:\n self.input_channels_dom_fpn[layer] = self.input_channels_dom_fpn[layer] + self.point_features_dim\n self.input_channels_fpn[layer] = self.input_channels_fpn[layer] + self.point_features_dim\n # print('self.input_channels_dom_fpn[layer]', layer, self.input_channels_dom_fpn[layer]) # 512+128 = 640\n # print('self.input_channels_fpn[layer]', layer, self.input_channels_fpn[layer]) # 512+1024+128 = 1664\n\n self.conv_cls = nn.Conv2d(\n self.input_channels, self.num_anchors_per_location * self.num_class,\n kernel_size=1\n )\n self.conv_box = nn.Conv2d(\n self.input_channels, self.num_anchors_per_location * self.box_coder.code_size,\n kernel_size=1\n )\n self.input_channels_det = self.input_channels\n\n self.rangeinv = self.model_cfg.get('RANGE_INV', False)\n self.keep_x = self.model_cfg.get('KEEP_X', False)\n self.keep_y = self.model_cfg.get('KEEP_Y', False)\n self.keep_xy = self.model_cfg.get('KEEP_XY', False)\n self.center_xy = self.model_cfg.get('CENTER_XY', False)\n\n self.rm_thresh = self.model_cfg.get('RM_THRESH', 0)\n\n if self.voxel_dom_patch_attention:\n\n if self.double_pma:\n if self.joint_pma:\n self.att_patch_layer_double = LocationAttentionLayer(self.input_channels_dom_joint, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n else:\n self.att_patch_layer_double = LocationAttentionLayer(self.input_channels_dom, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n\n if self.joint_two_dom:\n\n if self.two_attention_max:\n self.att_patch_layer = LocationAttentionDoubleLayer(self.input_channels_dom_joint, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n else:\n self.att_patch_layer = LocationAttentionLayer(self.input_channels_dom_joint, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n else:\n if self.two_attention_max:\n self.att_patch_layer = LocationAttentionDoubleLayer(self.input_channels_dom, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n else:\n self.att_patch_layer = LocationAttentionLayer(self.input_channels_dom, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE), self.no_sigmoid)\n\n\n if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:\n self.conv_dir_cls = nn.Conv2d(\n self.input_channels,\n self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,\n kernel_size=1\n )\n else:\n self.conv_dir_cls = None\n\n # print(\"dom_fc \", dom_fc1, dom_fc2)\n if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None):\n\n if self.range_guidance_new_conv_dom:\n # 
print(\"input_channels_dom\", input_channels_dom)\n self.conv_dom_layers = LocalDomainClassifier(input_channels=self.input_channels_dom, context=self.range_guidance_new_conv_dom_context)\n\n if self.sep_two_dom or (self.double_pma and not self.joint_pma):\n self.domain_pool2 = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier2 = nn.Sequential(nn.Linear(self.input_channels_dom_sep, dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n if self.joint_two_dom or (self.double_pma and self.joint_pma):\n self.domain_pool2 = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier2 = nn.Sequential(nn.Linear(self.input_channels_dom_joint, dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n\n else:\n self.domain_pool = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier = nn.Sequential(nn.Linear(self.input_channels_dom, dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n\n ######## FPN detector #########\n\n self.conv_cls_fpn = nn.ModuleDict()\n self.conv_box_fpn = nn.ModuleDict()\n\n self.att_spatial_se_layer_fpn = nn.ModuleDict()\n self.att_se_layer_fpn = nn.ModuleDict()\n self.att_patch_layer_fpn = nn.ModuleDict()\n\n\n self.att_spatial_se_layer_det_fpn = nn.ModuleDict()\n self.att_se_layer_det_fpn = nn.ModuleDict()\n self.att_patch_layer_det_fpn = nn.ModuleDict()\n\n if self.double_pma:\n self.att_patch_layer_fpn_double = nn.ModuleDict()\n self.att_patch_layer_det_fpn_double = nn.ModuleDict()\n\n\n # for layer in self.fpn_layers:\n # print(\"self.input_channels_fpn[layer] fi\", layer, self.input_channels_fpn[layer])\n\n for layer in self.fpn_layers:\n\n if self.voxel_dom_patch_attention:\n\n if self.double_pma:\n if self.joint_pma:\n self.att_patch_layer_fpn_double[layer] = LocationAttentionLayer(self.input_channels_dom_joint_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n else:\n self.att_patch_layer_fpn_double[layer] = LocationAttentionLayer(self.input_channels_dom_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n\n if self.joint_two_dom:\n if self.two_attention_max:\n self.att_patch_layer_fpn[layer] = LocationAttentionDoubleLayer(self.input_channels_dom_joint_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n else:\n self.att_patch_layer_fpn[layer] = LocationAttentionLayer(self.input_channels_dom_joint_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n else:\n if self.two_attention_max:\n self.att_patch_layer_fpn[layer] = LocationAttentionDoubleLayer(self.input_channels_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n else:\n self.att_patch_layer_fpn[layer] = LocationAttentionLayer(self.input_channels_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer], self.no_sigmoid)\n\n\n self.num_anchors_per_location_fpn[layer] = sum(self.num_anchors_per_location_fpn[layer]) # 2, 7\n\n self.conv_cls_fpn[layer] = nn.Conv2d(\n 
self.input_channels_fpn[layer], self.num_anchors_per_location_fpn[layer] * self.num_class,\n kernel_size=1\n )# 512 -> 2\n self.conv_box_fpn[layer] = nn.Conv2d(\n self.input_channels_fpn[layer], self.num_anchors_per_location_fpn[layer] * self.box_coder.code_size,\n kernel_size=1\n )# 512 -> 14\n\n ######### fpn dir clf #########\n if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:\n\n self.conv_dir_cls_fpn = nn.ModuleDict()\n for layer in self.fpn_layers:\n self.conv_dir_cls_fpn[layer] = nn.Conv2d(\n self.input_channels_fpn[layer],\n self.num_anchors_per_location_fpn[layer] * self.model_cfg.NUM_DIR_BINS,\n kernel_size=1\n )\n else:\n for layer in self.fpn_layers:\n self.conv_dir_cls_fpn[layer] = None\n\n # print(\"USE_DOMAIN_CLASSIFIER\", self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None))\n if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None):\n\n self.domain_pool_fpn = nn.ModuleDict()\n self.domain_classifier_fpn = nn.ModuleDict()\n self.conv_dom_layers_fpn = nn.ModuleDict()\n if self.sep_two_dom or self.joint_two_dom or self.double_pma:\n self.domain_pool2_fpn = nn.ModuleDict()\n self.domain_classifier2_fpn = nn.ModuleDict()\n\n for layer in self.fpn_layers:\n self.domain_pool_fpn[layer] = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_fpn[layer], dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n\n if self.range_guidance_new_conv_dom:\n # print(\"input_channels_dom\", input_channels_dom)\n self.conv_dom_layers_fpn[layer] = LocalDomainClassifier(input_channels=self.input_channels_dom_fpn[layer], context=self.range_guidance_new_conv_dom_context)\n\n if self.sep_two_dom or (self.double_pma and not self.joint_pma):\n self.domain_pool2_fpn[layer] = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier2_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_sep_fpn[layer], dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n # print(\"sep\")\n if self.joint_two_dom or (self.double_pma and self.joint_pma):\n self.domain_pool2_fpn[layer] = nn.AdaptiveAvgPool2d(1)\n self.domain_classifier2_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_joint_fpn[layer], dom_fc1),\n nn.ReLU(True), nn.Dropout(),\n nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n # print(\"joint\")\n\n\n if self.range_guidance:\n if self.fov:\n total_range_x = self.model_cfg.PATCH_SIZE\n total_range_y = self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE)\n half_range_x = int(total_range_x * 0.5)\n self.x_range_matrix = torch.abs(torch.arange(0, total_range_y, 1).float()).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()\n # print('x_range', x_range)\n self.y_range_matrix = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()\n if self.range_guidance_dist:\n joint_range_matrix = torch.stack((self.x_range_matrix,self.y_range_matrix),dim=-1).view(-1,2)\n center_matrix = torch.tensor([(half_range_x, 0)]).float().cuda()\n self.range_matrix = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)\n else:\n total_range_x = self.model_cfg.PATCH_SIZE\n total_range_y = self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE)\n half_range_x = int(total_range_x * 0.5)\n half_range_y = 
int(total_range_y * 0.5)\n self.x_range_matrix = torch.abs(torch.arange(-half_range_y, half_range_y, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()\n self.y_range_matrix = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()\n if self.range_guidance_dist:\n joint_range_matrix = torch.stack((self.x_range_matrix,self.y_range_matrix),dim=-1).view(-1,2)\n center_matrix = torch.tensor([(0., 0.)]).float().cuda()\n self.range_matrix = torch.cdist(joint_range_matrix,center_matrix).view(1,1,total_range_x, total_range_y)\n\n self.x_range_matrix_fpn = {}\n self.y_range_matrix_fpn = {}\n self.range_matrix_fpn = {}\n\n for layer in self.fpn_layers:\n if self.fov:\n total_range_x = self.model_cfg.PATCH_SIZE_FPN[layer]\n total_range_y = self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer]\n half_range_x = int(total_range_x * 0.5)\n self.x_range_matrix_fpn[layer] = torch.abs(torch.arange(0, total_range_y, 1).float()).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()\n # print('x_range', x_range)\n self.y_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()\n if self.range_guidance_dist:\n joint_range_matrix = torch.stack((self.x_range_matrix_fpn[layer],self.y_range_matrix_fpn[layer]),dim=-1).view(-1,2)\n center_matrix = torch.tensor([(half_range_x, 0)]).float().cuda()\n self.range_matrix_fpn[layer] = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)\n else:\n total_range_x = self.model_cfg.PATCH_SIZE_FPN[layer]\n total_range_y = self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer]\n half_range_x = int(total_range_x * 0.5)\n half_range_y = int(total_range_y * 0.5)\n self.x_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_y, half_range_y, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()\n self.y_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()\n if self.range_guidance_dist:\n joint_range_matrix = torch.stack((self.x_range_matrix_fpn[layer],self.y_range_matrix_fpn[layer]),dim=-1).view(-1,2)\n center_matrix = torch.tensor([(0, 0)]).float().cuda()\n self.range_matrix_fpn[layer] = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)\n\n if self.cross_scale:\n self.scale_classifier_1_1 = nn.Sequential(nn.Linear(512, dom_fc1),\n nn.ReLU(True), nn.Dropout())\n self.scale_classifier_1_2 = nn.Sequential(nn.Linear(512, dom_fc1),\n nn.ReLU(True), nn.Dropout())\n self.scale_classifier_1 = nn.Sequential(nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n\n if self.cross_two_scale:\n self.scale_classifier_1_3 = nn.Sequential(nn.Linear(256, dom_fc1),\n nn.ReLU(True), nn.Dropout())\n\n self.scale_classifier_2 = nn.Sequential(nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),\n nn.Dropout(), nn.Linear(dom_fc2, 1))\n\n self.domain_pool = nn.AdaptiveAvgPool2d(1)\n self.init_weights()\n\n def init_weights(self):\n pi = 0.01\n nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))\n nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)\n\n for layer in self.fpn_layers:\n 
nn.init.constant_(self.conv_cls_fpn[layer].bias, -np.log((1 - pi) / pi))\n nn.init.normal_(self.conv_box_fpn[layer].weight, mean=0, std=0.001)\n\n def local_attention(self, features, d):\n # features.size() = [1, 256, h, w]\n # d.size() = [1, 1, h, w] after sigmoid\n\n d = d.clamp(1e-6, 1)\n H = - ( d * d.log() + (1-d) * (1-d).log() )\n w = 1 - H\n features_new = (1 + w) * features\n\n return features_new\n\n def forward(self, data_dict):\n t_mode = data_dict['t_mode']\n l = data_dict['l']\n\n if 'pseudo' in t_mode:\n pseudo = True\n else:\n pseudo = False\n\n if t_mode == 'dom_img_src':\n dom_src = True\n elif t_mode == 'dom_img_tgt':\n dom_src = False\n else:\n dom_src = None\n\n spatial_features_2d_fpn_det = {}\n\n spatial_features_2d = data_dict['spatial_features_2d']\n # print(\"spatial_features_2d\", spatial_features_2d.shape)\n\n range_fpn_dict = {'short': '3', 'mid': '4', 'long': '5'}\n fpn_range_dict = {'3':'short', '4':'mid', '5':'long'}\n\n if t_mode == 'tsne':\n return_dict = {}\n spatial_features_2d = data_dict[f'spatial_features_2d']\n return_dict[f'tsne_spatial_features_2d'] = self.domain_pool(spatial_features_2d)\n\n if self.voxel_dom_patch_attention and self.dom_patch_first:\n spatial_features_2d = self.att_patch_layer(spatial_features_2d)\n return_dict['tsne_spatial_features_2d_PMA_First'] = self.domain_pool(spatial_features_2d)\n\n if self.range_guidance and self.range_guidance_dom_only:\n total_range = spatial_features_2d.shape[-1]\n half_range = int(spatial_features_2d.shape[-1] * 0.5)\n x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()\n y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()\n spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)\n return_dict['tsne_spatial_features_2d_RCD'] = self.domain_pool(spatial_features_2d)\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n spatial_features_2d = self.att_patch_layer(spatial_features_2d)\n return_dict['tsne_spatial_features_2d_PMA_Late'] = self.domain_pool(spatial_features_2d)\n\n for l in self.fpn_layers:\n spatial_features_2d = data_dict[f'spatial_features_2d_fpn{l}']\n return_dict[f'tsne_spatial_features_2d_fpn{l}'] = self.domain_pool(spatial_features_2d)\n\n if self.voxel_dom_patch_attention and self.dom_patch_first:\n spatial_features_2d = self.att_patch_layer_fpn[l](spatial_features_2d)\n return_dict['tsne_spatial_features_2d_PMA_First_fpn{l}'] = self.domain_pool(spatial_features_2d)\n\n if self.range_guidance and self.range_guidance_dom_only:\n total_range = spatial_features_2d.shape[-1]\n half_range = int(spatial_features_2d.shape[-1] * 0.5)\n x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()\n y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()\n spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)\n return_dict['tsne_spatial_features_2d_RCD_fpn{l}'] = self.domain_pool(spatial_features_2d)\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n spatial_features_2d = self.att_patch_layer_fpn[l](spatial_features_2d)\n 
return_dict['tsne_spatial_features_2d_PMA_Late_fpn{l}'] = self.domain_pool(spatial_features_2d)\n\n return return_dict\n\n ######## point feat cat ########\n if self.point_feat_in_voxel_dom:\n if self.debug: print('point_feat_in_voxel_dom')\n point_features_2d = data_dict['point_features']\n point_features_avg = torch.mean(point_features_2d, -1)\n batch_point_features = point_features_avg.view(-1, self.num_keypoints)\n x_pool_point = self.point_fc(batch_point_features)\n\n # if self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup > 0:\n # for layer in self.fpn_layers:\n # point_features_2d = data_dict['point_features']\n # point_features_avg = torch.mean(point_features_2d, -1)\n # batch_point_features = point_features_avg.view(-1, self.num_keypoints)\n # x_pool_point = self.point_fc(batch_point_features)\n\n ###################################\n # point interpolation for general dom\n if self.point_interpolation:\n if self.debug: print('point_interpolation')\n batch_size, _, bev_x, bev_y = spatial_features_2d.shape\n\n if self.multi_range_interpolate:\n point_feat_dim = data_dict[f'point_features_mid'].shape[-1]\n else:\n point_feat_dim = data_dict[f'point_features'].shape[-1]\n\n # interpolated_bev_features_joint = torch.zeros((batch_size, point_feat_dim, bev_x, bev_y)).cuda()\n\n bev_feat_range = {}\n\n if self.multi_range_interpolate:\n for i in self.range_keys:\n bev_feat_range[i] = self.interpolate_to_bev_features_fast(data_dict[f'point_coords_{i}'][:, 1:4], data_dict[f'point_features_{i}'], data_dict[f'spatial_features_2d_fpn{range_fpn_dict[i]}'].shape, data_dict['spatial_features_stride'])\n\n data_dict[f'spatial_features_2d_fpn{range_fpn_dict[i]}'] = torch.cat((data_dict[f'spatial_features_2d_fpn{range_fpn_dict[i]}'], bev_feat_range[i]), dim=1)\n\n interpolated_bev_features_joint = self.interpolate_to_bev_features_fast(data_dict[f'point_coords_joint'][:, 1:4], data_dict[f'point_features_joint'], data_dict[f'spatial_features_2d'].shape, data_dict['spatial_features_stride'])\n\n spatial_features_2d = torch.cat((spatial_features_2d, interpolated_bev_features_joint), dim=1)\n\n # data_dict[f'spatial_features_2d'] = spatial_features_2d\n\n else:\n\n if self.fast_interpolation:\n bev_feat_interpolated = self.interpolate_to_bev_features_fast(data_dict[f'point_coords'][:, 1:4], data_dict[f'point_features'], spatial_features_2d.shape, data_dict['spatial_features_stride'])\n else:\n bev_feat_interpolated = self.interpolate_to_bev_features(data_dict[f'point_coords'][:, 1:4], data_dict[f'point_features'], spatial_features_2d.shape, data_dict['spatial_features_stride'])\n\n spatial_features_2d = torch.cat((spatial_features_2d, bev_feat_interpolated), dim=1)\n\n for layer in self.fpn_layers:\n if self.fast_interpolation:\n bev_feat_interpolated_fpn = self.interpolate_to_bev_features_fast(data_dict[f'point_coords'][:, 1:4], data_dict[f'point_features'], data_dict[f'spatial_features_2d_fpn{layer}'].shape, data_dict['spatial_features_stride'])\n else:\n bev_feat_interpolated_fpn = self.interpolate_to_bev_features(data_dict[f'point_coords'][:, 1:4], data_dict[f'point_features'], data_dict[f'spatial_features_2d_fpn{layer}'].shape, data_dict['spatial_features_stride'])\n\n data_dict[f'spatial_features_2d_fpn{layer}'] = torch.cat((data_dict[f'spatial_features_2d_fpn{layer}'], bev_feat_interpolated_fpn), dim=1)\n # point interpolated for fpn\n\n ##### assign det feature ########\n spatial_features_2d_det = spatial_features_2d\n for layer in self.fpn_layers:\n spatial_features_2d_fpn_det[layer] = 
data_dict[f'spatial_features_2d_fpn{layer}']\n\n\n if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None):\n ############ attention ###############\n if 'dom_img' in t_mode and not self.fpn_only:\n\n spatial_features_2d_dom = spatial_features_2d\n\n if self.sep_two_dom:\n spatial_features_2d_dom_sep = spatial_features_2d_dom\n\n if self.voxel_dom_patch_attention and self.dom_patch_first:\n\n if self.sep_two_dom:\n spatial_features_2d_dom_sep = self.att_patch_layer(spatial_features_2d_dom_sep)\n else:\n spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom)\n\n ## dom pred 2 if joint ##\n if self.joint_two_dom:\n x_pool2 = self.domain_pool2(spatial_features_2d_dom).view(spatial_features_2d_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2 = grad_reverse(x_pool2, l)\n else:\n x_reverse2 = grad_reverse(x_pool2, l*-1)\n dom_img_preds2 = self.domain_classifier2(x_reverse2)#.\n\n if self.debug: print('voxel_dom_patch_attention first')\n\n ########## context ###########\n if self.range_guidance and self.range_guidance_dom_only:\n if self.debug: print('range_guidance')\n\n if self.range_guidance_dist:\n spatial_features_2d_dom = torch.cat((spatial_features_2d_dom, self.range_matrix.repeat(spatial_features_2d_dom.shape[0],1,1,1)), dim=1)\n\n else:\n spatial_features_2d_dom = torch.cat((spatial_features_2d_dom, self.x_range_matrix.repeat(spatial_features_2d_dom.shape[0],1,1,1), self.y_range_matrix.repeat(spatial_features_2d_dom.shape[0],1,1,1)), dim=1)\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n if self.sep_two_dom:\n spatial_features_2d_dom_sep = self.att_patch_layer(spatial_features_2d_dom)\n elif not self.joint_two_dom:\n spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom)\n if self.debug: print('voxel_dom_patch_attention late')\n\n if self.range_guidance_conv_dom or self.range_guidance_new_conv_dom:\n\n if self.range_guidance_new_conv_dom_attention:\n if self.debug: print('range_guidance_conv_dom/range_guidance_new_conv_dom _attention')\n if 'reg' in t_mode:\n x_reverse_dom = grad_reverse(spatial_features_2d_dom, l)\n else:\n x_reverse_dom = grad_reverse(spatial_features_2d_dom, l*-1)\n if self.range_guidance_new_conv_dom_context:\n dom_img_preds, _ = self.conv_dom_layers(x_reverse_dom)\n #print(d_pixel)\n # if not target:\n _, pixel_context = self.conv_dom_layers(spatial_features_2d_dom.detach())\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = pixel_context\n\n else:\n dom_img_preds = self.conv_dom_layers(x_reverse_dom)\n\n if not self.two_attention_max:\n if self.range_guidance_dom_only:\n spatial_features_2d_dom = self.local_attention(spatial_features_2d_dom, dom_img_preds.detach())\n else:\n spatial_features_2d_dom = self.local_attention(spatial_features_2d_dom, dom_img_preds.detach())\n\n spatial_features_2d_det = spatial_features_2d_dom\n\n else:\n if self.debug: print('range_guidance_conv_dom/range_guidance_new_conv_dom no_attention')\n if 'reg' in t_mode:\n x_reverse_dom = grad_reverse(spatial_features_2d_dom, l)\n else:\n x_reverse_dom = grad_reverse(spatial_features_2d_dom, l*-1)\n if self.range_guidance_new_conv_dom_context:\n dom_img_preds, _ = self.conv_dom_layers(x_reverse_dom)\n #print(d_pixel)\n # if not target:\n _, pixel_context = self.conv_dom_layers(spatial_features_2d_dom.detach())\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = pixel_context\n\n else:\n dom_img_preds = self.conv_dom_layers(x_reverse_dom)\n\n if self.range_guidance_double_dom:\n x_pool2 = 
self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2 = grad_reverse(x_pool2, l)\n else:\n x_reverse2 = grad_reverse(x_pool2, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2 = self.domain_classifier(x_reverse2)#.squeeze(-1)\n\n if self.sep_two_dom:\n x_pool2 = self.domain_pool2(spatial_features_2d_dom_sep).view(spatial_features_2d_dom_sep.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2 = grad_reverse(x_pool2, l)\n else:\n x_reverse2 = grad_reverse(x_pool2, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2 = self.domain_classifier2(x_reverse2)#.squeeze(-1)\n\n else:\n if self.debug: print('no range_guidance_new_conv normal dom')\n\n x_pool = self.domain_pool(spatial_features_2d_dom).view(spatial_features_2d_dom.size(0), -1)\n if self.point_feat_in_voxel_dom:\n x_pool_joint = torch.cat((x_pool, x_pool_point),dim=-1)\n else:\n x_pool_joint = x_pool\n if 'reg' in t_mode:\n x_reverse = grad_reverse(x_pool_joint, l)\n else:\n x_reverse = grad_reverse(x_pool_joint, l*-1)\n dom_head_context = self.domain_classifier[:-2](x_reverse)#.squeeze(-1)\n\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = dom_head_context\n\n dom_img_preds = self.domain_classifier[-2:](dom_head_context)#.squeeze(-1)\n\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n\n if self.joint_two_dom:\n\n if self.patch_unplug_context:\n range_dim = spatial_features_2d_dom.shape[1]\n spatial_features_2d_dom = spatial_features_2d_dom[:,:range_dim-2,:,:]\n\n if self.two_attention_max:\n local_dom_att = self.local_attention(spatial_features_2d_dom, dom_img_preds.detach())\n spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom, local_dom_att)\n else:\n spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom)\n\n x_pool2 = self.domain_pool2(spatial_features_2d_dom).view(spatial_features_2d_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2 = grad_reverse(x_pool2, l)\n else:\n x_reverse2 = grad_reverse(x_pool2, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2 = self.domain_classifier2(x_reverse2)#.\n\n if self.double_pma:\n if not self.joint_pma:\n spatial_features_2d_double = spatial_features_2d\n else:\n spatial_features_2d_double = spatial_features_2d_dom\n\n spatial_features_2d_double = self.att_patch_layer_double(spatial_features_2d_double)\n\n x_pool2_double = self.domain_pool2(spatial_features_2d_double).view(spatial_features_2d_double.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_double = grad_reverse(x_pool2_double, l)\n else:\n x_reverse2_double = grad_reverse(x_pool2_double, l*-1)\n dom_img_preds2 = self.domain_classifier2(x_reverse2_double)\n\n # if self.patch_unplug_context:\n # range_dim = spatial_features_2d_dom.shape[1]\n # spatial_features_2d_dom = spatial_features_2d_dom[:,:range_dim-2,:,:]\n\n # if self.two_attention_max:\n # local_dom_att = self.local_attention(spatial_features_2d_dom, dom_img_preds.detach())\n # spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom, local_dom_att)\n # else:\n # spatial_features_2d_dom = self.att_patch_layer(spatial_features_2d_dom)\n\n # if self.dom_squeeze:\n # dom_img_preds = dom_img_preds.squeeze(-1)\n # if self.range_guidance_double_dom or self.sep_two_dom:\n # dom_img_preds2 = dom_img_preds2.squeeze(-1)\n\n\n self.forward_ret_dict['dom_img_preds'] = dom_img_preds\n\n if self.range_guidance_double_dom or self.sep_two_dom or self.joint_two_dom or self.double_pma:\n 
self.forward_ret_dict['dom_img_preds2'] = dom_img_preds2\n\n\n if self.training:\n targets_dict_dom = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n dom_src=dom_src,\n pseudo=pseudo\n )\n self.forward_ret_dict.update(targets_dict_dom)\n\n ####################### dom fpn #####################\n\n if 'dom_img' in t_mode:\n if self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup > 0:\n # print(\"fpn\")\n if self.debug: print('dom img fpn')\n\n for layer in self.fpn_layers:\n\n spatial_features_2d_fpn = data_dict[f'spatial_features_2d_fpn{layer}'] # 642\n\n spatial_features_2d_fpn_dom = spatial_features_2d_fpn# 642\n if self.sep_two_dom:\n spatial_features_2d_sep_fpn_dom = spatial_features_2d_fpn_dom\n\n if self.voxel_dom_patch_attention and self.dom_patch_first:\n\n if self.sep_two_dom:\n spatial_features_2d_sep_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_sep_fpn_dom)\n else:\n spatial_features_2d_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_fpn_dom)\n\n if self.joint_two_dom:\n x_pool2_fpn_dom = self.domain_pool2_fpn[layer](spatial_features_2d_fpn_dom).view(spatial_features_2d_fpn_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_fpn_dom = grad_reverse(x_pool2_fpn_dom, l)\n else:\n x_reverse2_fpn_dom = grad_reverse(x_pool2_fpn_dom, l*-1)\n dom_img_preds2_fpn = self.domain_classifier2_fpn[layer](x_reverse2_fpn_dom)#.\n\n if self.range_guidance and self.range_guidance_dom_only:\n if self.range_guidance_dist:\n spatial_features_2d_fpn_dom = torch.cat((spatial_features_2d_fpn_dom, self.range_matrix_fpn[layer].repeat(spatial_features_2d_fpn_dom.shape[0],1,1,1)), dim=1)\n else:\n spatial_features_2d_fpn_dom = torch.cat((spatial_features_2d_fpn_dom, self.x_range_matrix_fpn[layer].repeat(spatial_features_2d_fpn_dom.shape[0],1,1,1), self.y_range_matrix_fpn[layer].repeat(spatial_features_2d_dom.shape[0],1,1,1)), dim=1)\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n if self.sep_two_dom:\n spatial_features_2d_sep_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_sep_fpn_dom)\n elif not self.joint_two_dom:\n spatial_features_2d_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_fpn_dom)\n\n if self.range_guidance_conv_dom or self.range_guidance_new_conv_dom:\n # x_pool = self.domain_pool().view(spatial_features_2d.size(0), -1)\n # print('t_mode', t_mode)\n # print(\"l\", l)\n if self.range_guidance_new_conv_dom_attention:\n if 'reg' in t_mode:\n x_reverse_fpn_dom = grad_reverse(spatial_features_2d_fpn_dom, l)\n else:\n x_reverse_fpn_dom = grad_reverse(spatial_features_2d_fpn_dom, l*-1)\n if self.range_guidance_new_conv_dom_context:\n dom_img_fpn_preds, _ = self.conv_dom_layers_fpn[layer](x_reverse_fpn_dom)\n #print(d_pixel)\n # if not target:\n _, pixel_context_fpn = self.conv_dom_layers_fpn[layer](spatial_features_2d_fpn_dom.detach())\n if 'dom_img_det' in t_mode:\n data_dict[f'dom_head_context_fpn{layer}'] = pixel_context_fpn\n else:\n dom_img_preds_fpn = self.conv_dom_layers_fpn[layer](x_reverse_fpn_dom)\n\n\n if not self.two_attention_max:\n if self.range_guidance_dom_only:\n spatial_features_2d_fpn_dom = self.local_attention(spatial_features_2d_fpn_dom, dom_img_preds_fpn.detach())\n else:\n spatial_features_2d_fpn_dom = self.local_attention(spatial_features_2d_fpn_dom, dom_img_preds_fpn.detach())\n\n spatial_features_2d_fpn_det[layer] = spatial_features_2d_fpn_dom\n\n else:\n if 'reg' in t_mode:\n x_reverse_fpn_dom = grad_reverse(spatial_features_2d_fpn_dom, l)\n else:\n x_reverse_fpn_dom = 
grad_reverse(spatial_features_2d_fpn_dom, l*-1)\n if self.range_guidance_new_conv_dom_context:\n dom_img_preds_fpn, _ = self.conv_dom_layers_fpn[layer](x_reverse_fpn_dom)\n #print(d_pixel)\n # if not target:\n _, pixel_context_fpn = self.conv_dom_layers_fpn[layer](spatial_features_2d_fpn_dom.detach())\n if 'dom_img_det' in t_mode:\n data_dict['dom_head_context'] = pixel_context_fpn\n else:\n dom_img_preds_fpn = self.conv_dom_layers_fpn[layer](x_reverse_fpn_dom)\n\n if self.sep_two_dom:\n x_pool2_sep_fpn = self.domain_pool2_fpn[layer](spatial_features_2d_sep_fpn_dom).view(spatial_features_2d_sep_fpn_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_sep_fpn = grad_reverse(x_pool2_sep_fpn, l)\n else:\n x_reverse2_sep_fpn = grad_reverse(x_pool2_sep_fpn, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2_fpn = self.domain_classifier2_fpn[layer](x_reverse2_sep_fpn)#.squeeze(-1)\n\n\n else:\n x_pool_fpn = self.domain_pool_fpn[layer](spatial_features_2d_fpn_dom).view(spatial_features_2d_fpn_dom.size(0), -1)\n # print(\"x_pool_fpn\", x_pool_fpn.shape)\n if self.point_feat_in_voxel_dom:\n x_pool_joint_fpn = torch.cat((x_pool_fpn, x_pool_point),dim=-1)\n else:\n x_pool_joint_fpn = x_pool_fpn\n if 'reg' in t_mode:\n x_reverse_fpn = grad_reverse(x_pool_joint_fpn, l)\n else:\n x_reverse_fpn = grad_reverse(x_pool_joint_fpn, l*-1)\n # print(\"x_reverse_fpn\", x_reverse_fpn.shape)\n dom_head_context_fpn = self.domain_classifier_fpn[layer][:-2](x_reverse_fpn)#.squeeze(-1)\n\n if 'dom_img_det' in t_mode:\n data_dict[f'dom_head_context_fpn{layer}'] = dom_head_context_fpn\n\n dom_img_preds_fpn = self.domain_classifier_fpn[layer][-2:](dom_head_context_fpn).squeeze(-1)\n\n if self.voxel_dom_patch_attention and not self.dom_patch_first:\n if self.joint_two_dom:\n if self.two_attention_max:\n local_dom_att_fpn = self.local_attention(spatial_features_2d_fpn_dom, dom_img_preds_fpn.detach())\n spatial_features_2d_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_fpn_dom, local_dom_att_fpn)\n\n x_pool2_fpn = self.domain_pool2_fpn[layer](spatial_features_2d_fpn_dom).view(spatial_features_2d_fpn_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_fpn = grad_reverse(x_pool2_fpn, l)\n else:\n x_reverse2_fpn = grad_reverse(x_pool2_fpn, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2_fpn = self.domain_classifier2_fpn[layer](x_reverse2_fpn)#.\n\n\n else:\n if self.patch_unplug_context:\n range_dim = spatial_features_2d_fpn_dom.shape[1]\n spatial_features_2d_fpn_dom = spatial_features_2d_fpn_dom[:,:range_dim-2,:,:]\n spatial_features_2d_fpn_dom = self.att_patch_layer_fpn[layer](spatial_features_2d_fpn_dom)\n\n x_pool2_fpn = self.domain_pool2_fpn[layer](spatial_features_2d_fpn_dom).view(spatial_features_2d_fpn_dom.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_fpn = grad_reverse(x_pool2_fpn, l)\n else:\n x_reverse2_fpn = grad_reverse(x_pool2_fpn, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2_fpn = self.domain_classifier2_fpn[layer](x_reverse2_fpn)#.\n\n if self.double_pma:\n\n if not self.joint_pma:\n spatial_features_2d_fpn_double = spatial_features_2d_fpn\n else:\n spatial_features_2d_fpn_double = spatial_features_2d_fpn_dom\n\n spatial_features_2d_fpn_double = self.att_patch_layer_fpn_double[layer](spatial_features_2d_fpn_double)\n x_pool2_fpn = self.domain_pool2_fpn[layer](spatial_features_2d_fpn_double).view(spatial_features_2d_fpn_double.size(0), -1)\n if 'reg' in t_mode:\n x_reverse2_fpn = grad_reverse(x_pool2_fpn, l)\n else:\n x_reverse2_fpn = 
grad_reverse(x_pool2_fpn, l*-1)\n # print(\"x_reverse2\", x_reverse2.shape)\n dom_img_preds2_fpn = self.domain_classifier2_fpn[layer](x_reverse2_fpn)#.\n\n # if self.dom_squeeze:\n # dom_img_preds = dom_img_preds.squeeze(-1)\n # if self.range_guidance_double_dom or self.sep_two_dom:\n # dom_img_preds2 = dom_img_preds2.squeeze(-1)\n\n self.forward_ret_dict[f'dom_img_preds_fpn{layer}'] = dom_img_preds_fpn\n\n if self.sep_two_dom or self.joint_two_dom or self.double_pma:\n self.forward_ret_dict[f'dom_img_preds2_fpn{layer}'] = dom_img_preds2_fpn\n\n if self.training:\n targets_dict_dom = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n dom_src=dom_src,\n pseudo=pseudo,\n fpn_layer=layer\n )\n self.forward_ret_dict.update(targets_dict_dom)\n\n\n if self.cross_scale:\n xpool3=self.domain_pool_fpn['3'](data_dict[f'spatial_features_2d_fpn3']).view(data_dict[f'spatial_features_2d_fpn3'].size(0), -1)\n xpool4=self.domain_pool_fpn['4'](data_dict[f'spatial_features_2d_fpn4']).view(data_dict[f'spatial_features_2d_fpn4'].size(0), -1)\n\n scale_out1 = self.scale_classifier_1_1(xpool3)\n scale_out1 = grad_reverse(scale_out1, l)\n scale_pred1 = self.scale_classifier_1(scale_out1)\n\n scale_out2 = self.scale_classifier_1_2(xpool4)\n scale_out2 = grad_reverse(scale_out2, l)\n scale_pred2 = self.scale_classifier_1(scale_out2)\n\n self.forward_ret_dict[f'scale_preds1'] = scale_pred1\n self.forward_ret_dict[f'scale_preds2'] = scale_pred2\n\n self.forward_ret_dict[f'scale_labels1'] = torch.zeros((1), dtype=torch.float32, device=scale_out1.device)\n self.forward_ret_dict[f'scale_labels2'] = torch.ones((1), dtype=torch.float32, device=scale_out2.device)\n\n if self.cross_two_scale:\n\n # print('data_dict[f\"spatial_features_2d_fpn3\"]', data_dict[f'spatial_features_2d_fpn3'].shape)\n # print('data_dict[f\"spatial_features_2d_fpn4\"]', data_dict[f'spatial_features_2d_fpn4'].shape)\n # print('data_dict[f\"spatial_features_2d_fpn5\"]', data_dict[f'spatial_features_2d_fpn5'].shape)\n\n xpool5=self.domain_pool_fpn['5'](data_dict[f'spatial_features_2d_fpn5']).view(data_dict[f'spatial_features_2d_fpn5'].size(0), -1)\n scale_out3 = self.scale_classifier_1_3(xpool5)\n scale_out3 = grad_reverse(scale_out3, l)\n scale_pred2_2 = self.scale_classifier_2(scale_out2)\n scale_pred2_3 = self.scale_classifier_2(scale_out3)\n self.forward_ret_dict[f'scale_preds2_2'] = scale_pred2_2\n self.forward_ret_dict[f'scale_preds2_3'] = scale_pred2_3\n\n self.forward_ret_dict[f'scale_labels2_2'] = torch.zeros((1), dtype=torch.float32, device=scale_out1.device)\n self.forward_ret_dict[f'scale_labels2_3'] = torch.ones((1), dtype=torch.float32, device=scale_out2.device)\n\n\n # if 'det'\n\n ################# det #####################\n if 'det' in t_mode:\n if not self.fpn_only:\n if self.debug: print('det img')\n\n if self.range_guidance and not self.range_guidance_dom_only:\n if self.debug: print('range_guidance det')\n\n total_range_x = spatial_features_2d.shape[-2]\n total_range_y = spatial_features_2d.shape[-1]\n half_range_x = int(spatial_features_2d.shape[-2] * 0.5)\n half_range_y = int(spatial_features_2d.shape[-1] * 0.5)\n x_range = torch.abs(torch.arange(-half_range_y, half_range_y, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range_x, 1).cuda()\n y_range = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range_y).cuda()\n\n spatial_features_2d = 
torch.cat((spatial_features_2d, x_range, y_range), dim=1)\n # print(\"spatial_features_2d\", spatial_features_2d.shape)\n\n if self.joint_attention:\n if self.debug: print('joint_attention det fpn')\n if self.voxel_det_seconv_attention and self.voxel_det_se_attention:\n spatial_features_2d_out = torch.max(self.att_spatial_se_layer(spatial_features_2d), self.att_se_layer(spatial_features_2d))\n spatial_features_2d_det = spatial_features_2d_out\n elif self.voxel_det_seconv_attention:\n # print(\"spatial_features_2d before\", spatial_features_2d.shape)\n spatial_features_2d_det = self.att_spatial_se_layer(spatial_features_2d)\n elif self.voxel_det_se_attention:\n spatial_features_2d_det = self.att_se_layer(spatial_features_2d)\n\n else:\n spatial_features_2d_det = spatial_features_2d\n else:\n if self.voxel_det_seconv_attention and self.voxel_det_se_attention:\n spatial_features_2d_out = torch.max(self.att_spatial_se_layer_det(spatial_features_2d), self.att_se_layer_det(spatial_features_2d))\n spatial_features_2d_det = spatial_features_2d_out\n elif self.voxel_det_seconv_attention:\n # print(\"spatial_features_2d before\", spatial_features_2d.shape)\n spatial_features_2d_det = self.att_spatial_se_layer_det(spatial_features_2d)\n elif self.voxel_det_se_attention:\n spatial_features_2d_det = self.att_se_layer_det(spatial_features_2d)\n else:\n spatial_features_2d_det = spatial_features_2d\n\n # if self.dom_context:\n # dom_head_context = data_dict['dom_head_context']\n # dom_head_context_fpn = []\n # for layer in self.fpn_layers:\n # dom_head_context_fpn.append(data_dict[f'dom_head_context_fpn{layer}'])\n\n # if self.sep_fpn_dom_context:\n # dom_head_context_all = dom_head_context#torch.cat((dom_head_context_all, dom_head_context), dim=1)\n\n # else:\n # dom_head_context_all = torch.cat(dom_head_context_fpn, dim=1)\n\n # dom_head_context_all = torch.cat((dom_head_context_all, dom_head_context), dim=1) #dom_point_context\n\n # dom_head_context_all_reshape = dom_head_context_all.unsqueeze(-1).unsqueeze(-1).repeat(1,1,spatial_features_2d_det.shape[-2],spatial_features_2d_det.shape[-1])\n\n # spatial_features_2d_context = torch.cat((spatial_features_2d_det, dom_head_context_all_reshape), dim=1)\n # spatial_features_2d_det = spatial_features_2d_contextrange_guidance_new_conv_dom_context)\n if self.range_guidance_new_conv_dom_context:\n if self.debug: print('range_guidance_new_conv_dom_context det')\n dom_head_context = data_dict['dom_head_context']\n # print(\"dom_head_context\", dom_head_context.shape)\n # print(\"spatial_features_2d_det\", spatial_features_2d_det.shape)\n dom_head_context_fpn = []\n for layer in self.fpn_layers:\n dom_head_context_fpn.append(data_dict[f'dom_head_context_fpn{layer}'])\n\n if self.sep_fpn_dom_context:\n dom_head_context_all = dom_head_context#torch.cat((dom_head_context_all, dom_head_context), dim=1)\n else:\n dom_head_context_all = torch.cat(dom_head_context_fpn, dim=1)\n\n dom_head_context_all = torch.cat((dom_head_context_all, dom_head_context), dim=1) #dom_point_context\n\n # print(\"dom_head_context_all\", dom_head_context_all.shape)\n #.unsqueeze(-1).unsqueeze(-1)\n dom_head_context_all_reshape = dom_head_context_all.repeat(1,1,spatial_features_2d_det.shape[-2],spatial_features_2d_det.shape[-1])\n\n spatial_features_2d_context = torch.cat((spatial_features_2d_det, dom_head_context_all_reshape), dim=1)\n spatial_features_2d_det = spatial_features_2d_context\n\n cls_preds = self.conv_cls(spatial_features_2d_det)\n box_preds = 
self.conv_box(spatial_features_2d_det)\n\n cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n\n self.forward_ret_dict['cls_preds'] = cls_preds\n self.forward_ret_dict['box_preds'] = box_preds\n\n if self.conv_dir_cls is not None:\n dir_cls_preds = self.conv_dir_cls(spatial_features_2d_det)\n dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()\n self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds\n else:\n dir_cls_preds = None\n\n if self.training:\n if pseudo:\n pseudo_weights = data_dict['pseudo_weights']\n else:\n pseudo_weights = None\n\n # print(\"gt_classes\", data_dict['gt_classes'].shape)\n # print(\"gt_classes\", data_dict['gt_classes'])\n # print(\"pseudo_weights\", pseudo_weights)\n\n targets_dict = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n pseudo=pseudo,\n pseudo_weights=pseudo_weights\n )\n\n self.forward_ret_dict.update(targets_dict)\n\n if not self.training or self.predict_boxes_when_training:\n batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(\n batch_size=data_dict['batch_size'],\n cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds\n )\n data_dict['batch_cls_preds'] = batch_cls_preds\n data_dict['batch_box_preds'] = batch_box_preds\n data_dict['cls_preds_normalized'] = False\n\n if self.rangeinv:\n # print(\"spatial_features_2d\", spatial_features_2d.shape) #512,128,128\n thresh = self.rm_thresh\n\n start_dim = int(spatial_features_2d.shape[-1]/4.)\n mid_dim = int(spatial_features_2d.shape[-1]/2.)\n end_dim = start_dim+int(spatial_features_2d.shape[-1]/2.)\n\n near_idx = torch.LongTensor([i for i in range(start_dim, mid_dim-thresh)]+[i for i in range(mid_dim+thresh, end_dim)])\n far_idx = torch.LongTensor([i for i in range(start_dim)]+[i for i in range(end_dim, spatial_features_2d.shape[-1])])\n\n if self.keep_x:\n near_feat_2d = spatial_features_2d[:,:,:,near_idx]\n far_feat_2d = spatial_features_2d[:,:,:, far_idx]\n elif self.keep_y:\n near_feat_2d = spatial_features_2d[:,:,near_idx,:]\n far_feat_2d = spatial_features_2d[:,:,far_idx,:]\n\n near_feat_2d_reverse = grad_reverse(near_feat_2d, l*-1)\n range_pred_near = self.conv_range(near_feat_2d_reverse)\n # print(\"near_range_pred\", near_range_pred.shape)\n far_feat_2d_reverse = grad_reverse(far_feat_2d, l*-1)\n range_pred_far = self.conv_range(far_feat_2d_reverse)\n # print(\"far_range_pred\", far_range_pred.shape)\n\n range_labels_near = torch.ones((range_pred_near.shape), dtype=torch.float32, device=spatial_features_2d.device)\n\n range_labels_far = torch.zeros((range_pred_far.shape), dtype=torch.float32, device=spatial_features_2d.device)\n\n targets_dict_range = {\n 'range_pred_near': range_pred_near,\n 'range_pred_far': range_pred_far,\n 'range_labels_near': range_labels_near,\n 'range_labels_far': range_labels_far,\n }\n self.forward_ret_dict.update(targets_dict_range)\n\n ############ FPN DET #############\n if self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup > 0:\n # print(\"fpn\")\n for layer in self.fpn_layers:\n\n if self.range_guidance_new_conv_dom_context:\n if self.debug: print('range_guidance_new_conv_dom_context det fpn', layer)\n if self.sep_fpn_dom_context:\n dom_head_context_all = data_dict[f'dom_head_context_fpn{layer}']\n\n else:\n dom_head_context = data_dict['dom_head_context']\n dom_head_context_fpn = []\n for l in self.fpn_layers:\n dom_head_context_fpn.append(data_dict[f'dom_head_context_fpn{l}'])\n\n dom_head_context_all = 
torch.cat(dom_head_context_fpn, dim=1)\n dom_head_context_all = torch.cat((dom_head_context_all, dom_head_context), dim=1) #dom_point_context\n\n dom_head_context_all_fpn_reshape = dom_head_context_all.repeat(1,1,spatial_features_2d_fpn_det[layer].shape[-1],spatial_features_2d_fpn_det[layer].shape[-1])\n\n # combine with context\n spatial_features_2d_fpn_context = torch.cat((spatial_features_2d_fpn_det[layer], dom_head_context_all_fpn_reshape), dim=1)\n\n spatial_features_2d_fpn_det[layer] = spatial_features_2d_fpn_context\n\n if self.debug: print('det fpn', layer)\n cls_preds = self.conv_cls_fpn[layer](spatial_features_2d_fpn_det[layer])\n box_preds = self.conv_box_fpn[layer](spatial_features_2d_fpn_det[layer])\n\n cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]\n\n # print(\"cls_preds2\", cls_preds.shape) # 1, 252, 252, 2\n # print(\"box_preds2\", box_preds.shape) # 1, 252, 252, 14\n\n self.forward_ret_dict[f'cls_preds_fpn{layer}'] = cls_preds\n self.forward_ret_dict[f'box_preds_fpn{layer}'] = box_preds\n\n if self.conv_dir_cls_fpn[layer] is not None:\n dir_cls_preds = self.conv_dir_cls_fpn[layer](spatial_features_2d_fpn_det[layer])\n dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()\n self.forward_ret_dict[f'dir_cls_preds_fpn{layer}'] = dir_cls_preds\n else:\n dir_cls_preds = None\n\n if self.training:\n if pseudo:\n pseudo_weights = data_dict['pseudo_weights']\n else:\n pseudo_weights = None\n\n targets_dict_fpn = self.assign_targets(\n gt_boxes=data_dict['gt_boxes'],\n pseudo=pseudo,\n pseudo_weights=pseudo_weights,\n fpn_layer=layer\n )\n\n self.forward_ret_dict.update(targets_dict_fpn)\n\n if not self.training or self.predict_boxes_when_training:\n batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(\n batch_size=data_dict['batch_size'],\n cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds,\n fpn_layer=layer\n )\n data_dict[f'batch_cls_preds_fpn{layer}'] = batch_cls_preds\n data_dict[f'batch_box_preds_fpn{layer}'] = batch_box_preds\n data_dict[f'cls_preds_normalized_fpn{layer}'] = False\n\n # print(\"data_dict fpn\", data_dict[f'batch_cls_preds_fpn{layer}'])\n # print(\"self.forward_ret_dict\", self.forward_ret_dict)\n\n\n return data_dict" ]
[ [ "torch.nn.Softmax", "torch.mean", "torch.zeros", "torch.cat", "torch.cdist", "torch.nn.Dropout", "torch.ones", "torch.randn", "torch.nn.ModuleDict", "torch.nn.Sigmoid", "torch.tensor", "torch.nn.functional.sigmoid", "torch.mul", "torch.bmm", "torch.arange", "numpy.log", "torch.nn.functional.conv2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.init.normal_", "torch.stack", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/audit-ai
[ "4891b1d3c813a0e6ce657eaee3f1b2ab50e8f429" ]
[ "auditai/utils/general.py" ]
[ "import numpy as np\nfrom scipy.stats import norm\nfrom scipy.special import gammaln\n\n\ndef two_tailed_ztest(success1, success2, total1, total2):\n \"\"\"\n Two-tailed z score for proportions\n\n Parameters\n -------\n success1 : int\n the number of success in `total1` trials/observations\n\n success2 : int\n the number of success in `total2` trials/observations\n\n total1 : int\n the number of trials or observations of class 1\n\n total2 : int\n the number of trials or observations of class 2\n\n Returns\n -------\n zstat : float\n z score for two tailed z-test\n p_value : float\n p value for two tailed z-test\n \"\"\"\n p1 = success1 / float(total1)\n p2 = success2 / float(total2)\n p_pooled = (success1 + success2) / float(total1 + total2)\n\n obs_ratio = (1. / total1 + 1. / total2)\n var = p_pooled * (1 - p_pooled) * obs_ratio\n\n # calculate z-score using foregoing values\n zstat = (p1 - p2) / np.sqrt(var)\n\n # calculate associated p-value for 2-tailed normal distribution\n p_value = norm.sf(abs(zstat)) * 2\n\n return zstat, p_value\n\n\ndef dirichln(arr):\n \"\"\"\n Dirichlet gamma function\n Albert (2007) Bayesian Computation with R, 1st ed., pg 178\n\n Parameters\n ----------\n arr : array or matrix of float values\n\n Returns\n -------\n val : float or array,\n logged Dirichlet transformed value if array or matrix\n \"\"\"\n val = np.sum(gammaln(arr)) - gammaln(np.sum(arr))\n return val\n\n\ndef get_unique_name(new_name, name_list, addendum='_new'):\n \"\"\"\n Utility function to return a new unique name if name is in list.\n\n Parameters\n ----------\n new_name : string\n name to be updated\n name_list: list\n list of existing names\n addendum: string\n addendum appended to new_name if new_name is in name_list\n\n Returns\n -------\n new_name : string,\n updated name\n\n Example\n -------\n new_name = 'feat1'\n name_list = ['feat1', 'feat2']\n\n first iteration: new_name returned = 'feat1_new'\n now with name_list being updated to include new feature:\n name_list = ['feat1', 'feat2', 'feat1_new']\n\n second iteration: new_name returned = 'feat1_new_new'\n \"\"\"\n # keep appending \"new\" until new_name is not in list\n while new_name in name_list:\n new_name += addendum\n return new_name\n" ]
[ [ "numpy.sum", "numpy.sqrt", "scipy.special.gammaln" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.18", "0.19" ], "tensorflow": [] } ]
cemfi/measure-detector
[ "024dde7c30145ec4d868d1cca6a50a3cf6bd977a" ]
[ "backend/server.py" ]
[ "from functools import cmp_to_key\nimport io\n\nimport hug\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\n\n# Initialize graph\ndetection_graph = tf.Graph()\ndetection_graph.as_default()\nod_graph_def = tf.GraphDef()\nwith tf.gfile.GFile('model.pb', 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\nsess = tf.Session()\n\n\[email protected]_middleware()\ndef process_data(request, response, resource):\n response.set_header('Access-Control-Allow-Origin', '*')\n\n\ndef compare_measure_bounding_boxes(self, other):\n \"\"\"Compares bounding boxes of two measures and returns which one should come first\"\"\"\n if self['ulx'] >= other['ulx'] and self['uly'] >= other['uly']:\n return +1 # self after other\n elif self['ulx'] < other['ulx'] and self['uly'] < other['uly']:\n return -1 # other after self\n else:\n overlap_y = min(self['lry'] - other['uly'], other['lry'] - self['uly']) \\\n / min(self['lry'] - self['uly'], other['lry'] - other['uly'])\n if overlap_y >= 0.5:\n if self['ulx'] < other['ulx']:\n return -1\n else:\n return 1\n else:\n if self['ulx'] < other['ulx']:\n return 1\n else:\n return -1\n\n\ndef infer(image: np.ndarray):\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections',\n 'detection_boxes',\n 'detection_scores',\n 'detection_classes'\n ]:\n tensor_name = key + ':0'\n\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)\n\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # All outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n\n return output_dict\n\n\[email protected]('/')\ndef user_interface():\n return('/usr/src/app',)\n\n\[email protected]('/upload')\ndef detect_measures(body, cors: hug.directives.cors=\"*\"):\n \"\"\"Takes an image file and returns measure bounding boxes as JSON\"\"\"\n\n image = Image.open(io.BytesIO(body['image'])).convert(\"RGB\")\n (image_width, image_height) = image.size\n image_np = np.array(image)\n\n output_dict = infer(image_np)\n measures = []\n\n for idx in range(output_dict['num_detections']):\n if output_dict['detection_classes'][idx] == 1 and output_dict['detection_scores'][idx] > 0.5:\n y1, x1, y2, x2 = output_dict['detection_boxes'][idx]\n\n y1 = y1 * image_height\n y2 = y2 * image_height\n x1 = x1 * image_width\n x2 = x2 * image_width\n\n measures.append({\n 'ulx': x1,\n 'uly': y1,\n 'lrx': x2,\n 'lry': y2\n })\n else:\n break\n\n measures.sort(key=cmp_to_key(compare_measure_bounding_boxes))\n\n return {'measures': measures}\n" ]
[ [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.expand_dims", "tensorflow.gfile.GFile", "tensorflow.Session", "tensorflow.get_default_graph", "tensorflow.GraphDef", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
petrov-lab/tuba-seq
[ "d257988659f79c2fecfec72b1d7fe260c245b7dc" ]
[ "unadulterated/preprocess.py" ]
[ "#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport argparse\nimport params\nfrom tuba_seq import fastq\nfrom tuba_seq.shared import logPrint\n\n############### Input Parameters that will be retained ########################\n\ncluster_flank = 7 # Parameters used in Rogers et al (2017) Nat meth.\ntraining_flank = 17 # These have changed in the latest version. \n\n############### Input Parameters that will be deprecated ######################\n\nKMERS = 2 # Number of kmer searches used to find the beginning and end of double-barcodes\n # Hard-coded as 2, do not change.\nmatch_len = 6 # Length of each kmer\nsymmetric_immediate_truncation_of_read = slice(len(params.head) - len(params.tail), len(params.master_read)) \n # Unused sections of reads are immediately truncated to accelerate processing. \n # This truncation assumes len(head) > len(tail)\n\n###############################################################################\n\ntruncated_master_read = params.master_read[symmetric_immediate_truncation_of_read]\ncluster_distance_from_start = params.barcode_length + cluster_flank\n\nparser = argparse.ArgumentParser(description=\"Prepare FASTQ files for DADA training & clustering.\")\nparser.add_argument(\"--base_dir\", type=str, default=os.getcwd(),\nhelp='Base directory to work within. This directory must contain a folder entitled \"{:}\" containing all FASTQ files to process.'.format(params.original_dir))\nparser.add_argument(\"-v\", \"--verbose\", help='Output more Info', action=\"store_true\")\nparser.add_argument('-p', '--parallel', action='store_true', help='Multithreaded operation')\nparser.add_argument('-e', '--maxEE', type=float, default=2, help='Maximum Expected Errors per read (default 2.)')\n\nargs = parser.parse_args()\nbase_dir = args.base_dir.rstrip('/')\nos.chdir(base_dir)\n\nLog = logPrint(verbose=args.verbose) # An object to save all of the program's output based on the choice of verbosity\n\nif args.verbose and args.parallel:\n print(\"Verbose output is incompatible with parallel operation. Will use single thread...\")\n args.parallel = False\n\nif args.parallel:\n from tuba_seq.pmap import pmap as map\n\nfor Dir in [params.preprocessed_dir, params.training_dir]:\n if not os.path.exists(Dir):\n os.makedirs(Dir)\n\nfiles = list(filter(lambda fn: params.fastq_handle in fn, os.listdir(params.original_dir)))\n\nLog('Processing {:} Files in {:}/original/.'.format(len(files), base_dir))\n\ndef easy_N_fixes(DNA, searcher=re.compile('(...N...)'), maxN=3):\n \"\"\"Repair N bases in DNA using params.master_read. \n\nKeyword Arguments:\nsearcher -- the regular expression used to search for N bases\nmaxN -- the maximum number of N bases to try to repair (default 3)\n\nThis function cannot always repair N bases, e.g. 
in barcode regions.\n \"\"\"\n \n if DNA.count('N') > maxN:\n return DNA\n broken = searcher.split(DNA)\n try:\n return ''.join([re.findall(s.replace('N', '.'), truncated_master_read)[0] if 'N' in s else s for s in broken])\n except IndexError:\n return DNA\n\noffsets = match_len*np.arange(KMERS)\nhead_matchers = [fastq.singleMismatcher(params.head[-i-match_len:][:match_len]) for i in offsets]\ntail_matchers = [fastq.singleMismatcher(params.tail[i:i+match_len]) for i in offsets]\n \nstart_expected = min(len(params.head), len(params.tail))\nstop_expected = start_expected + params.barcode_length\n\ndef process_file(f):\n \"\"\"Processes a FASTQ file into a training file for DADA2 & a barcode clustering file\n\"\"\"\n df = (fastq.fastqDF.from_file(os.path.join(params.original_dir, f), use_Illumina_filter=True, fake_header=False)\n .co_slice(symmetric_immediate_truncation_of_read))\n short_filename = f.split(params.fastq_handle)[0]\n \n # Attempt #1 to repair N bases\n degen = df['DNA'].str.contains(\"N\")\n problems = df.loc[degen, 'DNA']\n df.loc[degen, 'DNA'] = problems.apply(easy_N_fixes)\n \n # Identify the beginning of the barcode region \n heads = [df['DNA'].apply(func.find) for func in head_matchers]\n starts = heads[0] + match_len\n wrong_starts = (starts - start_expected).abs() > params.allowable_deviation\n initial_wrongs = wrong_starts.sum()\n starts[wrong_starts] = heads[1][wrong_starts] + match_len*2\n wrong_starts = (starts - start_expected).abs() > params.allowable_deviation\n final_wrongs = wrong_starts.sum()\n \n # Identify the end of the barcode region\n tails = [df['DNA'].apply(func.find) for func in tail_matchers]\n stops = tails[0]\n wrong_stops = (stops - stop_expected).abs() > params.allowable_deviation\n truncated_degen = ((stops > starts) & (stop_expected - stops > params.allowable_deviation)).sum()\n Log(\"{:.1%} of reads had truncated/missing sgIDs & barcodes.\".format(truncated_degen/len(df)))\n initial_wrongs += wrong_stops.sum() \n stops[wrong_stops] = tails[1][wrong_stops] - match_len\n wrong_stops = (stops - stop_expected).abs() > params.allowable_deviation\n final_wrongs += wrong_stops.sum()\n \n # Discard reads with start/stop of barcode in the wrong location\n keep = ~(wrong_starts | wrong_stops)\n passed_kmers = df.select_reads(keep)\n Log('{:.1%} passed kmer tests.'.format(len(passed_kmers)/len(df)))\n starts = starts[keep]\n stops = stops[keep]\n\n # Only train DADA2 on reads lacking N bases, even if we can repair these N bases.\n # Otherwise, the error rate estimate will be biased. 
\n was_not_degen = ~passed_kmers['QC'].str.contains('#')\n train = passed_kmers.select_reads(was_not_degen)\n Log('Error training will use {:.1%} of remaining reads.'.format(len(train)/len(passed_kmers)))\n \n # Train DADA2 only on the barcode-flanking regions of reads\n train_starts = starts[was_not_degen].apply(lambda start: slice(start-training_flank, start))\n train_stops = stops[was_not_degen].apply(lambda stop: slice(stop, stop + training_flank))\n (train.vector_slice(train_starts, train_stops)\n .drop_abnormal_lengths()\n .write(params.training_dir+f))\n\n # Trim reads for DADA2 clustering \n slices = starts.apply(lambda start: slice(start - cluster_flank, start + cluster_distance_from_start))\n sliced = passed_kmers.vector_slice(slices).drop_abnormal_lengths()\n cluster = (sliced.select_reads(~sliced.isDegenerate())\n .drop_abnormal_lengths())\n \n # Discard reads with poor QC scores\n EEs = cluster.expected_errors()\n clean = cluster.select_reads(EEs <= args.maxEE)\n\n # Write clustering file\n clean.write(params.preprocessed_dir+('{short_filename}.fastq'.format(**locals())))\n \n #Output summary\n counts = dict(saved = initial_wrongs - final_wrongs,\n reads = len(df),\n passed_kmers = len(passed_kmers),\n cluster = len(cluster),\n clean = len(clean),\n truncated = truncated_degen,\n good_flanks = len(passed_kmers)+truncated_degen,\n EEs = EEs.sum())\n\n reads = counts['reads']\n percents = {k:v/reads for k, v in counts.items()}\n percents['EE_rate'] = counts['EEs']/(len(clean)*len(clean['DNA'].values[0]))\n Log(\"{:} ({:.2}M reads): {good_flanks:.0%} good flanks, {clean:.0%} used. Estimated Error Rate: {EE_rate:.3%}.\".format(\n short_filename, reads*1e-6, **percents), print_line=True)\n return counts\n\nall_output = map(process_file, files)\noutput_df = pd.DataFrame(list(all_output), index=files)\n\ntotals = output_df.sum()\n\nreads = totals['reads']\nmean_error_rate = totals.pop('EEs')/totals['clean']/(params.barcode_length + 2*cluster_flank)\n\npercents = {k:v/reads for k, v in totals.items()}\n\nLog(\"\"\"\nSummary for {:}:\n---------------------------------------\nProcessed {:.2}M Reads.\n{good_flanks:.1%} had reasonable headers & tails ({saved:.1%} were saved by 2nd kmer).\n{passed_kmers:.1%} had sgIDs & barcodes.\n{cluster:.1%} had fixable Ns.\n{clean:.1%} had <= {maxEE:g} expected errors and the average Error Rate was {error_rate:.4%}.\n\"\"\".format( base_dir, \n reads*1e-6, \n maxEE=args.maxEE,\n error_rate=mean_error_rate,\n **percents), print_line=True)\nLog.close()\n\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
richardgorham1/ds-prep-capstone
[ "69f87cb3312d160ea577b191b659c056414e837b" ]
[ "lib/least_squares.py" ]
[ "import math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom scipy.stats import t\nfrom scipy.stats import norm\n\ndef least_squares(x, y):\n #from SciPy Stats\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) \n \n #Calculate R-Squared (coefficient of determination) from r (corr. coef.)\n r_sq = r_value**2 \n y_hat_i = slope * x + intercept #The fitted line\n s_y = np.sqrt(np.sum((y - y_hat_i)**2) / (len(y) - 2)) #Std. dev. of y\n \n #t-crit. from SciPy Stats\n t_df = t.isf(0.025, len(x) - 2, loc = 0, scale = 1) \n \n #Prediction intervals of data\n y_pred_upper = y_hat_i \\\n + (t_df * s_y * np.sqrt(1 + (1 / len(x))\\\n + (x - x.mean())**2 / ((len(x) - 1) * x.var())))\n \n y_pred_lower = y_hat_i \\\n - (t_df * s_y * np.sqrt(1 + (1 / len(x)) \\\n + (x - x.mean())**2 / ((len(x) - 1) * x.var())))\n \n #Confidencce inverals for the line\n y_confidence_upper = y_hat_i \\\n + (t_df * s_y * np.sqrt((1 / len(x)) \\\n + (x - x.mean())**2 / ((len(x) - 1) * x.var())))\n \n y_confidence_lower = y_hat_i \\\n - (t_df * s_y * np.sqrt((1 / len(x)) \\\n + (x - x.mean())**2 / ((len(x) - 1) * (x.var()))))\n \n #Return series as data frames for charting\n l = pd.DataFrame(y_hat_i)\n upi = pd.DataFrame(y_pred_upper)\n lpi = pd.DataFrame(y_pred_lower)\n uci = pd.DataFrame(y_confidence_upper) \n lci = pd.DataFrame(y_confidence_lower) \n\n return {'m':np.round(slope,2), 'b':np.round(intercept,2),\\\n 'r_sq':np.round(r_sq,2), 'p':np.round(p_value,4),\\\n 'l':l, 'upi':upi, 'lpi':lpi, 'uci':uci, 'lci':lci}\n" ]
[ [ "numpy.round", "scipy.stats.linregress", "numpy.sum", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
cathyhaha/DensityPeakCluster
[ "89eb5f8a67011aea16e49e485474eb76f4319a9d" ]
[ "plot_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_scatter_diagram(which_fig, x, y, x_label = 'x', y_label = 'y', title = 'title', style_list = None):\n\t'''\n\tPlot scatter diagram\n\n\tArgs:\n\t\twhich_fig : which sub plot\n\t\tx : x array\n\t\ty : y array\n\t\tx_label : label of x pixel\n\t\ty_label : label of y pixel\n\t\ttitle : title of the plot\n\t'''\n\tstyles = ['k', 'g', 'r', 'c', 'm', 'y', 'b', '#9400D3','#C0FF3E']\n\tassert len(x) == len(y)\n\tif style_list != None:\n\t\tassert len(x) == len(style_list) and len(styles) >= len(set(style_list))\n\tplt.figure(which_fig)\n\tplt.clf()\n\tif style_list == None:\n\t\tplt.plot(x, y, color=styles[0], linestyle='.', marker='.')\n\telse:\n\t\tclses = set(style_list)\n\t\txs, ys = {}, {}\n\t\tfor i in xrange(len(x)):\n\t\t\ttry:\n\t\t\t\txs[style_list[i]].append(x[i])\n\t\t\t\tys[style_list[i]].append(y[i])\n\t\t\texcept KeyError:\n\t\t\t\txs[style_list[i]] = [x[i]]\n\t\t\t\tys[style_list[i]] = [y[i]]\n\t\tadded = 1\n\t\tfor idx, cls in enumerate(clses):\n\t\t\tif cls == -1:\n\t\t\t\tstyle = styles[0]\n\t\t\t\tadded = 0\n\t\t\telse:\n\t\t\t\tstyle = styles[idx + added]\n\t\t\tplt.plot(xs[cls], ys[cls], color=style, linestyle='.', marker='.')\n\tplt.title(title)\n\tplt.xlabel(x_label)\n\tplt.ylabel(y_label)\n\tplt.show()\n\nif __name__ == '__main__':\n\tx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 7, 7])\n\ty = np.array([2, 3, 4, 5, 6, 2, 4, 8, 5, 6])\n\tcls = np.array([1, 4, 2, 3, 5, -1, -1, 6, 6, 6])\n\tplot_scatter_diagram(0, x, y, style_list = cls)\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
525309178/fp-growth
[ "3b151021bd2b37be8c0aa6fa4ed00b603afe9a7e" ]
[ "test3.py" ]
[ "from __future__ import print_function\n# python3\n# -*- coding: utf-8 -*-\n# @Author : lina\n# @Time : 2018/5/13 11:40\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom keras.callbacks import ReduceLROnPlateau\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nfrom keras.callbacks import ReduceLROnPlateau\nimport numpy as np\nimport os\nnp.random.seed(1337) # for reproducibility\nfrom keras.preprocessing import sequence\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Embedding,BatchNormalization\nfrom keras.layers import LSTM, SimpleRNN, GRU\nfrom keras.datasets import imdb\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.metrics import (precision_score, recall_score,f1_score, accuracy_score,mean_squared_error,mean_absolute_error)\nfrom sklearn import metrics\nfrom sklearn.preprocessing import Normalizer, OneHotEncoder,LabelBinarizer\nfrom sklearn import preprocessing\nimport h5py\nfrom keras import callbacks\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger\n# 数据集\ndataset = [\n ['啤酒', '牛奶', '可乐'],\n ['尿不湿', '啤酒', '牛奶', '橙汁'],\n ['啤酒', '尿不湿'],\n ['啤酒', '可乐', '尿不湿'],\n ['啤酒', '牛奶', '可乐']\n] #type list\n###################################################################################################\ntrain_path = './KDDTrain+_2.csv'\ntest_path = './KDDTest+_2.csv'\nsave_path = '/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/checkpoint'\n# traindata = pd.read_csv('/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/dataset/KDDTrain+_2.csv', header=None)\n# testdata = pd.read_csv('/home/administrator/PycharmProjects/Network-Intrusion-Detection-DNN/NSL-KDD-TEST/dataset/KDDTest+_2.csv', header=None)\n\n\n# Step 1: Data preprocessing:\n # 1 attach the column names to the dataset 为数据集每列添加对应的属性名称\ncol_names = [\"duration\",\"protocol_type\",\"service\",\"flag\",\"src_bytes\",\n \"dst_bytes\",\"land\",\"wrong_fragment\",\"urgent\",\"hot\",\"num_failed_logins\",\n \"logged_in\",\"num_compromised\",\"root_shell\",\"su_attempted\",\"num_root\",\n \"num_file_creations\",\"num_shells\",\"num_access_files\",\"num_outbound_cmds\",\n \"is_host_login\",\"is_guest_login\",\"count\",\"srv_count\",\"serror_rate\",\n \"srv_serror_rate\",\"rerror_rate\",\"srv_rerror_rate\",\"same_srv_rate\",\n \"diff_srv_rate\",\"srv_diff_host_rate\",\"dst_host_count\",\"dst_host_srv_count\",\n \"dst_host_same_srv_rate\",\"dst_host_diff_srv_rate\",\"dst_host_same_src_port_rate\",\n \"dst_host_srv_diff_host_rate\",\"dst_host_serror_rate\",\"dst_host_srv_serror_rate\",\n \"dst_host_rerror_rate\",\"dst_host_srv_rerror_rate\",\"label\"]\n\n # 2 Load the Dataset\ndf = pd.read_csv(train_path, header=None, names = col_names)\n\ndf_test = pd.read_csv(test_path, header=None, names = col_names)\ndf_test.to_csv('test_table.csv')\n# shape, this gives the dimensions of the dataset\n# print('Dimensions of the Training set:',df.shape)\n# print('Dimensions of the Test set:',df_test.shape)\n\n# first five rows of dataset\n# print(df.head(5))\n# print(df.describe())\n\n# Label Distribution of Training and Test set\n# print('Label distribution Training set:')\n# print(df['label'].value_counts())\n# print()\n# print('Label distribution Test set:')\n# print(df_test['label'].value_counts())\n\n# 
查看有哪些属性是类别而不是数值,将其转为数字标识 colums that are categorical and not binary yet: protocol_type (column 2), service (column 3), flag (column 4).\n# explore categorical features\n# print('Training set:')\n# for col_name in df.columns:\n# if df[col_name].dtypes == 'object' :\n# unique_cat = len(df[col_name].unique())\n# print(\"Feature '{col_name}' has {unique_cat} categories\".format(col_name=col_name, unique_cat=unique_cat))\n\n# see how distributed the feature service is, it is evenly distributed and therefore we need to make dummies for all.\n# print()\n# print('Distribution of categories in service:')\n# print(df['service'].value_counts().sort_values(ascending=False).head())\n\n# Test set\n# print('Test set:')\n# for col_name in df_test.columns:\n# if df_test[col_name].dtypes == 'object' :\n# unique_cat = len(df_test[col_name].unique())\n# print(\"Feature '{col_name}' has {unique_cat} categories\".format(col_name=col_name, unique_cat=unique_cat))\n\n\n # 3 选出需要数值化处理的类别特征'protocol_type', 'service', 'flag'\n# insert code to get a list of categorical columns into a variable, categorical_columns\ncategorical_columns=['protocol_type', 'service', 'flag','label']\n\n # 从原有数据集中先提取出这三个类别对应的数据到:df_categorical_values与testdf_categorical_values中\ndf_categorical_values = df[categorical_columns]\ntestdf_categorical_values = df_test[categorical_columns]\n# print(df_categorical_values.head())\n # 提取出剩余的离散特征\nse_column = [\"land\",\"logged_in\",\"root_shell\",\"su_attempted\",\"is_host_login\",\"is_guest_login\"]\ndf_sep_values = df[se_column]\ntestdf_sep_values = df_test[se_column]\nfor feature in se_column:\n df_sep_values[feature].replace([0,1],['no_'+feature,feature],inplace=True)\n# print(df_sep_values.head())\nnewdf = pd.DataFrame()\n# 将所有的离散数据放入到newdf中进行关联分析处理\nnewdf =pd.concat([df_sep_values,df_categorical_values], axis=1)\n# print(newdf.head())\n\n# 使用正则表达式将normal之外的所有攻击类型统一标记为attack\nnewdf['label'].replace('^((?!normal).)*$','attack',regex=True,inplace=True)\n# print(newdf.head(20))\nnewdf = np.array(newdf)\nnewdf = newdf.tolist()\n\n# print(newdf[1])\n# print(newdf[2])\n\n\n# 关联规则生成函数\n# Apriori算法\n\"\"\"\n由于Apriori算法假定项集中的项是按字典序排序的,而集合本身是无序的,所以我们在必要时需要进行set和list的转换;\n由于要使用字典(support_data)记录项集的支持度,需要用项集作为key,而可变集合无法作为字典的key,因此在合适时机应将项集转为固定集合frozenset。\n支持度\n置信度\n\"\"\"\n\n\nclass apriori_algorithm:\n\n # 算法初始化\n def __init__(self, minSupport, dataSet):\n self.minSupport = minSupport # 最小支持度\n self.dataSet = dataSet # 数据集\n\n # 加载数据集\n def loaddata(self):\n return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]\n\n # 生成单个物品的项集列表\n def generateC1(self, dataSet):\n C1 = [] # 用于存放生成的单个物品的项集列表\n # 遍历数据集\n for data in dataSet:\n for item in data:\n if [item] not in C1:\n C1.append([item])\n\n return C1\n\n # 遍历数据集,和Ck对比,计数\n def generateLk_by_Ck(self, dataSet, Ck, minSupport, support_data):\n \"\"\"\n Generate Lk by executing a delete policy from Ck.\n Args:\n data_set: 数据集\n Ck: A set which contains all all frequent candidate k-itemsets.\n min_support: The minimum support.\n support_data: A dictionary. 
The key is frequent itemset and the value is support.\n Returns:\n Lk: A set which contains all all frequent k-itemsets.\n \"\"\"\n D = map(set, dataSet)\n C = map(frozenset, Ck)\n C1 = list(C) # 关于map对象的遍历,在内循环中遍历完最后一个元素后,再次访问时会放回空列表,所以外循环第二次进入的时候是空的,需要将其转为list处理\n countData = dict()\n for d in D: # set遍历\n for c in C1:\n if c.issubset(d): # 子集判断,并非元素判断\n if c not in countData.keys(): # 将集合作为字典的键使用,c为[]型\n countData[c] = 1\n\n else:\n countData[c] += 1\n\n numItems = float(len(list(dataSet)))\n returnList = []\n supportData = dict()\n # 遍历前面得到的计数字典\n for key in countData:\n support = countData[key] / numItems\n if support >= minSupport:\n returnList.insert(0, key) # insert() 函数用于将指定对象插入列表的指定位置\n support_data[key] = support\n\n return returnList\n\n def generate_L(self, dataSet, k, min_support):\n \"\"\"\n Generate all frequent itemsets.\n Args:\n data_set:数据集\n k: 频繁项集中含有的最多的元素\n min_support: 最小支持度\n Returns:\n L: 出现的所有频繁项集\n support_data: 每个频繁项集对应的支持度\n \"\"\"\n support_data = {}\n C1 = self.generateC1(dataSet)\n L1 = self.generateLk_by_Ck(dataSet, C1, min_support, support_data)\n Lksub1 = L1.copy()\n\n L = []\n L.append(Lksub1)\n\n for i in range(2, k + 1):\n Ci = self.generateCK(Lksub1, i)\n Li = self.generateLk_by_Ck(dataSet, Ci, min_support, support_data)\n Lksub1 = Li.copy()\n L.append(Lksub1)\n return L, support_data\n\n # generateCK 候选频繁项集产生 参数 Lk频繁项集,k:项集元素个数\n def generateCK(self, Lk, k):\n Ck = set()\n len_Lk = len(list(Lk))\n list_Lk = list(Lk)\n for i in range(len_Lk):\n for j in range(1, len_Lk):\n l1 = list(list_Lk[i])\n l2 = list(list_Lk[j])\n l1.sort()\n l2.sort()\n if l1[0:k - 2] == l2[0:k - 2]:\n Ck_item = list_Lk[i] | list_Lk[j]\n if self.isCk(Ck_item, list_Lk):\n Ck.add(Ck_item)\n # Ck.add(Ck_item)\n return Ck\n\n # 频繁项集判断\n\n def isCk(self, Ck_item, list_Lk):\n for item in Ck_item:\n sub_Ck = Ck_item - frozenset([item])\n if sub_Ck not in list_Lk:\n return False\n return True\n\n # 生成关联规则\n def generate_big_rules(self, L, support_data, min_conf):\n \"\"\"\n Generate big rules from frequent itemsets.\n Args:\n L: 所有频繁项集的列表\n support_data: 每个频繁项集对应的支持度\n min_conf: 最小可信度\n \"\"\"\n big_rule_list = []\n sub_set_list = []\n for i in range(0, len(L)):\n for freq_set in L[i]:\n for sub_set in sub_set_list:\n if sub_set.issubset(freq_set):\n conf = support_data[freq_set] / support_data[freq_set - sub_set]\n big_rule = (freq_set - sub_set, sub_set, conf)\n\n if conf >= min_conf and big_rule not in big_rule_list:\n if len(sub_set) == 1:\n d = set(['normal'])\n if sub_set.intersection(d):\n print(freq_set - sub_set, \" => \", sub_set, \"conf: \", conf)\n big_rule_list.append(big_rule)\n sub_set_list.append(freq_set)\n return big_rule_list\n\n\nif __name__ == '__main__':\n minS = 0.5\n dataSet = [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]\n apriori = apriori_algorithm(minSupport=minS, dataSet=newdf)\n\n # 获取根据设置的支持度得到的频繁项集:L,L[i]表示包含一个元素的频繁项集列表 及其对应的支持度:support_data\n L, support_data = apriori.generate_L(newdf, 10, minS)\n\n d = set(['normal'])\n for item in L[1]:\n if item.intersection(d):\n print(item)\n # for i in range(4):\n # for item in L[i]:\n # if item.intersection(d):\n # print(item)\n # print(type(L))\n # print(support_data)\n\n # 关联规则列表\n big_rule_list = apriori.generate_big_rules(L, support_data, 0.7)\n print(type(big_rule_list))\n\n # s = set([1, 1, 2, 2, 3, 3])\n # d = set(['normal'])\n # print(s.intersection(d))\n #\n # print(big_rule_list)\n\n\n\n\n\n" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.random.seed", "pandas.DataFrame", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
MLJeb/EstudioPopularidadBibliotecaUcab
[ "9b2ba98bff29a94d57529cae2d21013c32d22619" ]
[ "statisticsProjectI.py" ]
[ "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set()\nimport openpyxl\nfrom openpyxl import load_workbook\nimport os.path\nfrom collections import Counter\nfrom statsmodels.distributions.empirical_distribution import ECDF\n\ndef percentile(n, name = None):\n def percentile_(x):\n return x.quantile(n)\n percentile_.__name__ = 'P({:2.0f})'.format(n*100) if name == None else name\n return percentile_\n\ndef difPercentiles(m,n, name= None):\n def percentile_(x):\n return x.quantile(n) - x.quantile(m)\n percentile_.__name__ = 'P({:2.0f}) - P({:2.0f})'.format(n*100, m*100) if name == None else name\n return percentile_\n#data.columns\n#Index(['Libro', 'Titulo del libro', 'Especialidad', 'Opinión del 1 al 6'], dtype='object')\ndata = pd.read_csv(\"bd2.csv\")\ndata.dropna(subset= ['Especialidad','Libro','Opinión del 1 al 6'], inplace=True)\ndataIndexes = ['Especialidad','Libro','Nº Valoraciones','Media','Cuasidesviación','Cu','Ca','Q1', 'Q2', 'Q3','Q3-Q1','Moda']\nnonDispersedData = data.groupby(['Especialidad','Libro']).filter(lambda x: x['Opinión del 1 al 6'].nunique() <= 1)\ndata = data.groupby(['Especialidad','Libro']).filter(lambda x: x['Opinión del 1 al 6'].nunique() > 1)\nbookGroupedRates = data.groupby(['Especialidad','Libro'])['Opinión del 1 al 6']\ndescribeTable = bookGroupedRates.agg(['count','std', 'mean', percentile(0.25,'Q1'),percentile(0.5,'Q2'), percentile(0.75,'Q3'),difPercentiles(0.25,0.75, 'Q3-Q1')])\nfirstLevelModes = bookGroupedRates.agg([('Moda', lambda x: x.mode().iloc[0]), ('Cu',lambda x: x.kurtosis()), ('Ca',lambda x: x.skew(bias = False))]).reset_index()\ndescribeTable.reset_index()\ndescribeTable = pd.merge(describeTable, firstLevelModes, how='left', left_on=['Especialidad','Libro'], right_on = ['Especialidad','Libro'])\ndescribeTable.rename(columns={'count': 'Nº Valoraciones','mean':'Media', 'std':'Cuasidesviación'}, inplace=True)\ndescribeTable.set_index(dataIndexes, inplace = True)\n#3) book valorations distributions\nopinionsfi = data.groupby(['Especialidad','Libro', 'Opinión del 1 al 6']).size()\n#print(opinionsfi.index.levels[1].values)\nvalRange = np.arange(1,7)\nmissingVal = {}\nfiColumns = ['Especialidad','Libro','Opinión del 1 al 6','fi'] \nfor col in fiColumns:\n missingVal[col] = []\n \n#create a dictionary with the books-rates info that are not present as a row (0 students given an x rating to a y book), adding a fi (absolute frecuency) of 0. 
\n#crear un diccionario con la información de libros-valoraciones que no están presentes como filas (0 estudiantes dando una valoración x a un libro y),\n#añadiendo una fi (frecuencia absoluta) 0\n\n#E.G: { 'Libro' : [2,3,4], 'Opinión del 1 al 6': [1,1,2], 'fi': [0,0,0]\ngroupedDataDict = {i: dict(Counter(x['Libro'])) for i, x in data.groupby('Especialidad')}\nfor especialty in groupedDataDict:\n groupedDataDict[especialty]['Valoraciones'] = []\n for book in groupedDataDict[especialty]: \n r = data[(data['Especialidad'] == especialty) & (data['Libro'] == book)]['Opinión del 1 al 6'].values\n if(len(r)> 0):\n groupedDataDict[especialty]['Valoraciones'].append(r) \n for val in valRange:\n j = (data[fiColumns[1]] == book) & (data[fiColumns[2]] == val)\n if(not j.any()):\n missingVal[fiColumns[0]].append(especialty)\n missingVal[fiColumns[1]].append(book)\n missingVal[fiColumns[2]].append(val)\n missingVal[fiColumns[3]].append(0)\n\n\nnonPresentVal = pd.DataFrame(missingVal, columns = fiColumns)\n\nopinionshi = opinionsfi / opinionsfi.groupby(['Especialidad','Libro']).sum() * 100\n\nbookRatesDistributions = opinionsfi.to_frame('fi').reset_index()\nbookRatesDistributions = pd.concat([bookRatesDistributions,nonPresentVal]).sort_values(by=['Especialidad','Libro', 'Opinión del 1 al 6'])\nbookRatesDistributions.set_index(['Especialidad','Libro', 'Opinión del 1 al 6'], inplace = True)\nbookRatesDistributions['hi'] = opinionshi\nbookRatesDistributions['Fi'] = opinionsfi.groupby(['Especialidad','Libro']).cumsum()\nbookRatesDistributions['Hi'] = opinionshi.groupby(['Especialidad','Libro']).cumsum()\n\n# to this columns fill 0's int the rows that were added because of no records for a book rate\n# llenar estas columnas con 0's en las filas que fueron añadidas debido a que su valoración no poseía registros\nbookRatesDistributions['hi'].fillna(0, inplace = True) \nbookRatesDistributions['Fi'].fillna(0, inplace = True) \nbookRatesDistributions['Hi'].fillna(0, inplace = True) \n# join the general information\nfinalTable = describeTable.join(bookRatesDistributions, how='inner')\nfinalTable.set_index(finalTable.index.reorder_levels([*dataIndexes, 'Opinión del 1 al 6']), inplace = True)\nfinalTable.sort_values(by=['Especialidad','Nº Valoraciones','Opinión del 1 al 6'], inplace = True, ascending = [True,False, True])\n\npath = \"./output3.xlsx\"\nwriter = pd.ExcelWriter(path, engine = 'openpyxl')\nwriter.book = openpyxl.Workbook()\nfinalTable.to_excel(writer, sheet_name= \"tabla de datos dispersos\") \nif(not nonDispersedData.empty):\n nonDispersedData = nonDispersedData.groupby(['Especialidad','Libro','Opinión del 1 al 6']).size().to_frame('Nº Valoraciones').sort_values(by=['Especialidad', 'Nº Valoraciones'],ascending = [True,False])\n nonDispersedData.to_excel(writer, sheet_name = \"tabla de datos no dispersos\") \nwriter.save()\nwriter.close()\n\nbins = np.arange(0, 6 + 1.5) - 0.5\ni = 0\nfor especialty in groupedDataDict:\n labels = [label for label in list(groupedDataDict[especialty].keys()) if label != 'Valoraciones']\n fig, axs = plt.subplots(1,3)\n _ = axs[0].hist(groupedDataDict[especialty]['Valoraciones'], bins, label = labels)\n axs[0].set_xticks(bins + 0.5)\n j = 0\n for arr in groupedDataDict[especialty]['Valoraciones']:\n ecdf = ECDF(arr)\n x = np.linspace(0, 6)\n y = ecdf(x)\n axs[1].step(x,y,label = labels[j], alpha = 0.5)\n j+=1\n axs[2].set(ylim=(0, 7))\n axs[2].boxplot(groupedDataDict[especialty]['Valoraciones'])\n #axs[2].boxplot(groupedDataDict[especialty]['Valoraciones'], whis = [0,100]) 
min-max as whisper\n axs[2].set_xticklabels(labels)\n axs[0].legend(prop={'size': 10})\n axs[1].legend(prop={'size': 10})\n fig.suptitle(especialty, fontsize=20)\n plt.figure(i)\n i+=1\nplt.close(0)\nplt.show()\n\"\"\"\n nBooks = len(labels)\n if nBooks > 0:\n fig1, axs1 = plt.subplots(1,nBooks)\n k = 0\n for book in labels:\n axs1[k].pie(finalTable.loc[especialty, book]['hi'].values, labels=valRange, autopct='%1.1f%%',shadow=True, startangle=90)\n axs1[k].axis('equal')\n axs1[k].set_title(book)\n axs1[k].legend(prop={'size': 10})\n k+=1\n fig1.suptitle(especialty, fontsize=20)\n\"\"\"\n\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.concat", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.close", "pandas.ExcelWriter", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mannyfin/PythonLabView
[ "b65624118d51a803c87147c5be0a6687541cea6e" ]
[ "Eurotherm_Interpolation.py" ]
[ "import sys\nimport pandas as pd\nimport os\nfrom thermocouples_reference import thermocouples\n\ndef interp(inputmV,mV_table, T_table):\n \"\"\"\n\n :param inputmV: input value to interpolate\n :param mV_table: the two mV values in between the input mV value\n :param T_table: the two Temp values in between the input corresponding mV value\n :return: interpT : interpolated Temperature\n \"\"\"\n T1, T2 = T_table\n low, high = mV_table\n\n interpT = T1 + (T2 - T1) * (inputmV - low) / (high - low)\n\n return interpT\n\nif __name__ == \"__main__\":\n # labview_mV = float(input('input a number: '))\n # print(os.getcwd())\n typeC = thermocouples['C']\n os.chdir('C:\\\\Users\\\\Administrator\\\\Desktop\\\\PythonProjects\\\\LabViewtest\\\\')\n df = pd.read_excel('Type C calibration_corrected.xlsx')\n labview_mV = float(sys.argv[1])\n # CJC temp in K\n CJC_temp = float(sys.argv[2])\n\n adjusted_mV = labview_mV + typeC.emf_mVC(CJC_temp)\n\n temp_df = df.iloc[(df['mV'] - adjusted_mV).abs().argsort()[:2]]\n\n # print(os.getcwd())\n # imported values from LabView are STRINGS, we need to cast them into floats to use them...\n\n\n\n # print(sys.argv[1])\n\n temperature = interp(adjusted_mV, temp_df['mV'], temp_df['T'])\n # print('interp temp = {0}\\n'.format(round(temperature,2)))\n print('{0}'.format(round(temperature,2)))\n \"plot the temperature and save the temperature value in labview\"" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
easilylazy/pattern-recognition
[ "2b95689bb3f34e4821a0211b19b76164aa6e615f" ]
[ "homework-12345/homework5/wholetest.py" ]
[ "\n# %%\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchtext.legacy import data\nfrom torchtext.legacy import datasets\nfrom torchtext.vocab import Vectors, GloVe, CharNGram#, FastTex\nfrom torch.autograd import Variable\n\nimport numpy as np\nfrom os import stat\nimport sys, getopt\n\ntorch.manual_seed(2)\ndevice=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\n# set up fields\nTEXT = data.Field()\nLABEL = data.Field(sequential=False,dtype=torch.long)\n\n# make splits for data\n# DO NOT MODIFY: fine_grained=True, train_subtrees=False\ntrain, val, test = datasets.SST.splits(\n TEXT, LABEL, fine_grained=True, train_subtrees=False)\n\n\n\n\n# %%\n\n# build the vocabulary\n# you can use other pretrained vectors, refer to https://github.com/pytorch/text/blob/master/torchtext/vocab.py\nTEXT.build_vocab(train, vectors=Vectors(name='vector.txt', cache='./data'))\nLABEL.build_vocab(train)\n# We can also see the vocabulary directly using either the stoi (string to int) or itos (int to string) method.\n\npretrained_embeddings = TEXT.vocab.vectors\n\nprint(pretrained_embeddings.shape)\n\n\n# %%\n\n# make iterator for splits\ntrain_iter, val_iter, test_iter = data.BucketIterator.splits(\n (train, val, test), batch_size=64)\n\n# print batch information\nbatch = next(iter(train_iter)) # for batch in train_iter\n\n# 超参数\nmax_len = 32 #句子最大长度\nembedding_size = 300\nhidden_size = 32\nbatch_size = 2210\nlabel_num = 6\nnum_layers=2\n\n\nclass Classify(nn.Module):\n def __init__(self,vocab_len, embedding_table):\n super(Classify, self).__init__()\n self.max_len = max_len\n self.batch_size = batch_size\n # 这里我只是默认初始化词向量,也可以使用torch.from_numpy来加载预训练词向量\n self.embedding_table = nn.Embedding(vocab_len, embedding_size)\n self.embedding_size = embedding_size\n self.hidden_size= hidden_size\n self.label_num = label_num\n self.lstm = nn.LSTM(input_size=self.embedding_size, hidden_size=self.hidden_size,num_layers=num_layers,dropout=0.8)#,bidirectional=True)\n self.init_w = Variable(torch.Tensor(1, self.hidden_size), requires_grad=True)\n torch.nn.init.uniform_(self.init_w)\n self.init_w = nn.Parameter(self.init_w).to(device)\n self.linear = nn.Linear(self.hidden_size, self.label_num)\n self.criterion = nn.CrossEntropyLoss()\n self.optim = torch.optim.Adam(self.parameters(),lr=1e-3)\n \n def forward(self, input, batch_size):\n input = self.embedding_table(input.long()) # input:[batch_size, max_len, embedding_size]\n h0 = Variable(torch.zeros(num_layers, batch_size, self.hidden_size)).to(device)\n c0 = Variable(torch.zeros(num_layers, batch_size, self.hidden_size)).to(device)\n lstm_out, _ = self.lstm(input.permute(1,0,2),(h0,c0))\n lstm_out = torch.tanh(lstm_out) # [max_len, bach_size, hidden_size]\n M = torch.matmul(self.init_w, lstm_out.permute(1,2,0))\n alpha = F.softmax(M,dim=0) # [batch_size, 1, max_len]\n out = torch.matmul(alpha, lstm_out.permute(1,0,2)).squeeze() # out:[batch_size, hidden_size]\n predict = F.softmax(self.linear(out)) # out:[batch_size, label_num]\n return predict\n\n\n\n\n# %%\n# train_, test_, vocab = processData()\n# embedding_table = word_embedding(len(vocab), embedding_size)\ntrain_iter, val_iter, test_iter = data.BucketIterator.splits(\n (train, val, test), batch_size=20)\n\n\ndef feature_scalling(X):\n mmin = X.min()\n mmax = X.max()\n return (X - mmin) / (mmax - mmin), mmin, mmax\n\nx_train, mmin, mmax = feature_scalling(next(iter(train_iter)).text)\nprint(mmin,mmax)\nprint(x_train)\n# %%\n\n# # print 
batch information\n# net = Classify(len(TEXT.vocab),pretrained_embeddings)\n# net=net.to(device)\n# net.embedding_table.weight.data.copy_(pretrained_embeddings)\n# # embedding_table)\n# print(net.embedding_table)\n# optim = net.optim\n\npath='pth\\\\dr__lr_0.01_de_0.01_len_32_hid_32_emb_300_epo_40_bat_15_eval_100_loss_1.401_acc_0.6213.pth'\n # dr__lr_0.01_de_0.01_len_32_hid_32_emb_300_epo_40_bat_15_eval_100_loss_1.5905_acc_0.5297_loss_1.4428_acc_0.5941_loss_1.3352_acc_0.6286_loss_1.1809_acc_0.6295_loss_1.1129_acc_0.649.pth'\nnet=torch.load(path,map_location=device)\nnet.eval()\nwith torch.no_grad():\n print('testing (epoch:',1,')')\n num = 0\n for k in range(1):\n batch = next(iter(train_iter)) # for batch in train_iter\n x=batch.text.transpose(0,1).to(torch.float32)\n print(x)\n\n x=Variable(x).to(device)\n y=batch.label-1\n x=x.to(device)\n y=y.to(device)\n y_hat = net.forward(x, len(x))\n y_hat = np.argmax(y_hat.cpu().numpy(),axis=1)\n print(len(np.where((0-y.cpu().numpy())==0)[0]))\n print(len(np.where((1-y.cpu().numpy())==0)[0]))\n print(len(np.where((2-y.cpu().numpy())==0)[0]))\n print(len(np.where((3-y.cpu().numpy())==0)[0]))\n print(len(np.where((4-y.cpu().numpy())==0)[0]))\n\n print(len(np.where((0-y_hat)==0)[0]))\n print(len(np.where((1-y_hat)==0)[0]))\n print(len(np.where((2-y_hat)==0)[0]))\n print(len(np.where((3-y_hat)==0)[0]))\n print(len(np.where((4-y_hat)==0)[0]))\n num=len(np.where((y_hat-y.cpu().numpy())==0)[0])\n print( num,batch_size)\n acc = round(num/batch_size, 4)\n print(y)\n print(y_hat)\n # if acc > max_acc:\n # max_acc = acc\n print('epoch:', 1, ' | accuracy = ', acc)\n\nbatch_size = 2000\n\n\ntrain_iter, val_iter, test_iter = data.BucketIterator.splits(\n (train, val, test), batch_size=batch_size)\ntest_batch=(len(test)//batch_size)\ntrain_batch=(len(train)//batch_size)\ntotal_test=batch_size*test_batch\ntotal_train=batch_size*train_batch\nloss_list=[]\nacc_list=[]\nej = 0\n\nbatch = next(iter(test_iter)) # for batch in train_iter\nprint(len(test_iter))\nprint(len(train_iter))\n# for i in range(len(test_iter)):\n# print(test_iter[i][:9])\nwith torch.no_grad():\n print('testing (epoch:',10,')')\n num = 0\n record=np.zeros(5)\n for i, batch in enumerate(train_iter):\n x=batch.text.transpose(0,1).to(torch.float32)\n x=Variable(x).to(device)\n y=batch.label-1\n print(y[:19])\n x=x.to(device)\n y=y.to(device)\n y_hat = net.forward(x, len(x))\n y_hat = np.argmax(y_hat.cpu().numpy(),axis=1)\n num=len(np.where((y_hat-y.cpu().numpy())==0)[0])\n print(num)\n record[0]+=(len(np.where((0-y.cpu().numpy())==0)[0]))\n record[1]+=(len(np.where((1-y.cpu().numpy())==0)[0]))\n record[2]+=(len(np.where((2-y.cpu().numpy())==0)[0]))\n record[3]+=(len(np.where((3-y.cpu().numpy())==0)[0]))\n record[4]+=(len(np.where((4-y.cpu().numpy())==0)[0]))\n print( num,total_test)\n print(record)\n acc = round(num/total_test, 4)\n print('epoch:', 10, ' | accuracy = ', acc)" ]
[ [ "torch.nn.init.uniform_", "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.nn.Parameter", "torch.Tensor", "torch.load", "torch.nn.LSTM", "torch.manual_seed", "torch.zeros", "torch.nn.Embedding", "torch.tanh", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available", "numpy.zeros", "numpy.where", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HelmchenLabSoftware/mesostat-dev
[ "8baa7120b892fe0df893cdcf0f20f49876643d75" ]
[ "mesostat/utils/matlab_helper.py" ]
[ "import os, time\nimport scipy.io as spio\n\n# Print root level values of dict (result of loadmat)\ndef inspect_mfile(d):\n for k,v in d.items():\n if isinstance(v, float) or isinstance(v, int) or isinstance(v, list):\n print(k, v)\n else:\n print(k, v.shape)\n\n# Convert \"scipy.io.matlab.mio5_params.mat_struct object\" to dict\ndef matstruct2dict(matstruct):\n return {s : [getattr(matstruct, s)] for s in dir(matstruct) if s[0]!='_'}\n\ndef loadmat(filename, waitRetry=None):\n '''\n this function should be called instead of direct spio.loadmat\n as it cures the problem of not properly recovering python dictionaries\n from mat files. It calls the function check keys to cure all entries\n which are still mat-objects\n '''\n \n # Test if file is accessible, and retry indefinitely if required\n fileAccessible = os.path.isfile(filename)\n if not fileAccessible:\n if waitRetry is None:\n raise ValueError(\"Matlab file can not be accessed\", filename)\n else:\n while not fileAccessible:\n print(\"... can't reach file\", filename, \", waiting\", waitRetry, \"seconds\")\n time.sleep(waitRetry)\n fileAccessible = os.path.isfile(filename)\n\n # Load data\n data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n \n # Get rid of useless keys\n data = {k : v for k, v in data.items() if k[0] != '_'}\n \n return _check_keys(data)\n\ndef _check_keys(d):\n '''\n checks if entries in dictionary are mat-objects. If yes\n todict is called to change them to nested dictionaries\n '''\n for key in d:\n if isinstance(d[key], spio.matlab.mio5_params.mat_struct):\n d[key] = _todict(d[key])\n return d \n\ndef _todict(matobj):\n '''\n A recursive function which constructs from matobjects nested dictionaries\n '''\n d = {}\n for strg in matobj._fieldnames:\n elem = matobj.__dict__[strg]\n if isinstance(elem, spio.matlab.mio5_params.mat_struct):\n d[strg] = _todict(elem)\n else:\n d[strg] = elem\n return dict\n\n\n# # Recursively convert \"scipy.io.matlab.mio5_params.mat_struct\" objects to dicts\n# def _check_keys(d):\n# for k,v in d.items():\n# print(k, type(v))\n# if isinstance(v, spio.matlab.mio5_params.mat_struct):\n# v_tmp = {s : [getattr(v, s)] for s in dir(v) if s[0]!='_'}\n# d[k] = _check_keys(v_tmp)\n# elif isinstance(v, dict):\n# d[k] = _check_keys(v)\n# return d" ]
[ [ "scipy.io.loadmat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
AAAI-DISIM-UnivAQ/nlpia
[ "39bf2ceff2128e5bb7ad233ced55cb55fe70be4a" ]
[ "src/nlpia/book/examples/ch10_movie_dialog_chatbot_v2.py" ]
[ "r\"\"\" Build character sequence-to-sequence training set\n\n>>> df = get_data('moviedialog')\n>>> df.columns = 'statement reply'.split()\n>>> df = df.dropna()\n>>> input_texts, target_texts = [], [] # <1>\n>>> start_token, stop_token = '\\t', '\\n' # <3>\n>>> input_vocab = set(start_token+stop_token) # <2>\n>>> output_vocab = set()\n>>> n_samples = min(100000, len(df)) # <4>\n\n>>> df['target'] = start_token + df.reply + stop_token\n>>> for statement in df.statement:\n... input_vocab.update(set(statement))\n>>> for reply in df.reply:\n... output_vocab.update(set(reply))\n>>> input_vocab = tuple(sorted(input_vocab))\n>>> output_vocab = tuple(sorted(output_vocab))\n>>> input_vocabulary = tuple(sorted(input_vocab))\n>>> output_vocabulary = tuple(sorted(output_vocab))\n\n>>> max_encoder_seq_len = df.statement.str.len().max() # <3>\n>>> max_decoder_seq_len = df.target.str.len().max()\n>>> max_encoder_seq_len, max_decoder_seq_len\n(100, 102)\n\"\"\"\nimport os\nfrom nlpia.loaders import get_data\n\ndf = get_data('moviedialog')\ndf.columns = 'statement reply'.split()\ndf = df.dropna()\ninput_texts, target_texts = [], [] # <1>\nstart_token, stop_token = '\\t\\n' # <3>\ninput_vocab = set() # <2>\noutput_vocab = set(start_token + stop_token)\nn_samples = min(100000, len(df)) # <4>\n\ndf['target'] = start_token + df.reply + stop_token\n[input_vocab.update(set(statement)) for statement in df.statement]\n[output_vocab.update(set(reply)) for reply in df.reply]\ninput_vocab = tuple(sorted(input_vocab)) # <6>\noutput_vocab = tuple(sorted(output_vocab))\n\nmax_encoder_seq_len = df.statement.str.len().max()\n# max_encoder_seq_len\n# 100\nmax_decoder_seq_len = df.target.str.len().max()\n# max_decoder_seq_len\n# 102\n\n# <1> The arrays hold the input and target text read from the corpus file.\n# <2> The sets hold the seen characters in the input and target text.\n# <3> The target sequence is annotated with a start (first) and stop (last) token; the characters representing the tokens are defined here. These tokens can't be part of the normal sequence text and should be uniquely used as start and stop tokens.\n# <4> `max_training_samples` defines how many lines are used for the training.\n# It is the lower number of either a user-defined maximum or the total number of lines loaded from the file.\n# <6> Compile the vocabulary -- set of the unique characters seen in the input_texts\n\n\n\"\"\" Construct character sequence encoder-decoder training set\n\"\"\"\nimport numpy as np # <1> # noqa\n\nencoder_input_onehot = np.zeros(\n (len(df), max_encoder_seq_len, len(input_vocab)),\n dtype='float32') # <2>\ndecoder_input_onehot = np.zeros(\n (len(df), max_decoder_seq_len, len(output_vocab)),\n dtype='float32')\ndecoder_target_onehot = np.zeros(\n (len(df), max_decoder_seq_len, len(output_vocab)),\n dtype='float32')\n\nfor i, (input_text, target_text) in enumerate(\n zip(df.statement, df.target)): # <3>\n for t, c in enumerate(input_text): # <4>\n k = input_vocab.index(c)\n encoder_input_onehot[i, t, k] = 1. 
# <5>\n k = np.array([output_vocab.index(c) for c in target_text])\n decoder_input_onehot[i, np.arange(len(target_text)), k] = 1.\n decoder_target_onehot[i, np.arange(len(target_text) - 1), k[1:]] = 1.\n# <1> You use numpy for the matrix manipulations.\n# <2> The training tensors are initialized as zero tensors with the shape of number of samples (this number should be equal for the input and target samples) times the maximum number of sequence tokens times the number of possible characters.\n# <3> Loop over the training samples; input and target texts need to match.\n# <4> Loop over each character of each sample.\n# <5> Set the index for the character at each time step to one; all other indices remain at zero. This creates the one-hot encoded representation of the training samples.\n# <6> For the training data for the decoder, you create the `decoder_input_data` and `decoder_target_data` (which is one time step behind the _decoder_input_data_).\n\n\n\"\"\"Construct and train a character sequence encoder-decoder network\n\"\"\"\nfrom keras.models import Model # noqa\nfrom keras.layers import Input, LSTM, Dense # noqa\n\nbatch_size = 64 # <1>\nepochs = 100 # <2>\nnum_neurons = 256 # <3>\n\nencoder_inputs = Input(shape=(None, len(input_vocab)))\nencoder = LSTM(num_neurons, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\nencoder_states = [state_h, state_c]\n\ndecoder_inputs = Input(shape=(None, len(output_vocab)))\ndecoder_lstm = LSTM(num_neurons, return_sequences=True,\n return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs,\n initial_state=encoder_states)\ndecoder_dense = Dense(len(output_vocab), activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['acc'])\nmodel.fit([encoder_input_onehot, decoder_input_onehot],\n decoder_target_onehot, batch_size=batch_size, epochs=epochs,\n validation_split=0.1) # <4>\n# 57915/57915 [==============================] - 296s 5ms/step - loss: 0.7575 - acc: 0.1210 - val_loss: 0.6521 - val_acc: 0.1517\n# Epoch 2/100\n# 57915/57915 [==============================] - 283s 5ms/step - loss: 0.5924 - acc: 0.1613 - val_loss: 0.5738 - val_acc: 0.1734\n# ...\n# 57915/57915 [==============================] - 276s 5ms/step - loss: 0.4235 - acc: 0.2075 - val_loss: 0.4688 - val_acc: 0.2034\n# Epoch 21/100\n# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.4217 - acc: 0.2080 - val_loss: 0.4680 - val_acc: 0.2037\n# Epoch 22/100\n# 57915/57915 [==============================] - 278s 5ms/step - loss: 0.4198 - acc: 0.2084 - val_loss: 0.4686 - val_acc: 0.2035\n# ...\n# Epoch 69/100 [1480/1902]\n# 57915/57915 [==============================] - 276s 5ms/step - loss: 0.3830 - acc: 0.2191 - val_loss: 0.4912 - val_acc: 0.2008\n# Epoch 70/100\n# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.3826 - acc: 0.2193 - val_loss: 0.4902 - val_acc: 0.2007\n# Epoch 71/100\n# ...\n# Epoch 99/100\n# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.3738 - acc: 0.2220 - val_loss: 0.5000 - val_acc: 0.1994\n# Epoch 100/100\n# 57915/57915 [==============================] - 278s 5ms/step - loss: 0.3736 - acc: 0.2220 - val_loss: 0.5017 - val_acc: 0.1992\n\n\"\"\" .Construct response generator model\n>>> encoder_model = Model(encoder_inputs, encoder_states)\n>>> thought_input = [\n... 
Input(shape=(num_neurons,)), Input(shape=(num_neurons,))]\n>>> decoder_outputs, state_h, state_c = decoder_lstm(\n... decoder_inputs, initial_state=thought_input)\n>>> decoder_states = [state_h, state_c]\n>>> decoder_outputs = decoder_dense(decoder_outputs)\n\n>>> decoder_model = Model(\n... inputs=[decoder_inputs] + thought_input,\n... outputs=[decoder_outputs] + decoder_states)\n\"\"\"\nencoder_model = Model(encoder_inputs, encoder_states)\nthought_input = [\n Input(shape=(num_neurons,)), Input(shape=(num_neurons,))]\ndecoder_outputs, state_h, state_c = decoder_lstm(\n decoder_inputs, initial_state=thought_input)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\n\ndecoder_model = Model(\n inputs=[decoder_inputs] + thought_input,\n outputs=[decoder_outputs] + decoder_states)\n\nr\"\"\"\n>>> def decode_sequence(input_seq):\n... thought = encoder_model.predict(input_seq) # <1>\n\n... target_seq = np.zeros((1, 1, len(output_vocab))) # <2>\n... target_seq[0, 0, output_vocab.index(start_token)\n... ] = 1. # <3>\n... stop_condition = False\n... generated_sequence = ''\n\n... while not stop_condition:\n... output_tokens, h, c = decoder_model.predict(\n... [target_seq] + thought) # <4>\n\n... generated_token_idx = np.argmax(output_tokens[0, -1, :])\n... generated_char = output_vocab[generated_token_idx]\n... generated_sequence += generated_char\n... if (generated_char == stop_token or\n... len(generated_sequence) > max_decoder_seq_len\n... ): # <5>\n... stop_condition = True\n\n... target_seq = np.zeros((1, 1, len(output_vocab))) # <6>\n... target_seq[0, 0, generated_token_idx] = 1.\n... thought = [h, c] # <7>\n\n... return generated_sequence\n\"\"\"\n\n\ndef decode_sequence(input_seq):\n thought = encoder_model.predict(input_seq) # <1>\n\n target_seq = np.zeros((1, 1, len(output_vocab))) # <2>\n target_seq[0, 0, output_vocab.index(start_token)\n ] = 1. 
# <3>\n stop_condition = False\n generated_sequence = ''\n\n while not stop_condition:\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + thought) # <4>\n\n generated_token_idx = np.argmax(output_tokens[0, -1, :])\n generated_char = output_vocab[generated_token_idx]\n generated_sequence += generated_char\n if (generated_char == stop_token or\n len(generated_sequence) > max_decoder_seq_len\n ): # <5>\n stop_condition = True\n\n target_seq = np.zeros((1, 1, len(output_vocab))) # <6>\n target_seq[0, 0, generated_token_idx] = 1.\n thought = [h, c] # <7>\n\n return generated_sequence\n\n\ndef respond(input_text):\n input_text = input_text.lower()\n input_text = ''.join(c if c in input_vocab else ' ' for c in input_text)\n input_seq = np.zeros((1, max_encoder_seq_len, len(input_vocab)), dtype='float32')\n for t, c in enumerate(input_text):\n input_seq[0, t, input_vocab.index(c)] = 1.\n decoded_sentence = decode_sequence(input_seq)\n print('Human: {}'.format(input_text))\n print('Bot:', decoded_sentence)\n return decoded_sentence\n\n\n\"\"\"\nrespond('Hi Rosa, how are you?')\nrespond('Hi Jim, how are you?')\nrespond('Hi Barak, how are you?')\nrespond('Hi Amy, how are you?')\nrespond('Hi Paris, how are you?')\nrespond('Hi Joe, how are you?')\nrespond('Hi Jane, how are you?')\nrespond('Hey Jane, how are you?')\nrespond('Hey Jon, how are you?')\nrespond('Hey John, how are you?')\nrespond('Hey Joe, how are you?')\nrespond('Hey Jim, how are you?')\nrespond('Hey Ashley, how are you?')\nrespond('Hey my love, how are you?')\nrespond('Hey Arzu, how are you?')\nrespond(\"I'm talking about us.\")\nrespond(\"What are you trying to say?\")\nIn [38]: respond('Hi Rosa, how are you?')\nHuman: hi rosa, how are you?\nBot: hello, he was the one when you wanted to see you again.\n\nIn [39]: respond('Hi Jim, how are you?')\nHuman: hi jim, how are you?\nBot: how are you?\n\nIn [40]: respond('Hi Barak, how are you?')\nHuman: hi barak, how are you?\nBot: hello, he was the one when i get the best way to get the show off the back.\n\nIn [41]: respond('Hi Amy, how are you?')\nHuman: hi amy, how are you?\nBot: hello, man. i was a second.\n\nIn [42]: respond('Hi Paris, how are you?')\nHuman: hi paris, how are you?\nBot: his heart studs the fucking chark off.\n\nIn [43]: respond('Hi Joe, how are you?')\nHuman: hi joe, how are you?\nBot: his his hate is beautiful on the way i was the one who wanted to say that.\n\nIn [44]: respond('Hi Jane, how are you?')\nHuman: hi jane, how are you?\nBot: hello, hello, martine. i got a second. i'm a fine boy from the ship.\n\nIn [45]: respond('Hey Jane, how are you?') [870/1906]\nHuman: hey jane, how are you?\nBot: hello, mr. decker. what do you mean what happened?\n\nIn [46]: respond('Hey Jon, how are you?')\nHuman: hey jon, how are you?\nBot: hello, mr. decker. what do you mean what happened?\n\nIn [47]: respond('Hey John, how are you?')\nHuman: hey john, how are you?\nBot: hello, mr. decker. what do you mean what happened?\n\nIn [48]: respond('Hey Joe, how are you?')\nHuman: hey joe, how are you?\nBot: hello, mr. decker. what do you mean what happened?\n\nIn [49]: respond('Hey Jim, how are you?')\nHuman: hey jim, how are you?\nBot: how much money i want to say that?\n\nIn [50]: respond('Hey Ashley, how are you?')\nHuman: hey ashley, how are you?\nBot: his his morning.\n\nIn [51]: respond('Hey my love, how are you?')\nHuman: hey my love, how are you?\nBot: here. i was just thinking about it.\n\nIn [52]: respond('Hey Arzu, how are you?')\nHuman: hey arzu, how are you?\nBot: hi. 
what are you talking about?\n\nIn [53]: respond(\"I'm talking about us.\")\nHuman: i'm talking about us.\nBot: i know.\n\nIn [54]: respond(\"What are you trying to say?\")\nHuman: what are you trying to say?\nBot: i don't know.\n\"\"\"\n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
poechsel/iree
[ "c242129687621a923fac192d17389bf469a6666c" ]
[ "integrations/tensorflow/e2e/explicit_backend_test.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests explicitly specifying a backend in Python.\"\"\"\n\nimport numpy as np\nfrom pyiree.tf.support import tf_test_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass SimpleArithmeticModule(tf.Module):\n\n @tf.function(input_signature=[\n tf.TensorSpec([4], tf.float32),\n tf.TensorSpec([4], tf.float32)\n ])\n def simple_mul(self, a, b):\n return a * b\n\n\n@tf_test_utils.compile_modules(simple_arithmetic=SimpleArithmeticModule)\nclass ExplicitBackendTest(tf_test_utils.SavedModelTestCase):\n\n def test_explicit(self):\n a = np.array([1., 2., 3., 4.], dtype=np.float32)\n b = np.array([400., 5., 6., 7.], dtype=np.float32)\n\n # Demonstrates simple, one by one invocation of functions against\n # different explicit backends. Individual backends can be accessed off of\n # the module by name ('tf', 'iree_vmla' below).\n tf_c = self.modules.simple_arithmetic.tf.simple_mul(a, b)\n print(\"TF Result:\", tf_c)\n iree_c = self.modules.simple_arithmetic.iree_vmla.simple_mul(a, b)\n print(\"IREE Result:\", iree_c)\n self.assertAllClose(tf_c, iree_c)\n\n def test_multi(self):\n a = np.array([1., 2., 3., 4.], dtype=np.float32)\n b = np.array([400., 5., 6., 7.], dtype=np.float32)\n\n # Evaluating against multiple backends can be done with the multi() method,\n # which takes a regex string matching backend names. This also returns a\n # MultiResults tuple with actual results keyed by backend name. These also\n # have convenience methods like print() and assert_all_close().\n vmod = self.modules.simple_arithmetic.multi(\"tf|iree\")\n r = vmod.simple_mul(a, b)\n r.print().assert_all_close()\n\n def test_all(self):\n a = np.array([1., 2., 3., 4.], dtype=np.float32)\n b = np.array([400., 5., 6., 7.], dtype=np.float32)\n\n # Evaluating against all backends can be done with the special 'all'\n # backend name. This also returns a MultiResults tuple with actual results\n # keyed by backend name.\n r = self.modules.simple_arithmetic.all.simple_mul(a, b)\n r.print().assert_all_close()\n\n\nif __name__ == \"__main__\":\n if hasattr(tf, \"enable_v2_behavior\"):\n tf.enable_v2_behavior()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.enable_v2_behavior", "numpy.array", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saguilarDevel/schc-sigfox
[ "27e14edc0e350f027e6d7b300ec79b6aa92dc06d" ]
[ "stats/server_analytics.py" ]
[ "import pandas as pd\nimport json\n\npd.set_option('display.max_columns', None)\n\n# json_file = json.loads('/Users/sergioaguilar/PycharmProjects/SCHCfox/stats/files/stats_file_v2.json')\nwith open('files/server/fragments_stats_v4.0.json') as json_file:\n data = json.load(json_file)\n\ndf1 = pd.read_json(str(json.dumps(data, sort_keys=True)))\nprint(df1)\ndf1_transposed = df1.T # or df1.transpose()\nprint(df1_transposed)\ndf1_transposed.astype({\"FCN\": str, \"RULE_ID\": str, \"W\": str, \"ack\": str, \"ack_send\": str,\n \"data\": str, \"downlink_enable\": bool, \"fragment_size\": int,\n \"lost\": bool, \"send_time\": float, \"sending_end\": float,\n \"sending_start\": float,\n \"seqNumber\": int})\nprint(df1_transposed)\nprint(df1_transposed['FCN'].isin(['111']))\ndf_nowait = df1_transposed[df1_transposed['FCN'].isin(['111'])]\nprint(df_nowait)\ndf1_transposed['Branded'] = df1_transposed['FCN'].str.contains('111')*1\nprint(df1_transposed)\n# df1_transposed.to_excel('test_stats_2.2.xlsx', engine='xlsxwriter')\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.set_option" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
okara83/Becoming-a-Data-Scientist
[ "f09a15f7f239b96b77a2f080c403b2f3e95c9650" ]
[ "Cloud Services and Computing/Amazon AWS/Lambdas/Lambdas-Practice-Web-Examples/aws-tutorial-code-master/lambda/lambda_read_excel_file_s3_trigger.py" ]
[ "# -*- coding: utf-8 -*-\n__author__ = \"Chirag Rathod (Srce Cde)\"\n__license__ = \"MIT\"\n__email__ = \"[email protected]\"\n__maintainer__ = \"Chirag Rathod (Srce Cde)\"\n\n\nimport json\nimport io\nfrom urllib.parse import unquote_plus\nimport boto3\nimport pandas as pd\n\n\ndef lambda_handler(event, context):\n s3 = boto3.client(\"s3\")\n s3_resource = boto3.resource(\"s3\")\n if event:\n s3_records = event[\"Records\"][0]\n bucket_name = str(s3_records[\"s3\"][\"bucket\"][\"name\"])\n file_name = unquote_plus(str(s3_records[\"s3\"][\"object\"][\"key\"]))\n file_obj = s3.get_object(Bucket=bucket_name, Key=file_name)\n file_content = file_obj[\"Body\"].read()\n\n read_excel_data = io.BytesIO(file_content)\n\n df = pd.read_excel(read_excel_data)\n df = df.assign(dummy=\"dummy_value\")\n df.to_csv(\"/tmp/updated.csv\")\n\n s3_resource.Bucket(\"bucket-name\").upload_file(\"/tmp/updated.csv\", \"updated.csv\")\n\n return {\"statusCode\": 200, \"body\": json.dumps(\"Hello from Lambda!\")}\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
rsjones94/data_inspection
[ "b3f57c3eea1b800e629ff79c3f9922daf64f7b8f" ]
[ "data_inspection/__main__.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMain entry point for data_inspection. This script reads in\ntabular patient data and analyzes it for outliers. First, it inspects specified\ncolumns for data integrity (missing values) and produces histograms if appropriate.\n\nThen it analyzes specified 2d relationships, producing scatter plots and identifying\noutliers.\n\nFinally it runs the DBSCAN algorithm to flag any potential outliers.\n\nNote that on my machine this uses the venv \"tabular_analysis\"\n\"\"\"\n\nimport os\nimport shutil\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn import preprocessing\n\nfrom support import is_empty, numbery_string_to_number\n\n\ndata_path = r'/Users/skyjones/Documents/inspection/SCD_pt_data_labels_piped.csv'\nout_folder = r'/Users/skyjones/Documents/inspection/analysis' # should not exist\n\n# column that contains the unique deidentified patient ID\nstudy_id_col = 'Study ID'\n\n# columns we want to inspect for completeness and produce histograms/barplots for\n# each key is a column name, and the value is True if there MUST be a value and\n# False if there does not need to be a value. If there must be a value if and\n# only if another column(s) is filled, then the value should be a list of those columns\nsingle_cols = {\n 'Age': True,\n 'Race': True,\n 'Hemoglobin genotype': True,\n 'Gender': True,\n 'BMI': True,\n 'Specify total HU daily dosage (mg)': True,\n 'HTN': True,\n 'Diabetes': True,\n 'Coronary artery disease': True,\n 'High cholesterol': True,\n 'Hgb': True,\n 'Hct/PCV': True,\n 'MRI 1 - Pulse ox results': True, # note the extra space\n 'MRI 2 - Pulse ox results': True,\n 'MRI 3 - Pulse ox results': True,\n 'MCV': True,\n 'Receiving regular blood transfusions': True,\n r'Initial hemoglobin S% (pretransfusion if applicable)': True,\n r'Results': True, # this is posttransfusion HbS%, and it's amazing it's the only column with this name\n r'MRI 1 - SBP': True,\n r'MRI 1 - DBP': True,\n r'MRI 2 - SBP': True,\n r'MRI 2 - DBP': True,\n r'MRI 3 - SBP': True,\n r'MRI 3 - DBP': True,\n }\n\n# 2d relationships we want to use to check for outliers. [independent, dependent]\n# numeric data only pls\ndouble_cols = [['Specify total HU daily dosage (mg)', 'MCV'],\n ['Specify total HU daily dosage (mg)', 'Initial hemoglobin S% (pretransfusion if applicable)'],\n ['Age', 'MRI 1 - SBP'],\n ['Age', 'MRI 1 - DBP'],\n ['Age', 'MRI 2 - SBP'],\n ['Age', 'MRI 2 - DBP'],\n ['Age', 'MRI 3 - SBP'],\n ['Age', 'MRI 3 - DBP']]\n\ncontam = 0.07 # estimated % of data that are outliers\n\ntext_size = 7\n\nnp.random.seed(1)\n\n#######################################\n\n###### setup\n\nmono_folder = os.path.join(out_folder, 'mono')\nbi_folder = os.path.join(out_folder, 'bi')\nmulti_folder = os.path.join(out_folder, 'multi')\ncustom_folder = os.path.join(out_folder, 'custom')\n\noverview_report = os.path.join(out_folder, 'overview.txt')\nmissing_data_report = os.path.join(out_folder, 'missing_data.csv')\noutliers_report = os.path.join(out_folder, 'outliers.csv')\n\ntry:\n os.mkdir(out_folder)\nexcept FileExistsError:\n no_answer = True\n while no_answer:\n ans = input('The output directory exists. Overwrite? [y/n]\\n')\n if ans == 'y':\n no_answer = False\n shutil.rmtree(out_folder)\n os.mkdir(out_folder)\n elif ans == 'n':\n raise FileExistsError('File exists. 
Process aborted')\n else:\n print('Response must be \"y\" or \"n\"')\n\n\nlog_file = os.path.join(out_folder, 'log.txt')\nlog = open(log_file, 'w')\n \nos.mkdir(mono_folder)\nos.mkdir(bi_folder)\nos.mkdir(multi_folder)\nos.mkdir(custom_folder)\n\nsep = '|'\ndf = pd.read_csv(data_path, sep=sep, low_memory=False, dtype={study_id_col:'object'})\n\nproblem_pts_cols = [study_id_col]\nproblem_pts_cols.extend(single_cols.keys())\nproblem_pts = pd.DataFrame(columns=problem_pts_cols)\nproblem_pts = problem_pts.set_index('Study ID') # this data will relate pt IDs to a list of columns for which data\n# is missing, iff that missing data is marked as essential (by the variable single_cols)\n\noutlier_pts = {} # this data will relate pt IDs to a list of columns for which\n# the data seems to be an outlier\n\n###### plot and inspect the monodimensional data\nproblem_patients_dict = {}\nfor col in single_cols:\n data = df[col]\n pts = df[study_id_col]\n plt.figure(figsize=(8,12))\n plt.title(col)\n \n print(f'Plotting: {col}. dtype is {data.dtype}')\n if data.dtype == 'object':\n counts = Counter(data)\n if np.nan in counts:\n counts['nan'] = counts[np.nan]\n del counts[np.nan]\n n_v = [(n,v) for n,v in counts.most_common()]\n names = [n for n,v in n_v]\n values = [v for n,v in n_v]\n plt.ylabel('Count')\n plt.bar(names, values)\n else:\n # plt.hist(data)\n data_drop = data.dropna()\n result = plt.boxplot(data_drop, notch=True)\n plt.ylabel('Value')\n points = result['fliers'][0].get_data()\n exes = points[0]+.01\n whys = points[1]\n for x,y in zip(exes,whys):\n matches = pts[data == y]\n label = ''\n for m in matches:\n label += f'{m} + '\n label = label[:-3]\n plt.annotate(label, (x,y), fontsize=8)\n # plt.xlabel('Value')\n \n scrub_col = col.replace('/', '-') # replace slashes with dashes to protect filepath\n fig_name = os.path.join(mono_folder, f'{scrub_col}.png')\n plt.savefig(fig_name)\n plt.close()\n\n print('Evaluating completeness')\n for i, row in df.iterrows():\n # explicit comparisons of bools needed because we are exploiting the ability to mix key datatypes\n if not is_empty(row[col]):\n has_data = True\n # print('Is not empty')\n elif single_cols[col] is False:\n has_data = True\n # print('Does not need data')\n elif single_cols[col] is True: # if data is required\n has_data = False\n # print('Does not have data and deffo needs it')\n else: # if we get here, need to see if the companion columns are filled\n # if all companion columns are filled, then data is required\n companions = [row[c] for c in single_cols[col]]\n has_required_companions = all([not is_empty(row[c]) for c in single_cols[col]])\n has_data = not has_required_companions\n \n if not has_data:\n pt_id = row[study_id_col]\n try:\n problem_patients_dict[pt_id].append(col)\n except KeyError:\n problem_patients_dict[pt_id] = [col]\n \n \n# write the missing data report\nfor pt, cols in problem_patients_dict.items():\n insert = pd.Series({col:1 for col in cols}, name=pt)\n problem_pts = problem_pts.append(insert, ignore_index=False)\nproblem_pts = problem_pts.sort_index()\nproblem_pts.to_csv(missing_data_report)\n\nprint('\\n')\n###### do the 2d analyses\nfor ind_col, dep_col in double_cols:\n print(f'2d: {ind_col} and {dep_col}')\n fig_name = os.path.join(bi_folder, f'{dep_col}-v-{ind_col}.png')\n plt.figure()\n plt.title(f'{dep_col} vs. 
{ind_col}')\n \n x = df[ind_col]\n y = df[dep_col]\n pt_id = df[study_id_col]\n \n try:\n \n x = [numbery_string_to_number(i) for i in x]\n y = [numbery_string_to_number(i) for i in y]\n \n \n data = np.array( [np.array( [a,b] ) for a,b,c in zip(x,y,pt_id) if all([not np.isnan(a), not(np.isnan(b))]) ] )\n pts = [ c for a,b,c in zip(x,y,pt_id) if all([not np.isnan(a), not(np.isnan(b))]) ]\n clf = IsolationForest(max_samples='auto', random_state=1, contamination=contam)\n preds = clf.fit_predict(data)\n \n x = data[:,0]\n y = data[:,1]\n \n plt.scatter(x, y, c=preds)\n \n for pt, x, y, p in zip(pts, x, y, preds):\n if p == -1:\n plt.annotate(pt, (x,y))\n\n plt.xlabel(ind_col)\n plt.ylabel(dep_col)\n plt.savefig(fig_name)\n plt.close()\n except ValueError as e:\n print(f'Error analyzing -{ind_col}- against -{dep_col}-')\n log.write(f'Error analyzing -{ind_col}- against -{dep_col}-:\\n\\t{e}\\n')\n plt.close()\n continue\n \n \n###### multivariate outlier detection\nprint('\\nRunning multivariate outlier analysis')\n\nmultifile = os.path.join(multi_folder, 'multivariate_detection.png')\nmulticolsfile = os.path.join(multi_folder, 'multivariate_cols.csv')\nmultisubsetfile = os.path.join(multi_folder, 'multivariate_subset.csv')\n\ndump_folder = os.path.join(multi_folder, 'bin')\nos.mkdir(dump_folder)\n\ninclude_thresh = 0.3 # the minimum percentage of non-nan entries a column must have to be included in the multivariate analysis\n \n# figure out which columns are numeric\nexes = df[study_id_col]\n\nnumeric_cols = [c for c in df.columns if df[c].dtype != 'object']\nnumeric_cols = [n for n in numeric_cols if len(df[n].unique()) > 1] # has to not just be NaN\nnumeric_cols = [n for n in numeric_cols if 'Accession' not in n]\n\nnumeric_cols_nonthreshed = numeric_cols.copy()\n\nnumeric_cols = [n for n in numeric_cols if sum(~df[n].isna()) / len(df) > include_thresh] # has to have more non-NaN than the threshold\n\n\nmultidata = df[numeric_cols]\nmultidata_filled = multidata.fillna(multidata.mean())\n\n# normalize the data\nx = multidata_filled.values #returns a numpy array\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\nmultidata_filled = pd.DataFrame(x_scaled)\n\nclf = LocalOutlierFactor(n_neighbors=20, contamination=contam)\n\ny_pred = clf.fit_predict(multidata_filled)\ny_pred_unsort = y_pred.copy()\nx_scores = clf.negative_outlier_factor_\nx_scores_unsort = x_scores.copy()\n\nexes, y_pred, x_scores = zip(*sorted(zip(exes, y_pred, x_scores), key=lambda pair: pair[2]))\n\nfilt_exes = [x for x,p in zip(exes, y_pred) if p == -1]\nfilt_scores = [x for x,p in zip(x_scores, y_pred) if p == -1]\n\nindex = np.arange(0, len(filt_scores), step=1)\n\n\nfig = plt.figure()\nplt.title(f'Multivariate outlier detection\\n(n columns more than {include_thresh} filled = {len(numeric_cols)})')\nplt.scatter(index, filt_scores)\nplt.ylabel('Outlier factor')\nplt.xlabel('Rank')\nplt.xticks(index, index)\n\nfor pt, x, y in zip(filt_exes, index, filt_scores):\n plt.annotate(pt, (x,y), verticalalignment='top', size=text_size)\n\nplt.savefig(multifile)\nplt.close()\n\nout_cols = pd.Series(numeric_cols)\nout_cols.to_csv(multicolsfile)\n\nid_and_num = [study_id_col]\nid_and_num.extend(numeric_cols)\nout_multi = df[id_and_num]\nout_multi.insert(1, 'abnormal', y_pred_unsort)\nout_multi.insert(1, 'outlier_factor', x_scores_unsort)\nout_multi = out_multi[out_multi['abnormal'] == -1]\n\nout_multi.to_csv(multisubsetfile)\n\nprint('Dumping numeric plots')\n\nfor col in 
numeric_cols_nonthreshed:\n print(f'Dumping: {col}')\n data = df[col]\n pts = df[study_id_col]\n plt.figure(figsize=(8,12))\n \n data_drop = data.dropna()\n plt.title(f'{col}\\n(n = {len(data_drop)})')\n result = plt.boxplot(data_drop, notch=True)\n plt.ylabel('Value')\n points = result['fliers'][0].get_data()\n exes = points[0]+.01\n whys = points[1]\n for x,y in zip(exes,whys):\n matches = pts[data == y]\n label = ''\n for m in matches:\n label += f'{m} + '\n label = label[:-3]\n plt.annotate(label, (x,y), fontsize=text_size)\n \n scrub_col = col.replace('/', '-') # replace slashes with dashes to protect filepath\n fig_name = os.path.join(dump_folder, f'{scrub_col}.png')\n plt.savefig(fig_name)\n plt.close()\n\n\n\n###### custom analyses\nprint('\\nRunning custom analyses')\n\n# see whose HbS actually increased postransfusion\n\nfig_name = os.path.join(custom_folder, r'anomalous_posttransfusion_HbS_increases.png')\nplt.figure(figsize=(8,30))\nplt.title(r'Post- vs. Pre-transfusion HbS %')\nplt.xlabel(r'Transfusion status')\nplt.ylabel(r'% HbS')\n\n\nfor status, pre, post, pt in zip(df['Receiving regular blood transfusions'], df['Initial hemoglobin S% (pretransfusion if applicable)'], df['Results'], df[study_id_col]):\n if status == 'No':\n continue\n \n if pd.isnull(post) and not pd.isnull(pre):\n bad = -1\n col='orange'\n al = 1\n elif post >= pre:\n bad = 2\n col ='red'\n al = 1\n elif post >= pre*.9:\n bad = 1\n col = 'blue'\n al = 1\n else:\n bad = 0\n col = 'green'\n al = 0.2\n \n exes = [0,1]\n whys = [pre,post]\n \n plt.scatter(exes, whys, color=col, alpha=al)\n plt.plot(exes, whys, color=col, alpha=al)\n \n if bad >= 1:\n plt.annotate(pt, (exes[1]+0.02, whys[1]), size=text_size)\n if bad == -1:\n plt.annotate(pt, (exes[0]-0.05, whys[0]), size=text_size)\n \nnorm_artist = plt.Circle((0,0), color='green')\nbad_artist = plt.Circle((0,0), color='blue')\nvbad_artist = plt.Circle((0,0), color='red')\nmissing_artist = plt.Circle((0,0), color='orange')\nplt.legend((norm_artist, bad_artist, vbad_artist, missing_artist),\n ('Normal', 'HbS reduction <10%', 'HbS constancy or increase', 'No post-transfusion value'))\nplt.xlim(-0.2,1.2)\nplt.ylim(0,100)\nplt.savefig(fig_name)\nplt.close()\n \nlog.close()\n \n \n \n \n \n \n \n " ]
[ [ "matplotlib.pyplot.legend", "pandas.Series", "pandas.DataFrame", "sklearn.neighbors.LocalOutlierFactor", "matplotlib.pyplot.plot", "sklearn.preprocessing.MinMaxScaler", "pandas.read_csv", "matplotlib.pyplot.Circle", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.isnan", "matplotlib.pyplot.ylim", "matplotlib.pyplot.annotate", "matplotlib.pyplot.savefig", "numpy.array", "sklearn.ensemble.IsolationForest", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.boxplot", "numpy.random.seed", "matplotlib.pyplot.scatter", "pandas.isnull", "matplotlib.pyplot.xlim", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mismayil/kogito
[ "e62b010d6787ddae0035ed2bc596619ec31fd6b9" ]
[ "kogito/models/gpt2/comet.py" ]
[ "from typing import Optional\nimport numpy as np\nimport torch\nfrom torch import cuda\nfrom torch.utils.data import DataLoader\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\nfrom pytorch_lightning.loggers import WandbLogger\nimport pytorch_lightning as pl\n\nfrom kogito.models.gpt2.utils import GPT2Finetuner\nfrom kogito.core.dataset import KnowledgeDataset\nfrom kogito.core.model import KnowledgeModel\nfrom kogito.core.knowledge import KnowledgeGraph, GEN_TOKEN, EOS_TOKEN, PAD_TOKEN\nfrom kogito.core.relation import KG_RELATIONS\n\ndevice = \"cuda\" if cuda.is_available() else \"cpu\"\n\n\nclass COMETGPT2(KnowledgeModel):\n \"\"\"COMET model based on GPT-2\"\"\"\n\n def __init__(self, model_name_or_path: str = \"gpt2\") -> None:\n \"\"\"Initialize COMET model\n\n Args:\n model_name_or_path (str, optional): HuggingFace model name or local model path. Defaults to \"gpt2\".\n \"\"\"\n self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)\n self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path)\n self.model.to(device)\n\n def train(\n self,\n train_graph: KnowledgeGraph,\n val_graph: KnowledgeGraph,\n batch_size: int = 8,\n in_len: int = 16,\n out_len: int = 34,\n summary_len: int = 0,\n epochs: int = 1,\n lr: float = 5e-5,\n seed: int = 42,\n log_wandb: bool = False,\n output_dir: Optional[str] = None,\n ) -> KnowledgeModel:\n \"\"\"Train a COMET model\n\n Args:\n train_graph (KnowledgeGraph): Training dataset\n val_graph (KnowledgeGraph): Validation dataset\n batch_size (int, optional): Batch size. Defaults to 2.\n in_len (int, optional): Input length. Defaults to 16.\n out_len (int, optional): Output length. Defaults to 34.\n summary_len (int, optional): Summary length. Defaults to 0.\n epochs (int, optional): Number of epochs. Defaults to 3.\n lr (float, optional): Learning rate. Defaults to 1e-5.\n seed (int, optional): Random seed. Defaults to 42.\n log_wandb (bool, optional): Whether to log to wandb. Defaults to False.\n output_dir (Optional[str], optional): Directory to save intermediate model checkpoints. 
Defaults to None.\n\n Returns:\n KnowledgeModel: Trained knowledge model\n \"\"\"\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n self.tokenizer.add_special_tokens(\n {\n \"eos_token\": EOS_TOKEN,\n \"pad_token\": PAD_TOKEN,\n \"additional_special_tokens\": [\n str(relation) for relation in KG_RELATIONS\n ]\n + [GEN_TOKEN],\n }\n )\n\n train_dataset = KnowledgeDataset(\n train_graph,\n tokenizer=self.tokenizer,\n source_len=out_len,\n target_len=summary_len,\n )\n val_dataset = KnowledgeDataset(\n val_graph,\n tokenizer=self.tokenizer,\n source_len=in_len,\n target_len=out_len - in_len,\n is_eval=True,\n )\n\n self.model.resize_token_embeddings(len(self.tokenizer))\n\n train_params = {\"batch_size\": batch_size, \"shuffle\": True, \"num_workers\": 0}\n val_params = {\"batch_size\": batch_size, \"shuffle\": False, \"num_workers\": 0}\n\n train_loader = DataLoader(train_dataset, **train_params, drop_last=True)\n val_loader = DataLoader(val_dataset, **val_params, drop_last=True)\n\n logger = True\n\n if log_wandb:\n config = {\n \"batch_size\": batch_size,\n \"epochs\": epochs,\n \"learning_rate\": lr,\n \"seed\": seed,\n \"in_len\": in_len,\n \"summary_len\": summary_len,\n \"out_len\": out_len,\n }\n logger = WandbLogger(project=\"kogito-comet-gpt2\")\n logger.experiment.config.update(config)\n\n finetuner = GPT2Finetuner(model=self.model, learning_rate=lr)\n trainer = pl.Trainer(\n default_root_dir=output_dir,\n max_epochs=epochs,\n logger=logger,\n accelerator=\"auto\",\n )\n trainer.fit(\n finetuner, train_dataloaders=train_loader, val_dataloaders=val_loader\n )\n\n self.save_pretrained(f\"{output_dir}/final_model\")\n\n return self.model\n\n def generate(\n self,\n input_graph: KnowledgeGraph,\n max_length: int = 34,\n in_len: int = 16,\n out_len: int = 34,\n top_k: int = 1,\n temperature: float = 1.0,\n top_p: float = 0.9,\n repetition_penalty: float = 1.0,\n num_beams: int = 10,\n num_return_sequences: int = 10,\n ) -> KnowledgeGraph:\n \"\"\"Generate inferences from knowledge model\n\n Args:\n input_graph (KnowledgeGraph): Input dataset\n max_length (int, optional): Maximum output length. Defaults to 34.\n in_len (int, optional): Input length. Defaults to 16.\n out_len (int, optional): Output length. Defaults to 34.\n top_k (int, optional): Top k inferences to consider. Defaults to 1.\n temperature (float, optional): GPT-2 temperature parameter. Defaults to 1.0.\n top_p (float, optional): GPT-2 top_p parameter. Defaults to 0.9.\n repetition_penalty (float, optional): GPT-2 repetition_penalty parameter. Defaults to 1.0.\n num_beams (int, optional): GPT-2 num_beams parameter. Defaults to 10.\n num_return_sequences (int, optional): GPT-2 num_return_sequences parameter. 
Defaults to 10.\n\n Returns:\n KnowledgeGraph: Completed knowledge graph\n \"\"\"\n params = {\"batch_size\": 1, \"shuffle\": False, \"num_workers\": 0}\n dataset = KnowledgeDataset(\n input_graph,\n tokenizer=self.tokenizer,\n source_len=in_len,\n target_len=out_len - in_len,\n is_eval=True,\n )\n loader = DataLoader(dataset, **params, drop_last=False)\n\n self.model.eval()\n\n outputs = []\n\n with torch.no_grad():\n for input_kg, data in zip(input_graph, loader):\n ids = data[\"source_ids\"].to(device)\n mask = data[\"source_mask\"].to(device)\n\n generated_ids = self.model.generate(\n input_ids=ids,\n attention_mask=mask,\n temperature=temperature,\n do_sample=False,\n max_length=max_length,\n top_p=top_p,\n top_k=top_k,\n repetition_penalty=repetition_penalty,\n num_return_sequences=num_return_sequences if top_k > 1 else 1,\n num_beams=num_beams,\n )\n\n generations = [\n self.tokenizer.decode(g, clean_up_tokenization_spaces=True)\n for g in generated_ids\n ]\n\n output_kg = input_kg.copy()\n output_kg.tails = generations\n outputs.append(output_kg)\n\n return KnowledgeGraph(outputs)\n\n def save_pretrained(self, save_path: str) -> None:\n \"\"\"Save pretrained model\n\n Args:\n save_path (str): Directory to save model to\n \"\"\"\n if save_path:\n self.model.save_pretrained(save_path)\n self.tokenizer.save_pretrained(save_path)\n\n @classmethod\n def from_pretrained(cls, model_name_or_path: str) -> KnowledgeModel:\n \"\"\"Load pretrained model\n\n Args:\n model_name_or_path (str): HuggingFace model name or local model path\n\n Returns:\n KnowledgeModel: Loaded knowledge model\n \"\"\"\n return cls(model_name_or_path)\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CLT29/pvse
[ "bf5232148396ee5051564ef68a48538de0ddbc84" ]
[ "video_transforms.py" ]
[ "from __future__ import division\nimport torch\nimport math\nimport sys\nimport random\nfrom PIL import Image, ImageOps, ImageEnhance\ntry:\n import accimage\nexcept ImportError:\n accimage = None\nimport numpy as np\nimport numbers\nimport types\nimport collections\nimport warnings\n\nimport torchvision.transforms.functional as F\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\", \"CenterCrop\", \"Pad\",\n \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\", \"RandomHorizontalFlip\",\n \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\", \"LinearTransformation\",\n \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\"]\n\n_pil_interpolation_to_str = {\n Image.NEAREST: 'PIL.Image.NEAREST',\n Image.BILINEAR: 'PIL.Image.BILINEAR',\n Image.BICUBIC: 'PIL.Image.BICUBIC',\n Image.LANCZOS: 'PIL.Image.LANCZOS',\n}\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, pic_list):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return torch.stack([F.to_tensor(pic) for pic in pic_list])\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ToPILImage(object):\n \"\"\"Convert a tensor or an ndarray to PIL Image.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n 1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n 2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n 3. If the input has 1 channel, the ``mode`` is determined by the data type (i,e,\n ``int``, ``float``, ``short``).\n\n .. 
_PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n will normalize each channel of the input ``torch.*Tensor`` i.e.\n ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. note::\n This transform acts in-place, i.e., it mutates the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n for i in range(tensor.size(0)):\n tensor[i] = F.normalize(tensor[i], self.mean, self.std) # assign back: F.normalize is not guaranteed to operate in-place\n return tensor\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(object):\n \"\"\"Resize the input PIL Image to the given size.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, size, interpolation=Image.BILINEAR):\n assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, img_list):\n \"\"\"\n Args:\n img (PIL Image): Image to be scaled.\n\n Returns:\n PIL Image: Rescaled image.\n \"\"\"\n return [F.resize(img, self.size, self.interpolation) for img in img_list]\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL Image at the center.\n\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img_list):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n return [F.center_crop(img, self.size) for img in img_list]\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(object):\n \"\"\"Pad the given PIL Image on all sides with the given \"pad\" value.\n\n Args:\n padding (int or tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.\n Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode='constant'):\n assert isinstance(padding, (numbers.Number, tuple))\n assert isinstance(fill, (numbers.Number, str, tuple))\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be padded.\n\n Returns:\n PIL Image: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda(object):\n \"\"\"Apply a user-defined lambda as a transform.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n assert isinstance(lambd, types.LambdaType)\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms(object):\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (list or tuple): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n assert isinstance(transforms, (list, tuple))\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t 
in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(RandomTransforms):\n \"\"\"Apply randomly a list of transformations with a given probability\n\n Args:\n transforms (list or tuple): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super(RandomApply, self).__init__(transforms)\n self.p = p\n\n def __call__(self, img):\n if self.p < random.random():\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(object):\n \"\"\"Crop the given PIL Image at a random location.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None, i.e no padding. If a sequence of length\n 4 is provided, it is used to pad left, top, right, bottom borders\n respectively. If a sequence of length 2 is provided, it is used to\n pad left/right, top/bottom borders, respectively.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception.\n fill: Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode='constant'):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n @staticmethod\n def get_params(img, output_size):\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = img.size\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw)\n return i, j, th, tw\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n # pad the width if needed\n if self.pad_if_needed and img.size[0] < self.size[1]:\n img = F.pad(img, (self.size[1] - img.size[0], 0), self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and img.size[1] < self.size[0]:\n img = F.pad(img, (0, self.size[0] - img.size[1]), self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img_list):\n \"\"\"\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Randomly flipped image.\n \"\"\"\n if random.random() < self.p:\n return [F.hflip(img) for img in img_list]\n return img_list\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(object):\n \"\"\"Vertically flip the given PIL Image randomly with a given probability.\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be flipped.\n\n Returns:\n PIL Image: Randomly flipped image.\n \"\"\"\n if random.random() < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(object):\n \"\"\"Crop the given PIL Image to random size and aspect ratio.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. 
This crop\n is finally resized to the given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size: expected output size of each edge\n scale: range of size of the origin size cropped\n ratio: range of aspect ratio of the origin aspect ratio cropped\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):\n self.size = (size, size)\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(*scale) * area\n aspect_ratio = random.uniform(*ratio)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n # Fallback\n w = min(img.size[0], img.size[1])\n i = (img.size[1] - w) // 2\n j = (img.size[0] - w) // 2\n return i, j, w, w\n\n def __call__(self, img_list):\n \"\"\"\n Args:\n img_list (list of PIL Image): Images to be cropped and resized.\n\n Returns:\n list of PIL Image: Randomly cropped and resized images.\n \"\"\"\n i, j, h, w = self.get_params(img_list[0], self.scale, self.ratio)\n return [F.resized_crop(img, i, j, h, w, self.size, self.interpolation) for img in img_list]\n\n def __repr__(self):\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(object):\n \"\"\"Crop the given PIL Image into four corners and the central crop\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n self.size = size\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n self.size = size\n\n def __call__(self, img):\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(object):\n \"\"\"Crop the given PIL Image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default)\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n vertical_flip(bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n self.size = size\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n assert len(size) == 2, \"Please provide only two dimensions (h, w) for size.\"\n self.size = size\n self.vertical_flip = vertical_flip\n\n def __call__(self, img):\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(object):\n \"\"\"Transform a tensor image with a square transformation matrix computed\n offline.\n\n Given transformation_matrix, will flatten the torch.*Tensor, compute the dot\n product with the transformation matrix and reshape the tensor to its\n original shape.\n\n Applications:\n - whitening: zero-center the data, compute the data covariance matrix\n [D x D] with np.dot(X.T, X), perform SVD on this matrix and\n pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix):\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. 
Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n self.transformation_matrix = transformation_matrix\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):\n raise ValueError(\"tensor and transformation matrix have incompatible shape.\" +\n \"[{} x {} x {}] != \".format(*tensor.size()) +\n \"{}\".format(self.transformation_matrix.size(0)))\n flat_tensor = tensor.view(1, -1)\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(tensor.size())\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += (str(self.transformation_matrix.numpy().tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(object):\n \"\"\"Randomly change the brightness, contrast and saturation of an image.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - value, center + value]\n if clip_first_on_zero:\n value[0] = max(value[0], 0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with lenght 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) 
for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness, contrast, saturation, hue):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are the same as those of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n\n if brightness is not None:\n brightness_factor = random.uniform(brightness[0], brightness[1])\n transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))\n\n if contrast is not None:\n contrast_factor = random.uniform(contrast[0], contrast[1])\n transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))\n\n if saturation is not None:\n saturation_factor = random.uniform(saturation[0], saturation[1])\n transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))\n\n if hue is not None:\n hue_factor = random.uniform(hue[0], hue[1])\n transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))\n\n random.shuffle(transforms)\n transform = Compose(transforms)\n\n return transform\n\n def __call__(self, img_list):\n \"\"\"\n Args:\n img_list (list of PIL Image): Input images.\n\n Returns:\n list of PIL Image: Color jittered images.\n \"\"\"\n transform = self.get_params(self.brightness, self.contrast,\n self.saturation, self.hue)\n return [transform(img) for img in img_list]\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(object):\n \"\"\"Rotate the image by angle.\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (2-tuple, optional): Optional center of rotation.\n Origin is the upper left corner.\n Default is the center of the image.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, resample=False, expand=False, center=None):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n if len(degrees) != 2:\n raise ValueError(\"If degrees is a sequence, it must be of len 2.\")\n self.degrees = degrees\n\n self.resample = resample\n self.expand = expand\n self.center = center\n\n @staticmethod\n def get_params(degrees):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n\n return angle\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be rotated.\n\n Returns:\n PIL Image: Rotated image.\n \"\"\"\n\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', resample={0}'.format(self.resample)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(object):\n \"\"\"Random affine transformation of the image keeping center invariant\n\n Args:\n degrees (sequence or float or int): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. Will keep original scale by default.\n shear (sequence or float or int, optional): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Will not apply shear by default\n resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):\n An optional resampling filter. See `filters`_ for more information.\n If omitted, or if the image has mode \"1\" or \"P\", it is set to PIL.Image.NEAREST.\n fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):\n if isinstance(degrees, numbers.Number):\n if degrees < 0:\n raise ValueError(\"If degrees is a single number, it must be positive.\")\n self.degrees = (-degrees, degrees)\n else:\n assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \\\n \"degrees should be a list or tuple and it must be of length 2.\"\n self.degrees = degrees\n\n if translate is not None:\n assert isinstance(translate, (tuple, list)) and len(translate) == 2, \\\n \"translate should be a list or tuple and it must be of length 2.\"\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n assert isinstance(scale, (tuple, list)) and len(scale) == 2, \\\n \"scale should be a list or tuple and it must be of length 2.\"\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n if isinstance(shear, numbers.Number):\n if shear < 0:\n raise ValueError(\"If shear is a single number, it must be positive.\")\n self.shear = (-shear, shear)\n else:\n assert isinstance(shear, (tuple, list)) and len(shear) == 2, \\\n \"shear should be a list or tuple and it must be of length 2.\"\n self.shear = shear\n else:\n self.shear = shear\n\n self.resample = resample\n self.fillcolor = fillcolor\n\n @staticmethod\n def get_params(degrees, translate, scale_ranges, shears, img_size):\n \"\"\"Get parameters for affine transformation\n\n Returns:\n sequence: params to be passed to the affine transformation\n \"\"\"\n angle = random.uniform(degrees[0], degrees[1])\n if translate is not None:\n max_dx = translate[0] * img_size[0]\n max_dy = translate[1] * img_size[1]\n translations = (np.round(random.uniform(-max_dx, max_dx)),\n np.round(random.uniform(-max_dy, max_dy)))\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = random.uniform(scale_ranges[0], scale_ranges[1])\n else:\n scale = 1.0\n\n if shears is not None:\n shear = random.uniform(shears[0], shears[1])\n else:\n shear = 0.0\n\n return angle, translations, scale, shear\n\n def __call__(self, img):\n \"\"\"\n img (PIL Image): Image to be transformed.\n\n Returns:\n PIL Image: Affine transformed image.\n \"\"\"\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)\n return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.resample > 0:\n s += ', resample={resample}'\n if self.fillcolor != 0:\n s += ', fillcolor={fillcolor}'\n s += ')'\n d = dict(self.__dict__)\n d['resample'] = _pil_interpolation_to_str[d['resample']]\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(object):\n \"\"\"Convert image to grayscale.\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If num_output_channels == 1 : returned image is single channel\n - If num_output_channels == 3 : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n 
self.num_output_channels = num_output_channels\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Grayscaled image.\n \"\"\"\n return F.to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(object):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n num_output_channels = 1 if img.mode == 'L' else 3\n if random.random() < self.p:\n return F.to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n" ]
[ [ "torch.mm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anonymous-authorss/DS-Pipeline
[ "8304adfe7c1b082ad2225d6d5abf16fd30278cd9" ]
[ "notebooks/featured-70/deep-learning-in-the-deep-blue-lb-1-279.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Start-to-Finish Solution in Keras\n# \n# Here is my basic method for getting a LB submission churned out. No parameter tuning or data augmentation has been attempted, which should increase the score significantly. \n\n# In[1]:\n\n\nimport os, cv2, random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss\nfrom sklearn.preprocessing import LabelEncoder\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Convolution2D, MaxPooling2D, ZeroPadding2D, Dense, Activation\nfrom keras.optimizers import RMSprop, Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras.utils import np_utils\nfrom keras import backend as K\n\nTRAIN_DIR = '../input/train/'\nTEST_DIR = '../input/test_stg1/'\nFISH_CLASSES = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']\nROWS = 90 #720\nCOLS = 160 #1280\nCHANNELS = 3\n\n\n# # Loading and Preprocessing Data\n# \n# Not much processing, other than resizing to 90x160, but you will probably want to run larger images on a GPU for a higher score. I am also keeping track of the labels as I loop through each image folder. \n\n# In[2]:\n\n\ndef get_images(fish):\n \"\"\"Load files from train folder\"\"\"\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images\n\ndef read_image(src):\n \"\"\"Read and resize individual images\"\"\"\n im = cv2.imread(src, cv2.IMREAD_COLOR)\n im = cv2.resize(im, (COLS, ROWS), interpolation=cv2.INTER_CUBIC)\n return im\n\n\nfiles = []\ny_all = []\n\nfor fish in FISH_CLASSES:\n fish_files = get_images(fish)\n files.extend(fish_files)\n \n y_fish = np.tile(fish, len(fish_files))\n y_all.extend(y_fish)\n print(\"{0} photos of {1}\".format(len(fish_files), fish))\n \ny_all = np.array(y_all)\n\n\n# In[3]:\n\n\nX_all = np.ndarray((len(files), ROWS, COLS, CHANNELS), dtype=np.uint8)\n\nfor i, im in enumerate(files): \n X_all[i] = read_image(TRAIN_DIR+im)\n if i%1000 == 0: print('Processed {} of {}'.format(i, len(files)))\n\nprint(X_all.shape)\n\n\n# In[4]:\n\n\n## Uncomment to check out a fish from each class\n#uniq = np.unique(y_all, return_index=True)\n# for f, i in zip(uniq[0], uniq[1]):\n #plt.imshow(X_all[i])\n #plt.title(f)\n #plt.show()\n\n\n# # Splitting the Training Data\n# \n# One-Hot-Encode the labels, then create a stratified train/validation split. \n\n# In[5]:\n\n\n# One Hot Encoding Labels\ny_all = LabelEncoder().fit_transform(y_all)\ny_all = np_utils.to_categorical(y_all)\n\nX_train, X_valid, y_train, y_valid = train_test_split(X_all, y_all, \n test_size=0.2, random_state=23, \n stratify=y_all)\n\n\n# ## The Model\n# \n# Pretty typical CNN in Keras with a plenty of dropout regularization between the fully connected layers. Note: I set the epochs to 1 to avoid timing out - change it to around 20. 
\n\n# In[6]:\n\n\noptimizer = RMSprop(lr=1e-4)\nobjective = 'categorical_crossentropy'\n\ndef center_normalize(x):\n return (x - K.mean(x)) / K.std(x)\n\nmodel = Sequential()\n\nmodel.add(Activation(activation=center_normalize, input_shape=(ROWS, COLS, CHANNELS)))\n\nmodel.add(Convolution2D(32, 5, 5, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(Convolution2D(32, 5, 5, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))\n\nmodel.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))\n\nmodel.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))\n\nmodel.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu', dim_ordering='tf'))\nmodel.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))\n\n\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(len(FISH_CLASSES)))\nmodel.add(Activation('sigmoid'))\n\nmodel.compile(loss=objective, optimizer=optimizer)\n\n\n# In[7]:\n\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=4, verbose=1, mode='auto') \n \nmodel.fit(X_train, y_train, batch_size=64, nb_epoch=1,\n validation_split=0.2, verbose=1, shuffle=True, callbacks=[early_stopping])\n\n\n# In[8]:\n\n\npreds = model.predict(X_valid, verbose=1)\nprint(\"Validation Log Loss: {}\".format(log_loss(y_valid, preds)))\n\n\n# # Predicting the Test Set\n# \n# Finishing off with predictions on the test set. Scored LB 1.279 \n\n# In[9]:\n\n\ntest_files = [im for im in os.listdir(TEST_DIR)]\ntest = np.ndarray((len(test_files), ROWS, COLS, CHANNELS), dtype=np.uint8)\n\nfor i, im in enumerate(test_files): \n test[i] = read_image(TEST_DIR+im)\n \ntest_preds = model.predict(test, verbose=1)\n\n\n# In[10]:\n\n\nsubmission = pd.DataFrame(test_preds, columns=FISH_CLASSES)\nsubmission.insert(0, 'image', test_files)\nsubmission.head()\n\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.metrics.log_loss", "numpy.array", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jpchavat/nilmtk
[ "906ad988e58d3738917a8b838d853aecaf69d150" ]
[ "nilmtk/dataset_converters/iawe/convert_iawe.py" ]
[ "from __future__ import print_function, division\nimport pandas as pd\nimport numpy as np\nfrom os.path import join\nfrom nilmtk.datastore import Key\nfrom nilmtk.measurement import LEVEL_NAMES\nfrom nilmtk.utils import check_directory_exists, get_datastore, get_module_directory\nfrom nilm_metadata import convert_yaml_to_hdf5\nfrom copy import deepcopy\n\ndef reindex_fill_na(df, idx):\n df_copy = deepcopy(df)\n df_copy = df_copy.reindex(idx)\n\n power_columns = [\n x for x in df.columns if x[0] in ['power']]\n non_power_columns = [x for x in df.columns if x not in power_columns]\n\n for power in power_columns:\n df_copy[power].fillna(0, inplace=True)\n for measurement in non_power_columns:\n df_copy[measurement].fillna(\n df[measurement].median(), inplace=True)\n\n return df_copy\n\n\ncolumn_mapping = {\n 'frequency': ('frequency', \"\"),\n 'voltage': ('voltage', \"\"),\n 'W': ('power', 'active'),\n 'energy': ('energy', 'apparent'),\n 'A': ('current', ''),\n 'reactive_power': ('power', 'reactive'),\n 'apparent_power': ('power', 'apparent'),\n 'power_factor': ('pf', ''),\n 'PF': ('pf', ''),\n 'phase_angle': ('phi', ''),\n 'VA': ('power', 'apparent'),\n 'VAR': ('power', 'reactive'),\n 'VLN': ('voltage', \"\"),\n 'V': ('voltage', \"\"),\n 'f': ('frequency', \"\")\n}\n\nTIMESTAMP_COLUMN_NAME = \"timestamp\"\nTIMEZONE = \"Asia/Kolkata\"\nSTART_DATETIME, END_DATETIME = '2013-07-13', '2013-08-04'\nFREQ = \"1T\"\n\n\ndef convert_iawe(iawe_path, output_filename, format=\"HDF\"):\n \"\"\"\n Parameters\n ----------\n iawe_path : str\n The root path of the iawe dataset.\n output_filename : str\n The destination filename (including path and suffix).\n \"\"\"\n\n check_directory_exists(iawe_path)\n idx = pd.date_range(start=START_DATETIME, end=END_DATETIME, freq=FREQ)\n idx = idx.tz_localize('GMT').tz_convert(TIMEZONE)\n\n # Open data store\n store = get_datastore(output_filename, format, mode='w')\n electricity_path = join(iawe_path, \"electricity\")\n\n # Mains data\n for chan in range(1, 12):\n key = Key(building=1, meter=chan)\n filename = join(electricity_path, \"%d.csv\" % chan)\n print('Loading ', chan)\n df = pd.read_csv(filename, dtype=np.float64, na_values='\\\\N')\n df.drop_duplicates(subset=[\"timestamp\"], inplace=True)\n df.index = pd.to_datetime(df.timestamp.values, unit='s', utc=True)\n df = df.tz_convert(TIMEZONE)\n df = df.drop(TIMESTAMP_COLUMN_NAME, 1)\n df.columns = pd.MultiIndex.from_tuples(\n [column_mapping[x] for x in df.columns],\n names=LEVEL_NAMES\n )\n df = df.apply(pd.to_numeric, errors='ignore')\n df = df.dropna()\n df = df.astype(np.float32)\n df = df.sort_index()\n df = df.resample(\"1T\").mean()\n df = reindex_fill_na(df, idx)\n assert df.isnull().sum().sum() == 0\n store.put(str(key), df)\n store.close()\n \n metadata_dir = join(get_module_directory(), 'dataset_converters', 'iawe', 'metadata')\n convert_yaml_to_hdf5(metadata_dir, output_filename)\n\n print(\"Done converting iAWE to HDF5!\")\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.MultiIndex.from_tuples", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
CoAxLab/brainhack-physio-project
[ "b87cd6c6db486639f271b786ca1cf4aa27a70fad" ]
[ "niphlem/tests/test_filters.py" ]
[ "import numpy as np\nfrom niphlem.clean import _transform_filter\n\n\ndef test_transform():\n\n rng = np.random.RandomState(1234)\n n_samples = 200\n\n low_pass = None\n high_pass = None\n sampling_rate = None\n\n eps = 100*np.finfo(np.float64).eps\n # Compare output for different options.\n # single timeseries\n data = 3.1 + 2.5*rng.standard_normal(size=n_samples)\n\n transform = None\n data_transform = _transform_filter(data,\n transform=transform,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n assert abs(np.mean(data_transform)) < eps\n\n transform = \"abs\"\n data_transform = _transform_filter(data,\n transform=transform,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n assert abs(np.mean(data_transform)) < eps\n\n transform = \"zscore\"\n data_transform = _transform_filter(data,\n transform=transform,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n assert abs(np.mean(data_transform)) < eps\n assert np.allclose(np.std(data_transform), 1.0)\n\n\ndef test_filter():\n\n from scipy.signal import periodogram\n\n # Create signal with sampling 50 Hz, that has\n # a frequency signal of 5, 10 and 15 Hz.\n sampling_rate = 50\n times = np.arange(1000)/sampling_rate\n signal = np.sin(2*np.pi*5*times) +\\\n np.sin(2*np.pi*10*times) + np.sin(2*np.pi*15*times)\n\n # band pass filter betweenn 6 and 12\n low_pass = 12\n high_pass = 6\n\n signal_transform = _transform_filter(signal,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n freqs, Pxx = periodogram(signal_transform, fs=sampling_rate)\n # Uncomment to see the plot and how the filtered worked\n # plt.plot(freqs, Pxx)\n\n # Verify that the filtered frequencies are removed with respect\n # to passed frequencies\n Pxx_passed = np.sum(Pxx[(freqs < low_pass * 2.) &\n (freqs > high_pass / 2.)])\n Pxx_filtered = np.sum(Pxx[(freqs >= low_pass * 2.) |\n (freqs <= high_pass / 2)])\n assert Pxx_filtered < 1e-3*Pxx_passed\n\n # low pass filter below 12 Hz\n low_pass = 12\n high_pass = None\n\n signal_transform = _transform_filter(signal,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n\n freqs, Pxx = periodogram(signal_transform, fs=sampling_rate)\n\n # Uncomment to see the plot and how the filtered worked\n # plt.plot(freqs, Pxx)\n\n Pxx_passed = np.sum(Pxx[freqs < low_pass * 2.])\n Pxx_filtered = np.sum(Pxx[freqs >= low_pass * 2.])\n assert Pxx_filtered < 1e-3*Pxx_passed\n\n # high pass filter above 6 Hz\n low_pass = None\n high_pass = 6\n\n signal_transform = _transform_filter(signal,\n high_pass=high_pass,\n low_pass=low_pass,\n sampling_rate=sampling_rate)\n\n freqs, Pxx = periodogram(signal_transform, fs=sampling_rate)\n\n # Uncomment to see the plot and how the filtered worked\n # plt.plot(freqs, Pxx)\n\n Pxx_passed = np.sum(Pxx[freqs > high_pass / 2.])\n Pxx_filtered = np.sum(Pxx[freqs <= high_pass / 2])\n assert Pxx_filtered < 1e-3*Pxx_passed\n" ]
[ [ "numpy.arange", "numpy.sin", "numpy.finfo", "numpy.std", "numpy.mean", "scipy.signal.periodogram", "numpy.random.RandomState", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
jayBana/InventoryMan
[ "0826f9c98062fb6600f77a721311cbf27719e528" ]
[ "scripts/01-process_pe/05_sum_items.py" ]
[ "#!/usr/bin/python3\n\n'''\nscript that sums the daily product usage for items from a list\n'''\n\nimport os\nimport os.path\nimport csv\nimport json\nimport pandas as pd\nfrom pandas import ExcelWriter, merge\n\n\ndef main():\n # first argument the path for file to be processed\n dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'peData' + os.sep\n file_path = dir_path + 'daily_ingredient_usage.xlsx'\n\n # read sheet\n table = pd.read_excel(file_path, 'Sheet', index_col=None, na_values=['NA'])\n\n # read the product list for items that we are interested in\n list_to_sum = csv.reader(open(dir_path + 'list_sum_up.csv', 'rU'))\n\n # read the list of all products\n with open(dir_path + 'list_products.json', encoding='utf-8') as data_file:\n products = json.load(data_file)\n\n # create dictionary where we save the product codes for each product\n # some products maybe part of a set menu\n dict_to_sum = {}\n\n # each line in csv reader object\n for line in list_to_sum:\n # create empty set\n ids = set()\n # check against each name variation\n for l in line:\n # find the product keys for names\n for k in products.keys():\n if l.strip() in products[k]['Description']:\n ids.add(int(k))\n\n # save set of product keys for each item\n dict_to_sum[line[0]] = ids\n\n # create a final table\n final_table = pd.DataFrame(table['Date']).groupby(['Date'], as_index=False).sum()\n\n # sum up products for each day\n for k, v in dict_to_sum.items():\n # sum up product if in set of product keys per item\n result = table.loc[table['Stock Code'].isin(v)].groupby(['Date'], as_index=False).sum()[['Date', 'Unit Sales']]\n # create the name for saving the file\n prod_name = 'prod_' + k.lower().replace(' ', '_')\n # rename the summed column\n result.rename(columns={result.columns[1]: prod_name}, inplace=True)\n # merge results together\n final_table = merge(final_table, result, how='outer', on='Date')\n\n # get the list of ingredients\n ingredients = []\n for l in list(table.columns.values):\n if 'ing_' in l:\n ingredients.append(l)\n\n # for each ingredient\n for i in ingredients:\n # sum daily ingredient usage\n result = table[['Date', i]].groupby(['Date'], as_index=False).sum()\n # merge results together\n final_table = merge(final_table, result, how='outer', on='Date')\n\n # fill NaN values\n final_table = final_table.fillna(0)\n\n # define where to save the file\n file_path = dir_path + 'daily_summed_usage' + os.sep + 'merged_table.csv'\n # save it as a csv file\n with open(file_path, mode='w') as fp:\n final_table.to_csv(fp, index=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.merge", "pandas.read_excel", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
annalunde/master
[ "2552d43713e8ebca0b0e57bc5bebd1eaeeac1875" ]
[ "heuristic/improvement/reopt/reopt_operators.py" ]
[ "import copy\nimport math\nimport sys\n\nimport numpy.random as rnd\nfrom datetime import datetime\nimport pandas as pd\nfrom datetime import timedelta\nimport traceback\nfrom heuristic.construction.construction import ConstructionHeuristic\nfrom config.construction_config import *\nfrom heuristic.improvement.reopt.reopt_repair_generator import ReOptRepairGenerator\n\n\nclass ReOptOperators:\n def __init__(self, alns, sim_clock, vehicle_clocks):\n self.destruction_degree = alns.destruction_degree\n self.constructor = alns.constructor\n self.T_ij = self.constructor.T_ij\n self.reopt_repair_generator = ReOptRepairGenerator(self.constructor)\n self.sim_clock = sim_clock\n self.vehicle_clocks = vehicle_clocks\n\n # Find number of requests to remove based on degree of destruction\n def nodes_to_remove(self, route_plan):\n\n # Count number of requests in route_plan\n total_requests = 0\n for row in route_plan:\n for col in row:\n if col[0]:\n total_requests += 0.5\n\n # Calculate number of requests to remove\n num_remove = math.ceil(total_requests * self.destruction_degree)\n return num_remove\n\n def random_removal(self, current_route_plan, current_infeasible_set):\n destroyed_route_plan = copy.deepcopy(current_route_plan)\n to_remove = []\n removed_requests = []\n index_removed_requests = []\n possible_removals = self.find_possible_removals(destroyed_route_plan)\n empty = 0\n for vehicle in possible_removals:\n empty += len(vehicle)\n\n if not empty:\n return current_route_plan, removed_requests, index_removed_requests, False\n\n # Number of requests to remove\n num_remove = self.nodes_to_remove(possible_removals)\n\n # Find the requests to remove\n while len(to_remove)/2 < num_remove:\n\n # Pick random node in route plan to remove and to compare other nodes to\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n rnd.shuffle(rows)\n\n for row in rows:\n if len(possible_removals[row]) == 2:\n col = 0\n break\n else:\n col = rnd.randint(\n 0, len(possible_removals[row]))\n break\n node = possible_removals[row][col]\n destroy_node = destroyed_route_plan[row][node[6]]\n\n # Find col-index of associated pickup/drop-off node\n index, pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_node = possible_removals[row][index]\n destroy_associated_node = destroyed_route_plan[row][associated_node[6]]\n\n # Skip already added nodes\n if [node, row, destroy_node] in to_remove or [associated_node, row, destroy_associated_node] in to_remove:\n continue\n\n # Add both pickup and drop-off node to to_remove\n to_remove.append([node, row, destroy_node])\n to_remove.append([associated_node, row, destroy_associated_node])\n\n # Remove nearest nodes from destroyed route plan and from possible_removals\n for n in to_remove:\n index_removed_requests.append(\n (n[0][0], n[1], n[0][6]))\n for n in to_remove:\n possible_removals[n[1]].remove(n[0])\n destroyed_route_plan[n[1]].remove(n[2])\n\n # Add request id to removed_requests\n if not n[0][0] % int(n[0][0]):\n removed_requests.append((n[0][0], n[0][5]))\n\n return destroyed_route_plan, removed_requests, index_removed_requests, True\n\n def worst_deviation_removal(self, current_route_plan, current_infeasible_set):\n destroyed_route_plan = copy.deepcopy(current_route_plan)\n to_remove = []\n removed_requests = []\n index_removed_requests = []\n possible_removals = self.find_possible_removals(destroyed_route_plan)\n empty = 0\n for vehicle in possible_removals:\n empty += len(vehicle)\n\n if not empty:\n 
return current_route_plan, removed_requests, index_removed_requests, False\n\n # Number of requests to remove\n num_remove = self.nodes_to_remove(possible_removals)\n\n # Find the requests to remove\n for j in range(num_remove):\n worst_deviation = timedelta(0)\n worst_node = None\n\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n\n for row in rows:\n for col in range(0, len(possible_removals[row])):\n\n temp = possible_removals[row][col]\n destroyed_temp = destroyed_route_plan[row][temp[6]]\n\n # Skip already added nodes\n if [temp, row, destroyed_temp] in to_remove:\n continue\n\n # Find associated drop off/pickup node\n index, pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_temp = possible_removals[row][index]\n destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]\n\n temp_deviation = temp[2]\n associated_temp_deviation = associated_temp[2]\n\n if temp_deviation < timedelta(0):\n temp_deviation = timedelta(\n seconds=-temp_deviation.total_seconds())\n\n if associated_temp_deviation < timedelta(0):\n associated_temp_deviation = timedelta(\n seconds=-associated_temp_deviation.total_seconds())\n\n # Calculate total deviation for request\n deviation = temp_deviation + associated_temp_deviation\n\n # Update worst deviation so far\n if deviation > worst_deviation and deviation > timedelta(0):\n worst_deviation = deviation\n worst_node = [temp, row, destroyed_temp]\n worst_associated_node = [\n associated_temp, row, destroyed_associated_temp]\n\n # Add node with worst deviation to list of nodes to remove\n\n if worst_node is not None and worst_node in to_remove:\n continue\n if worst_node is not None:\n to_remove.append(worst_node)\n to_remove.append(worst_associated_node)\n\n # If not enough nodes have deviation > 0, remove the rest randomly\n if len(to_remove)/2 < num_remove:\n to_remove = self.worst_deviation_random_removal(\n destroyed_route_plan, possible_removals, num_remove, to_remove)\n\n # Remove nearest nodes from destroyed route plan and from possible_removals\n for n in to_remove:\n index_removed_requests.append(\n (n[0][0], n[1], n[0][6]))\n for n in to_remove:\n possible_removals[n[1]].remove(n[0])\n destroyed_route_plan[n[1]].remove(n[2])\n\n # Add request id to removed_requests\n if not n[0][0] % int(n[0][0]):\n removed_requests.append((n[0][0], n[0][5]))\n\n return destroyed_route_plan, removed_requests, index_removed_requests, True\n\n # Related in travel time\n def distance_related_removal(self, current_route_plan, current_infeasible_set):\n destroyed_route_plan = copy.deepcopy(current_route_plan)\n removed_requests = []\n index_removed_requests = []\n possible_removals = self.find_possible_removals(destroyed_route_plan)\n empty = 0\n for vehicle in possible_removals:\n empty += len(vehicle)\n\n if not empty:\n return current_route_plan, removed_requests, index_removed_requests, False\n\n # Number of requests to remove\n num_remove = self.nodes_to_remove(possible_removals)\n\n if len(current_infeasible_set) != 0:\n # Pick random node in infeasible_set to compare other nodes to - always pickup nodes\n initial_node = current_infeasible_set[rnd.randint(\n 0, len(current_infeasible_set))]\n node = self.get_pickup(initial_node)\n pickup = True\n\n # Find associated node - dropoff node\n associated_node = self.get_dropoff(initial_node)\n\n to_remove = []\n\n else:\n # Pick random node in route plan to remove and to compare other nodes to\n rows = [i for i in range(0, 
len(possible_removals)) if len(possible_removals[i]) > 0]\n rnd.shuffle(rows)\n\n for row_index in rows:\n if len(possible_removals[row_index]) == 2:\n col_index = 0\n break\n else:\n col_index = rnd.randint(\n 0, len(possible_removals[row_index]))\n break\n node = possible_removals[row_index][col_index]\n destroy_node = destroyed_route_plan[row_index][node[6]]\n\n # Find associated node\n index, pickup = self.find_associated_node(\n row_index, col_index, possible_removals)\n associated_node = possible_removals[row_index][index]\n destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]\n\n # List of nodes to remove\n to_remove = [[node, row_index, destroy_node], [\n associated_node, row_index, destroy_associated_node]]\n\n # Remaining number of nodes to remove\n num_remove -= 1\n\n # Find the requests to remove\n for j in range(num_remove):\n\n # TODO: figure out what this should initially be set to\n best_diff = 48 * 60 * 60\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n\n for row in rows:\n for col in range(0, len(possible_removals[row])):\n\n # Drop off/pickup of request to compare\n temp = possible_removals[row][col]\n destroyed_temp = destroyed_route_plan[row][temp[6]]\n\n # Skip already added nodes\n if [temp, row, destroyed_temp] in to_remove:\n continue\n\n # Find associated drop off/pickup node of request to compare\n temp_index, temp_pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_temp = possible_removals[row][temp_index]\n destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]\n\n # Find difference in distance between pickup and drop-off of requests\n if (temp_pickup == pickup) & pickup:\n diff = self.travel_time_difference(temp[0], node[0])\n\n elif (temp_pickup == pickup) & (not pickup):\n diff = self.travel_time_difference(\n associated_temp[0], associated_node[0])\n\n elif (temp_pickup != pickup) & pickup:\n diff = self.travel_time_difference(\n associated_temp[0], node[0])\n\n else:\n diff = self.travel_time_difference(\n temp[0], associated_node[0])\n\n # Compare with smallest difference in current iteration\n if diff < best_diff:\n best_diff = diff\n nearest_node = [temp, row, destroyed_temp]\n nearest_associated_node = [\n associated_temp, row, destroyed_associated_temp]\n\n to_remove.append(nearest_node)\n to_remove.append(nearest_associated_node)\n\n # Remove nearest nodes from destroyed route plan and from possible_removals\n for n in to_remove:\n index_removed_requests.append(\n (n[0][0], n[1], n[0][6]))\n for n in to_remove:\n possible_removals[n[1]].remove(n[0])\n destroyed_route_plan[n[1]].remove(n[2])\n\n # Add request id to removed_requests\n if not n[0][0] % int(n[0][0]):\n removed_requests.append((n[0][0], n[0][5]))\n\n return destroyed_route_plan, removed_requests, index_removed_requests, True\n\n # Related in service time\n def time_related_removal(self, current_route_plan, current_infeasible_set):\n destroyed_route_plan = copy.deepcopy(current_route_plan)\n removed_requests = []\n index_removed_requests = []\n possible_removals = self.find_possible_removals(destroyed_route_plan)\n empty = 0\n for vehicle in possible_removals:\n empty += len(vehicle)\n\n if not empty:\n return current_route_plan, removed_requests, index_removed_requests, False\n\n # Number of requests to remove\n num_remove = self.nodes_to_remove(possible_removals)\n\n if len(current_infeasible_set) != 0:\n # Pick random node in infeasible_set to compare other nodes to - always 
pickup nodes\n initial_node = current_infeasible_set[rnd.randint(\n 0, len(current_infeasible_set))]\n node = self.get_pickup(initial_node)\n pickup = True\n\n # Find associated node - dropoff node\n associated_node = self.get_dropoff(initial_node)\n\n to_remove = []\n\n else:\n # Pick random node in route plan to remove and to compare other nodes to\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n rnd.shuffle(rows)\n\n for row_index in rows:\n if len(possible_removals[row_index]) == 2:\n col_index = 0\n break\n else:\n col_index = rnd.randint(\n 0, len(possible_removals[row_index]))\n break\n node = possible_removals[row_index][col_index]\n destroy_node = destroyed_route_plan[row_index][node[6]]\n\n # Find associated node\n index, pickup = self.find_associated_node(\n row_index, col_index, possible_removals)\n associated_node = possible_removals[row_index][index]\n destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]\n\n # List of nodes to remove\n to_remove = [[node, row_index, destroy_node], [\n associated_node, row_index, destroy_associated_node]]\n\n # Remaining number of nodes to remove\n num_remove -= 1\n\n # Find the requests to remove\n for j in range(num_remove):\n\n # TODO: figure out what this should initially be set to\n best_diff = 48 * 60 * 60\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n\n for row in rows:\n for col in range(0, len(possible_removals[row])):\n\n temp = possible_removals[row][col]\n destroyed_temp = destroyed_route_plan[row][temp[6]]\n\n # Skip already added nodes\n if [temp, row, destroyed_temp] in to_remove:\n continue\n\n # Find associated drop off/pickup node\n temp_index, temp_pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_temp = possible_removals[row][temp_index]\n destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]\n\n # Find difference between pickup-times and drop off-times of requests\n if temp_pickup == pickup:\n diff = self.time_difference(\n temp, node, associated_temp, associated_node)\n\n else:\n diff = self.time_difference(\n temp, associated_node, associated_temp, node)\n\n # Compare with smallest difference in current iteration\n if diff < best_diff:\n best_diff = diff\n nearest_node = [temp, row, destroyed_temp]\n nearest_associated_node = [\n associated_temp, row, destroyed_associated_temp]\n\n to_remove.append(nearest_node)\n to_remove.append(nearest_associated_node)\n\n # Remove nearest nodes from destroyed route plan and from possible_removals\n for n in to_remove:\n index_removed_requests.append(\n (n[0][0], n[1], n[0][6]))\n for n in to_remove:\n possible_removals[n[1]].remove(n[0])\n destroyed_route_plan[n[1]].remove(n[2])\n\n # Add request id to removed_requests\n if not n[0][0] % int(n[0][0]):\n removed_requests.append((n[0][0], n[0][5]))\n\n return destroyed_route_plan, removed_requests, index_removed_requests, True\n\n # Related in both service time and travel time\n def related_removal(self, current_route_plan, current_infeasible_set):\n destroyed_route_plan = copy.deepcopy(current_route_plan)\n removed_requests = []\n index_removed_requests = []\n possible_removals = self.find_possible_removals(destroyed_route_plan)\n empty = 0\n for vehicle in possible_removals:\n empty += len(vehicle)\n\n if not empty:\n return current_route_plan, removed_requests, index_removed_requests, False\n\n # Number of requests to remove\n num_remove = 
self.nodes_to_remove(possible_removals)\n\n if len(current_infeasible_set) != 0:\n # Pick random node in infeasible_set to compare other nodes to - always pickup nodes\n initial_node = current_infeasible_set[rnd.randint(\n 0, len(current_infeasible_set))]\n node = self.get_pickup(initial_node)\n pickup = True\n\n # Find associated node - dropoff node\n associated_node = self.get_dropoff(initial_node)\n\n to_remove = []\n\n else:\n # Pick random node in route plan to remove and to compare other nodes to\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n rnd.shuffle(rows)\n\n for row_index in rows:\n if len(possible_removals[row_index]) == 2:\n col_index = 0\n break\n else:\n col_index = rnd.randint(\n 0, len(possible_removals[row_index]))\n break\n node = possible_removals[row_index][col_index]\n destroy_node = destroyed_route_plan[row_index][node[6]]\n\n # Find associated node\n index, pickup = self.find_associated_node(\n row_index, col_index, possible_removals)\n associated_node = possible_removals[row_index][index]\n destroy_associated_node = destroyed_route_plan[row_index][associated_node[6]]\n\n # List of nodes to remove\n to_remove = [[node, row_index, destroy_node], [\n associated_node, row_index, destroy_associated_node]]\n\n # Remaining number of nodes to remove\n num_remove -= 1\n\n # Find the requests to remove\n for j in range(num_remove):\n\n # TODO: figure out what this should initially be set to\n best_diff = 48 * 60 * 60\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n\n for row in rows:\n for col in range(0, len(possible_removals[row])):\n\n temp = possible_removals[row][col]\n destroyed_temp = destroyed_route_plan[row][temp[6]]\n\n # Skip already added nodes\n if [temp, row, destroyed_temp] in to_remove:\n continue\n\n # Find associated drop off/pickup node\n temp_index, temp_pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_temp = possible_removals[row][temp_index]\n destroyed_associated_temp = destroyed_route_plan[row][associated_temp[6]]\n\n # Find difference between requests\n if (temp_pickup == pickup) & pickup:\n diff_distance = self.travel_time_difference(\n temp[0], node[0])\n diff_time = self.time_difference(\n temp, node, associated_temp, associated_node)\n\n elif (temp_pickup == pickup) & (not pickup):\n diff_distance = self.travel_time_difference(\n associated_temp[0], associated_node[0])\n diff_time = self.time_difference(\n temp, node, associated_temp, associated_node)\n\n elif (temp_pickup != pickup) & pickup:\n diff_distance = self.travel_time_difference(\n associated_temp[0], node[0])\n diff_time = self.time_difference(\n temp, associated_node, associated_temp, node)\n\n else:\n diff_distance = self.travel_time_difference(\n temp[0], associated_node[0])\n diff_time = self.time_difference(\n temp, associated_node, associated_temp, node)\n\n diff = diff_distance + diff_time\n\n # Compare with smallest difference in current iteration\n if diff < best_diff:\n best_diff = diff\n nearest_node = [temp, row, destroyed_temp]\n nearest_associated_node = [\n associated_temp, row, destroyed_associated_temp]\n\n to_remove.append(nearest_node)\n to_remove.append(nearest_associated_node)\n\n # Remove nearest nodes from destroyed route plan and from possible_removals\n for n in to_remove:\n index_removed_requests.append(\n (n[0][0], n[1], n[0][6]))\n for n in to_remove:\n possible_removals[n[1]].remove(n[0])\n destroyed_route_plan[n[1]].remove(n[2])\n\n # Add 
request id to removed_requests\n if not n[0][0] % int(n[0][0]):\n removed_requests.append((n[0][0], n[0][5]))\n\n return destroyed_route_plan, removed_requests, index_removed_requests, True\n\n # Repair operators\n def greedy_repair(self, destroyed_route_plan, removed_requests, initial_infeasible_set, current_route_plan, index_removed_requests, delayed, still_delayed_nodes):\n unassigned_requests = removed_requests.copy() + initial_infeasible_set.copy()\n unassigned_requests.sort(key=lambda x: x[0])\n route_plan = copy.deepcopy(destroyed_route_plan)\n current_objective = timedelta(0)\n infeasible_set = []\n unassigned_requests = pd.DataFrame(unassigned_requests)\n for i in range(unassigned_requests.shape[0]):\n # while not unassigned_requests.empty:\n rid = unassigned_requests.iloc[i][0]\n request = unassigned_requests.iloc[i][1]\n index_removal = [\n i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]\n\n route_plan, new_objective, infeasible_set, vehicle_clocks = self.reopt_repair_generator.generate_insertions(\n route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,\n initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=False, delayed=delayed, still_delayed_nodes=still_delayed_nodes,\n vehicle_clocks=self.vehicle_clocks)\n\n self.vehicle_clocks = vehicle_clocks\n\n # update current objective\n current_objective = new_objective\n\n return route_plan, current_objective, infeasible_set\n\n def regret_2_repair(self, destroyed_route_plan, removed_requests, initial_infeasible_set, current_route_plan, index_removed_requests, delayed, still_delayed_nodes):\n unassigned_requests = removed_requests.copy() + initial_infeasible_set.copy()\n unassigned_requests.sort(key=lambda x: x[0])\n route_plan = copy.deepcopy(destroyed_route_plan)\n current_objective = timedelta(0)\n infeasible_set = []\n unassigned_requests = pd.DataFrame(unassigned_requests)\n regret_values = []\n initial_vehicle_clocks = copy.deepcopy(self.vehicle_clocks)\n for i in range(unassigned_requests.shape[0]):\n rid = unassigned_requests.iloc[i][0]\n request = unassigned_requests.iloc[i][1]\n index_removal = [\n i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]\n\n first_objective, second_objective, vehicle_clocks = self.reopt_repair_generator.generate_insertions(\n route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,\n initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=2, delayed=delayed, still_delayed_nodes=still_delayed_nodes,\n vehicle_clocks=self.vehicle_clocks)\n\n self.vehicle_clocks = vehicle_clocks\n\n regret_values.append(\n (rid, request, second_objective-first_objective))\n\n regret_values.sort(key=lambda x: x[2])\n\n # iterate through requests in order of regret k value\n self.vehicle_clocks = initial_vehicle_clocks\n\n for i in reversed(regret_values):\n rid = i[0]\n request = i[1]\n index_removal = [\n i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]\n\n route_plan, new_objective, infeasible_set, vehicle_clocks = self.reopt_repair_generator.generate_insertions(\n route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,\n initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=0, delayed=delayed, still_delayed_nodes=still_delayed_nodes,\n vehicle_clocks=self.vehicle_clocks)\n\n self.vehicle_clocks = vehicle_clocks\n\n # update current 
objective\n current_objective = new_objective\n\n return route_plan, current_objective, infeasible_set\n\n def regret_3_repair(self, destroyed_route_plan, removed_requests, initial_infeasible_set, current_route_plan, index_removed_requests, delayed, still_delayed_nodes):\n unassigned_requests = removed_requests.copy() + initial_infeasible_set.copy()\n unassigned_requests.sort(key=lambda x: x[0])\n route_plan = copy.deepcopy(destroyed_route_plan)\n current_objective = timedelta(0)\n infeasible_set = []\n unassigned_requests = pd.DataFrame(unassigned_requests)\n regret_values = []\n initial_vehicle_clocks = copy.deepcopy(self.vehicle_clocks)\n for i in range(unassigned_requests.shape[0]):\n rid = unassigned_requests.iloc[i][0]\n request = unassigned_requests.iloc[i][1]\n index_removal = [\n i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]\n\n first_objective, third_objective, vehicle_clocks = self.reopt_repair_generator.generate_insertions(\n route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,\n initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=3, delayed=delayed, still_delayed_nodes=still_delayed_nodes,\n vehicle_clocks=self.vehicle_clocks)\n\n self.vehicle_clocks = vehicle_clocks\n\n regret_values.append(\n (rid, request, third_objective-first_objective))\n\n regret_values.sort(key=lambda x: x[2])\n\n # iterate through requests in order of regret k value\n self.vehicle_clocks = initial_vehicle_clocks\n for i in reversed(regret_values):\n rid = i[0]\n request = i[1]\n index_removal = [\n i for i in index_removed_requests if i[0] == rid or i[0] == rid+0.5]\n\n route_plan, new_objective, infeasible_set, vehicle_clocks = self.reopt_repair_generator.generate_insertions(\n route_plan=route_plan, request=request, rid=rid, infeasible_set=infeasible_set,\n initial_route_plan=current_route_plan, index_removed=index_removal, sim_clock=self.sim_clock, objectives=0, delayed=delayed, still_delayed_nodes=still_delayed_nodes,\n vehicle_clocks=self.vehicle_clocks)\n\n self.vehicle_clocks = vehicle_clocks\n\n # update current objective\n current_objective = new_objective\n\n return route_plan, current_objective, infeasible_set\n\n # Function to find random requests to remove if worst deviation removal does not remove enough\n\n def worst_deviation_random_removal(self, destroyed_route_plan, possible_removals, num_remove, to_remove):\n\n # Find the requests to remove\n while len(to_remove)/2 < num_remove:\n\n # Pick random node in route plan to remove and to compare other nodes to\n rows = [i for i in range(0, len(possible_removals)) if len(possible_removals[i]) > 0]\n rnd.shuffle(rows)\n\n for row in rows:\n if len(possible_removals[row]) == 2:\n col = 0\n break\n else:\n col = rnd.randint(\n 0, len(possible_removals[row]))\n break\n node = possible_removals[row][col]\n destroy_node = destroyed_route_plan[row][node[6]]\n\n # Find col-index of associated pickup/drop-off node\n index, pickup = self.find_associated_node(\n row, col, possible_removals)\n associated_node = possible_removals[row][index]\n destroy_associated_node = destroyed_route_plan[row][associated_node[6]]\n\n # Skip already added nodes\n if [node, row, destroy_node] in to_remove or [associated_node, row, destroy_associated_node] in to_remove:\n continue\n\n # Add both pickup and drop-off node to to_remove\n to_remove.append([node, row, destroy_node])\n to_remove.append([associated_node, row, destroy_associated_node])\n\n return to_remove\n\n 
# Function to calculate total travel time differences between requests\n def travel_time_difference(self, request_1, request_2):\n num_requests = int(len(self.T_ij) / 2)\n idx_1 = request_1 - 1\n idx_2 = request_2 - 1\n return self.T_ij[idx_1][idx_2] + \\\n self.T_ij[idx_1 + num_requests][idx_2 + num_requests] + \\\n self.T_ij[idx_1 + num_requests][idx_2] + \\\n self.T_ij[idx_1][idx_2 + num_requests]\n\n # Function to calculate service time differences between requests\n @ staticmethod\n def time_difference(pickup_1, pickup_2, dropoff_1, dropoff_2):\n return abs((pickup_1[1] - pickup_2[1]).total_seconds()) + abs((dropoff_1[1] - dropoff_2[1]).total_seconds())\n\n # Function to find associated pickup/drop-off of a node.\n @ staticmethod\n def find_associated_node(row, col, route_plan):\n node = route_plan[row][col]\n\n if node[0] % int(node[0]):\n # Node is drop-off, must find pickup\n pickup = False\n request = node[0] - 0.5\n for index in range(col):\n temp = route_plan[row][index]\n if temp[0] == request:\n return index, pickup\n\n else:\n # Node is pickup, must find drop-off\n pickup = True\n request = node[0] + 0.5\n for index in range(len(route_plan[row])):\n temp = route_plan[row][index]\n if temp[0] == request:\n return index, pickup\n\n @ staticmethod\n def find_associated_node_infeasible(infeasible_set, node):\n if node[0] % int(node[0]):\n # Node is drop-off, must find pickup\n pickup = False\n request = node[0] - 0.5\n for index in range(len(infeasible_set)):\n temp = infeasible_set[index]\n if temp[0] == request:\n return index, pickup\n\n else:\n # Node is pickup, must find drop-off\n pickup = True\n request = node[0] + 0.5\n for index in range(len(infeasible_set)):\n temp = infeasible_set[index]\n if temp[0] == request:\n return index, pickup\n\n def get_pickup(self, node):\n # Node is pickup, find requested pickup time or calculated pickup time\n rid = node[0]\n if not pd.isnull(node[1][\"Requested Pickup Time\"]):\n time = node[1][\"Requested Pickup Time\"]\n else:\n time = node[1][\"Requested Dropoff Time\"] - self.constructor.travel_time(\n rid - 1, self.constructor.n + rid - 1, True)\n\n node = (rid, time)\n return node\n\n def get_dropoff(self, node):\n # Node is dropoff, find requested dropoff time or calculated dropoff time\n rid = node[0]\n d_rid = rid + 0.5\n if not pd.isnull(node[1][\"Requested Dropoff Time\"]):\n time = node[1][\"Requested Dropoff Time\"]\n else:\n time = node[1][\"Requested Pickup Time\"] + self.constructor.travel_time(\n rid - 1, self.constructor.n + rid - 1, True)\n\n node = (d_rid, time)\n return node\n\n def find_possible_removals(self, route_plan):\n\n possible_removals = [[(rid, t, d, p, w, request, idx + 1) for idx, (rid, t, d, p, w, request) in\n enumerate(route_plan[vehicle][1:]) if t > self.vehicle_clocks[vehicle]] for vehicle in\n range(0, len(route_plan))]\n\n vehicles = [vehicle for vehicle in possible_removals if len(vehicle) > 0]\n\n for vehicle in vehicles:\n rids = [rid for (rid, t, d, p, w, request, idx) in vehicle]\n to_remove = []\n for node in vehicle:\n if node[0] % int(node[0]):\n if not node[0] - 0.5 in rids:\n to_remove.append(node)\n else:\n if not node[0] + 0.5 in rids:\n to_remove.append(node)\n\n for node in to_remove:\n possible_removals[possible_removals.index(\n vehicle)].remove(node)\n\n return possible_removals\n" ]
[ [ "pandas.isnull", "numpy.random.shuffle", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
MoritzLange/py-tedopa
[ "38558c62f5a1cb0cc43b490f50f0674f4a0464c9" ]
[ "tests/test_tmps_for_transverse_ising_model.py" ]
[ "\"\"\"\nTest to check the whole TMPS algorithm for the transverse Ising model\n\"\"\"\n\nfrom scipy.linalg import expm\nfrom scipy.linalg import sqrtm\nimport numpy as np\nimport mpnum as mp\nfrom tedopa import tmps\n\nprecision = 1e-7 # required precision of the tMPS results\n\n\ndef test_mpo_trotter2():\n n = 4 # number of sites\n\n state = generate_state(n=n)\n J = 1\n B = 1\n times = [1, 2]\n hamiltonian = generate_hamiltonian(n=n, J=J, B=B)\n\n mpo_state = tmps.matrix_to_mpo(state, [[2, 2]] * n)\n\n num_trotter_slices = 101\n\n times, sites, evolved_states = \\\n tmps.evolve(state=mpo_state, hamiltonians=[B * sx(),\n J * np.kron(sz(),\n sz())],\n ts=times, num_trotter_slices=num_trotter_slices,\n method='mpo',\n trotter_compr=dict(method='svd', relerr=1e-20),\n trotter_order=2, compr=dict(method='svd', relerr=1e-20))\n\n rho_t_arr = [exp(state=state, hamiltonian=hamiltonian, t=times[i])\n for i in range(len(times))]\n\n rho_t_mpo = [\n evolved_states[i].to_array_global().reshape([2 ** n, 2 ** n]) for i\n in range(len(times))]\n\n fidelities = [np.trace(sqrtm(\n sqrtm(rho_t_arr[i]).dot(rho_t_mpo[i]).dot(sqrtm(rho_t_arr[i])))) for\n i in range(len(times))]\n\n for i in range(len(times)):\n assert np.isclose(1, fidelities[i], rtol=precision)\n\n\ndef test_pmps_trotter2():\n n = 5 # number of sites\n\n state = generate_state(n=n)\n J = 1\n B = 1\n times = [1, 2]\n hamiltonian = generate_hamiltonian(n=n, J=J, B=B)\n\n mpo_state = tmps.matrix_to_mpo(state, [[2, 2]] * n)\n pmps_state = mp.mpo_to_pmps(mpo_state)\n\n num_trotter_slices = 102\n\n times, sites, evolved_states = \\\n tmps.evolve(state=pmps_state, hamiltonians=[B * sx(),\n J * np.kron(sz(),\n sz())],\n ts=times, num_trotter_slices=num_trotter_slices,\n method='pmps',\n trotter_compr=dict(method='svd', relerr=1e-20),\n trotter_order=2, compr=dict(method='svd', relerr=1e-20))\n\n rho_t_arr = [exp(state=state, hamiltonian=hamiltonian, t=times[i])\n for i in range(len(times))]\n\n rho_t_pmps = [\n mp.pmps_to_mpo(evolved_states[i]).to_array_global().reshape(\n [2 ** n, 2 ** n]) for i in range(len(times))]\n\n fidelities = [np.trace(sqrtm(\n sqrtm(rho_t_arr[i]).dot(rho_t_pmps[i]).dot(sqrtm(rho_t_arr[i]))))\n for i in range(len(times))]\n\n for i in range(len(times)):\n assert np.isclose(1, fidelities[i], rtol=precision)\n\n\ndef test_pmps_trotter4():\n n = 5 # number of sites\n\n state = generate_state(n=n)\n J = 1\n B = 1\n times = [1, 2]\n hamiltonian = generate_hamiltonian(n=n, J=J, B=B)\n\n mpo_state = tmps.matrix_to_mpo(state, [[2, 2]] * n)\n pmps_state = mp.mpo_to_pmps(mpo_state)\n\n num_trotter_slices = 110\n\n times, sites, evolved_states = \\\n tmps.evolve(state=pmps_state, hamiltonians=[B * sx(),\n J * np.kron(sz(),\n sz())],\n ts=times, num_trotter_slices=num_trotter_slices,\n method='pmps',\n trotter_compr=dict(method='svd', relerr=1e-20),\n trotter_order=4, compr=dict(method='svd', relerr=1e-20))\n\n rho_t_arr = [exp(state=state, hamiltonian=hamiltonian, t=times[i])\n for i in range(len(times))]\n\n rho_t_pmps = [\n mp.pmps_to_mpo(evolved_states[i]).to_array_global().reshape(\n [2 ** n, 2 ** n]) for i in range(len(times))]\n\n fidelities = [np.trace(sqrtm(\n sqrtm(rho_t_arr[i]).dot(rho_t_pmps[i]).dot(sqrtm(rho_t_arr[i]))))\n for i in range(len(times))]\n\n for i in range(len(times)):\n assert np.isclose(1, fidelities[i], rtol=precision)\n\n\n############# Pauli matrices ################\ndef sx():\n return np.array([[0, 1], [1, 0]])\n\n\ndef sy():\n return np.array([[0, -1j], [1j, 0]])\n\n\ndef sz():\n return 
np.array([[1, 0], [0, -1]])\n\n\n############ Other matrices #################\ndef generate_state(n):\n \"\"\"\n Generates a density matrix for a state in the transverse Ising model\n with n sites.\n\n Args:\n n (int): Number of sites\n\n Returns:\n numpy.ndarray: A density matrix for that state\n \"\"\"\n state = np.zeros((2 ** n, 2 ** n))\n state[0, 0] = 1\n return state\n\n\ndef generate_hamiltonian(n=5, J=1, B=1):\n \"\"\"\n Generates the full Hamiltonian for the transverse Ising model,\n as defined in the respective Wikipedia article as of 28/11/2017\n\n Args:\n n (int): Number of sites\n J (int): Strength of interaction within every pair of\n two adjacent sites\n B (int): Strength of the magnetic field applied\n\n Returns:\n numpy.ndarray: The full Hamiltonian\n \"\"\"\n\n if n < 2:\n n = 2\n\n hamiltonian = np.zeros((2 ** n, 2 ** n))\n\n for i in range(1, n):\n # calculate the outer products for the sites left of site i and i+1\n # in the sum of the Hamiltonian\n if i > 1:\n left = np.identity(2 ** (i - 1))\n part1 = np.kron(np.kron(left, sz()), sz())\n part2 = np.kron(left, sx())\n if i == 1:\n part1 = np.kron(sz(), sz())\n part2 = sx()\n # calculate the outer products for the sites right of site i and i+1\n # in the sum of the Hamiltonian\n if i < n - 1:\n right = np.identity(2 ** (n - 1 - i))\n part1 = np.kron(part1, right)\n part2 = np.kron(np.kron(part2, right), np.identity(2))\n if i == n - 1:\n part2 = np.kron(part2, np.identity(2))\n # add everything to the sum\n hamiltonian = hamiltonian + J * part1 + B * part2\n\n # finally add the Sx for the last site which was not\n # taken care of in above loop\n hamiltonian = hamiltonian + B * np.kron(np.identity(2 ** (n - 1)),\n sx())\n\n return hamiltonian\n\n\n############## Time evolution ################\ndef exp(state, hamiltonian, t=1):\n \"\"\"\n Evolve the state in time by using the classical approach of\n exponentiating the full Hamiltonian and applying\n the result to the density matrix.\n\n Args:\n state (numpy.ndarray): The state to be evolved\n hamiltonian (numpy.ndarray): The Hamiltonian of the system\n t (float): The time for the evolution\n\n Returns:\n numpy.ndarray: The evolved state\n \"\"\"\n\n U = expm(-1j * t * hamiltonian)\n U_dagger = expm(1j * t * hamiltonian)\n newState = U.dot(state).dot(U_dagger)\n return newState\n" ]
[ [ "numpy.kron", "scipy.linalg.expm", "numpy.identity", "scipy.linalg.sqrtm", "numpy.array", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
tnsogs02/CVCAM
[ "5f4cd416850f5da2e8f7703c2980e9ff5068f8e3" ]
[ "cvcam.py" ]
[ "import time\nimport numpy as np\nimport cv2\nimport json\nfrom gpiozero import LED\nfrom gpiozero import Buzzer\nimport board\nimport busio\nimport adafruit_lsm9ds0\nstartTime = time.time()\nmotion = False\nusbDir = '/media/pi/SAVE'\nwith open(usbDir+'/config.json') as f:\n cfg = json.load(f)\nbuzzer = Buzzer(cfg['buzzer'])\nstatLED = LED(cfg['statLED'])\nstatLED.on()\n\ni2c = busio.I2C(board.SCL, board.SDA)\nsensor = adafruit_lsm9ds0.LSM9DS0_I2C(i2c)\n\ntry:\n cap = cv2.VideoCapture(50)\n width = 1280\n height = 960\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n ret, frame = cap.read()\n avg = cv2.blur(frame, (4, 4))\n avg_float = np.float32(avg)\n fourcc = cv2.VideoWriter_fourcc('m','p','4','v')\n while(cap.isOpened()):\n accel_x, accel_y, accel_z = sensor.acceleration\n acc = pow((pow(accel_x,2)+pow(accel_y,2)+pow(accel_z,2)),0.5)\n print(acc)\n if abs(acc-cfg['accMid'])>=cfg['acc'] and (time.time() - startTime) > 10:\n buzzer.on()\n print('Warning! Camera has been moved!')\n statLED.on()\n cycTime = time.time()\n ret, frame = cap.read()\n if ret == False:\n break\n blur = cv2.blur(frame, (4, 4))\n diff = cv2.absdiff(avg, blur)\n gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(gray, cfg['thresh'], 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)\n cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for c in cnts:\n if cv2.contourArea(c) < cfg['contourValue']:\n continue\n mTime = time.time()\n if motion != True and cfg['recording'] == 'enable':\n motion = True\n stTime = time.time()\n filename = usbDir+'/'+time.strftime(\"%Y-%m-%d_%H%M%S\", time.localtime())+'.mp4'\n out = cv2.VideoWriter(filename,fourcc,cfg['fps'],(640,480),isColor=True)\n (x, y, w, h) = cv2.boundingRect(c)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n frame = cv2.resize(frame, (640, 480))\n if motion:\n out.write(frame)\n if (time.time() - mTime) > cfg['freezeLength']:\n motion = False\n out.release()\n if(time.time() - stTime) > cfg['videoMaxLength']:\n motion = False\n out.release()\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cv2.accumulateWeighted(blur, avg_float, 0.01)\n avg = cv2.convertScaleAbs(avg_float)\n cap.release()\n if motion:\n out.release()\n print('Saved')\n statLED.off()\nexcept:\n if motion:\n out.release()\n print('ERROR')\n statLED.off()\n" ]
[ [ "numpy.float32", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ndraeger/rt1
[ "8cf30a3b3604b78b1422388e479b28c921d01c09" ]
[ "rt1/general_functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"helper functions that are used both in rtfits and rtplots\"\"\"\nimport sys\nimport numpy as np\nfrom itertools import tee, islice\nfrom collections import OrderedDict\n\n\ndef rectangularize(array, return_mask=False, dim=None,\n return_masked=False, dtype=None):\n \"\"\"\n return a rectangularized version of the input-array by repeating the\n last value to obtain the smallest possible rectangular shape.\n\n input:\n - array = [[1,2,3], [1], [1,2]]\n\n output:\n - return_masked=False: [[1,2,3], [1,1,1], [1,2,2]]\n - return_masked=True: [[1,2,3], [1,--,--], [1,2,--]]\n\n Parameters\n ----------\n array: list of lists\n the input-data that is intended to be rectangularized\n return_mask: bool (default = False)\n indicator if weights and mask should be evaluated or not\n dim: int (default = None)\n the dimension of the rectangularized array\n if None, the shortest length of all sub-lists will be used\n return_masked: bool (default=False)\n indicator if a masked-array should be returned\n dtype: type (default = None)\n the dtype of the returned array. If None, the dtype of the first\n element will be used\n\n Returns\n -------\n new_array: array-like\n a rectangularized version of the input-array\n mask: array-like (only if 'weights_and_mask' is True)\n a mask indicating the added values\n\n \"\"\"\n # use this method to get the dtype of the first element since it works with\n # pandas-Series, lists, arrays, dict-value views, etc.\n if dtype is None:\n dtype = np.array(next(islice(array, 1))).dtype\n\n if dim is None:\n # get longest dimension of sub-arrays\n dim = len(max(array, key=len))\n\n if return_mask is True or return_masked is True:\n newarray = np.empty((len(array), dim), dtype=dtype)\n mask = np.full((len(array), dim), False, dtype=bool)\n\n for i, s in enumerate(array):\n le = len(s)\n newarray[i, :le] = s\n newarray[i, le:] = s[-1]\n mask[i, le:] = True\n\n if return_masked is True:\n return np.ma.masked_array(newarray, mask)\n else:\n return [newarray, mask]\n else:\n newarray = np.empty((len(array), dim), dtype=dtype)\n for i, s in enumerate(array):\n le = len(s)\n newarray[i, :le] = s\n newarray[i, le:] = s[-1]\n return newarray\n\n\ndef meandatetime(datetimes):\n \"\"\"\n calculate the average date from a given list of datetime-objects\n (can be applied to a pandas-Series via Series.apply(meandatetime))\n\n Parameters\n ----------\n datetimes: list\n a list of datetime-objects\n Returns\n -------\n meandate: Timestamp\n\n \"\"\"\n if len(datetimes) == 1:\n return datetimes[0]\n\n x = datetimes\n deltas = (x[0] - x[1:])/len(x)\n meandelta = sum(deltas)\n meandate = x[0] - meandelta\n return meandate\n\n\ndef dBsig0convert(val, inc,\n dB, sig0,\n fitdB, fitsig0):\n \"\"\"\n A convenience-function to convert an array of measurements (and it's\n associated incidence-angles).\n - between linear- and dB units `( val_dB = 10 * log10(val_linear) )`\n - between sigma0 and intensity `( sig0 = 4 * pi * cos(inc) * I )`\n\n Parameters\n ----------\n val: array-like\n the backscatter-values that should be converted\n inc: array-like\n the associated incidence-angle values (in radians)\n dB: bool\n indicator if the output-dataset should be in dB or not\n sig0: bool\n indicator if the output-values should be intensity or sigma_0\n fitdB: bool\n indicator if the input-values have been provided in linear-units\n or in dB\n fitsig0: bool\n indicator if the input-values are given as sigma0 or intensity\n\n Returns\n -------\n val : array-like\n the converted 
values\n\n \"\"\"\n\n if sig0 is not fitsig0:\n # if results are provided in dB convert them to linear units before\n # applying the sig0-intensity conversion\n if fitdB is True:\n val = 10**(val/10.)\n # convert sig0 to intensity\n if sig0 is False and fitsig0 is True:\n val = val/(4.*np.pi*np.cos(inc))\n # convert intensity to sig0\n if sig0 is True and fitsig0 is False:\n val = 4.*np.pi*np.cos(inc)*val\n # convert back to dB if required\n if dB is True:\n val = 10.*np.log10(val)\n elif dB is not fitdB:\n # if dB output is required, convert to dB\n if dB is True and fitdB is False:\n val = 10.*np.log10(val)\n # if linear output is required, convert to linear units\n if dB is False and fitdB is True:\n val = 10**(val/10.)\n\n return val\n\n\ndef pairwise(iterable, pairs=2):\n \"\"\"\n a generator to return n consecutive values from an iterable, e.g.:\n\n pairs = 2\n s -> (s0,s1), (s1,s2), (s2, s3), ...\n\n pairs = 3\n s -> (s0, s1, s2), (s1, s2, s3), (s2, s3, s4), ...\n\n adapted from https://docs.python.org/3.7/library/itertools.html\n \"\"\"\n x = tee(iterable, pairs)\n for n, n_iter in enumerate(x[1:]):\n [next(n_iter, None) for i in range(n + 1)]\n return zip(*x)\n\n\ndef split_into(iterable, sizes):\n \"\"\"\n a generator that splits the iterable into iterables with the given sizes\n\n see more_itertools split_into for details:\n https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.split_into\n \"\"\"\n it = iter(iterable)\n for size in sizes:\n if size is None:\n yield list(it)\n return\n else:\n yield list(islice(it, size))\n\n\ndef scale(x, out_range=(0, 1),\n domainfuncs=(np.nanmin, np.nanmax)):\n \"\"\"\n scale an array between out_range = (min, max) where the range of the\n array is evaluated via the domainfuncs (min-function, max-funcion)\n\n useful domainfuncs are:\n\n >>> np.nanmin()\n >>> np.nanmax()\n\n >>> from itertools import partial\n >>> partial(np.percentile, q=95)\n\n Notice: using functions like np.percentile might result in values that\n exceed the specified `out_range`! (e.g. if the out-range is (0,1),\n a min-function of np.percentile(q=5) might result in negative values!)\n \"\"\"\n domain = domainfuncs[0](x), domainfuncs[1](x)\n\n y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])\n return y * (out_range[1] - out_range[0]) + (out_range[1] +\n out_range[0]) / 2\n\n\ndef update_progress(progress, max_prog=100,\n title=\"\", finalmsg=\" DONE\\r\\n\",\n progress2=None):\n \"\"\"\n print a progress-bar\n\n adapted from: https://blender.stackexchange.com/a/30739\n \"\"\"\n\n length = 25 # the length of the progress bar\n block = int(round(length*progress/max_prog))\n if progress2 is not None:\n msg = (f'\\r{title} {\"#\"*block + \"-\"*(length-block)}' +\n f' {progress} [{progress2}] / {max_prog}')\n else:\n msg = (f'\\r{title} {\"#\"*block + \"-\"*(length-block)}' +\n f' {progress} / {max_prog}')\n\n if progress >= max_prog:\n msg = f'\\r{finalmsg:<79}\\n'\n\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n\ndef dt_to_hms(td):\n \"\"\"\n convert a datetime.timedelta object into days, hours,\n minutes and seconds\n \"\"\"\n\n days, hours, minutes = td.days, td.seconds // 3600, td.seconds % 3600 // 60\n seconds = td.seconds - hours*3600 - minutes*60\n return days, hours, minutes, seconds\n\n\ndef groupby_unsorted(a, key=lambda x: x, sort=False, get=lambda x: x):\n \"\"\"\n group the elements of the input-array and return it as a dict with a list\n of the found values. 
optionally use a key- and a get- function.\n\n if sort is True, an OrderedDict with sorted keys will be returned\n\n roughly equivalent to:\n\n >>> # if only the input-array a is provided\n ... {unique value of a: [found copies of the unique value]}\n ... # if a and a key-function is provided\n ... {key(a) : [...values with the same key(a)...]}\n ... # if both a key- and a get-function is provided\n ... {key(a) : [get(x) for x in ...values with the same key(a)...]}\n\n \"\"\"\n # always use an OrderedDict to ensure sort-order for python < 3.6\n d = OrderedDict()\n for item in a:\n d.setdefault(key(item), []).append(get(item))\n if sort is True:\n return OrderedDict(sorted(d.items()))\n else:\n return d\n\n\ndef interpolate_to_index(data, index, data_index=None, **interp1d_kwargs):\n \"\"\"\n A wrapper around scipy.interp1d to interpolate a dataset to a given index\n\n Parameters\n ----------\n data : list, array-like, pandas.Series or pandas.DataFrame\n The input-data as list, array, pandas.Series or pandas.DataFrame\n If the data is provided as pandas Series or DataFrame, the index\n must support a method .to_julian_date() to convert the timestamps\n into numerical values.\n index : array-like\n the index to which the dataset should be interpolated.\n It must support a method .to_julian_date()\n data_index : array-like, optional\n the index associated with the data; required if the data is\n provided as a list or array. It must support a method\n .to_julian_date(). The default is None.\n **interp1d_kwargs :\n additional keyword-arguments passed to scipy.interpolate.interp1d\n the default is (fill_value=None, bounds_error=False)\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n the data interpolated to the provided index\n\n \"\"\"\n from pandas import Series, DataFrame\n from scipy.interpolate import interp1d\n\n kwargs = dict(fill_value=None, bounds_error=False)\n kwargs.update(interp1d_kwargs)\n\n if isinstance(data, Series):\n # perform a linear interpolation to the auxiliary data timestamps\n f = interp1d(data.index.to_julian_date(), data.values, **kwargs)\n x = f(index.to_julian_date())\n return Series(x, index)\n elif isinstance(data, DataFrame):\n f = interp1d(data.index.to_julian_date(), data.values, axis=0,\n **kwargs)\n x = f(index.to_julian_date())\n return DataFrame(x, index, columns=data.columns)\n\n elif isinstance(data, (list, np.ndarray)):\n assert data_index is not None, ('you must provide \"data_index\" ' +\n 'if data is provided as list or array')\n\n # lists and arrays have no .values attribute - use the data directly\n f = interp1d(data_index.to_julian_date(), data, **kwargs)\n x = f(index.to_julian_date())\n return Series(x, index)\n" ]
[ [ "pandas.Series", "numpy.cos", "pandas.DataFrame", "numpy.log10", "numpy.ma.masked_array" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
BurakHocaoglu/Dynamic-Multi-Robot-Coverage
[ "707112fb86f4fa05fad24eb52b648646fa4a51d4" ]
[ "coverage_control/scripts/voronoi.py" ]
[ "import sys\r\nimport time\r\nimport yaml\r\nimport math\r\nimport signal\r\nimport datetime\r\nimport threading\r\nimport traceback\r\nimport numpy as np\r\nfrom cvxopt import matrix, solvers\r\n#from scipy.spatial import ConvexHull\r\nimport matplotlib.patches as ptc\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n# from actions import *\r\n\r\nCOLORS = [(0.0, 0.0, 0.0), (0.99, 0.0, 0.0), (0.0, 0.99, 0.0), (0.0, 0.0, 0.99), (0.99, 0.99, 0.0), (0.99, 0.0, 0.99), (0.0, 0.99, 0.99)]\r\nglobal_boundary = []\r\nxlim = []\r\nylim = []\r\ntest_type = 0\r\n\r\nworld = None\r\n\r\ndef is_in_space(p, tol):\r\n global xlim, ylim\r\n return xlim[0] - tol <= p[0] <= xlim[1] + tol and ylim[0] - tol <= p[1] <= ylim[1] + tol\r\n\r\ndef is_in_bounding_polygon(p, tol):\r\n global global_boundary\r\n pass\r\n\r\ndef angle_in_2pi(v):\r\n angle = np.arctan2(v[1], v[0])\r\n #if angle <= 0:\r\n # angle += 2 * np.pi\r\n return angle\r\n\r\ndef to_grid(x, y, x_off, y_off):\r\n return (x - x_off, y - y_off)\r\n\r\n#def get_convex_hull(V):\r\n# hull = ConvexHull(V)\r\n# return [V[vertex] for vertex in hull.vertices]\r\n\r\ndef appendGlobalBoundaries(B):\r\n bottom_left = globals()['global_boundary'][0]\r\n top_right = globals()['global_boundary'][3]\r\n B.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))\r\n B.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))\r\n B.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))\r\n B.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))\r\n\r\ndef angularSort(reference, vertices):\r\n vectors = [p - reference for p in vertices]\r\n indexed_angles = [(angle_in_2pi(vectors[i]), i) for i in range(len(vectors))]\r\n #if self.name == \"uav1\":\r\n # print(\"------\")\r\n # for i in range(len(vectors)):\r\n # print(vectors[i], indexed_angles[i][0])\r\n # print(\"------\")\r\n indexed_angles.sort()\r\n return [vertices[i] for _, i in indexed_angles]\r\n\r\nclass StateBuffer:\r\n\r\n def __init__(self):\r\n self.buffers = dict()\r\n\r\n def getState(self, name):\r\n return self.buffers[name]\r\n\r\n def getAllStates(self):\r\n return dict(self.buffers)\r\n\r\n def updateState(self, name, s):\r\n self.buffers[name] = s\r\n\r\nclass Agent:\r\n\r\n def __init__(self, name, init, goal, vmax):\r\n self.name = name\r\n self.move_thread = threading.Thread(name=\"{}_move\".format(self.name), target=self.move)\r\n self.sim_log = open('LOG_{}.txt'.format(self.name), 'w+')\r\n\r\n self.terminate = False\r\n self.phys_radius = 2.0\r\n self.safe_radius = 3.0\r\n self.comm_radius = 10.0\r\n self.dt = 0.1\r\n self.vmax = vmax\r\n self.vmin = 0.5\r\n self.velocity = np.zeros(2)\r\n self.position = np.array(init, dtype=float)\r\n self.voronoi_graph = []\r\n #self.color = tuple(np.random.rand(3))\r\n self.color = globals()['COLORS'][int(self.name[3:])]\r\n self.inter_sort_type = [('angle', float), ('vector', np.ndarray)]\r\n self.world = None\r\n self.world_offset = (globals()['xlim'][0], globals()['ylim'][0])\r\n self.frontier = set()\r\n\r\n self._B = np.array([[1., 0.], [0., 1.], [1., 0.], [0., 1.]], dtype=float)\r\n\r\n self.neighbours = dict()\r\n # self.path = []\r\n # self.curves = []\r\n self.xhistory = []\r\n self.yhistory = []\r\n self.goal = np.array(goal, dtype=float)\r\n self.goal_change = 10.\r\n self.converged = False\r\n self.H = matrix([[2., 0.], [0., 2.]], tc='d')\r\n\r\n # STATE:\r\n self.state = {'pos': self.position, 'vel': self.velocity, 
'end': False}\r\n self.advertiseState()\r\n \r\n def initialize_world(self):\r\n #global xlim, ylim\r\n #W = xlim[1] - xlim[0]\r\n #H = ylim[1] - ylim[0]\r\n #self.world = np.zeros((H, W))\r\n #grid_node = to_grid(self.position[0], self.position[1], xlim[1], ylim[1])\r\n #v_act = valid_actions(self.world, grid_node)\r\n #for act in v_act:\r\n # applied_coord = apply_action_to_node(grid_node, act)\r\n # pass\r\n pass\r\n\r\n def initialize(self):\r\n #print(\"Initializing agent {}\".format(self.name))\r\n #print(\"Agent {} --> {}\".format(self.name, self.goal))\r\n self.move_thread.start()\r\n\r\n def setGoal(self, g):\r\n self.goal_change = np.linalg.norm(g - self.goal)\r\n self.converged = self.goal_change <= 0.1\r\n self.goal = np.array(g, dtype=float)\r\n\r\n def hasReachedGoal(self):\r\n return np.linalg.norm(self.goal - self.state['pos']) <= 0.1 and self.converged\r\n\r\n def getCentroid(self):\r\n ### SOURCE: https://en.wikipedia.org/wiki/Centroid\r\n\r\n # Calculate area with Shoelace Formula\r\n area = 0\r\n for i in range(len(self.voronoi_graph) - 1):\r\n x_i, y_i = self.voronoi_graph[i]\r\n x_j, y_j = self.voronoi_graph[i + 1]\r\n area += x_i * y_j - x_j * y_i\r\n\r\n area *= 0.5\r\n\r\n # Calculate centroid of voronoi cell\r\n Cx, Cy = 0, 0\r\n for i in range(len(self.voronoi_graph) - 1):\r\n x_i, y_i = self.voronoi_graph[i]\r\n x_j, y_j = self.voronoi_graph[i + 1]\r\n product = (x_i * y_j - x_j * y_i)\r\n Cx += (x_i + x_j) * product\r\n Cy += (y_i + y_j) * product\r\n\r\n return np.array([Cx, Cy], dtype=float) / (6. * area)\r\n\r\n def computeBisectors(self):\r\n bisectors = [] # (normal, point)\r\n cons, vals = [], []\r\n tol = 0.1\r\n\r\n for a, st in self.neighbours.items():\r\n if st is None:\r\n continue\r\n\r\n if np.any(np.isnan(st['pos'])):\r\n print(f'Agent {self.name} neighbour {a} has NaN!')\r\n\r\n normal = (st['pos'] - self.state['pos']).round(4)\r\n m = ((st['pos'] + self.state['pos']) * 0.5).round(4)\r\n bisectors.append((normal, m))\r\n cons.append(normal)\r\n #vals.append(m.dot(normal) - self.safe_radius)\r\n vals.append((m.dot(normal)).round(4))\r\n\r\n # bottom_left = globals()['global_boundary'][0]\r\n # top_right = globals()['global_boundary'][3]\r\n\r\n # bisectors.append((np.array([1., 0.], dtype=float), np.array(bottom_left, dtype=float)))\r\n # bisectors.append((np.array([0., 1.], dtype=float), np.array(bottom_left, dtype=float)))\r\n # bisectors.append((np.array([1., 0.], dtype=float), np.array(top_right, dtype=float)))\r\n # bisectors.append((np.array([0., 1.], dtype=float), np.array(top_right, dtype=float)))\r\n appendGlobalBoundaries(bisectors)\r\n\r\n A = np.array(cons, dtype=float)\r\n b = np.array(vals, dtype=float)\r\n self.voronoi_graph = []\r\n for i in range(len(bisectors)):\r\n n_i, m_i = bisectors[i]\r\n d_i = m_i.dot(n_i)\r\n\r\n for j in range(i + 1, len(bisectors)):\r\n n_j, m_j = bisectors[j]\r\n d_j = m_j.dot(n_j)\r\n\r\n try:\r\n A_ = np.array([n_i.round(4), n_j.round(4)], dtype=float)\r\n b_ = np.array([d_i.round(4), d_j.round(4)], dtype=float)\r\n p = (np.linalg.solve(A_, b_)).round(4)\r\n\r\n except np.linalg.LinAlgError:\r\n continue\r\n\r\n except:\r\n print(traceback.format_exc())\r\n continue\r\n\r\n if is_in_space(p, tol) and np.all(A.dot(p) <= b + 0.1):\r\n self.voronoi_graph.append(p)\r\n\r\n A_iq = matrix(np.array(cons), tc='d')\r\n b_iq = matrix(np.array(vals), tc='d')\r\n self.voronoi_graph = angularSort(self.position, self.voronoi_graph)\r\n #self.voronoi_graph = get_convex_hull(self.voronoi_graph)\r\n return A_iq, 
b_iq\r\n\r\n def solveStep(self, A_iq, b_iq, _t=0):\r\n v_next = self.state['vel']\r\n\r\n if _t == 0:\r\n ## Buffered Voronoi Cell\r\n\r\n if A_iq and b_iq:\r\n solvers.options['show_progress'] = False\r\n sol = solvers.qp(self.H, matrix(-2. * self.goal, tc='d'), A_iq, b_iq)\r\n #print(\"Agent {} SOLN: {}\".format(self.name, sol['x']))\r\n\r\n v_next = (np.array(sol['x'][0]) - self.state['pos']) / self.dt\r\n _norm = np.linalg.norm(v_next)\r\n\r\n if _norm > self.vmax:\r\n v_next = self.vmax * v_next / _norm\r\n\r\n return v_next\r\n\r\n elif _t == 1:\r\n ## Lloyd's Descent\r\n if len(self.voronoi_graph):\r\n self.voronoi_graph.append(self.voronoi_graph[0])\r\n self.setGoal(self.getCentroid())\r\n v_next = self.goal - self.state['pos']\r\n _norm = np.linalg.norm(v_next)\r\n\r\n if _norm > self.vmax:\r\n v_next *= self.vmax / np.linalg.norm(v_next)\r\n\r\n return v_next\r\n\r\n print(f'Agent {self.name} stopped momentarily.')\r\n return np.zeros(2)\r\n\r\n def doStep(self, v_next):\r\n x_, y_ = self.state['pos'][0], self.state['pos'][1]\r\n self.xhistory.append(x_)\r\n self.yhistory.append(y_)\r\n self.state['pos'] = self.state['pos'] + self.dt * v_next\r\n self.state['vel'] = v_next\r\n\r\n def stepLog(self, _t=0):\r\n if _t == 0:\r\n self.sim_log.write('{} - pos: {} - vel: {} - at: {}\\n'.format(self.name, self.position, self.velocity, datetime.datetime.now()))\r\n\r\n elif _t == 1:\r\n # Agent name; current position; next goal\r\n #self.sim_log.write('{};{};{}\\n'.format(self.name, self.position, self.goal))\r\n #self.sim_log.write(f'{self.name};{self.voronoi_graph.dfs_traversal()}\\n')\r\n #self.sim_log.write(f'{self.name};{self.voronoi_graph}\\n')\r\n pass\r\n\r\n def updateNeighbours(self):\r\n for uav, st in globals()['buf'].buffers.items():\r\n if uav == self.name or st is None:\r\n continue\r\n\r\n self.neighbours[uav] = dict(st)\r\n\r\n def advertiseState(self):\r\n globals()['buf'].updateState(self.name, self.state)\r\n\r\n def stop(self):\r\n self.terminate = True\r\n\r\n def move(self):\r\n test = globals()['test_type']\r\n pre_flight_count = 20\r\n #while not self.terminate and not self.hasReachedGoal():\r\n while not self.terminate:\r\n _start = time.time()\r\n\r\n self.advertiseState()\r\n self.updateNeighbours()\r\n\r\n if pre_flight_count < 1:\r\n A, b = self.computeBisectors()\r\n v_next = self.solveStep(A, b, test)\r\n self.doStep(v_next)\r\n self.stepLog(test)\r\n\r\n else:\r\n pre_flight_count -= 1\r\n\r\n _elapsed = time.time() - _start\r\n fail_hard = _elapsed >= self.dt\r\n if fail_hard:\r\n #print('Agent {} failed hard real-time constraint at {}'.format(self.name, datetime.datetime.now()))\r\n pass\r\n\r\n else:\r\n time.sleep(self.dt - _elapsed)\r\n\r\n self.state['end'] = True\r\n if self.hasReachedGoal():\r\n print(\"Agent {} has reached goal at {}\".format(self.name, datetime.datetime.now()))\r\n\r\n self.sim_log.close()\r\n\r\nclass Simulator:\r\n\r\n def __init__(self, pfile):\r\n self.xlim = [-20, 80]\r\n self.ylim = [-20, 80]\r\n self.count = 0\r\n self.agents = dict()\r\n self.vmax = 0\r\n self.iteration = 0\r\n self.loadParams(pfile)\r\n #self.logfile = open('SimulatorLog.txt', 'w+')\r\n\r\n self.terminate = False\r\n self.distance_thread = threading.Thread(name='distance_thread', target=self.checkCollision)\r\n\r\n self.fig = plt.figure()\r\n self.ax = self.fig.add_subplot(1, 1, 1)\r\n #self.fig, self.axs = plt.subplots(2)\r\n self.ani = None\r\n\r\n def loadParams(self, pfile):\r\n params = None\r\n with open(pfile) as P:\r\n params = yaml.load(P, 
Loader=yaml.FullLoader)\r\n\r\n self.xlim = np.array(params['xlim'], dtype=float)\r\n self.ylim = np.array(params['ylim'], dtype=float)\r\n self.count = params['count']\r\n self.vmax = params['vmax']\r\n globals()['test_type'] = params['test_type']\r\n globals()['world'] = np.zeros((int(self.ylim[1] - self.ylim[0]), int(self.xlim[1] - self.xlim[0])), dtype=int)\r\n globals()['xlim'] = np.array(self.xlim, dtype=float)\r\n globals()['ylim'] = np.array(self.ylim, dtype=float)\r\n #globals()['global_boundary'] = np.array([[i, j] for i in self.xlim for j in self.ylim], dtype=float)\r\n globals()['global_boundary'] = np.array([vertex for vertex in params['bounding_polygon']], dtype=float)\r\n #sorted_boundary = angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary'])\r\n self.bounding_poly_plt = ptc.Polygon(angularSort(np.mean(globals()['global_boundary'], axis=0), globals()['global_boundary']), \r\n color=(0, 0, 0), fill=False)\r\n\r\n for entry in params['uav']:\r\n self.agents[entry[0]] = Agent(entry[0], entry[1], entry[2], self.vmax)\r\n\r\n def isDone(self):\r\n return all([a.state['end'] for _, a in self.agents.items()])\r\n\r\n def checkCollision(self):\r\n if not self.agents:\r\n return\r\n\r\n try:\r\n while not self.terminate:\r\n ax, ay = list(zip(*[tuple(a.state['pos']) for _, a in self.agents.items()]))\r\n X = np.array(ax, dtype=float)\r\n Y = np.array(ay, dtype=float)\r\n XX1, XX2 = np.meshgrid(X, X)\r\n YY1, YY2 = np.meshgrid(Y, Y)\r\n pairwise_dists = np.sqrt((XX2 - XX1) ** 2 + (YY2 - YY1) ** 2)\r\n R, C = pairwise_dists.shape\r\n\r\n for i in range(R):\r\n for j in range(C):\r\n if j < i and pairwise_dists[i, j] <= 2.0:\r\n print('COLLISION between agents uav{} and uav{} at {}'.format(i, j, datetime.datetime.now()))\r\n\r\n time.sleep(1)\r\n\r\n except Exception:\r\n print(traceback.format_exc())\r\n\r\n def animate_motion(self, i):\r\n self.ax.clear()\r\n self.ax.set_xlim(self.xlim[0] - 5, self.xlim[1] + 5)\r\n self.ax.set_ylim(self.ylim[0] - 5, self.ylim[1] + 5)\r\n self.iteration += 1\r\n\r\n for _, a in self.agents.items():\r\n pos = a.state['pos']\r\n vel = a.state['vel']\r\n angle = np.arctan2(vel[1], vel[0])\r\n circle = plt.Circle(tuple(pos), 2., color=a.color)\r\n self.ax.quiver(pos[0], pos[1], np.cos(angle), np.sin(angle), color=a.color)\r\n self.ax.add_artist(circle)\r\n self.ax.plot(a.xhistory, a.yhistory, color=a.color)\r\n self.ax.add_patch(self.bounding_poly_plt)\r\n\r\n polygon = a.voronoi_graph\r\n if len(polygon) < 3:\r\n continue\r\n\r\n poly = plt.Polygon(polygon, alpha=0.4, color=a.color)\r\n self.ax.add_patch(poly)\r\n\r\n def stop(self):\r\n self.terminate = True\r\n\r\n def run(self):\r\n print(\"Run starts at {}\".format(datetime.datetime.now()))\r\n\r\n for _, a in self.agents.items():\r\n a.initialize()\r\n\r\n self.ani = animation.FuncAnimation(self.fig, self.animate_motion, interval=100)\r\n #self.ani = animation.FuncAnimation(self.fig, self.animate_motion, frames=3000, interval=100)\r\n #self.ani.save(f'lloyd_{self.count}_uav.mp4', writer='ffmpeg', fps=30)\r\n self.distance_thread.start()\r\n plt.show()\r\n\r\n while not self.terminate and not self.isDone():\r\n time.sleep(1)\r\n\r\n for _, a in self.agents.items():\r\n a.stop()\r\n\r\n self.distance_thread.join()\r\n #self.logfile.close()\r\n print(\"Run done at {}\".format(datetime.datetime.now()))\r\n\r\ndef ctrl_c_handler(signum, frame):\r\n globals()['sim'].stop()\r\n print('Closing...')\r\n\r\nif __name__ == '__main__':\r\n buf = StateBuffer()\r\n sim = 
Simulator(sys.argv[1])\r\n signal.signal(signal.SIGINT, ctrl_c_handler)\r\n sim.run()" ]
[ [ "numpy.linalg.solve", "numpy.sqrt", "numpy.meshgrid", "numpy.isnan", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.arctan2", "matplotlib.pyplot.Polygon", "matplotlib.animation.FuncAnimation", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
imsodin/pyvista
[ "af3bd7dc7b5b8551b732f6e6fa74f6675027469c" ]
[ "examples/02-plot/labels.py" ]
[ "\"\"\"\nLabel Points\n~~~~~~~~~~~~\n\nUse string arrays in a point set to label points\n\"\"\"\n# sphinx_gallery_thumbnail_number = 3\nfrom pyvista import examples\nimport pyvista as pv\nimport numpy as np\n\n# Labels are not currently supported by the VTKjs conversion script\npv.rcParams[\"use_panel\"] = False\n\n###############################################################################\n# Label String Array\n# ++++++++++++++++++\n#\n# This example will label the nodes of a mesh with a given array of string\n# labels for each of the nodes.\n\n# Make some random points\npoly = pv.PolyData(np.random.rand(10, 3))\n\n###############################################################################\n# Add string labels to the point data - this associates a label with every\n# node:\n\npoly[\"My Labels\"] = [\"Label {}\".format(i) for i in range(poly.n_points)]\n\nprint(poly)\n\n###############################################################################\n# Now plot the points with labels:\n\nplotter = pv.Plotter()\nplotter.add_point_labels(poly, \"My Labels\", point_size=20, font_size=36)\nplotter.show()\n\n\n###############################################################################\n# Label Node Locations\n# ++++++++++++++++++++\n#\n# This example will label the nodes of a mesh with their coordinate locations\n\n# Load example beam file\ngrid = pv.UnstructuredGrid(examples.hexbeamfile)\n\n\n###############################################################################\n# Create plotting class and add the unstructured grid\nplotter = pv.Plotter()\nplotter.add_mesh(grid, show_edges=True, color=\"tan\")\n\n# Add labels to points on the yz plane (where x == 0)\npoints = grid.points\nmask = points[:, 0] == 0\nplotter.add_point_labels(\n points[mask], points[mask].tolist(), point_size=20, font_size=36\n)\n\nplotter.camera_position = [\n (-1.5, 1.5, 3.0),\n (0.05, 0.6, 1.2),\n (0.2, 0.9, -0.25)]\n\nplotter.show()\n\n\n###############################################################################\n# Label Scalar Values\n# +++++++++++++++++++\n#\n# This example will label each point with their scalar values\n\nmesh = examples.load_uniform().slice()\n\n###############################################################################\np = pv.Plotter()\n\n# Add the mesh:\np.add_mesh(mesh, scalars=\"Spatial Point Data\", show_edges=True)\n# Add the points with scalar labels:\np.add_point_scalar_labels(mesh, \"Spatial Point Data\", point_size=20, font_size=36)\n\n# Use a nice camera position:\np.camera_position = [(7, 4, 5), (4.4, 7.0, 7.2), (0.8, 0.5, 0.25)]\n\np.show()\n" ]
[ [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ati-ozgur/KDD99ReviewArticle
[ "621d494cf0216c2993b43f387687264116739a09" ]
[ "HelperCodes/create_table_metaDatasetsUsed.py" ]
[ "\nimport ReviewHelper\nimport pandas as pd\n\ndf = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()\n\n# find problematic ones\ndf[df.metaDatasetsUsed.isnull()]\n\nlist1 = df.metaDatasetsUsed.str.split(\",\").tolist()\n\n\ndf1 = pd.DataFrame(list1)\n\nfor i in range(df1.columns.size):\n df1[i] = df1[i].str.strip()\n\nstacked = df1.stack()\n\n\nstacked_value_counts = stacked.value_counts()\n\ngreater_than = stacked_value_counts[stacked_value_counts > 3]\n\ntable_content_inside=\"\"\n\nlist_ids_dataset_names = [\"KDD99\",\"NSL-KDD\",\"DARPA\",\"Kyoto\",\"ISCX\"]\n\ntable_content_inside=\"\"\n\nfor dataset_name in greater_than.index:\n dataset_count = greater_than[dataset_name]\n dataset_name_in_table = dataset_name\n\n\n dataset_name_in_table = dataset_name\n if(dataset_name in list_ids_dataset_names):\n dataset_name_in_table = \"\\\\rowcolor{Gray}\\n\" + dataset_name + \"* \"\n\n\n line = \"{dataset_name} & {dataset_count} \\\\\\\\ \\n\".format(\n dataset_name = dataset_name_in_table\n ,dataset_count = dataset_count\n )\n table_content_inside = table_content_inside + line\n\n\ntable_content_start = \"\"\"\n\\\\begin{table}[!ht]\n \\\\centering \n \\\\caption{ \\\\textbf{Most used Datasets}. * denotes IDS datasets. Datasets that are used less than three is not included.}\n \\\\label{table-metaDatasetsUsed}\n\n\n\\\\begin{tabular}{ll}\n\n\\\\toprule\n\n\\\\textbf{Dataset Name } & \\\\textbf{Article Count} \\\\\\\\\n\n\\\\midrule\n\n\"\"\"\n\ntable_content_end = \"\"\"\n\\\\bottomrule\n\n\\\\end{tabular}\n\n\\\\end{table}\n\n\"\"\"\n\ntable_content_full = table_content_start + table_content_inside + table_content_end\n\n\n#print table_content_full\n\nfilename = \"../latex/table-metaDatasetsUsed.tex\"\ntarget = open(filename, 'w')\ntarget.write(table_content_full)\ntarget.close()\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
loaywael/FaceRecognition
[ "5aa79ff1d8a89e2f0e530bca02ead0bdf3926284" ]
[ "src/PKG/utils/scripts/extract_landmarks_data.py" ]
[ "import io\nimport os\nimport cv2\nimport time\nimport zipfile\nimport argparse\nimport numpy as np \nimport pandas as pd\nimport concurrent.futures\nfrom decouple import config\n\n\nDATA_PATH = config(\"FACE_DATA\")+\"face_landmarks1/\"\nfiles = os.listdir(DATA_PATH)\nprint(\"files in data directory: \", files)\n\ndef draw_batch(batch):\n rows = []\n BATCH_LENGTH = len(batch)\n for yi in range(0, BATCH_LENGTH-1, 8):\n cols = []\n row = batch[yi:yi+8]\n for data in row:\n img = data[\"image\"]\n keypoints = data[\"keypoints\"]\n img = plot_keypoints(img, keypoints)\n cols.append(img)\n row = np.hstack(cols)\n rows.append(row)\n batch = np.vstack(rows)\n cv2.imshow(\"batch\", batch)\n cv2.waitKey(0)\n cv2.destroyWindow(\"batch\")\n\n\ndef process_img(data_row, root_path=DATA_PATH+\"training/\", size=96):\n index, data = data_row\n image_name = data.iloc[0]\n img_path = root_path + image_name\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n keypoints = data.iloc[1:].values.reshape(-1, 2)\n\n scale = 1\n h, w = img.shape\n max_dim = max(h, w)\n if max_dim > size:\n scale = size / max_dim \n new_h = int(h * scale)\n new_w = int(w * scale)\n img = cv2.resize(img, (new_w, new_h))\n keypoints = keypoints * scale\n else:\n new_h, new_w = h, w\n \n start_x = size//2 - new_w // 2\n start_y = size//2 - new_h // 2\n keypoints += [start_x, start_y] \n resized_img = np.zeros((size, size)).astype(\"uint8\")\n resized_img[start_y:start_y+new_h, start_x:start_x+new_w] = img\n return {\"name\" : image_name, \"image\": resized_img, \"keypoints\" : keypoints}\n\n\ndef plot_keypoints(img, keypoints):\n if len(img.shape) == 2:\n img = np.dstack([img]*3).astype(\"uint8\")\n for (x, y) in keypoints:\n cv2.circle(img, (int(x), int(y)), 3, (0, 255, 0), -1)\n return img\n\n\nif __name__ == \"__main__\":\n EXTRACTION_PATH = \"../data/landmarks_dataset.npz\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-S\", \"--state\", required=True, type=str,\n help=\"extract/viusalize extract dataset from source or visualize saved data\"\n )\n parser.add_argument(\n \"-s\", \"--save\", required=False, action=\"store_true\",\n help=\"saveing extracted dataset from source to project root data dir\"\n )\n\n extracted_data = None\n args = parser.parse_args()\n if args.state == \"extract\":\n dataset = pd.read_csv(DATA_PATH+\"training_frames_keypoints.csv\")\n print(\"---> columns length: \", len(dataset.columns))\n \n t1 = time.perf_counter()\n with concurrent.futures.ThreadPoolExecutor() as executer:\n result = executer.map(process_img, dataset.iterrows())\n t2 = time.perf_counter()\n print(f\"time elapsed: {t2 - t1: .2f}\")\n\n extracted_data = list(result)\n if args.save:\n np.savez_compressed(EXTRACTION_PATH, extracted_data)\n print(\"saved extracted data in \", EXTRACTION_PATH)\n elif args.state == \"visualize\":\n if os.path.exists(EXTRACTION_PATH):\n extracted_data = np.load(EXTRACTION_PATH, allow_pickle=True)\n extracted_data = extracted_data[\"arr_0\"]\n np.random.shuffle(extracted_data)\n batch = extracted_data[:64]\n draw_batch(batch)\n\n " ]
[ [ "numpy.hstack", "pandas.read_csv", "numpy.dstack", "numpy.random.shuffle", "numpy.savez_compressed", "numpy.load", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
w4k2/lies
[ "b689c6d781a1a3cf96705fd2f7bae6dcd964f11b" ]
[ "process_2x5cv.py" ]
[ "#!/usr/bin/env python\nimport helper as h\nimport numpy as np\nimport pandas as pd\nimport csv\n\nnp.random.seed(1337)\n\nfrom sklearn import model_selection, metrics\nfrom tqdm import tqdm\n\nrepetitions = 10\ndatasets = h.datasets()\nclfs = h.classifiers()\n\nfor i, dataset in tqdm(enumerate(datasets)):\n # print(i, dataset)\n # Gather dataset\n ds = pd.read_csv(dataset[0], header=None).values\n X, y = ds[:, :-1], ds[:, -1].astype(\"int\")\n\n # CV\n for repetition in tqdm(range(repetitions)):\n cv = model_selection.RepeatedStratifiedKFold(\n n_splits=2, n_repeats=5, random_state=np.random.randint(9999)\n )\n fold = 0\n k_accuracies = []\n for train, test in cv.split(X, y):\n fold_X_train, fold_y_train = X[train], y[train]\n fold_X_test, fold_y_test = X[test], y[test]\n\n clf_accuracies = []\n for clf_n in clfs:\n clf = clfs[clf_n]\n clf.fit(fold_X_train, fold_y_train)\n probas = clf.predict_proba(fold_X_test)\n prediction = np.argmax(probas, axis=1)\n accuracy = metrics.accuracy_score(fold_y_test, prediction)\n clf_accuracies.append(accuracy)\n k_accuracies.append(clf_accuracies)\n\n fold += 1\n filename = \"results/%s_r%i_k2x5.csv\" % (dataset[1], repetition)\n # print(filename)\n k_accuracies = np.array(k_accuracies)\n with open(filename, \"w\") as csvfile:\n spamwriter = csv.writer(csvfile)\n spamwriter.writerow(clfs.keys())\n for row in k_accuracies:\n spamwriter.writerow(row)\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "sklearn.metrics.accuracy_score", "numpy.argmax", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
huihui7987/Kaggle-Playground
[ "18f09371994ebf220a5a77329f13b334da18788b" ]
[ "models/rfr.py" ]
[ "# -*- encoding:ISO-8859-1 -*-\nimport warnings\nwarnings.filterwarnings('ignore')\nimport time\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error, make_scorer\nfrom sklearn import pipeline, grid_search\nimport random\nrandom.seed(2016)\n\n\ndef mean_squared_error_(ground_truth, predictions):\n return mean_squared_error(ground_truth, predictions) ** 0.5\n\nRMSE = make_scorer(mean_squared_error_, greater_is_better=False)\n\n\ndef main(input='df_new_423.csv'):\n start_time = time.time()\n\n df_all = pd.read_csv(input, encoding='ISO-8859-1', index_col=0)\n num_train = 74067\n df_train = df_all.iloc[:num_train]\n df_test = df_all.iloc[num_train:]\n\n id_test = df_test['id']\n y_train = df_train['relevance'].values\n\n cols_to_drop = ['id', 'relevance']\n for col in cols_to_drop:\n try:\n df_train.drop(col, axis=1, inplace=True)\n df_test.drop(col, axis=1, inplace=True)\n except:\n continue\n\n X_train = df_train[:]\n X_test = df_test[:]\n\n print('--- Features Set: %s minutes ---' % round(((time.time() - start_time) / 60), 2))\n print('Number of Features: ', len(X_train.columns.tolist()))\n # print(X_train.columns.tolist())\n\n # exit(0)\n rfr = RandomForestRegressor(n_jobs=1, random_state=2016, verbose=1)\n\n param_grid = {'n_estimators': [500], 'max_features': [10, 12, 14]}\n model = grid_search.GridSearchCV(estimator=rfr, param_grid=param_grid, n_jobs=1, cv=10, verbose=20, scoring=RMSE)\n model.fit(X_train, y_train)\n\n print('--- Grid Search Completed: %s minutes ---' % round(((time.time() - start_time) / 60), 2))\n print('Best Params:')\n print(model.best_params_)\n print('Best CV Score:')\n print(-model.best_score_)\n\n y_pred = model.predict(X_test)\n for i in range(len(y_pred)):\n if y_pred[i] < 1.0:\n y_pred[i] = 1.0\n if y_pred[i] > 3.0:\n y_pred[i] = 3.0\n pd.DataFrame({'id': id_test, 'relevance': y_pred}).to_csv('submission_rfr.csv', index=False)\n\n print('--- Submission Generated: %s minutes ---' % round(((time.time() - start_time) / 60), 2))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "pandas.read_csv", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "sklearn.grid_search.GridSearchCV", "sklearn.metrics.make_scorer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
mr4msm/chainer
[ "6fa28004889b260ae13484f17dc1ac68b25d52bb" ]
[ "tests/chainer_tests/functions_tests/connection_tests/test_convolution_nd.py" ]
[ "import unittest\n\nimport functools\nimport numpy\nfrom operator import mul\n\nimport chainer\nfrom chainer.backends import cuda\nimport chainer.functions as F\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.utils import conv\n\n\[email protected](*(testing.product({\n 'dims': [(5,), (4, 3), (3, 4, 3)],\n 'dilate': [1, 2],\n 'groups': [1, 2],\n 'cover_all': [True, False],\n 'contiguous': ['C'],\n 'x_dtype': [numpy.float32],\n 'W_dtype': [numpy.float32],\n 'b_dtype': [numpy.float32],\n 'autotune': [True, False],\n 'nobias': [True, False],\n}) + testing.product({\n 'dims': [(4,)],\n 'dilate': [1],\n 'groups': [1],\n 'cover_all': [False],\n 'x_dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'W_dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'b_dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'autotune': [False],\n 'nobias': [True, False],\n})))\[email protected]_backend_tests(\n None,\n # CPU tests\n [{}]\n # GPU tests\n + testing.product({\n 'use_cuda': [True],\n 'use_cudnn': ['never', 'always'],\n })\n # ChainerX tests\n + [\n {'use_chainerx': True, 'chainerx_device': 'native:0'},\n {'use_chainerx': True, 'chainerx_device': 'cuda:0'},\n ]\n)\nclass TestConvolutionND(testing.FunctionTestCase):\n\n def setUp(self):\n self.N = 2\n self.in_channels = 4\n self.out_channels = 2\n self.ndim = len(self.dims)\n self.ksize = (2,) * self.ndim\n self.stride = (1,) * self.ndim\n self.pad = (1,) * self.ndim\n self.dilate = (self.dilate,) * self.ndim\n\n self.x_shape = (self.N, self.in_channels) + self.dims\n self.W_shape = (\n self.out_channels, self.in_channels // self.groups) + self.ksize\n self.W_scale = numpy.sqrt(\n 1. / functools.reduce(mul, self.ksize, self.in_channels))\n self.gy_shape = (self.N, self.out_channels) + tuple(\n conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)\n for (d, k, s, p, di)\n in zip(self.dims, self.ksize, self.stride, self.pad, self.dilate))\n\n self.check_backward_options.update({'atol': 5e-5, 'rtol': 5e-4})\n self.check_double_backward_options.update(\n {'atol': 5e-4, 'rtol': 5e-3})\n if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:\n self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})\n self.check_backward_options.update({\n 'atol': 2 ** -4, 'rtol': 2 ** -4})\n self.check_double_backward_options.update({\n 'atol': 2 ** -4, 'rtol': 2 ** -4})\n\n def before_test(self, test_name):\n # Some of the test configurations do not\n # support autotune so this hack is necessary\n # for the CI to work\n self.backend_config.autotune = self.autotune\n # cuDNN 5 and 5.1 results suffer from precision issues\n using_old_cudnn = (self.backend_config.xp is cuda.cupy\n and self.backend_config.use_cudnn == 'always'\n and cuda.cuda.cudnn.getVersion() < 6000)\n if using_old_cudnn:\n self.check_backward_options.update({\n 'atol': 1e-3, 'rtol': 1e-3})\n self.check_double_backward_options.update({\n 'atol': 1e-2, 'rtol': 1e-2})\n\n def generate_inputs(self):\n W = numpy.random.normal(\n 0, self.W_scale, self.W_shape).astype(self.W_dtype)\n x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.x_dtype)\n if self.nobias:\n return x, W\n else:\n b = numpy.random.uniform(\n -1, 1, self.out_channels).astype(self.x_dtype)\n return x, W, b\n\n def forward_expected(self, inputs):\n \"\"\"\n Current forward_expected implementation depends on\n F.convolution_nd itself and thus it's only capable\n of checking consistency between backends, not absolute\n correctness of computations\n \"\"\"\n if 
self.nobias:\n x, W = inputs\n b = None\n else:\n x, W, b = inputs\n y_expected = F.convolution_nd(\n x, W, b, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all, dilate=self.dilate,\n groups=self.groups)\n return y_expected.array,\n\n def forward(self, inputs, device):\n if self.nobias:\n x, W = inputs\n b = None\n else:\n x, W, b = inputs\n y = F.convolution_nd(\n x, W, b, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all, dilate=self.dilate,\n groups=self.groups)\n return y,\n\n def check_forward_consistency_regression(self, backend_config):\n inputs = self.generate_inputs()\n if self.nobias:\n x, W = inputs\n b = None\n else:\n x, W, b = inputs\n x = chainer.Variable(backend_config.get_array(x))\n W = chainer.Variable(backend_config.get_array(W))\n if b is not None:\n b = chainer.Variable(backend_config.get_array(b))\n\n with chainer.using_config('use_cudnn', 'never'):\n y_nd = F.convolution_nd(\n x, W, b, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all, dilate=self.dilate,\n groups=self.groups)\n y_2d = F.convolution_2d(\n x, W, b, stride=self.stride, pad=self.pad,\n cover_all=self.cover_all, dilate=self.dilate,\n groups=self.groups)\n\n testing.assert_allclose(\n y_nd.array, y_2d.array, **self.check_forward_options)\n\n def test_consistency_regression_forward(self, backend_config):\n # Regression test to convolution_2d.\n if len(self.dims) == 2:\n self.check_forward_consistency_regression(backend_config)\n\n\[email protected](*testing.product({\n 'dims': [(10,), (10, 8), (10, 8, 6)],\n 'use_cudnn': ['always', 'auto', 'never'],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\[email protected]\nclass TestConvolutionNDCudnnCall(unittest.TestCase):\n\n def setUp(self):\n in_channels = 3\n out_channels = 2\n ndim = len(self.dims)\n ksize = (3,) * ndim\n self.stride = (2,) * ndim\n self.pad = (1,) * ndim\n x_shape = (2, 3) + self.dims\n self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)\n W_scale = numpy.sqrt(1. 
/ functools.reduce(mul, ksize, in_channels))\n W_shape = (out_channels, in_channels) + ksize\n self.W = cuda.cupy.random.normal(\n 0, W_scale, W_shape).astype(self.dtype)\n gy_shape = (2, 2) + tuple(\n conv.get_conv_outsize(d, k, s, p) for (d, k, s, p) in zip(\n self.dims, ksize, self.stride, self.pad))\n self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)\n with chainer.using_config('use_cudnn', self.use_cudnn):\n self.expect = chainer.should_use_cudnn('>=auto') and ndim > 1\n\n def forward(self):\n x = chainer.Variable(cuda.to_gpu(self.x))\n W = chainer.Variable(cuda.to_gpu(self.W))\n return F.convolution_nd(\n x, W, None, stride=self.stride, pad=self.pad)\n\n def test_call_cudnn_forward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n with testing.patch('cupy.cudnn.convolution_forward') as func:\n self.forward()\n self.assertEqual(func.called, self.expect)\n\n def test_call_cudnn_backward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n y = self.forward()\n y.grad = self.gy\n name = 'cupy.cudnn.convolution_backward_data'\n with testing.patch(name) as func:\n y.backward()\n self.assertEqual(func.called, self.expect)\n\n\nclass TestConvolutionNDarraySupplied(unittest.TestCase):\n\n def setUp(self):\n N = 2\n in_channels = 3\n out_channels = 2\n dtype = numpy.float32\n\n x_shape = (N, in_channels, 3, 3, 3)\n self.x_data = numpy.random.uniform(-1, 1, x_shape).astype(dtype)\n W_shape = (out_channels, in_channels, 1, 1, 1)\n self.W_data = numpy.random.uniform(-1, 1, W_shape).astype(dtype)\n self.b_data = numpy.random.uniform(-1, 1, out_channels).astype(dtype)\n\n def check_array_supplied(self, x_ary, W_ary, b_ary):\n y_ary = F.convolution_nd(x_ary, W_ary, b_ary)\n\n x_var = chainer.Variable(x_ary)\n W_var = chainer.Variable(W_ary)\n b_var = chainer.Variable(b_ary)\n y_var = F.convolution_nd(x_var, W_var, b_var)\n\n testing.assert_allclose(y_ary.data, y_var.data)\n\n def test_array_supplied_cpu(self):\n self.check_array_supplied(self.x_data, self.W_data, self.b_data)\n\n @attr.gpu\n def test_array_supplied_gpu(self):\n self.check_array_supplied(cuda.to_gpu(self.x_data),\n cuda.to_gpu(self.W_data),\n cuda.to_gpu(self.b_data))\n\n\nclass TestConvolutionNDBackwardNoncontiguousGradOutputs(unittest.TestCase):\n # NumPy raises an error when the inputs of dot operation are not\n # contiguous. 
This test ensures this issue is correctly handled.\n # (https://github.com/chainer/chainer/issues/2744)\n\n # This test depdends on that backward() of F.sum generates\n # a non-contiguous array.\n\n def test_1(self):\n n_batches = 2\n in_channels = 3\n out_channels = 1 # important\n x_shape = (n_batches, in_channels, 4)\n w_shape = (out_channels, in_channels, 3)\n x = numpy.ones(x_shape, numpy.float32)\n w = numpy.ones(w_shape, numpy.float32)\n y = F.convolution_nd(chainer.Variable(x), w)\n z = F.sum(y)\n z.backward()\n\n def test_2(self):\n n_batches = 2\n in_channels = 3\n out_channels = 1 # important\n x_shape = (n_batches, in_channels, 4)\n w_shape = (out_channels, in_channels, 3)\n x = numpy.ones(x_shape, numpy.float32)\n w = numpy.ones(w_shape, numpy.float32)\n y = F.convolution_nd(x, chainer.Variable(w))\n z = F.sum(y)\n z.backward()\n\n\nclass TestConvolutionNDWrappers(unittest.TestCase):\n\n def _get_data(self, ndim):\n in_channels = 3\n out_channels = 2\n dtype = numpy.float32\n\n x_shape = (2, in_channels) + (3,) * ndim\n x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)\n W_shape = (out_channels, in_channels) + (1,) * ndim\n W = numpy.random.uniform(-1, 1, W_shape).astype(dtype)\n b = numpy.random.uniform(-1, 1, out_channels).astype(dtype)\n\n return x, W, b\n\n def test_conv1d(self):\n (x, W, b) = self._get_data(1)\n testing.assert_allclose(\n F.convolution_nd(x, W, b).data, F.convolution_1d(x, W, b).data)\n\n def test_conv1d_invalid(self):\n (x, W, b) = self._get_data(2)\n with self.assertRaises(ValueError):\n F.convolution_1d(x, W, b)\n\n def test_conv3d(self):\n (x, W, b) = self._get_data(3)\n testing.assert_allclose(\n F.convolution_nd(x, W, b).data, F.convolution_3d(x, W, b).data)\n\n def test_conv3d_invalid(self):\n (x, W, b) = self._get_data(2)\n with self.assertRaises(ValueError):\n F.convolution_3d(x, W, b)\n\n\ntesting.run_module(__name__, __file__)\n" ]
[ [ "numpy.random.uniform", "numpy.random.normal", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hamza-ml/rice-grain-identifier
[ "17971727d6d3934dec25c97104332cc687754dae" ]
[ "source.py" ]
[ "import math\nimport warnings\nimport numpy as np\nimport skimage as ski\nimport skimage.io as skio\nfrom matplotlib import cm\nimport xlsxwriter as excel\nimport matplotlib.pyplot as plt\nfrom skimage.transform import rotate\nfrom skimage.morphology import square\nfrom skimage.measure import label as sml\nfrom skimage.filters import threshold_minimum\nfrom skimage.measure import regionprops as smr\nfrom skimage.morphology import binary_opening, binary_closing\nwarnings.filterwarnings(\"ignore\")\n\n\ndef create_excel(nxt_row, img_name, length, width, min_color, max_color, avg_color, rice_type):\n workbook = excel.Workbook('result.xlsx')\n worksheet = workbook.add_worksheet()\n worksheet.write(0, 0, \"Image Name\")\n worksheet.write(0, 1, \"Length\")\n worksheet.write(0, 2, \"Width\")\n worksheet.write(0, 3, \"Min Color\")\n worksheet.write(0, 4, \"Max Color\")\n worksheet.write(0, 5, \"Average Color\")\n worksheet.write(0, 6, \"Rice Type\")\n\n worksheet.write(nxt_row, 0, img_name)\n worksheet.write(nxt_row, 1, length)\n worksheet.write(nxt_row, 2, width)\n worksheet.write(nxt_row, 3, min_color)\n worksheet.write(nxt_row, 4, max_color)\n worksheet.write(nxt_row, 5, avg_color)\n worksheet.write(nxt_row, 6, rice_type)\n\n\ndef b_blox(image):\n lbl_img, num_label = sml(image, return_num=True)\n regions = smr(lbl_img)\n\n for props in regions:\n min_row, min_col, max_row, max_col = props.bbox\n ret_img = image[min_row:max_row, min_col:max_col]\n\n # print(ret_img)\n # newImg = ret_img*255\n # print(newImg)\n # r, g, b = newImg[:, :, 0], newImg[:, :, 1], newImg[:, :, 2]\n # gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n # print(gray)\n # plt.imsave(\"hell.tiff\", ret_img, cmap = cm.gray)\n\n return ret_img\n\n\ndef RGI(img_name, nxt_row):\n coin_mm = 290 # coin diameter in mm\n coin_pixels = 0.0007297616950602682 # default\n\n # Original Image --> Input as greyscale\n img = skio.imread(img_name, as_grey=True)\n # splitting image name\n rice_type = img_name.split('_')\n rice_type = rice_type[0]\n\n # Thresholding the Image\n thresh = threshold_minimum(img)\n threshed_img = img > thresh\n\n # CLOSING & OPENING WITH AN ENLARGING ELEMENT (to remove noise)\n dilated = binary_closing(threshed_img, square(10))\n dilated = binary_opening(dilated, square(10))\n\n # Applying Measureing Label\n lbl_img, num_label = sml(dilated, return_num=True)\n regions = smr(lbl_img, intensity_image=threshed_img)\n\n fig, ax = plt.subplots()\n ax.imshow(dilated)\n\n loop_count = 0\n for props in regions:\n y0, x0 = props.centroid\n orientation = props.orientation\n\n eccentricity_var = props.eccentricity\n area_var = props.area\n\n # 0.5 ==> length of red lines from the centre of the objects\n x1 = x0 + math.cos(orientation) * 0.5 * \\\n props.major_axis_length # 90Angle\n y1 = y0 - math.sin(orientation) * 0.5 * \\\n props.major_axis_length # lineA_length\n x2 = x0 - math.sin(orientation) * 0.5 * \\\n props.minor_axis_length # lineB_length\n y2 = y0 - math.cos(orientation) * 0.5 * \\\n props.minor_axis_length # 90Angle\n\n length = props.major_axis_length\n width = props.minor_axis_length\n angle = props.orientation\n\n # RADIANS TO DEGREE CONVERSION\n angle = (180 / 3.14) * angle\n # FINDING 90 DEGREE ANGLE\n new_angle = 90 + (angle * (- 1))\n\n # linewidth ==> width of redlines from centre, respectively\n ax.plot((x0, x1), (y0, y1), '-r', linewidth=1.5)\n ax.plot((x0, x2), (y0, y2), '-r', linewidth=1.5)\n # markersize ==> greenDots at Centre\n ax.plot(x0, y0, '.g', markersize=3)\n\n min_row, min_col, max_row, 
max_col = props.bbox\n bx = (min_col, max_col, max_col, min_col, min_col)\n by = (min_row, min_row, max_row, max_row, min_row)\n\n # linewidth ==> Intensity Level of SquareBox\n ax.plot(bx, by, '-b', linewidth=1.5)\n loop_count = loop_count + 1\n\n # finding coin\n if eccentricity_var < 0.3:\n coin_pixels = coin_mm / area_var\n # print (\"c\", coin_pixels)\n\n # finding rice grains\n if eccentricity_var > 0.5:\n # cropping bouding box\n imui = dilated[min_row:max_row, min_col:max_col]\n # calculating width & height & converting cm to mm\n length = format(((length * coin_pixels) * 10), '.4f')\n width = format(((width * coin_pixels) * 10), '.4f')\n # limiting afer point decimal places to 2\n # new_op = format(length, '.2f')\n print(\"length:\", length)\n print(\"Width:\", width)\n\n min_color = float(props.min_intensity)\n max_color = float(props.max_intensity)\n avg_color = format(props.mean_intensity, '.4f')\n\n print(\"a\", min_color, max_color, avg_color)\n\n abc = \".tiff\"\n newu = str(nxt_row) + abc\n\n imui = np.pad(imui, (200, 200), 'constant')\n imui = rotate(imui, new_angle)\n crop_grain = b_blox(imui)\n # skio.imsave(newu, (crop_grain))\n plt.imsave(newu, crop_grain, cmap=cm.gray)\n\n nxt_row = nxt_row + 1\n\n # worksheet.write(nxt_row, 0, img_name)\n # worksheet.write(nxt_row, 1, length)\n # worksheet.write(nxt_row, 2, width)\n # worksheet.write(nxt_row, 3, min_color)\n # worksheet.write(nxt_row, 4, max_color)\n # worksheet.write(nxt_row, 5, avg_color)\n # worksheet.write(nxt_row, 6, rice_type)\n\n create_excel(nxt_row, newu, length, width,\n min_color, max_color, avg_color, rice_type)\n\n plt.show()\n skio.imsave('result.jpg', ski.img_as_uint(dilated))\n\n print(\"Loop:\", loop_count, \"Times\")\n return nxt_row\n\n\ndef run_RGI_with_small_dataset():\n saylla = \"sample_saylla.jpg\"\n kainaat = \"sample_kainaat.jpg\"\n basmati = \"sample_basmati.jpg\"\n\n next_row = 0\n count = 0\n while (count <= 3):\n count = count + 1\n\n if count == 1:\n next_row = RGI(saylla, next_row)\n next_row = next_row + 1\n elif count == 2:\n next_row = RGI(kainaat, next_row)\n next_row = next_row + 1\n elif count == 3:\n next_row = RGI(basmati, next_row)\n next_row = next_row + 1\n else:\n print(\"Error: Unable to process image.\")\n\n\ndef run_RGI():\n nameA = \"Adhowaar_0\"\n nameB = \"Basmati_0\"\n nameC = \"Kainaat_0\"\n nameD = \"Mota Chawaal86_0\"\n nameE = \"Saylla_0\"\n nameF = \"Super Kernel - Double Zebra_0\"\n nameG = \"Super Kernel - Purana_0\"\n nameH = \"Super Kernel_0\"\n nameI = \"Totta_0\"\n nameJ = \"Zaraffa_0\"\n\n image_ext = \".jpg\"\n\n next_row = 0\n r_count = 0\n\n while (r_count <= 10):\n r_count = r_count + 1\n\n image_count = 0\n r_count2 = 0\n\n while (r_count2 < 5):\n image_count = image_count + 1\n\n if r_count == 1:\n image_name = nameA + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 2:\n image_name = nameB + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 3:\n image_name = nameC + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 4:\n image_name = nameD + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 5:\n image_name = nameE + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = 
next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 6:\n image_name = nameF + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 7:\n image_name = nameG + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 8:\n image_name = nameH + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 9:\n image_name = nameI + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n elif r_count == 10:\n image_name = nameJ + str(image_count) + image_ext\n next_row = RGI(image_name, next_row)\n next_row = next_row + 1\n r_count2 = r_count2 + 1\n else:\n print(\"Error: Unable to process image.\")\n\n\nprint(\"Setting up excel workbooks.\")\nworkbook = excel.Workbook('result.xlsx')\nworksheet = workbook.add_worksheet()\nprint(\"Starting RGI\")\n# RGI for complete data set - 10 rice types with 5 sample images each.\n# run_RGI()\n\n# RGI for small data set - 3 rice types with 1 image each.\nrun_RGI_with_small_dataset()\nprint(\"RGI process completed. Closing excel workbook.\")\nworkbook.close()\nprint(\"Workbook closed.\")\n" ]
[ [ "matplotlib.pyplot.imsave", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luca-riboldi/HES-OFF
[ "95523c730ac46676cfe8326ef7498d1484cdf3b2" ]
[ "hes_off/core/deprecated/tests/visual_checks/plot_electrolizer_performance.py" ]
[ "## ------------------------------------------------------------------------------------------------------------------ ##\n## ------------------------------------------------------------------------------------------------------------------ ##\n## ___ ___ ________ ________ ______ ________ ________ ##\n## | | | | | ____| / | / __ \\ | ____|| ____| ##\n## | |__| | | |__ | (----` ______| | | | | |__ | |__ ##\n## | __ | | __| \\ \\ |______| | | | | __| | __| ##\n## | | | | | |____.----) | | `--' | | | | | ##\n## |__| |__| |_______|_______/ \\______/ |__| |__| ##\n## ##\n## ------------------------------------------------------------------------------------------------------------------ ##\n## ------------------------------------------------------------------------------------------------------------------ ##\n\n# Import packages\nimport os\nimport hes_off_object_oriented\nimport matplotlib.pyplot as plt\n\n# Create output folder if it does not exist\nif os.path.exists('figures/') is False:\n os.mkdir('figures')\n\n# Define font settings\nplt.rc('text', usetex=False)\nplt.rcParams['font.family'] = 'serif' # 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'\nplt.rcParams['font.serif'] = 'cmr10' # 'cmr10', 'palatino', 'times new roman'\nplt.rcParams['mathtext.fontset'] = 'cm' # 'cm' (latex style), 'stix' (times new roman style), 'stixsans'\n\n# Plot NEL Hydrogen performance curves\nEL = hes_off_object_oriented.process_models.EL(model='NEL_HYDROGEN', rated_power=1e6)\nfig1, _ = EL.plot_hydrogen_production_curve()\nfig2, _ = EL.plot_specific_hydrogen_production_curve()\nfig3, _ = EL.plot_hydrogen_conversion_efficiency_curve()\nfig1.savefig('figures/NEL_Hydrogen_performance.pdf', bbox_inches='tight')\nfig2.savefig('figures/NEL_Hydrogen_specific_performance.pdf', bbox_inches='tight')\nfig3.savefig('figures/NEL_Hydrogen_efficiency.pdf', bbox_inches='tight')\n\n# Show the figure\nplt.show()\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.rc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thilina27/ProojectRes
[ "8342ea354fb52a5f93b178b34e4a712515027590" ]
[ "Cnn/Test/test1/Untitled Folder/imagedatamake.py" ]
[ "import os\nimport random\nimport cv2\nimport numpy as np\n\n\ndef load_Imagedata(imagedir='/Images', numChannels=1, minNumSamplesPerClass=100, imsize=(28, 28), p_train=0.6,\n p_val=0.2):\n print(\"load images\")\n # run throug the folders, load the files\n onlyFolders = [f for f in os.listdir(imagedir) if os.path.isdir(os.path.join(imagedir, f))]\n\n\n # Remove folders with too few samples\n for i in reversed(range(len(onlyFolders))):\n # get files in folder\n onlyFiles = [f for f in os.listdir(imagedir + '/' + onlyFolders[i]) if\n os.path.isfile(os.path.join(imagedir + '/' + onlyFolders[i], f))]\n\n if len(onlyFiles) < minNumSamplesPerClass:\n del onlyFolders[i]\n\n # Run through all the folders\n for i in range(len(onlyFolders)):\n # get files in folder\n onlyFiles = [f for f in os.listdir(imagedir + '/' + onlyFolders[i]) if\n os.path.isfile(os.path.join(imagedir + '/' + onlyFolders[i], f))]\n\n print(str(len(onlyFiles)) + \" images of \" + onlyFolders[i])\n\n # load the images one-by-one\n for ii in range(len(onlyFiles)):\n Imgtmp = cv2.imread(imagedir + '/' + onlyFolders[i] + '/' + onlyFiles[ii])\n\n # Convert to gray-scale\n if (numChannels == 1):\n if Imgtmp.shape[2] == 3:\n Imgtmp = cv2.cvtColor(Imgtmp, cv2.COLOR_BGR2GRAY)\n Imgtmp = np.expand_dims(Imgtmp, axis=3)\n\n # resize image\n Imgtmpresize = cv2.resize(Imgtmp, (imsize[0], imsize[1]))\n\n # Based on the probabilities p_train, p_val: add to training, validation and test-set\n rs = random.random()\n if rs < p_train:\n if not 'X_train' in locals():\n X_train = Imgtmpresize[None, ...]\n else:\n X_train = np.concatenate((X_train, Imgtmpresize[None, ...]), axis=0)\n if not 'targets_train' in locals():\n targets_train = np.array([i])\n else:\n targets_train = np.concatenate((targets_train, np.array([i])))\n\n elif p_train <= rs < (p_val + p_train):\n if not 'X_val' in locals():\n X_val = Imgtmpresize[None, ...]\n else:\n X_val = np.concatenate((X_val, Imgtmpresize[None, ...]), axis=0)\n if not 'targets_val' in locals():\n targets_val = np.array([i])\n else:\n targets_val = np.concatenate((targets_val, np.array([i])))\n\n else:\n if not 'X_test' in locals():\n X_test = Imgtmpresize[None, ...]\n else:\n X_test = np.concatenate((X_test, Imgtmpresize[None, ...]), axis=0)\n if not 'targets_test' in locals():\n targets_test = np.array([i])\n else:\n targets_test = np.concatenate((targets_test, np.array([i])))\n\n if not 'targets_train' in locals():\n X_test = np.array(0, ndmin=3)\n targets_train = np.array(0)\n if not 'targets_val' in locals():\n X_val = np.array(0, ndmin=3)\n targets_val = np.array(0)\n if not 'targets_test' in locals():\n X_test = np.array(0, ndmin=3)\n targets_test = np.array(0)\n\n # typecast targets\n targets_test = targets_test.astype(np.int32)\n targets_val = targets_val.astype(np.int32)\n targets_train = targets_train.astype(np.int32)\n\n\n # apply some very simple normalization to the data\n X_test = X_test.astype(np.float32)\n X_val = X_val.astype(np.float32)\n X_train = X_train.astype(np.float32)\n\n X_test -= X_test.mean()\n X_test /= X_test.std()\n\n X_val -= X_val.mean()\n X_val /= X_val.std()\n\n X_train -= X_train.mean()\n X_train /= X_train.std()\n\n\n # permute dimensions.\n # The data has to have the layer dimension before the (x,y) dimension, as the conv. 
filters are applied to each layer and expect them to be in that order\n    # The shape convention: (examples, channels, rows, columns)\n    if numChannels == 1:  # add channel dimension if image is grayscale\n        # axis=-1 appends the channel axis; the original axis=4 is out of\n        # range for (N, H, W) arrays and raises an AxisError in modern NumPy\n        X_test = np.expand_dims(X_test, axis=-1)\n        X_val = np.expand_dims(X_val, axis=-1)\n        X_train = np.expand_dims(X_train, axis=-1)\n\n    # permute (N, H, W, C) -> (N, C, H, W) for the conv layers\n    X_test = np.transpose(X_test, (0, 3, 1, 2))\n    X_val = np.transpose(X_val, (0, 3, 1, 2))\n    X_train = np.transpose(X_train, (0, 3, 1, 2))\n\n    return X_train, targets_train, X_val, targets_val, X_test, targets_test\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.expand_dims", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LIDS-UNICAMP/FLIM
[ "49947ca8962f5c548ca543ed68e5bbaf7ed8414c" ]
[ "flim/models/lcn/_decoder.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom ...utils import compute_importance\n\n__all__ = ['Decoder']\n\nclass Decoder(nn.Module):\n def __init__(self, images, markers, n_classes, device='cpu'):\n\n super(Decoder, self).__init__()\n\n self.n_classes = n_classes\n self.device = device\n\n self.register_buffer('importance_by_channel', torch.from_numpy(compute_importance(images, markers, n_classes)).float())\n\n \n def forward(self, X):\n y = X.unsqueeze(1).repeat(1, self.n_classes, 1, 1, 1) * self.importance_by_channel.view(1, self.n_classes, -1, 1, 1)\n\n comb = torch.sum(y, axis=2)\n comb[comb < 0] = 0\n\n return comb" ]
[ [ "torch.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
trisct/Software-Tutorials
[ "50d7851b861700fe256dfed97f84dc321a5286dc" ]
[ "python_packages/pytorch/4_extending_with_python/ATenCpp_exts/1_PMTS_gpu/PMTS_timetest.py" ]
[ "import torch\nimport torch.autograd\nfrom torch.autograd import gradcheck\n\nfrom PMTS_module import PMTS as pmts\nimport time\n\nmodule_name = \"PMTS\"\nprint(f\"\\n############ starting time test of module {module_name} ############\")\n\n# only double is precise enough for gradcheck\na = torch.randn(500000, requires_grad=True)\nb = torch.randn(500000, requires_grad=True)\n\niters = 100\n\n# cpu\ntime_s = time.time()\nfor i in range(iters):\n c, d = pmts(a, b)\n z = (c + d).sum()\n z.backward()\ntime_e = time.time()\nprint(f\"CPU time test: {(time_e - time_s) / iters} s per iter.\")\n\n# gpu\na = a.cuda()\nb = b.cuda()\n\ntime_s = time.time()\nfor i in range(iters):\n c, d = pmts(a, b)\n z = (c + d).sum()\n z.backward()\ntime_e = time.time()\nprint(f\"GPU time test: {(time_e - time_s) / iters} s per iter.\")\n\nprint(f\"############## time test of module {module_name} done ##############\\n\")\n" ]
[ [ "torch.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
garrettkatz/ghu
[ "2bf25ac6f8e82d3e7231c3381f7a4946db6dc59f" ]
[ "max.py" ]
[ "import itertools as it\r\nimport numpy as np\r\nimport torch as tr\r\nimport matplotlib.pyplot as pt\r\nfrom ghu import *\r\nfrom codec import Codec\r\nfrom controller import Controller\r\nfrom lvd import lvd\r\nfrom reinforce import *\r\nimport json\r\n\r\ndef trials(i, avgrew, avggen, gradnorm):\r\n print(\"***************************** Trial \",str(i+1),\" *******************************\")\r\n \r\n # Configuration\r\n num_symbols = 10\r\n layer_sizes = {\"rinp\": 32, \"rout\":32, \"rtemp\":32}\r\n hidden_size = 32\r\n rho = .99\r\n plastic = []\r\n num_episodes = 500\r\n\r\n # Setup GHU\r\n symbols = [str(a) for a in range(num_symbols)]\r\n pathways, associations = default_initializer( # all to all\r\n layer_sizes.keys(), symbols)\r\n codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)\r\n controller = Controller(layer_sizes, pathways, hidden_size, plastic)\r\n ghu = GatedHebbianUnit(\r\n layer_sizes, pathways, controller, codec, plastic=plastic, batch_size = num_episodes)\r\n ghu.associate(associations)\r\n\r\n # Initialize layers\r\n separator = symbols[0]\r\n ghu.fill_layers(separator)\r\n\r\n # Generate dataset\r\n input_length = 5\r\n all_inputs = [np.array(inputs)\r\n for inputs in it.product(symbols[1:], repeat=input_length)]\r\n split = int(.80*len(all_inputs))\r\n\r\n # example generation\r\n def example(dataset):\r\n inputs = dataset[np.random.randint(len(dataset))]\r\n targets = np.array([max(inputs)])\r\n return inputs, targets\r\n def training_example(): return example(all_inputs[:split])\r\n def testing_example(): return example(all_inputs[split:])\r\n \r\n # all or nothing reward\r\n def reward(ghu, targets, outputs):\r\n r = np.zeros(len(outputs))\r\n r[-1] = (outputs[-1] == targets[0])\r\n return r\r\n \r\n # Optimization settings\r\n avg_rewards, avg_general, grad_norms = reinforce(\r\n ghu,\r\n num_epochs = 100,\r\n episode_duration = input_length,\r\n training_example = training_example,\r\n testing_example = testing_example,\r\n reward = reward,\r\n task = \"max\",\r\n learning_rate = .1,\r\n verbose = 1)\r\n\r\n gradnorm[i+1]=grad_norms.tolist()\r\n avgrew[i+1]=avg_rewards.tolist()\r\n avggen[i+1]=avg_general.tolist()\r\n \r\nallgradnorms = {}\r\nallavgrewards = {} \r\nallavggeneral = {}\r\n\r\nfor i in range(30):\r\n trials(i, allavgrewards, allavggeneral, allgradnorms)\r\n\r\nwith open(\"data/maxavgrwd.json\",\"w\") as fp:\r\n json.dump(allavgrewards, fp)\r\n\r\nwith open(\"data/maxavggen.json\",\"w\") as fp:\r\n json.dump(allavggeneral, fp)\r\n\r\nwith open(\"data/maxgradnorm.json\",\"w\") as fp:\r\n json.dump(allgradnorms, fp)\r\n \r\n \r\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neet-cv/indexNet_STDCSeg
[ "9165e1352711e851b6807d3848a7abee09247096" ]
[ "models/model_stages.py" ]
[ "#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom nets.stdcnet import STDCNet1446, STDCNet813\n\n# from modules.bn import InPlaceABNSync as BatchNorm2d\n\n\nBatchNorm2d = nn.BatchNorm2d\n\n\n# ConvX??\nclass ConvBNReLU(nn.Module):\n def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):\n super(ConvBNReLU, self).__init__()\n self.conv = nn.Conv2d(\n in_chan,\n out_chan,\n kernel_size=ks,\n stride=stride,\n padding=padding,\n bias=False,\n )\n # self.bn = BatchNorm2d(out_chan)\n self.bn = nn.BatchNorm2d(out_chan)\n self.relu = nn.ReLU()\n self.init_weight()\n \n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n\n\nclass BiSeNetOutput(nn.Module):\n def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):\n super(BiSeNetOutput, self).__init__()\n self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)\n self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)\n self.init_weight()\n \n def forward(self, x):\n x = self.conv(x)\n x = self.conv_out(x)\n return x\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n \n def get_params(self):\n wd_params, nowd_params = [], []\n for name, module in self.named_modules():\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n wd_params.append(module.weight)\n if module.bias is not None:\n nowd_params.append(module.bias)\n elif isinstance(module, nn.BatchNorm2d):\n nowd_params += list(module.parameters())\n return wd_params, nowd_params\n\n\nclass AttentionRefinementModule(nn.Module):\n def __init__(self, in_chan, out_chan, *args, **kwargs):\n super(AttentionRefinementModule, self).__init__()\n self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)\n self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)\n # self.bn_atten = BatchNorm2d(out_chan)\n self.bn_atten = nn.BatchNorm2d(out_chan)\n \n self.sigmoid_atten = nn.Sigmoid()\n self.init_weight()\n \n def forward(self, x):\n feat = self.conv(x)\n atten = F.avg_pool2d(feat, feat.size()[2:])\n atten = self.conv_atten(atten)\n atten = self.bn_atten(atten)\n atten = self.sigmoid_atten(atten)\n out = torch.mul(feat, atten)\n return out\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n\n\n# ContextPaht的路\nclass ContextPath(nn.Module):\n def __init__(\n self,\n backbone=\"CatNetSmall\",\n pretrain_model=\"\",\n use_conv_last=False,\n *args,\n **kwargs\n ):\n super(ContextPath, self).__init__()\n \n self.backbone_name = backbone\n if backbone == \"STDCNet1446\":\n self.backbone = STDCNet1446(\n pretrain_model=pretrain_model, use_conv_last=use_conv_last\n )\n self.arm16 = AttentionRefinementModule(512, 128)\n inplanes = 1024\n if use_conv_last:\n inplanes = 1024\n self.arm32 = AttentionRefinementModule(inplanes, 128)\n self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\n self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\n self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0)\n \n elif backbone 
== \"STDCNet813\":\n self.backbone = STDCNet813(\n pretrain_model=pretrain_model, use_conv_last=use_conv_last\n )\n self.arm16 = AttentionRefinementModule(512, 128)\n inplanes = 1024\n if use_conv_last:\n inplanes = 1024\n self.arm32 = AttentionRefinementModule(inplanes, 128)\n self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\n self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\n self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0)\n else:\n print(\"backbone is not in backbone lists\")\n exit(0)\n \n self.init_weight()\n \n def forward(self, x):\n H0, W0 = x.size()[2:]\n \n feat2, feat4, feat8, feat16, feat32, en, en2, en3 = self.backbone(x)\n H8, W8 = feat8.size()[2:]\n H16, W16 = feat16.size()[2:]\n H32, W32 = feat32.size()[2:]\n \n avg = F.avg_pool2d(feat32, feat32.size()[2:])\n \n avg = self.conv_avg(avg)\n avg_up = F.interpolate(avg, (H32, W32), mode=\"nearest\")\n feat32_arm = self.arm32(feat32)\n feat32_sum = feat32_arm + avg_up\n feat32_up = F.interpolate(feat32_sum, (H16, W16), mode=\"nearest\")\n feat32_up = self.conv_head32(feat32_up)\n \n feat16_arm = self.arm16(feat16)\n feat16_sum = feat16_arm + feat32_up\n feat16_up = F.interpolate(feat16_sum, (H8, W8), mode=\"nearest\")\n feat16_up = self.conv_head16(feat16_up)\n \n return (\n feat2,\n feat4,\n feat8,\n feat16,\n feat16_up,\n feat32_up,\n en,\n en2,\n en3,\n ) # x8, x16\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n \n def get_params(self):\n wd_params, nowd_params = [], []\n for name, module in self.named_modules():\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n wd_params.append(module.weight)\n if module.bias is not None:\n nowd_params.append(module.bias)\n elif isinstance(module, nn.BatchNorm2d):\n nowd_params += list(module.parameters())\n return wd_params, nowd_params\n\n\nclass FeatureFusionModule(nn.Module):\n def __init__(self, in_chan, out_chan, *args, **kwargs):\n super(FeatureFusionModule, self).__init__()\n self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)\n self.conv1 = nn.Conv2d(\n out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False\n )\n self.conv2 = nn.Conv2d(\n out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False\n )\n self.relu = nn.ReLU(inplace=True)\n self.sigmoid = nn.Sigmoid()\n self.init_weight()\n \n def forward(self, fsp, fcp):\n fcat = torch.cat([fsp, fcp], dim=1)\n feat = self.convblk(fcat)\n atten = F.avg_pool2d(feat, feat.size()[2:])\n atten = self.conv1(atten)\n atten = self.relu(atten)\n atten = self.conv2(atten)\n atten = self.sigmoid(atten)\n feat_atten = torch.mul(feat, atten)\n feat_out = feat_atten + feat\n return feat_out\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n \n def get_params(self):\n wd_params, nowd_params = [], []\n for name, module in self.named_modules():\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n wd_params.append(module.weight)\n if module.bias is not None:\n nowd_params.append(module.bias)\n elif isinstance(module, nn.BatchNorm2d):\n nowd_params += list(module.parameters())\n return wd_params, nowd_params\n\n\nclass BiSeNet(nn.Module):\n def __init__(\n self,\n backbone,\n n_classes,\n pretrain_model=\"\",\n use_boundary_2=False,\n use_boundary_4=False,\n 
use_boundary_8=False,\n use_boundary_16=False,\n use_conv_last=False,\n heat_map=False,\n *args,\n **kwargs\n ):\n super(BiSeNet, self).__init__()\n \n self.use_boundary_2 = use_boundary_2\n self.use_boundary_4 = use_boundary_4\n self.use_boundary_8 = use_boundary_8\n self.use_boundary_16 = use_boundary_16\n # self.heat_map = heat_map\n self.cp = ContextPath(backbone, pretrain_model, use_conv_last=use_conv_last)\n \n if backbone == \"STDCNet1446\":\n conv_out_inplanes = 128\n sp2_inplanes = 32\n sp4_inplanes = 64\n sp8_inplanes = 256\n sp16_inplanes = 512\n inplane = sp8_inplanes + conv_out_inplanes\n \n elif backbone == \"STDCNet813\":\n conv_out_inplanes = 128\n sp2_inplanes = 32\n sp4_inplanes = 64\n sp8_inplanes = 256\n sp16_inplanes = 512\n inplane = sp8_inplanes + conv_out_inplanes\n \n else:\n print(\"backbone is not in backbone lists\")\n exit(0)\n \n self.ffm = FeatureFusionModule(inplane, 256)\n self.conv_out = BiSeNetOutput(256, 256, n_classes)\n self.conv_out16 = BiSeNetOutput(conv_out_inplanes, 64, n_classes)\n self.conv_out32 = BiSeNetOutput(conv_out_inplanes, 64, n_classes)\n \n self.conv_out_sp16 = BiSeNetOutput(sp16_inplanes, 64, 1)\n \n self.conv_out_sp8 = BiSeNetOutput(sp8_inplanes, 64, 1)\n self.conv_out_sp4 = BiSeNetOutput(sp4_inplanes, 64, 1)\n self.conv_out_sp2 = BiSeNetOutput(sp2_inplanes, 64, 1)\n self.init_weight()\n \n def forward(self, x):\n H, W = x.size()[2:]\n \n (\n feat_res2,\n feat_res4,\n feat_res8,\n feat_res16,\n feat_cp8,\n feat_cp16,\n en,\n en2,\n en3,\n ) = self.cp(x)\n \n feat_out_sp2 = self.conv_out_sp2(feat_res2)\n \n feat_out_sp4 = self.conv_out_sp4(feat_res4)\n \n feat_out_sp8 = self.conv_out_sp8(feat_res8)\n \n feat_out_sp16 = self.conv_out_sp16(feat_res16)\n \n feat_fuse = self.ffm(feat_res8, feat_cp8)\n \n feat_out = self.conv_out(feat_fuse)\n feat_out16 = self.conv_out16(feat_cp8)\n feat_out32 = self.conv_out32(feat_cp16)\n \n feat_out = F.interpolate(\n feat_out, (H / 2, W / 2), mode=\"bilinear\", align_corners=True\n )\n # 辅助上采样\n feat_out = torch.mul(feat_out, en)\n feat_out = F.interpolate(feat_out, (H, W), mode=\"bilinear\", align_corners=True)\n \n feat_out16 = F.interpolate(\n feat_out16, (H / 4, W / 4), mode=\"bilinear\", align_corners=True\n )\n feat_out16 = torch.mul(feat_out16, en2)\n feat_out16 = F.interpolate(\n feat_out16, (H, W), mode=\"bilinear\", align_corners=True\n )\n \n feat_out32 = F.interpolate(\n feat_out32, (H / 8, W / 8), mode=\"bilinear\", align_corners=True\n )\n feat_out32 = torch.mul(feat_out32, en3)\n feat_out32 = F.interpolate(\n feat_out32, (H, W), mode=\"bilinear\", align_corners=True\n )\n \n if self.use_boundary_2 and self.use_boundary_4 and self.use_boundary_8:\n return (\n feat_out,\n feat_out16,\n feat_out32,\n feat_out_sp2,\n feat_out_sp4,\n feat_out_sp8,\n )\n \n if (not self.use_boundary_2) and self.use_boundary_4 and self.use_boundary_8:\n return feat_out, feat_out16, feat_out32, feat_out_sp4, feat_out_sp8\n \n if (\n (not self.use_boundary_2)\n and (not self.use_boundary_4)\n and self.use_boundary_8\n ):\n return feat_out, feat_out16, feat_out32, feat_out_sp8\n \n if (\n (not self.use_boundary_2)\n and (not self.use_boundary_4)\n and (not self.use_boundary_8)\n ):\n return feat_out, feat_out16, feat_out32\n \n def init_weight(self):\n for ly in self.children():\n if isinstance(ly, nn.Conv2d):\n nn.init.kaiming_normal_(ly.weight, a=1)\n if ly.bias is not None:\n nn.init.constant_(ly.bias, 0)\n \n def get_params(self):\n wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []\n 
for name, child in self.named_children():\n child_wd_params, child_nowd_params = child.get_params()\n if isinstance(child, (FeatureFusionModule, BiSeNetOutput)):\n lr_mul_wd_params += child_wd_params\n lr_mul_nowd_params += child_nowd_params\n else:\n wd_params += child_wd_params\n nowd_params += child_nowd_params\n return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params\n\n\nif __name__ == \"__main__\":\n net = BiSeNet(\"STDCNet1446\", 19)\n net.eval()\n in_ten = torch.randn(1, 3, 768, 1536)\n out, out16, out32 = net(in_ten)\n print(out.shape)\n" ]
[ [ "torch.cat", "torch.nn.init.constant_", "torch.randn", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.mul", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JRyanShue/ldif_clone
[ "d16656d4bb75e0b6beb33ee413cb87b0ff695d35" ]
[ "ldif/training/loss.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Loss functions for training Structured Implicit Functions.\"\"\"\n\nfrom ldif.util.tf_util import log\nimport numpy as np\nimport tensorflow as tf\n\n# LDIF is an internal package, should be imported last.\n# pylint: disable=g-bad-import-order\nfrom ldif.training import summarize\nfrom ldif.datasets import shapenet\nfrom ldif.util import geom_util\nfrom ldif.util import interpolate_util\nfrom ldif.util import math_util\nfrom ldif.util import sdf_util\n# pylint: enable=g-bad-import-order\n\n\ndef bounding_box_constraint_error(samples, box):\n if not isinstance(box.lower,\n float) and len(box.lower.get_shape().as_list()) < 3:\n box.lower = tf.reshape(box.lower, [1, 1, 3])\n box.upper = tf.reshape(box.upper, [1, 1, 3])\n lower_error = tf.maximum(box.lower - samples, 0.0)\n upper_error = tf.maximum(samples - box.upper, 0.0)\n constraint_error = lower_error * lower_error + upper_error * upper_error\n return constraint_error\n\n\ndef shape_element_center_magnitude_loss(model_config, _, structured_implicit):\n element_centers = structured_implicit.element_centers\n mse = model_config.hparams.cm * tf.reduce_mean(\n tf.square(element_centers + 1e-04)) + 1e-5\n summarize.summarize_loss(model_config, mse, 'center_magnitude_loss')\n return mse\n\n\ndef element_center_lowres_grid_direct_loss(model_config, training_example,\n structured_implicit):\n element_centers = structured_implicit.element_centers\n gt_sdf_at_centers, _ = interpolate_util.interpolate(\n training_example.grid, element_centers, training_example.world2grid)\n mse = model_config.hparams.gd * tf.reduce_mean(gt_sdf_at_centers) + 1e-5\n summarize.summarize_loss(model_config, mse, 'lowres_grid_direct_loss')\n return mse\n\n\ndef element_center_lowres_grid_squared_loss(model_config, training_example,\n structured_implicit):\n element_centers = structured_implicit.element_centers\n gt_sdf_at_centers, _ = interpolate_util.interpolate(\n training_example.grid, element_centers, training_example.world2grid)\n mse = model_config.hparams.gs * tf.reduce_mean(\n tf.sign(gt_sdf_at_centers) * tf.square(gt_sdf_at_centers + 1e-04)) + 1e-5\n summarize.summarize_loss(model_config, mse, 'lowres_grid_magnitude_loss')\n return mse\n\n\ndef element_center_lowres_grid_inside_loss(model_config, training_example,\n structured_implicit):\n \"\"\"Loss that element centers should lie within a voxel of the GT inside.\"\"\"\n # print('model_config.hparams.igt:', model_config.hparams.igt, 'model_config.hparams.ig:', model_config.hparams.ig) # 0.044, 1.0\n element_centers = structured_implicit.element_centers\n print('training_example.grid.shape:', training_example.grid.shape) # (1, 32, 32, 32)\n print('training_example.world2grid.shape:', training_example.world2grid.shape) # (1, 4, 4)\n gt_sdf_at_centers, _ = interpolate_util.interpolate(\n training_example.grid, element_centers, training_example.world2grid)\n gt_sdf_at_centers = tf.where_v2(gt_sdf_at_centers > 
model_config.hparams.igt,\n gt_sdf_at_centers, 0.0)\n mse = model_config.hparams.ig * tf.reduce_mean(\n tf.square(gt_sdf_at_centers + 1e-04)) + 1e-05\n summarize.summarize_loss(model_config, mse, 'lowres_grid_inside_loss')\n return mse\n\n\ndef smooth_element_center_lowres_grid_inside_loss(model_config,\n training_example,\n structured_implicit):\n \"\"\"Offset version of element_center_lowres_grid_inside_loss by voxel width.\"\"\"\n element_centers = structured_implicit.element_centers\n gt_sdf_at_centers, _ = interpolate_util.interpolate(\n training_example.grid, element_centers, training_example.world2grid)\n gt_sdf_at_centers = tf.maximum(gt_sdf_at_centers - model_config.hparams.igt,\n 0.0)\n mse = model_config.hparams.ig * tf.reduce_mean(\n tf.square(gt_sdf_at_centers + 1e-04)) + 1e-05\n summarize.summarize_loss(model_config, mse, 'lowres_grid_inside_loss')\n return mse\n\n\ndef center_variance_loss(model_config, training_example, structured_implicit): # pylint:disable=unused-argument\n \"\"\"A loss on the -variance of the center locations.\"\"\"\n # Training example present for interface uniformity\n element_centers = structured_implicit.element_centers\n center_shape = element_centers.get_shape().as_list()\n if len(center_shape) != 3:\n raise ValueError(f'Expected the element centers to have shape [b, #, 3],'\n f' but they have shape {center_shape}. center_variance.')\n variance = tf.math.reduce_variance(element_centers, axis=[1, 2])\n loss_max = model_config.hparams.vt\n loss = model_config.hparams.vw * tf.reduce_mean(\n tf.maximum(loss_max - variance, 0.0))\n summarize.summarize_loss(model_config, loss, 'center-variance-loss')\n return loss\n\n\ndef center_nn_loss(model_config, training_example, structured_implicit): # pylint:disable=unused-argument\n \"\"\"A loss that decreases with the nearest neighbor center->center distance.\"\"\"\n # Training example present for interface uniformity\n element_centers = structured_implicit.element_centers\n\n center_shape = element_centers.get_shape().as_list()\n if len(center_shape) != 3:\n raise ValueError(f'Expected the element centers to have shape [b, #, 3],'\n f' but they have shape {center_shape}. 
Loss=center_nn.')\n batch_size, center_count, _ = center_shape\n sq_distances = tf.reduce_sum(\n tf.square(\n tf.reshape(element_centers, [batch_size, center_count, 1, 3]) -\n tf.reshape(element_centers, [batch_size, 1, center_count, 3])),\n axis=-1)\n distances = tf.sqrt(sq_distances + 1e-8)\n loss_max = model_config.hparams.nnt\n # We have to give the diagonal self -> self distances a high weight so they\n # aren't valid choices:\n diag_distances = tf.diag(tf.ones([center_count]) * (loss_max + 1))\n diag_distances = tf.reshape(diag_distances, [1, center_count, center_count])\n distances = distances + diag_distances\n min_dists = tf.reduce_min(distances, axis=-1) # Shape [BS, #].\n assert len(min_dists.shape) == 2\n\n loss = tf.reduce_mean(tf.maximum(loss_max - min_dists,\n 0.0)) * model_config.hparams.nw\n summarize.summarize_loss(model_config, loss, 'center-nn-loss')\n return loss\n\n\ndef inside_box_loss(model_config, _, structured_implicit):\n \"\"\"Loss that centers should be inside a fixed size bounding box.\"\"\"\n element_centers = structured_implicit.element_centers\n if model_config.hparams.wm == 'f':\n bounding_box = shapenet.BoundingBox(lower=-0.7, upper=0.7)\n elif model_config.hparams.wm == 't':\n bounding_box = shapenet.BoundingBox(\n lower=np.array([-.75, -.075, -.75], dtype=np.float32),\n upper=np.array([.75, .075, .75], dtype=np.float32))\n\n if model_config.hparams.rsl != 1.0:\n bounding_box.lower *= model_config.hparams.rsl\n bounding_box.upper *= model_config.hparams.rsl\n\n bounding_box_error = tf.reduce_mean(\n bounding_box_constraint_error(element_centers, bounding_box))\n outside_bounding_box_loss = model_config.hparams.ibblw * bounding_box_error\n summarize.summarize_loss(model_config, outside_bounding_box_loss,\n 'fixed_bounding_box_loss')\n return outside_bounding_box_loss\n\n\ndef shape_element_center_loss(model_config, training_example,\n structured_implicit):\n \"\"\"Loss that centers should be inside the predicted surface.\"\"\"\n element_centers = structured_implicit.element_centers\n tf.logging.info('BID0: Shape Element Center Loss.')\n tf.logging.info('Element Center Shape: %s',\n str(element_centers.get_shape().as_list()))\n\n print(\"SHAPE_ELEMENT_CENTER_LOSS\")\n class_at_centers, _ = structured_implicit.class_at_samples(element_centers)\n\n bounding_box = training_example.sample_bounding_box\n bounding_box_error = tf.reduce_mean(\n bounding_box_constraint_error(element_centers, bounding_box),\n axis=-1,\n keep_dims=True)\n center_is_inside_gt_box = bounding_box_error <= 0.0\n inside_prediction_weights = model_config.hparams.cc * tf.cast(\n center_is_inside_gt_box, tf.float32)\n # bounding_box_error has shape [batch_size, center_count, 1]\n # inside_prediction_weights has shape [batch_size, center_count, 1]\n # class_at_centers has shape [batch_size, center_count, 1]. 
(Double check).\n\n # The class loss is 0 where the prediction is outside the bounding box,\n # because the bounding box loss is applied to those centers instead.\n class_loss = weighted_l2_loss(0.0, class_at_centers,\n inside_prediction_weights)\n summarize.summarize_loss(model_config, math_util.nonzero_mean(class_loss),\n 'ec_loss_class_comp_mean')\n\n outside_bounding_box_loss = model_config.hparams.ibblw * bounding_box_error\n summarize.summarize_loss(model_config,\n math_util.nonzero_mean(outside_bounding_box_loss),\n 'ec_loss_outside_bb_comp_mean')\n final_loss = tf.reduce_mean(class_loss + outside_bounding_box_loss)\n summarize.summarize_loss(model_config, final_loss, 'ec_loss')\n return final_loss\n\n\ndef old_shape_element_center_loss(model_config, training_example,\n structured_implicit):\n \"\"\"Deprecated version of shape_element_center_loss().\"\"\"\n element_centers = structured_implicit.element_centers\n tf.logging.info('Element Center Shape: %s',\n str(element_centers.get_shape().as_list()))\n\n bounding_box = training_example.sample_bounding_box\n bounding_box_error = tf.reduce_mean(\n bounding_box_constraint_error(element_centers, bounding_box))\n constraint_loss = model_config.hparams.ibblw * bounding_box_error\n summarize.summarize_loss(model_config, constraint_loss,\n 'inside-bounding-box-loss')\n class_at_centers, _ = structured_implicit.class_at_samples(element_centers)\n center_loss = tf.reduce_mean((class_at_centers - 0) * (class_at_centers - 0))\n center_loss *= model_config.hparams.cclw\n summarize.summarize_loss(model_config, center_loss, 'inside-pred-center-loss')\n return constraint_loss + center_loss\n\n\ndef weighted_l2_loss(gt_value, pred_value, weights):\n \"\"\"Computers an l2 loss given broadcastable weights and inputs.\"\"\"\n diff = pred_value - gt_value\n squared_diff = diff * diff\n if isinstance(gt_value, float):\n gt_shape = [1]\n else:\n gt_shape = gt_value.get_shape().as_list()\n if isinstance(weights, float):\n weight_shape = [1]\n else:\n weight_shape = weights.get_shape().as_list()\n tf.logging.info('gt vs pred vs weights shape: %s vs %s vs %s', str(gt_shape),\n str(pred_value.get_shape().as_list()), str(weight_shape))\n # TODO(kgenova) Consider using tf.losses.mean_squared_error. But need to\n # be careful about reduction method. Theirs is probably better since the\n # magnitude of the loss isn't affected by the weights. 
But it would need\n # hparam tuning, so it's left out in the first pass.\n return weights * squared_diff\n\n\ndef sample_loss(model_config, gt_sdf, structured_implicit, global_samples, name,\n apply_ucf):\n \"\"\"Computes an l2 loss for predicted-vs-gt insidedness at samples.\"\"\"\n # print('gt_sdf:', gt_sdf.shape) # (1, 1024, 1)\n gt_class = sdf_util.apply_class_transfer(\n gt_sdf, model_config, soft_transfer=False, offset=0.0) # binary from SDF value (< or > 0)\n\n print('SAMPLE_LOSS:', name)\n # print('model_config.hparams.ucf:', model_config.hparams.ucf) # 1.0\n # print('model_config.hparams.lrf:', model_config.hparams.lrf, 'apply_ucf:', apply_ucf) # g, True\n # print('global_samples: ', global_samples) # (1, 1024, 3)\n\n if model_config.hparams.lrf == 'l':\n global_decisions, local_outputs = structured_implicit.class_at_samples(\n global_samples)\n local_decisions, local_weights = local_outputs\n predicted_class = local_decisions\n gt_class = tf.tile(\n tf.expand_dims(gt_class, axis=1), [1, model_config.hparams.sc, 1, 1])\n weights = tf.stop_gradient(local_weights)\n elif model_config.hparams.lrf == 'g':\n global_decisions, local_outputs = structured_implicit.class_at_samples(\n global_samples)\n predicted_class = global_decisions\n weights = 1.0\n elif model_config.hparams.lrf == 'x':\n # TODO(kgenova) Don't forget we need more samples if lrf='x' than otherwise.\n local_samples, _, local_gt = geom_util.local_views_of_shape(\n global_samples,\n structured_implicit.world2local,\n local_point_count=model_config.hparams.spc,\n global_features=gt_class)\n # This is an important distinction: With lrf='x', the implicit values are\n # required to be a classification decision *on their own*.\n predicted_class = structured_implicit.implicit_values(local_samples)\n gt_class = local_gt\n weights = 1.0\n if apply_ucf:\n is_outside = gt_class > 0.5 # False == 0, True == 1; 1 == outside, 2 == inside\n # print(tf.cast(is_outside, dtype=tf.float32).shape) # (1, 1024, 1)\n is_outside_frac = tf.reduce_mean(tf.cast(is_outside, dtype=tf.float32))\n # print(is_outside_frac.shape) # ()\n if name is not None:\n tf.summary.scalar(\n '%s-%s-outside-frac' % (model_config.inputs['split'], name),\n is_outside_frac)\n weights *= tf.where_v2(is_outside, 1.0, model_config.hparams.ucf) # ufc == 1.0, so doesn't really do anything here\n loss = weighted_l2_loss(gt_class, predicted_class, weights)\n print(loss.shape) # (1, 1024, 1)\n print(tf.reduce_mean(loss).shape) # () -- Reduce to a single loss value\n return tf.reduce_mean(loss)\n\n\ndef uniform_sample_loss(model_config, training_example, structured_implicit):\n \"\"\"Loss that uniformly sampled points should have the right insidedness.\"\"\"\n print('uniform_sample_loss')\n sample_count = (\n model_config.hparams.xsc\n if model_config.hparams.lrf == 'x' else model_config.hparams.spc)\n samples, gt_sdf = training_example.sample_sdf_uniform(\n sample_count=sample_count)\n tf.logging.info('Building Uniform Sample Loss.')\n tf.logging.info('Uni. 
Samples shape: %s', str(samples.get_shape().as_list()))\n # print('model_config.hparams.l2w:', model_config.hparams.l2w) # 1.0\n loss = model_config.hparams.l2w * sample_loss(\n model_config,\n gt_sdf,\n structured_implicit,\n samples,\n 'uniform_sample',\n apply_ucf=True)\n summarize.summarize_loss(model_config, loss, 'uniform_sample')\n return loss\n\n\ndef overlap_loss(model_config, training_example, structured_implicit):\n \"\"\"A loss on the overlap between RBF weights.\"\"\"\n sample_count = (\n model_config.hparams.xsc\n if model_config.hparams.lrf == 'x' else model_config.hparams.spc)\n samples, _ = training_example.sample_sdf_near_surface(\n sample_count=sample_count)\n rbf_influences = structured_implicit.rbf_influence_at_samples(samples)\n assert len(rbf_influences.shape) == 3 # [b, sample_count, eec]\n loss = tf.reduce_mean(tf.linalg.norm(rbf_influences, ord=1,\n axis=2)) * model_config.hparams.ow\n summarize.summarize_loss(model_config, loss, 'rbf-l1-loss')\n return loss\n\n\ndef near_surface_sample_loss(model_config, training_example,\n structured_implicit):\n \"\"\"An inside/outside loss that samples based on distance to the surface.\"\"\"\n sample_count = (\n model_config.hparams.xsc\n if model_config.hparams.lrf == 'x' else model_config.hparams.spc)\n samples, gt_sdf = training_example.sample_sdf_near_surface(\n sample_count=sample_count)\n tf.logging.info('Building Near Surface Sample Loss.')\n tf.logging.info('NS Samples shape: %s', str(samples.get_shape().as_list()))\n # TODO(kgenova) Currently we set ucf=True here because that's how it was...\n # but go back and fix that because it seems bad.\n # print('model_config.hparams.a2w:', model_config.hparams.a2w) # 0.1\n loss = model_config.hparams.a2w * sample_loss(\n model_config,\n gt_sdf,\n structured_implicit,\n samples,\n 'ns_sample',\n apply_ucf=True) # False)\n summarize.summarize_loss(model_config, loss, 'near_surface_sample')\n return loss\n\n\ndef compute_loss(model_config, training_example, structured_implicit):\n \"\"\"Computes the overall loss based on the model configuration.\"\"\"\n # The keys are kept so short because they are used to autogenerate a\n # tensorboard entry.\n loss_fun_dict = {\n 'u': uniform_sample_loss,\n 'ns': near_surface_sample_loss,\n 'ec': shape_element_center_loss,\n 'oc': old_shape_element_center_loss,\n 'm': shape_element_center_magnitude_loss,\n 'gd': element_center_lowres_grid_direct_loss,\n 'gs': element_center_lowres_grid_squared_loss,\n 'gi': element_center_lowres_grid_inside_loss,\n 'gf': smooth_element_center_lowres_grid_inside_loss,\n 'bb': inside_box_loss,\n 'xv': center_variance_loss,\n 'xp': center_nn_loss,\n 'xw': overlap_loss,\n }\n\n losses = []\n for key, loss_fun in loss_fun_dict.items():\n if key in model_config.hparams.loss:\n print('key:', key)\n loss = loss_fun(model_config, training_example, structured_implicit)\n losses.append(loss)\n # There must be at least one loss:\n assert losses\n print('losses:', losses)\n return tf.add_n(losses)\n\n\ndef set_loss(model_config, training_example, structured_implicit):\n # TODO(kgenova) Consider returning the add_n result as a tensor, setting\n # the loss in the launcher, and having a separate scalar summarizer in\n # summarize.py\n # print('Computing loss...')\n model_config.loss = compute_loss(model_config, training_example,\n structured_implicit)\n # print('Loss computed')\n name = 'final-loss'\n tf.summary.scalar('%s-%s/final_loss_value' % (training_example.split, name),\n model_config.loss)\n" ]
[ [ "tensorflow.sign", "tensorflow.summary.scalar", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.cast", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.math.reduce_variance", "tensorflow.stop_gradient", "tensorflow.reduce_min", "tensorflow.logging.info", "tensorflow.square", "tensorflow.where_v2", "tensorflow.sqrt", "numpy.array", "tensorflow.add_n", "tensorflow.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
albert-magyar/ParlAI
[ "3133b4e8290f8a42679b93a65fc25e76ac7f6761" ]
[ "parlai/scripts/train_model.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTraining script for ParlAI.\n\nThe standard way to train a model. After training, also computes\nvalidation and test error.\n\nThe user must provide a model (with `--model`) and a task (with\n`--task`).\n\n## Examples\n\n```shell\nparlai train_model -m ir_baseline -t dialog_babi:Task:1 -mf /tmp/model\nparlai train_model -m seq2seq -t babi:Task10k:1 -mf '/tmp/model' -bs 32 -lr 0.5 -hs 128\nparlai train_model -m drqa -t babi:Task10k:1 -mf /tmp/model -bs 10\n```\n\"\"\" # noqa: E501\n\n# TODO List:\n# * More logging (e.g. to files), make things prettier.\n\nimport json\nimport numpy as np\nimport signal\n\nfrom parlai.core.metrics import Metric\nfrom parlai.core.agents import create_agent, create_agent_from_shared\nfrom parlai.core.exceptions import StopTrainException\nfrom parlai.core.logs import TensorboardLogger\nfrom parlai.core.metrics import (\n aggregate_named_reports,\n aggregate_unnamed_reports,\n dict_report,\n)\nfrom parlai.core.params import ParlaiParser, print_announcements\nfrom parlai.core.worlds import create_task\nfrom parlai.scripts.build_dict import build_dict, setup_args as setup_dict_args\nfrom parlai.utils.distributed import (\n sync_object,\n is_primary_worker,\n all_gather_list,\n is_distributed,\n num_workers,\n)\nfrom parlai.utils.misc import Timer, nice_report\nfrom parlai.core.script import ParlaiScript, register_script\nimport parlai.utils.logging as logging\nfrom parlai.utils.io import PathManager\n\n\ndef setup_args(parser=None) -> ParlaiParser:\n \"\"\"\n Build the ParlAI parser, adding command line args if necessary.\n\n :param ParlaiParser parser:\n Preexisting parser to append options to. Will be created if needed.\n\n :returns:\n the ParlaiParser with CLI options added.\n \"\"\"\n if parser is None:\n parser = ParlaiParser(True, True, 'Train a model')\n train = parser.add_argument_group('Training Loop Arguments')\n train.add_argument(\n '-et',\n '--evaltask',\n help='task to use for valid/test (defaults to the one used for training)',\n )\n train.add_argument(\n '--eval-batchsize',\n type=int,\n hidden=True,\n help='Eval time batch size (defaults to same as -bs)',\n )\n train.add_argument('--display-examples', type='bool', default=False, hidden=True)\n train.add_argument('-eps', '--num-epochs', type=float, default=-1)\n train.add_argument('-ttim', '--max-train-time', type=float, default=-1)\n train.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)\n train.add_argument(\n '-vtim',\n '--validation-every-n-secs',\n type=float,\n default=-1,\n help='Validate every n seconds. Saves model to model_file '\n '(if set) whenever best val metric is found',\n )\n train.add_argument(\n '-stim',\n '--save-every-n-secs',\n type=float,\n default=-1,\n help='Saves the model to model_file.checkpoint after '\n 'every n seconds (default -1, never).',\n )\n train.add_argument(\n '-sval',\n '--save-after-valid',\n type='bool',\n default=False,\n help='Saves the model to model_file.checkpoint after '\n 'every validation (default %(default)s).',\n )\n train.add_argument(\n '-veps',\n '--validation-every-n-epochs',\n type=float,\n default=-1,\n help='Validate every n epochs. 
Saves model to model_file '\n '(if set) whenever best val metric is found',\n )\n train.add_argument(\n '-vme',\n '--validation-max-exs',\n type=int,\n default=-1,\n hidden=True,\n help='max examples to use during validation (default -1 uses all)',\n )\n train.add_argument(\n '--short-final-eval',\n default=False,\n hidden=True,\n type='bool',\n help='If true, obeys --validation-max-exs in the final '\n 'validation and test evaluations.',\n )\n train.add_argument(\n '-vp',\n '--validation-patience',\n type=int,\n default=10,\n help=(\n 'number of iterations of validation where result'\n ' does not improve before we stop training'\n ),\n )\n train.add_argument(\n '-vmt',\n '--validation-metric',\n default='accuracy',\n help='key into report table for selecting best validation',\n )\n train.add_argument(\n '-vmm',\n '--validation-metric-mode',\n type=str,\n choices=['max', 'min'],\n help='how to optimize validation metric (max or min)',\n )\n train.add_argument(\n '-vcut',\n '--validation-cutoff',\n type=float,\n default=1.0,\n hidden=True,\n help='value at which training will stop if exceeded by metric',\n )\n train.add_argument(\n '-lfc',\n '--load-from-checkpoint',\n type='bool',\n default=True,\n hidden=True,\n help='load model from checkpoint if available',\n )\n train.add_argument(\n '-vshare',\n '--validation-share-agent',\n default=False,\n hidden=True,\n help='use a shared copy of the agent for validation. '\n 'this will eventually default to True, but '\n 'currently defaults to False.',\n )\n train.add_argument(\n '-mcs',\n '--metrics',\n type=str,\n default='default',\n help='list of metrics to show/compute, e.g. all, default,'\n 'or give a list split by , like '\n 'ppl,f1,accuracy,hits@1,rouge,bleu'\n 'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',\n )\n train.add_argument(\n '-micro',\n '--aggregate-micro',\n type='bool',\n default=False,\n help='Report micro-averaged metrics instead of macro averaged metrics.',\n recommended=False,\n )\n TensorboardLogger.add_cmdline_args(parser)\n\n parser = setup_dict_args(parser)\n return parser\n\n\ndef load_eval_worlds(agent, opt, datatype):\n \"\"\"\n Create a new eval world for the agent and the given opt.\n\n Overrides the datatype options for doing this. Handles some magic\n overrides of other special options for the training script.\n\n :param Agent agent:\n The model being trained.\n\n :param Opt opt:\n The global CLI opts.\n\n :param string datatype:\n The new datatype.\n \"\"\"\n\n if 'stream' in opt['datatype']:\n datatype += ':stream'\n opt = opt.copy()\n opt['datatype'] = datatype\n if opt.get('evaltask'):\n # if a different eval task is specified, use it.\n opt['task'] = opt['evaltask']\n if opt.get('eval_batchsize'):\n # override eval time batchsize\n opt['batchsize'] = opt['eval_batchsize']\n\n tasks = opt['task'].split(',')\n worlds = []\n # possibly load agent\n if opt.get('validation_share_agent', False):\n valid_agent = create_agent_from_shared(agent.share())\n else:\n valid_agent = agent\n # create worlds\n for task in tasks:\n task_opt = opt.copy() # copy opt since we edit the task\n task_opt['task'] = task\n valid_world = create_task(task_opt, valid_agent)\n worlds.append(valid_world)\n\n return worlds\n\n\nclass TrainLoop:\n \"\"\"\n TrainLoop contains the core training loop logic.\n \"\"\"\n\n def __init__(self, opt):\n # if python is called from a non-interactive shell, like a bash script,\n # it will by-default ignore SIGINTs, and KeyboardInterrupt exceptions are\n # not produced. 
This line brings them back\n signal.signal(signal.SIGINT, signal.default_int_handler)\n # Possibly load from checkpoint\n trainstats_suffix = '.trainstats' # we might load training statistics from here\n if (\n opt['load_from_checkpoint']\n and opt.get('model_file')\n and PathManager.exists(opt['model_file'] + '.checkpoint')\n ):\n opt['init_model'] = opt['model_file'] + '.checkpoint'\n trainstats_suffix = '.checkpoint.trainstats'\n # Possibly build a dictionary (not all models do this).\n if not (opt.get('dict_file') or opt.get('model_file')):\n raise RuntimeError(\n 'WARNING: For train_model, please specify either a '\n 'model_file or dict_file.'\n )\n if 'dict_file' in opt:\n if opt['dict_file'] is None and opt.get('model_file'):\n opt['dict_file'] = opt['model_file'] + '.dict'\n logging.info(\"building dictionary first...\")\n build_dict(opt, skip_if_built=True)\n\n # Create model and assign it to the specified task\n self.agent = create_agent(opt)\n self.agent.opt.log()\n self.world = create_task(opt, self.agent)\n # set up timers\n self.train_time = Timer()\n self.validate_time = Timer()\n self.log_time = Timer()\n self.save_time = Timer()\n\n self.parleys = 0\n self.max_num_epochs = (\n opt['num_epochs'] if opt['num_epochs'] > 0 else float('inf')\n )\n self.max_train_time = (\n opt['max_train_time'] if opt['max_train_time'] > 0 else float('inf')\n )\n self.log_every_n_secs = (\n opt['log_every_n_secs'] if opt['log_every_n_secs'] > 0 else float('inf')\n )\n self.val_every_n_secs = (\n opt['validation_every_n_secs']\n if opt['validation_every_n_secs'] > 0\n else float('inf')\n )\n self.save_every_n_secs = (\n opt['save_every_n_secs'] if opt['save_every_n_secs'] > 0 else float('inf')\n )\n self.val_every_n_epochs = (\n opt['validation_every_n_epochs']\n if opt['validation_every_n_epochs'] > 0\n else float('inf')\n )\n\n # smart defaults for --validation-metric-mode\n if opt['validation_metric'] in {'loss', 'ppl', 'mean_rank'}:\n opt['validation_metric_mode'] = 'min'\n elif opt['validation_metric'] in {'accuracy', 'hits@1', 'hits@5', 'f1', 'bleu'}:\n opt['validation_metric_mode'] = 'max'\n if opt.get('validation_metric_mode') is None:\n opt['validation_metric_mode'] = 'max'\n\n self.last_valid_epoch = 0\n self.valid_optim = 1 if opt['validation_metric_mode'] == 'max' else -1\n self.train_reports = []\n self.valid_reports = []\n self.best_valid = None\n\n self.impatience = 0\n self.saved = False\n self.valid_worlds = None\n self.opt = opt\n\n # we may have been preempted, make sure we note that amount\n self._preempted_epochs = 0.0\n if opt.get('model_file') and PathManager.exists(\n opt['model_file'] + trainstats_suffix\n ):\n # looks like we were preempted. 
make sure we load up our total\n # training stats, etc\n with PathManager.open(opt['model_file'] + trainstats_suffix) as ts:\n obj = json.load(ts)\n self.parleys = obj.get('parleys', 0)\n self._preempted_epochs = obj.get('total_epochs', 0)\n self.train_time.total = obj.get('train_time', 0)\n self.impatience = obj.get('impatience', 0)\n self.valid_reports = obj.get('valid_reports', [])\n self.train_reports = obj.get('train_reports', [])\n if 'best_valid' in obj:\n self.best_valid = obj['best_valid']\n else:\n # old method\n if opt.get('model_file') and PathManager.exists(\n opt['model_file'] + '.best_valid'\n ):\n with PathManager.open(\n opt['model_file'] + \".best_valid\", 'r'\n ) as f:\n x = f.readline()\n self.best_valid = float(x)\n f.close()\n\n if opt['tensorboard_log'] and is_primary_worker():\n self.tb_logger = TensorboardLogger(opt)\n\n def save_model(self, suffix=None):\n \"\"\"\n Save the model to disk, possibly with a suffix.\n \"\"\"\n if not is_primary_worker():\n # never do IO as a non-primary worker\n return\n\n if not self.opt.get('model_file'):\n # nothing to save to, just exit\n return\n\n fn = self.opt['model_file']\n if suffix:\n fn += suffix\n while True:\n # don't ever let a ctrl-c interrupt saving\n try:\n self.agent.save(fn)\n self._save_train_stats(suffix)\n break\n except KeyboardInterrupt:\n pass\n\n def _save_train_stats(self, suffix=None):\n fn = self.opt['model_file']\n if suffix:\n fn += suffix\n fn += '.trainstats'\n with PathManager.open(fn, 'w') as f:\n json.dump(\n {\n 'parleys': self.parleys,\n 'train_time': self.train_time.time(),\n 'total_epochs': self._total_epochs,\n 'train_reports': self.train_reports,\n 'valid_reports': self.valid_reports,\n 'best_valid': self.best_valid,\n },\n f,\n indent=4,\n )\n\n def validate(self):\n \"\"\"\n Perform a validation run, checking whether we should stop training.\n\n :return: boolean indicating whether training should stop\n :rtype: bool\n \"\"\"\n opt = self.opt\n\n if self.valid_worlds is None:\n # we need to load the world now\n self.valid_worlds = load_eval_worlds(self.agent, opt, 'valid')\n\n # run evaluation on valid set\n valid_report = self._run_eval(\n self.valid_worlds, opt, 'valid', opt['validation_max_exs']\n )\n v = dict_report(valid_report)\n v['train_time'] = self.train_time.time()\n v['parleys'] = self.parleys\n v['total_exs'] = self._total_exs\n v['total_epochs'] = self._total_epochs\n self.valid_reports.append(v)\n # logging\n if opt['tensorboard_log'] and is_primary_worker():\n valid_report['total_exs'] = self._total_exs\n self.tb_logger.log_metrics('valid', self.parleys, valid_report)\n # flush on a validation\n self.tb_logger.flush()\n # saving\n if (\n opt.get('model_file')\n and opt.get('save_after_valid')\n and is_primary_worker()\n ):\n logging.info(f\"saving model checkpoint: {opt['model_file']}.checkpoint\")\n self.save_model('.checkpoint')\n\n # send valid metrics to agent if the agent wants them\n if hasattr(self.agent, 'receive_metrics'):\n self.agent.receive_metrics(valid_report)\n\n # check which metric to look at\n new_valid = valid_report[opt['validation_metric']]\n\n if isinstance(new_valid, Metric):\n new_valid = new_valid.value()\n\n # check if this is the best validation so far\n if (\n self.best_valid is None\n or self.valid_optim * new_valid > self.valid_optim * self.best_valid\n ):\n logging.success(\n 'new best {}: {:.4g}{}'.format(\n opt['validation_metric'],\n new_valid,\n ' (previous best was {:.4g})'.format(self.best_valid)\n if self.best_valid is not None\n else 
'',\n )\n )\n self.best_valid = new_valid\n self.impatience = 0\n if opt.get('model_file') and is_primary_worker():\n logging.info(f\"saving best valid model: {opt['model_file']}\")\n self.save_model()\n self.saved = True\n if (\n opt['validation_metric_mode'] == 'max'\n and self.best_valid >= opt['validation_cutoff']\n ) or (\n opt['validation_metric_mode'] == 'min'\n and self.best_valid <= opt['validation_cutoff']\n ):\n logging.info('task solved! stopping.')\n return True\n else:\n self.impatience += 1\n logging.report(\n 'did not beat best {}: {} impatience: {}'.format(\n opt['validation_metric'], round(self.best_valid, 4), self.impatience\n )\n )\n self.validate_time.reset()\n\n # check if we are out of patience\n if (\n opt['validation_patience'] > 0\n and self.impatience >= opt['validation_patience']\n ):\n logging.info('ran out of patience! stopping training.')\n return True\n return False\n\n def _run_single_eval(self, opt, valid_world, max_exs):\n\n # run evaluation on a single world\n valid_world.reset()\n\n cnt = 0\n max_cnt = max_exs if max_exs > 0 else float('inf')\n while not valid_world.epoch_done() and cnt < max_cnt:\n valid_world.parley()\n if cnt == 0 and opt['display_examples']:\n print(valid_world.display() + '\\n~~')\n print(valid_world.report())\n cnt = valid_world.report().get('exs') or 0\n\n valid_report = valid_world.report()\n valid_world.reset() # make sure world doesn't remember valid data\n\n return valid_report\n\n def _run_eval(self, valid_worlds, opt, datatype, max_exs=-1, write_log=False):\n \"\"\"\n Eval on validation/test data.\n\n :param valid_world:\n list of the pre-created validation worlds.\n :param opt:\n the options that specific the task, eval_task, etc\n :param datatype:\n the datatype to use, such as \"valid\" or \"test\"\n :param bool write_log:\n specifies to write metrics to file if the model_file is set\n :param int max_exs:\n limits the number of examples if max_exs > 0\n \"\"\"\n\n logging.info(f'running eval: {datatype}')\n timer = Timer()\n reports = []\n\n max_exs_per_worker = max_exs / (len(valid_worlds) * num_workers())\n for v_world in valid_worlds:\n task_report = self._run_single_eval(opt, v_world, max_exs_per_worker)\n reports.append(task_report)\n\n tasks = [world.getID() for world in valid_worlds]\n named_reports = dict(zip(tasks, reports))\n report = aggregate_named_reports(\n named_reports, micro_average=self.opt.get('aggregate_micro', False)\n )\n # get the results from all workers\n report = self._sync_metrics(report)\n\n metrics = f'{datatype}:\\n{nice_report(report)}\\n'\n logging.info(f'eval completed in {timer.time():.2f}s')\n logging.report(metrics)\n\n # write to file\n if write_log and opt.get('model_file') and is_primary_worker():\n # Write out metrics\n with PathManager.open(opt['model_file'] + '.' 
+ datatype, 'a') as f:\n f.write(f'{metrics}\\n')\n\n return report\n\n def _sync_metrics(self, metrics):\n \"\"\"\n Sync training metrics across workers.\n\n A handful of special cases are handled as exceptions, and the remaining metrics\n are simply averaged across workers.\n \"\"\"\n if not is_distributed():\n # nothing special needed\n return metrics\n all_versions = all_gather_list(metrics)\n return aggregate_unnamed_reports(all_versions)\n\n def _compute_eta(self, epochs_completed, time_elapsed):\n \"\"\"\n Compute the estimated seconds remaining in training.\n\n :param float epochs_completed: number of epochs already completed.\n :param float time_elapsed: total time spent already, in seconds.\n :return: ETA in seconds, or None if not computable\n \"\"\"\n # start off with no estimate\n eta = None\n\n # Determine time_left and num_epochs\n max_epochs = self.opt.get('num_epochs', 0)\n if max_epochs > 0 and epochs_completed > 0:\n epoch_progress = epochs_completed / max_epochs\n eta = (1 - epoch_progress) * time_elapsed / epoch_progress\n\n max_training_time = self.opt.get('max_training_time', -1)\n if max_training_time > 0:\n time_left = max_training_time - time_elapsed\n if eta is None or time_left < eta:\n eta = time_left\n\n return eta\n\n def log(self):\n \"\"\"\n Output a training log entry.\n \"\"\"\n opt = self.opt\n if opt['display_examples']:\n print(self.world.display() + '\\n~~')\n logs = []\n # get report\n train_report = self.world.report()\n train_report = self._sync_metrics(train_report)\n self.world.reset_metrics()\n\n train_report_trainstats = dict_report(train_report)\n train_report_trainstats['total_epochs'] = self._total_epochs\n train_report_trainstats['total_exs'] = self._total_exs\n train_report_trainstats['parleys'] = self.parleys\n train_report_trainstats['train_time'] = self.train_time.time()\n self.train_reports.append(train_report_trainstats)\n\n # time elapsed\n logs.append(f'time:{self.train_time.time():.0f}s')\n logs.append(f'total_exs:{self._total_exs}')\n\n if self._total_epochs >= 0:\n # only if it's unbounded\n logs.append(f'epochs:{self._total_epochs:.2f}')\n\n time_left = self._compute_eta(self._total_epochs, self.train_time.time())\n if time_left is not None:\n logs.append(f'time_left:{max(0,time_left):.0f}s')\n\n log = '{}\\n{}\\n'.format(' '.join(logs), nice_report(train_report))\n logging.info(log)\n self.log_time.reset()\n\n if opt['tensorboard_log'] and is_primary_worker():\n self.tb_logger.log_metrics('train', self.parleys, train_report)\n\n def train(self):\n \"\"\"\n Perform a training run.\n\n :return: tuple of reports (validation_report, test_report)\n \"\"\"\n logging.info('training...')\n opt = self.opt\n world = self.world\n with world:\n while True:\n # do one example / batch of examples\n try:\n world.parley()\n except StopTrainException as e:\n logging.info(f\"Stopping from {e}\")\n break\n\n self.parleys += 1\n\n # get the total training examples done, compute epochs\n self._total_epochs = self._preempted_epochs + sum(\n all_gather_list(world.get_total_epochs())\n )\n exs_per_epoch = world.num_examples()\n self._total_exs = int(np.round(self._total_epochs * exs_per_epoch))\n # and use the primary worker's timings for everything\n train_time, log_time, validate_time = sync_object(\n (\n self.train_time.time(),\n self.log_time.time(),\n self.validate_time.time(),\n )\n )\n\n # check counters and timers\n if self._total_epochs >= self.max_num_epochs:\n self.log()\n logging.info(\n f'num_epochs completed:{self.max_num_epochs} time 
elapsed:{train_time}s'\n )\n break\n if train_time > self.max_train_time:\n logging.info(f'max_train_time elapsed:{train_time}s')\n break\n if log_time > self.log_every_n_secs:\n self.log()\n if (\n validate_time > self.val_every_n_secs\n or self._total_epochs - self.last_valid_epoch\n >= self.val_every_n_epochs\n ):\n try:\n # log before we validate\n self.log()\n world.reset_metrics()\n stop_training = self.validate()\n except StopTrainException:\n break\n # reset the log time because we logged right before validating\n self.log_time.reset()\n self.last_valid_epoch = self._total_epochs\n if stop_training:\n break\n # make sure metrics are clean before we log\n world.reset_metrics()\n if (\n self.save_time.time() > self.save_every_n_secs\n and opt.get('model_file')\n and is_primary_worker()\n ):\n logging.info(\n f\"saving model checkpoint: {opt['model_file']}.checkpoint\"\n )\n if opt['tensorboard_log'] and is_primary_worker():\n self.tb_logger.flush()\n self.save_model('.checkpoint')\n self.save_time.reset()\n\n if not self.saved and is_primary_worker():\n # save agent\n self.save_model()\n\n # there's a rare edge case where the we never saved the model, and we try\n # # to reload it. This sync_object ensures all workers wait for the primary\n # worker to finish flushing before loading from disk.\n sync_object(None)\n if opt.get('model_file'):\n # clean up all our memory, just to make sure we don't OOM on GPU when\n # reloading the world\n del world\n del self.world\n del self.agent\n del self.valid_worlds\n # reload best validation model\n self.agent = create_agent(opt)\n\n # perform final validation/testing\n valid_worlds = load_eval_worlds(self.agent, opt, 'valid')\n max_exs = opt['validation_max_exs'] if opt.get('short_final_eval') else -1\n v_report = self._run_eval(valid_worlds, opt, 'valid', max_exs, write_log=True)\n test_worlds = load_eval_worlds(self.agent, opt, 'test')\n t_report = self._run_eval(test_worlds, opt, 'test', max_exs, write_log=True)\n if valid_worlds:\n for valid_world in valid_worlds:\n valid_world.shutdown()\n if test_worlds:\n for test_world in test_worlds:\n test_world.shutdown()\n\n print_announcements(opt)\n\n return v_report, t_report\n\n\n@register_script('train_model', aliases=['tm', 'train'])\nclass TrainModel(ParlaiScript):\n @classmethod\n def setup_args(cls):\n return setup_args()\n\n def run(self):\n self.train_loop = TrainLoop(self.opt)\n return self.train_loop.train()\n\n\nif __name__ == '__main__':\n TrainModel.main()\n" ]
[ [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ly1996/facenet
[ "4ac4576268dd3386cc39a690952b907d2bf6c86d" ]
[ "src/models/inception_resnet_v1.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains the definition of the Inception Resnet V1 architecture.\nAs described in http://arxiv.org/abs/1602.07261.\n Inception-v4, Inception-ResNet and the Impact of Residual Connections\n on Learning\n Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n# Inception-Resnet-A\ndef block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n \"\"\"Builds the 35x35 resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')\n mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n\n# Inception-Resnet-B\ndef block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n \"\"\"Builds the 17x17 resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],\n scope='Conv2d_0b_1x7')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],\n scope='Conv2d_0c_7x1')\n mixed = tf.concat([tower_conv, tower_conv1_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n\n\n# Inception-Resnet-C\ndef block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n \"\"\"Builds the 8x8 resnet block.\"\"\"\n with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],\n scope='Conv2d_0b_1x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],\n 
scope='Conv2d_0c_3x1')\n mixed = tf.concat([tower_conv, tower_conv1_2], 3)\n up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n activation_fn=None, scope='Conv2d_1x1')\n net += scale * up\n if activation_fn:\n net = activation_fn(net)\n return net\n \ndef reduction_a(net, k, l, m, n):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,\n scope='Conv2d_0b_3x3')\n tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,\n stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)\n return net\n\ndef reduction_b(net):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat([tower_conv_1, tower_conv1_1,\n tower_conv2_2, tower_pool], 3)\n return net\n \ndef inference(images, keep_probability, phase_train=True, \n bottleneck_layer_size=128, weight_decay=0.0, reuse=None):\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': 0.995,\n # epsilon to prevent 0s in variance.\n 'epsilon': 0.001,\n # force in-place updates of mean and variance estimates\n 'updates_collections': None,\n # Moving averages ends up in the trainable variables collection\n 'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],\n }\n \n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n weights_initializer=slim.initializers.xavier_initializer(), \n weights_regularizer=slim.l2_regularizer(weight_decay),\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n return inception_resnet_v1(images, is_training=phase_train,\n dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)\n\n\ndef inception_resnet_v1(inputs, is_training=True,\n dropout_keep_prob=0.8,\n bottleneck_layer_size=128,\n reuse=None, \n scope='InceptionResnetV1'):\n \"\"\"Creates the Inception Resnet V1 model.\n Args:\n inputs: a 4-D tensor of size [batch_size, height, width, 3].\n num_classes: number of predicted classes.\n is_training: whether is training or not.\n dropout_keep_prob: float, the fraction to keep before final layer.\n reuse: whether or not the network and its variables should be reused. 
To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n Returns:\n logits: the logits outputs of the model.\n end_points: the set of end_points from the inception model.\n \"\"\"\n end_points = {}\n \n with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n \n # 149 x 149 x 32\n net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n end_points['Conv2d_1a_3x3'] = net\n # 147 x 147 x 32\n net = slim.conv2d(net, 32, 3, padding='VALID',\n scope='Conv2d_2a_3x3')\n end_points['Conv2d_2a_3x3'] = net\n # 147 x 147 x 64\n net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')\n end_points['Conv2d_2b_3x3'] = net\n # 73 x 73 x 64\n net = slim.max_pool2d(net, 3, stride=2, padding='VALID',\n scope='MaxPool_3a_3x3')\n end_points['MaxPool_3a_3x3'] = net\n # 73 x 73 x 80\n net = slim.conv2d(net, 80, 1, padding='VALID',\n scope='Conv2d_3b_1x1')\n end_points['Conv2d_3b_1x1'] = net\n # 71 x 71 x 192\n net = slim.conv2d(net, 192, 3, padding='VALID',\n scope='Conv2d_4a_3x3')\n end_points['Conv2d_4a_3x3'] = net\n # 35 x 35 x 256\n net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',\n scope='Conv2d_4b_3x3')\n end_points['Conv2d_4b_3x3'] = net\n \n # 5 x Inception-resnet-A\n net = slim.repeat(net, 5, block35, scale=0.17)\n end_points['Mixed_5a'] = net\n \n # Reduction-A\n with tf.variable_scope('Mixed_6a'):\n net = reduction_a(net, 192, 192, 256, 384)\n end_points['Mixed_6a'] = net\n \n # 10 x Inception-Resnet-B\n net = slim.repeat(net, 10, block17, scale=0.10)\n end_points['Mixed_6b'] = net\n \n # Reduction-B\n with tf.variable_scope('Mixed_7a'):\n net = reduction_b(net)\n end_points['Mixed_7a'] = net\n \n # 5 x Inception-Resnet-C\n net = slim.repeat(net, 5, block8, scale=0.20)\n end_points['Mixed_8a'] = net\n \n net = block8(net, activation_fn=None)\n end_points['Mixed_8b'] = net\n \n with tf.variable_scope('Logits'):\n end_points['PrePool'] = net\n #pylint: disable=no-member\n net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',\n scope='AvgPool_1a_8x8')\n net = slim.flatten(net)\n \n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='Dropout')\n \n end_points['PreLogitsFlatten'] = net\n \n net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, \n scope='Bottleneck', reuse=False)\n # print(\"name\",net)\n \n return net, end_points\n" ]
[ [ "tensorflow.concat", "tensorflow.contrib.slim.dropout", "tensorflow.contrib.slim.arg_scope", "tensorflow.contrib.slim.max_pool2d", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.contrib.slim.repeat", "tensorflow.contrib.slim.initializers.xavier_initializer", "tensorflow.contrib.slim.fully_connected", "tensorflow.contrib.slim.flatten", "tensorflow.contrib.slim.conv2d", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] } ]
drJfunk/gbmgeometry
[ "ca11005c349546ed962bb1bbc4f66d8022ea79a1", "ca11005c349546ed962bb1bbc4f66d8022ea79a1" ]
[ "gbmgeometry/geometry/sphere.py", "gbmgeometry/utils/array_to_cmap.py" ]
[ "import ipyvolume as ipv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numba as nb\nimport h5py\n\nfrom gbmgeometry.utils.package_utils import get_path_of_data_file\n\n\n# -*- coding: utf-8 -*-\nclass Sphere(object):\n def __init__(\n self,\n ax,\n x=0,\n y=0,\n z=0,\n radius=1.0,\n detail_level=100,\n color=\"#FFFFFF\",\n image=None,\n transform_matrix=None,\n **kwargs,\n ):\n\n self._x = x\n self._y = y\n self._z = z\n\n self._radius = radius\n\n self._detail_level = int(detail_level)\n self._color = color\n\n self._image = image\n self._transform_matrix = transform_matrix\n\n self._time_dep_transform = False\n\n if (transform_matrix is not None) and (len(transform_matrix.shape) == 3):\n self._time_dep_transform = True\n self._n_steps = transform_matrix.shape[0]\n\n @property\n def radius(self):\n return self._radius\n\n def plot(self, **kwargs):\n \"\"\"\n \n plot the sphere\n \n\n :returns: \n :rtype: \n\n \"\"\"\n\n # u = np.linspace(0, 2 * np.pi, self._detail_level)\n # v = np.linspace(0, np.pi, self._detail_level)\n\n # x_unit = np.outer(np.cos(u), np.sin(v))\n # y_unit = np.outer(np.sin(u), np.sin(v))\n # z_unit = np.outer(np.ones(np.size(u)), np.cos(v))\n\n u = np.linspace(0, 1, self._detail_level)\n v = np.linspace(0, 1, self._detail_level)\n u, v = np.meshgrid(u, v)\n phi = u * 2 * np.pi\n theta = v * np.pi\n\n\n x_unit = np.cos(phi) * np.sin(theta)\n y_unit = np.sin(theta) * np.sin(phi)\n z_unit = np.cos(theta)\n\n \n \n if self._transform_matrix is not None:\n\n xyz = np.array([x_unit, y_unit, z_unit]).T\n\n if self._time_dep_transform:\n\n # new_xyz = compute_multiple_rotation(xyz, self._transform_matrix, self._detail_level, self._n_steps)\n\n x_unit_rot = np.zeros(\n (self._n_steps, self._detail_level, self._detail_level)\n )\n y_unit_rot = np.zeros(\n (self._n_steps, self._detail_level, self._detail_level)\n )\n z_unit_rot = np.zeros(\n (self._n_steps, self._detail_level, self._detail_level)\n )\n\n for i in range(self._n_steps):\n\n this_xyz = compute_single_rotation(\n xyz, self._transform_matrix[i], self._detail_level\n )\n\n x_unit_rot[i] = this_xyz[0]\n y_unit_rot[i] = this_xyz[1]\n z_unit_rot[i] = this_xyz[2]\n\n else:\n\n xyz = compute_single_rotation(\n xyz, self._transform_matrix, self._detail_level\n )\n\n x_unit_rot = xyz[0]\n y_unit_rot = xyz[1]\n z_unit_rot = xyz[2]\n\n if np.atleast_1d(self._x).shape[0] == 1:\n\n if self._time_dep_transform:\n # if False:\n X = np.array(\n [self._x + self._radius * x_unit for _ in range(self._n_steps)]\n )\n\n Y = np.array(\n [self._y + self._radius * y_unit for _ in range(self._n_steps)]\n )\n\n Z = np.array(\n [self._z + self._radius * z_unit for _ in range(self._n_steps)]\n )\n\n else:\n\n X = self._x + self._radius * x_unit\n\n Y = self._y + self._radius * y_unit\n\n Z = self._z + self._radius * z_unit\n\n else:\n\n X = np.array([x + self._radius * x_unit for x in self._x])\n\n Y = np.array([y + self._radius * y_unit for y in self._y])\n\n Z = np.array([z + self._radius * z_unit for z in self._z])\n\n if self._image is None:\n\n return ipv.plot_surface(X, Y, Z, color=self._color, **kwargs)\n\n else:\n\n if self._transform_matrix is None:\n\n lon = np.arctan2(y_unit, x_unit)\n lat = np.arcsin(z_unit)\n\n else:\n\n lon = np.arctan2(y_unit_rot, x_unit_rot)\n lat = np.arcsin(z_unit_rot)\n\n u = 0.5 + lon / (2 * np.pi)\n v = 0.5 + lat / (np.pi)\n\n return ipv.plot_mesh(\n X, Y, Z, u=u, v=v, texture=self._image, wireframe=False\n )\n\n\[email protected](fastmath=True)\ndef compute_single_rotation(xyz, 
transform_matrix, detail_level):\n\n new_xyz = np.zeros((detail_level, detail_level, 3))\n\n for i in range(detail_level):\n for j in range(detail_level):\n new_xyz[i, j] = np.dot(transform_matrix, xyz[i, j, :])\n\n return new_xyz.T\n\n\[email protected](fastmath=True)\ndef compute_multiple_rotation(xyz, transform_matrix, detail_level, time_steps):\n\n new_xyz = np.zeros((time_steps, detail_level, detail_level, 3))\n # out_xyz = np.zeros((time_steps, 3, detail_level, detail_level))\n\n for i in range(time_steps):\n for j in range(detail_level):\n for k in range(detail_level):\n new_xyz[i, j, k] = np.dot(transform_matrix[i], xyz[j, k, :])\n\n # out_xyz[i] = new_xyz[i].T\n\n return new_xyz\n", "import matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\ndef array_to_cmap(values, cmap, use_log=False):\n \"\"\"\n Generates a color map and color list that is normalized\n to the values in an array. Allows for adding a 3rd dimension\n onto a plot\n\n :param values: a list a values to map into a cmap\n :param cmap: the mpl colormap to use\n :param use_log: if the mapping should be done in log space\n \"\"\"\n\n if use_log:\n\n norm = mpl.colors.LogNorm(vmin=min(values), vmax=max(values))\n\n else:\n\n norm = mpl.colors.Normalize(vmin=min(values), vmax=max(values))\n\n cmap = plt.cm.ScalarMappable(norm=norm, cmap=cmap)\n\n rgb_colors = [cmap.to_rgba(v) for v in values]\n\n return cmap, rgb_colors\n" ]
[ [ "numpy.dot", "numpy.linspace", "numpy.arcsin", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.atleast_1d", "numpy.array", "numpy.meshgrid", "numpy.zeros" ], [ "matplotlib.pyplot.cm.ScalarMappable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chipturner/advent-of-code-2021
[ "52d8f84eb9243fa076c9f7c2a2e3836e138ab127" ]
[ "src/day4.py" ]
[ "import helpers\nimport numpy\n\nimport itertools\nimport collections\nfrom typing import List\n\ndef card_wins(card):\n d = numpy.array(card)\n dt = d.transpose()\n for row in d:\n if numpy.sum(row) == 0:\n return True\n for row in dt:\n if numpy.sum(row) == 0:\n return True\n return False\n\n\ndef main() -> None:\n lines = helpers.read_input()\n numbers = [int(v) for v in lines[0].split(\",\")]\n\n cards = []\n cur_card: List[List[int]] = []\n for line in lines[2:]:\n if not line:\n cards.append(cur_card)\n cur_card = []\n else:\n cur_card.append(list(int(x) for x in line.split()))\n cards.append(cur_card)\n\n print(cards)\n winners = {}\n for num in numbers:\n for idx in range(len(cards)):\n if idx in winners:\n continue\n card = cards[idx]\n for i in range(len(card)):\n for j in range(len(card[0])):\n if card[i][j] == num:\n card[i][j] = 0\n if card_wins(card):\n print(num)\n s = sum(sum(r) for r in card)\n print(s)\n print(card)\n print(num * s)\n winners[idx] = 1\n\n\nmain()\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Brain-Modulation-Lab/Paper_SpatialPatternsMovementDecoding
[ "31588def6cef875dd42ea580f76f79c4856a1353" ]
[ "Experiments/Utilities/icn_m1/online_analysis.py" ]
[ "import filter\nimport numpy as np \nimport projection\nimport time\nfrom matplotlib import pyplot as plt \n\ndef append_time_dim(X, y_=None, time_stamps=5):\n \"\"\"\n :param X: in shape(time, grid_points/channels, f_bands)\n apply added time dimension for the data array and label given time_stamps (with downsample_rate=100) in 100ms / need to check with 1375Hz\n \"\"\"\n if len(X.shape) == 3:\n num_time = X.shape[0]\n num_channels = X.shape[1]\n num_f_bands = X.shape[2]\n\n time_arr = np.zeros([num_time-time_stamps, num_channels, int(time_stamps*num_f_bands)])\n for ch in range(num_channels):\n for time_idx, time_ in enumerate(np.arange(time_stamps, num_time)):\n for time_point in range(time_stamps):\n time_arr[time_idx, ch, time_point*num_f_bands:(time_point+1)*num_f_bands] = X[time_-time_point,ch,:]\n\n if y_ is None:\n return time_arr\n return time_arr, y_[time_stamps:]\n elif len(X.shape) == 2:\n if time_stamps == X.shape[0]:\n time_arr = np.zeros([1+X.shape[0]-time_stamps, int(time_stamps*X.shape[1])])\n #print(time_arr.shape)\n for time_idx, time_ in enumerate(np.arange(time_stamps-1, X.shape[0])):\n #print(time_idx)\n #print('time_:'+str(time_))\n for time_point in range(time_stamps):\n #print('time_point: '+str(time_point))\n time_arr[time_idx, time_point*X.shape[1]:(time_point+1)*X.shape[1]] = X[time_-time_point,:]\n else:\n time_arr = np.zeros([X.shape[0]-time_stamps, int(time_stamps*X.shape[1])])\n for time_idx, time_ in enumerate(np.arange(time_stamps, X.shape[0])):\n for time_point in range(time_stamps):\n time_arr[time_idx, time_point*X.shape[1]:(time_point+1)*X.shape[1]] = X[time_-time_point,:]\n if y_ is None:\n return time_arr\n return time_arr, y_[time_stamps:]\n \n\ndef predict(pf_stream, grid_classifiers, arr_act_grid_points):\n res_predict = np.zeros([num_grid_points])\n X = np.clip(pf_stream, -2, 2)\n for grid_point in range(arr_act_grid_points.shape[0]):\n if arr_act_grid_points[grid_point] == 0:\n continue\n \n X_test = X[:,grid_point,:]\n X_test_reshaped = np.reshape(X_test, (X_test.shape[0]*X_test.shape[1]))\n model = grid_classifiers[grid_point]\n res_predict[grid_point] = model.predict(np.expand_dims(X_test_reshaped, axis=0))\n return res_predict\n\ndef simulate_data_stream(bv_raw, ind_DAT, ind_time, fs):\n #time.sleep(1/fs)\n return bv_raw[ind_DAT, ind_time]\n\n\ndef real_time_simulation(fs, fs_new, seglengths, f_ranges, grid_, downsample_idx, bv_raw, line_noise, \\\n sess_right, dat_cortex, dat_subcortex, dat_label, ind_cortex, ind_subcortex, ind_label, ind_DAT, \\\n filter_fun, proj_matrix_run, arr_act_grid_points, grid_classifiers, normalization_samples, ch_names):\n \n num_grid_points = grid_[0].shape[1] + grid_[1].shape[1]+ grid_[2].shape[1]+ grid_[3].shape[1]\n\n label_con = dat_label[1,:][::100][10:]\n label_ips = dat_label[0,:][::100][10:]\n \n dat_buffer = np.zeros([ind_DAT.shape[0], 1000])\n rf_data_rt = np.zeros([ind_DAT.shape[0], len(f_ranges)])\n pf_data_rt = np.zeros([num_grid_points, len(f_ranges)])\n\n\n fig = plt.figure(figsize=(10, 5))\n #ax = fig.add_subplot(111)\n #plt.ion()\n #plt.title('label predictions grid point 42')\n #plt.show()\n \n\n dat_buffer = np.zeros([ind_DAT.shape[0], 1000])\n dat_res = np.zeros([num_grid_points, 100])\n dat_label_con = np.zeros([100])\n dat_label_ips = np.zeros([100])\n rf_data_rt = np.zeros([ind_DAT.shape[0], len(f_ranges)])\n pf_data_rt = np.zeros([num_grid_points, len(f_ranges)])\n\n pf_stream = []\n rf_stream = []\n pf_stream_median = []\n rf_stream_median = []\n estimates = []\n buffer_counter = 0\n 
idx_stream = 0\n for ind_time in range(bv_raw.shape[1]):\n if idx_stream == 0:\n if buffer_counter < seglengths[0]-1:\n dat_buffer[:, buffer_counter] = simulate_data_stream(bv_raw, ind_DAT, ind_time, fs)\n buffer_counter += 1 \n continue\n else:\n if buffer_counter < seglengths[7]-1:\n dat_buffer[:,:-1] = dat_buffer[:,1:]\n buffer_offset = seglengths[0] - seglengths[-1] # to have steps of 100 ms\n dat_buffer[:, buffer_counter+buffer_offset] = simulate_data_stream(bv_raw, ind_DAT, ind_time, fs)\n buffer_counter += 1 \n continue\n #plt.imshow(dat_buffer, aspect='auto')\n #plt.title('buffer')\n #plt.show()\n #print(ind_time)\n #print(str(np.round(ind_time*(1/fs),2))+' s')\n buffer_counter = 0 \n \n rf_data_rt = np.zeros([ind_DAT.shape[0], len(f_ranges)])\n pf_data_rt = np.zeros([num_grid_points, len(f_ranges)])\n for ch in ind_DAT: # think about using multiprocessing pool to do this simulatenously\n dat_ = dat_buffer[ch,:]\n dat_filt = filter.apply_filter(dat_, sample_rate=fs, filter_fun=filter_fun, line_noise=line_noise, seglengths=seglengths)\n rf_data_rt[ch,:] = dat_filt\n \n #plt.imshow(rf_data_rt.T, aspect='auto')\n #plt.title('raw t-f transformed')\n #plt.show()\n \n #PROJECTION of RF_data to pf_data\n dat_cortex = rf_data_rt[ind_cortex,:]\n dat_subcortex = rf_data_rt[ind_subcortex,:]\n proj_cortex, proj_subcortex = projection.get_projected_cortex_subcortex_data(proj_matrix_run, sess_right, dat_cortex, dat_subcortex)\n pf_data_rt = projection.write_proj_data(ch_names, sess_right, dat_label, ind_label, proj_cortex, proj_subcortex)\n \n #plt.imshow(pf_data_rt.T, aspect='auto')\n #plt.title('projected t-f transformed')\n #plt.show()\n \n if idx_stream<normalization_samples:\n if idx_stream == 0:\n n_idx = 0\n else:\n n_idx = np.arange(0,idx_stream,1)\n else:\n n_idx = np.arange(idx_stream-normalization_samples, idx_stream, 1)\n \n if idx_stream == 0:\n\n pf_stream.append(pf_data_rt)\n pf_stream_median.append(pf_data_rt)\n \n rf_stream.append(rf_data_rt)\n rf_stream_median.append(rf_data_rt)\n else:\n \n rf_stream.append(rf_data_rt)\n median_ = np.median(np.array(rf_stream)[n_idx,:,:], axis=0)\n rf_stream_val = (rf_data_rt - median_) / median_\n rf_stream_median.append(rf_stream_val)\n \n pf_stream.append(pf_data_rt)\n median_ = np.median(np.array(pf_stream)[n_idx,:,:][:,arr_act_grid_points>0,:], axis=0)\n pf_data_rt_median = (pf_data_rt[arr_act_grid_points>0,:] - median_) / median_\n pf_data_set = np.zeros([num_grid_points, len(f_ranges)])\n pf_data_set[arr_act_grid_points>0,:] = pf_data_rt_median\n pf_stream_median.append(pf_data_set)\n \n #plt.imshow(pf_data_rt.T, aspect='auto')\n #plt.title('projected and resampled t-f transformed')\n #plt.show()\n \n # now use the predictors to estimate the labelement \n if idx_stream >= 5:\n time_stamp_tf_dat = np.array(pf_stream_median)[-5:,:,:]\n #plt.imshow(time_stamp_tf_dat[:,:,0].T, aspect='auto')\n #plt.clim(-10,10)\n #plt.show()\n predictions = predict(time_stamp_tf_dat, grid_classifiers, arr_act_grid_points)\n estimates.append(predictions)\n \n dat_res[:,:-1] = dat_res[:,1:]\n dat_res[:,-1] = predictions\n \n dat_label_con[:-1] = dat_label_con[1:]\n dat_label_con[-1] = label_con[idx_stream-5]\n \n dat_label_ips[:-1] = dat_label_ips[1:]\n dat_label_ips[-1] = label_ips[idx_stream-5]\n \n \n plt.clf()\n plt.plot(dat_res[46,:], label='prediction', c='green')\n plt.plot(dat_label_con, label='contralateral force', c='red')\n plt.plot(dat_label_ips, label='ipsilateral force', c='blue')\n plt.legend(loc='upper left')\n plt.ylabel('Force')\n 
plt.xlabel('Time 0.1s')\n plt.ylim(-1, 6)\n if idx_stream == 5:\n plt.show()\n else:\n #plt.draw()\n fig.canvas.draw()\n #fig.canvas.flush_events()\n \n #usage for matplotlib\n '''\n ax.clear()\n ax.plot(dat_res[46,:], label='prediction', c='green')\n ax.plot(dat_label_con, label='contralateral force', c='red')\n ax.plot(dat_label_ips, label='ipsilateral force', c='blue')\n ax.legend(loc='upper left')\n ax.set_ylabel('Force')\n ax.set_xlabel('Time 0.1s')\n ax.set_ylim(-1, 6)\n fig.canvas.draw()\n '''\n \n idx_stream += 1\n \n return estimates" ]
[ [ "matplotlib.pyplot.legend", "numpy.expand_dims", "numpy.clip", "numpy.reshape", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ProjectBlackFalcon/BlackFalconCore
[ "23af1829224738c06092e3e513a0bf2753b4c35f" ]
[ "strategies/support_functions.py" ]
[ "import hashlib\nimport json\nimport random\nimport uuid\n\nimport numpy as np\nfrom heapq import *\nimport time\nimport itertools\nimport sys\n\nimport pymongo\nimport psycopg2\n\nfrom credentials import credentials\n\n\ndef cell2coord(cell):\n return cell % 14 + int((cell // 14) / 2 + 0.5), (13 - cell % 14 + int((cell // 14) / 2))\n\n\ndef dist(coord_1, coord_2):\n return ((coord_2[0] - coord_1[0]) ** 2 + (coord_2[1] - coord_1[1]) ** 2) ** 0.5\n\n\ndef distance_cell(cell_1, cell_2):\n return dist(cell2coord(cell_1), cell2coord(cell_2))\n\n\ndef fetch_map_cells(map_info, coord, worldmap):\n maps = []\n for map in map_info:\n if map['coord'] == coord and map['worldMap'] == worldmap:\n maps.append(map)\n if len(maps) == 1 and maps[0] is not None:\n return maps[0]['cells']\n elif len(maps) > 1:\n for map in maps:\n if map['hasPriorityOnWorldMap']:\n return map['cells']\n\n\ndef flatten_map(map):\n flattened = []\n for line in map:\n flattened += line\n return flattened\n\n\ndef get_neighbour_cells(cell):\n neighbours = []\n for i in range(560):\n if distance_cell(cell, i) == 1:\n neighbours.append(i)\n return neighbours[:]\n\n\ndef get_walkable_neighbour_cells(map_info, cell, map_coords, worldmap):\n walkable_neighbours = []\n for neighbour in get_neighbour_cells(cell):\n if flatten_map(fetch_map_cells(map_info, '{};{}'.format(map_coords[0], map_coords[1]), worldmap))[neighbour] == 0:\n walkable_neighbours.append(neighbour)\n return walkable_neighbours[:]\n\n\ndef get_closest_walkable_neighbour_cell(map_info, target_cell, player_cell, map_coords, worldmap):\n walkable_neighbours = get_walkable_neighbour_cells(map_info, target_cell, map_coords, worldmap)\n if walkable_neighbours:\n closest = walkable_neighbours[0], 10000\n else:\n return False\n for walkable_neighbour in walkable_neighbours:\n if distance_cell(walkable_neighbour, player_cell) < closest[1]:\n closest = walkable_neighbour, distance_cell(walkable_neighbour, player_cell)\n\n if closest[1] < 10000:\n return closest[0]\n return False\n\n\ndef get_closest_reachable_cell(map_info, target_cell, player_cell, map_coords, worldmap):\n cells = fetch_map_cells(map_info, '{};{}'.format(map_coords[0], map_coords[1]), worldmap)\n cells_vector = [item for sublist in cells for item in sublist]\n\n reachable_cells = []\n for cell, cell_type in enumerate(cells_vector):\n if cell_type not in [-1, 1, 2] and can_walk_to_node(cells_2_map(cells), player_cell, {'cell': cell}):\n reachable_cells.append(cell)\n\n closest, distance = [], distance_cell(reachable_cells[0], target_cell)\n for cell in reachable_cells:\n if 0 < distance_cell(cell, target_cell):\n if distance_cell(cell, target_cell) < distance:\n closest, distance = [cell], distance_cell(cell, target_cell)\n elif distance_cell(cell, target_cell) == distance:\n closest.append(cell)\n\n closest_to_player, distance = closest[0], sys.maxsize\n for cell in closest:\n if distance_cell(cell, player_cell) < distance:\n closest_to_player, distance = cell, distance_cell(cell, player_cell)\n\n return closest_to_player\n\n\ndef mongo_client():\n return pymongo.MongoClient(\n host=credentials['mongo']['host'],\n port=credentials['mongo']['port'],\n username=credentials['mongo']['username'],\n password=credentials['mongo']['password'],\n )\n\n\ndef create_profile(id, bot_name, username, password, server):\n if mongo_client().blackfalcon.bots.find_one({'name': bot_name}) is not None:\n raise Exception('Bot already exists. 
Delete it using the \\'delete_bot\\' command first.')\n profile = {\n 'id': id,\n 'name': bot_name,\n 'username': username,\n 'password': password,\n 'server': server,\n 'known_zaaps': [],\n 'sub_end': 0,\n 'position': (69, 420),\n 'cell': 0,\n 'worldmap': 1,\n 'banned': False,\n 'stuff': {},\n 'stats': {},\n }\n mongo_client().blackfalcon.bots.insert_one(profile)\n\n\ndef delete_profile(bot_name):\n mongo_client().blackfalcon.bots.delete_one({'name': bot_name})\n\n\ndef get_profile(bot_name, client=None):\n if client is None:\n client = mongo_client()\n profile = client.blackfalcon.bots.find_one({'name': bot_name})\n if profile is None:\n raise Exception('Bot does not exist. Create a profile using the \\'new_bot\\' command first.')\n return profile\n\n\ndef update_profile_full(bot_name, new_profile, client=None):\n if client is None:\n client = mongo_client()\n client.blackfalcon.bots.replace_one({'name': bot_name}, new_profile)\n\n\ndef update_profile(bot_name, key, new_value):\n client = mongo_client()\n profile = get_profile(bot_name, client=client)\n profile[key] = new_value\n update_profile_full(bot_name, profile, client=client)\n\n\ndef get_account(username, client=None):\n if client is None:\n client = mongo_client()\n account = client.blackfalcon.account.find_one({'username': username})\n if account is None:\n raise Exception('Account does not exist.')\n return account\n\n\ndef update_account_full(username, new_account, client=None):\n if client is None:\n client = mongo_client()\n client.blackfalcon.account.replace_one({'username': username}, new_account)\n\n\ndef update_account(username, key, new_value):\n client = mongo_client()\n account = get_account(username, client=client)\n account[key] = new_value\n update_account_full(username, account, client=client)\n\n\ndef get_known_zaaps(bot_name):\n client = mongo_client()\n return client.blackfalcon.bots.find_one({'name': bot_name})['known_zaaps']\n\n\ndef add_known_zaap(bot_name, pos: list):\n if type(pos) is not list:\n raise TypeError('Positions must be lists')\n profile = get_profile(bot_name)\n if pos not in profile['known_zaaps']:\n profile['known_zaaps'].append(pos)\n update_profile_full(bot_name, profile)\n\n\ndef get_closest_known_zaap(bot_name, pos):\n known_zaaps = get_known_zaaps(bot_name)\n closest = None, 100000\n for zaap_pos in known_zaaps:\n if dist(pos, zaap_pos) < closest[1]:\n closest = zaap_pos, dist(pos, zaap_pos)\n return tuple(closest[0]) if closest[0] is not None else None\n\n\ndef heuristic(node1, node2):\n coords_1 = [int(coord) for coord in node1['coord'].split(';')]\n coords_2 = [int(coord) for coord in node2['coord'].split(';')]\n return dist(coords_1, coords_2)\n\n\ndef get_path_nodes(graph, start_node_id, end_node_id):\n close_set = set()\n came_from = {}\n gscore = {start_node_id: 0}\n fscore = {start_node_id: heuristic(graph[start_node_id], graph[end_node_id])}\n oheap = []\n\n heappush(oheap, (fscore[start_node_id], start_node_id))\n\n while oheap:\n\n current = heappop(oheap)[1]\n\n if current == end_node_id:\n data = []\n while current in came_from:\n data.append(current)\n current = came_from[current]\n path = []\n coords = ''\n for node_id in data:\n if graph[node_id]['coord'] != coords:\n path.append({'coord': graph[node_id]['coord'], 'cell': random.choice(graph[node_id]['edge']), 'direction': graph[node_id]['direction']})\n coords = graph[node_id]['coord']\n\n path.append({'coord': graph[start_node_id]['coord'], 'cell': graph[start_node_id]['cell'], 'direction': 
graph[start_node_id]['direction']})\n return list(reversed(path[1:]))\n\n close_set.add(current)\n neighbours = graph[current]['neighbours']\n for neighbour in neighbours:\n tentative_g_score = gscore[current] + heuristic(graph[current], graph[neighbour])\n\n if neighbour in close_set and tentative_g_score >= gscore.get(neighbour, 0):\n continue\n\n if tentative_g_score < gscore.get(neighbour, 0) or neighbour not in [i[1] for i in oheap]:\n came_from[neighbour] = current\n gscore[neighbour] = tentative_g_score\n fscore[neighbour] = tentative_g_score + heuristic(graph[neighbour], graph[end_node_id])\n heappush(oheap, (fscore[neighbour], neighbour))\n\n return False\n\n\ndef fetch_map(map_info, coord, worldmap):\n maps = []\n for map in map_info:\n if map['coord'] == coord and map['worldMap'] == worldmap:\n maps.append(map)\n if len(maps) == 1 and maps[0] is not None:\n return maps[0]\n elif len(maps) > 1:\n for map in maps:\n if map['hasPriorityOnWorldMap']:\n return map\n\n\ndef map_id_2_coord(map_info, map_id):\n for map in map_info:\n if int(map['id']) == map_id:\n return [int(coord) for coord in map['coord'].split(';')]\n raise Exception('Map id {} not in map info'.format(map_id))\n\n\ndef get_worldmap(map_info, map_id):\n for map in map_info:\n if int(map['id']) == map_id:\n return map['worldMap']\n raise Exception('Map id {} not in map info'.format(map_id))\n\n\ndef cells_2_map(cells):\n maps = np.array(cells)\n shape = maps.shape\n flattened = maps.flatten()\n new_base = np.zeros((14 * shape[1] // 14 + 20 * shape[0] // 40 - 1, 14 * shape[1] // 14 + 20 * shape[0] // 40))\n new_base[new_base == 0] = -1\n for i in range(len(flattened)):\n coord = i % shape[1] + int((i // shape[1]) / 2 + 0.5), (shape[1] - 1 - i % shape[1] + int((i // shape[1]) / 2))\n new_base[coord[1]][coord[0]] = flattened[i]\n return new_base[:]\n\n\ndef cell_2_coord(cell):\n return (14 - 1 - cell % 14 + int((cell // 14) / 2)), cell % 14 + int((cell // 14) / 2 + 0.5)\n\n\ndef can_walk_to_node(map, cell, node):\n start_pos = cell_2_coord(cell)\n goal_pos = cell_2_coord(node['cell'])\n\n neighbors = [(1, 1), (-1, -1), (1, -1), (-1, 1), (1, 0), (0, 1), (-1, 0), (0, -1)]\n\n close_set = set()\n came_from = {}\n gscore = {start_pos: 0}\n fscore = {start_pos: (goal_pos[0] - start_pos[0]) ** 2 + (goal_pos[1] - start_pos[1]) ** 2}\n oheap = []\n\n heappush(oheap, (fscore[start_pos], start_pos))\n\n while oheap:\n\n current = heappop(oheap)[1]\n\n if current == goal_pos:\n data = []\n while current in came_from:\n data.append(current)\n current = came_from[current]\n return True\n\n close_set.add(current)\n for i, j in neighbors:\n neighbor = current[0] + i, current[1] + j\n tentative_g_score = gscore[current] + (neighbor[0] - current[0]) ** 2 + (neighbor[1] - current[1]) ** 2\n if 0 <= neighbor[0] < map.shape[0]:\n if 0 <= neighbor[1] < map.shape[1]:\n if map[neighbor[0]][neighbor[1]] in [-1, 1, 2]:\n continue\n else:\n # array bound y walls\n continue\n else:\n # array bound x walls\n continue\n\n if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):\n continue\n\n if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1] for i in oheap]:\n came_from[neighbor] = current\n gscore[neighbor] = tentative_g_score\n fscore[neighbor] = tentative_g_score + (goal_pos[0] - neighbor[0]) ** 2 + (goal_pos[1] - neighbor[1]) ** 2\n heappush(oheap, (fscore[neighbor], neighbor))\n\n return False\n\n\ndef get_path(map_info, graph, start_pos: tuple, end_pos: tuple, start_cell=None, end_cell=None, 
worldmap=1):\n start = time.time()\n potential_start_nodes_ids = []\n potential_end_nodes_ids = []\n start_cell_set = False if start_cell is None else True\n end_cell_set = False if end_cell is None else True\n for key, node in graph.items():\n if node['coord'] == '{};{}'.format(start_pos[0], start_pos[1]) and node['worldmap'] == worldmap:\n tmp_start_cell = node['cell'] if start_cell_set is False else start_cell\n cells = fetch_map(map_info, node['coord'], worldmap)['cells']\n if can_walk_to_node(cells_2_map(cells), tmp_start_cell, node):\n potential_start_nodes_ids.append(key)\n if node['coord'] == '{};{}'.format(end_pos[0], end_pos[1]) and node['worldmap'] == worldmap:\n tmp_end_cell = node['cell'] if end_cell_set is False else end_cell\n cells = fetch_map(map_info, node['coord'], worldmap)['cells']\n if can_walk_to_node(cells_2_map(cells), tmp_end_cell, node):\n potential_end_nodes_ids.append(key)\n\n couples = list(itertools.product(potential_start_nodes_ids, potential_end_nodes_ids))\n best_path, length = None, sys.maxsize\n for couple in couples:\n path = get_path_nodes(graph, couple[0], couple[1])\n if path is not False and len(path) < length:\n best_path = path\n length = len(path)\n return best_path\n\n\ndef generate_token(valid_until, description):\n \"\"\"\n generated a token to access the swarm node\n\n :param valid_until: timestamp in seconds since 01/01/1970\n :param description: A description of whet the token is used for\n :return: token\n \"\"\"\n token = str(uuid.uuid4())\n client = mongo_client()\n client.blackfalcon.tokens.insert_one({'token': token, 'valid_until': valid_until, 'description': description})\n return token\n\n\ndef delete_token(token):\n client = mongo_client()\n client.blackfalcon.tokens.delete_one({'token': token})\n\n\ndef token_is_authorized(token):\n \"\"\"\n Used to control access to the swarm node\n :param token: a token provided in the credentials.py\n :return: bool\n \"\"\"\n client = mongo_client()\n token_data = client.blackfalcon.tokens.find_one({'token': token})\n if token_data is None:\n return False\n if token_data['valid_until'] < time.time():\n delete_token(token)\n return False\n return token_data['valid_until']\n\n\ndef log_prices(object_type, data, server, sampling_time):\n conn = psycopg2.connect(\n dbname=credentials['postgre']['database'],\n user=credentials['postgre']['user'],\n host=credentials['postgre']['host'],\n password=credentials['postgre']['password'],\n port=credentials['postgre']['port']\n )\n cursor = conn.cursor()\n\n if object_type == 'item':\n objects = []\n for item_id, item in list(data.items()):\n for object in item['items_stats']:\n formatted_object = (\n item_id,\n server,\n object['prices'][0],\n object['prices'][1],\n object['prices'][2],\n -1,\n str(object['effects']).replace(\"'\", '\"'),\n hashlib.sha256(str(object['effects']).encode('utf8')).hexdigest(),\n int(sampling_time)\n )\n objects.append(formatted_object)\n cursor.executemany(\"INSERT INTO itemprices (itemid, server, price1, price10, price100, craftcost, stats, hash, sampleid) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)\", objects)\n\n elif object_type == 'resource':\n objects = []\n for item_id, item in list(data.items()):\n for object in item['items_stats']:\n formatted_object = (\n item_id,\n 'Julith',\n object['prices'][0],\n object['prices'][1],\n object['prices'][2],\n sampling_time\n )\n objects.append(formatted_object)\n cursor.executemany(\"INSERT INTO resourceprices (itemid, server, price1, price10, price100, sampleid) 
VALUES(%s,%s,%s,%s,%s,%s)\", objects)\n\n else:\n raise Exception('Type must be \"item\" or \"resource\", got {}'.format(object_type))\n conn.commit()\n conn.close()\n return len(objects)\n\n\nif __name__ == '__main__':\n # mapinfo = []\n # for i in range(8):\n # with open('../assets/map_info_{}.json'.format(i), 'r', encoding='utf8') as f:\n # mapinfo += json.load(f)\n # graph = {}\n # for i in range(2):\n # with open('../assets/pathfinder_graph_{}.json'.format(i), 'r', encoding='utf8') as f:\n # graph.update(json.load(f))\n #\n # print('Starting')\n # print(get_path(mapinfo, graph, (4, -18), (3, -5)))\n # client = mongo_client()\n # print([thing for thing in client.blackfalcon.tokens.find({})])\n # token = generate_token(time.time() + 90, 'Test token')\n # print(token)\n # print(token_is_authorized(token))\n log_prices('lel', '')\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
khaiyichin/DS595-RL-Projects
[ "4add6b2adc2cb9f7cdb783d50b005ecd1b4aada3" ]
[ "Project3/dqn_model.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DQN(nn.Module):\n \"\"\"Initialize a deep Q-learning network\n\n Hints:\n -----\n Original paper for DQN\n https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n\n This is just a hint. You can build your own structure.\n \"\"\"\n\n def __init__(self, gamma, ddqn=False, in_channels=4, num_actions=4):\n \"\"\"\n Parameters:\n -----------\n in_channels: number of channel of input.\n i.e The number of most recent frames stacked together, here we use 4 frames, which means each state in Breakout is composed of 4 frames.\n num_actions: number of action-value to output, one-to-one correspondence to action in game.\n\n You can add additional arguments as you need.\n In the constructor we instantiate modules and assign them as\n member variables.\n \"\"\"\n super(DQN, self).__init__()\n ###########################\n # YOUR IMPLEMENTATION HERE #\n\n # @todos: allow parameter changes..?\n\n # Define variables for input arguments\n self.gamma = gamma\n self.ddqn = ddqn\n\n self.in_channels = in_channels\n self.num_actions = num_actions\n\n # Define neural network structure according to the Nature paper\n self.pool = nn.MaxPool2d(2, 2) # shrink the 2d image by a factor of 0.5\n self.conv_1 = nn.Conv2d(self.in_channels, 32, kernel_size=8, stride=4)\n self.conv_2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n self.conv_3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n self.fc_1 = nn.Linear(64 * 7 * 7, 512)\n self.output_layer = nn.Linear(512, self.num_actions)\n\n def forward(self, x):\n \"\"\"\n In the forward function we accept a Tensor of input data and we must return\n a Tensor of output data. 
We can use Modules defined in the constructor as\n well as arbitrary operators on Tensors.\n \"\"\"\n ###########################\n # YOUR IMPLEMENTATION HERE #\n\n \"\"\"Execute a forward propagation step for the neural network.\n\n Args:\n x: An observation in the form of a (4, 84, 84) tensor.\n \"\"\"\n\n # Go through literature to find good DQN structure\n\n x = F.relu(self.conv_1(x)) # 84x84x4 -> 20x20x32\n x = F.relu(self.conv_2(x)) # 20x20x32 -> 9x9x64\n x = F.relu(self.conv_3(x)) # 9x9x64 -> 7x7x64\n x = x.reshape(-1, 64 * 7 * 7)\n x = F.relu(self.fc_1(x))\n x = self.output_layer(x)\n\n ###########################\n return x\n\n def compute_loss(self, tensor_lst, target_network, criterion):\n \"\"\"Computes the loss between the target Q-function and the current Q-function.\n\n Args:\n tensor_lst: A list of 5 tensors - current states, current actions, current rewards,\n terminal state booleans, and next states.\n\n Returns:\n Loss values in the form of a PyTorch tensor.\n \"\"\"\n obs, act, rew, done, next_obs = tensor_lst\n\n # Compute targets\n with torch.no_grad():\n if self.ddqn:\n\n # Get the Q values for the next state using the training/online network\n next_state_training_q_vals = self(next_obs)\n\n # Get the action that gives the maximum Q value for the next state (based on the online network)\n max_training_q_vals_ind = next_state_training_q_vals.argmax(dim=1, keepdim=True)\n\n # Get the Q values for the next state using the target network\n next_state_target_q_vals = target_network(next_obs)\n\n # Get the maximum Q value (obtained from the target network) based on the action (from the online network)\n max_target_q_vals = torch.gather(input=next_state_target_q_vals, dim=1, index=max_training_q_vals_ind)\n\n target_q_vals = rew + self.gamma*(1-done)*max_target_q_vals\n else:\n\n # Get the Q values for the next state using the target network\n next_state_target_q_vals = target_network(next_obs)\n\n # Get the max Q value for the next state with respect to all the actions\n max_target_q_vals = next_state_target_q_vals.max(dim=1, keepdim=True)[0]\n\n target_q_vals = rew + self.gamma*(1-done)*max_target_q_vals # condensed piecewise function from the Nature paper\n\n # Compute Q-values based on actual actions\n q_vals = self(obs) # shape: (batch_size, num_actions), e.g. 32 x 4\n actual_q_vals = torch.gather(input=q_vals, dim=1, index=act) # based on the actions actually taken\n\n # Compute loss between the new Q-value and the updated Q-value\n return criterion(actual_q_vals, target_q_vals)" ]
[ [ "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.no_grad", "torch.gather" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nishantsinghdev/smartdata
[ "e4a9799c76da4230e8d4edbb2e04c2a48eaa20df" ]
[ "rishabh/stats (1).py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = input('enter the data path ')#'C:\\Users\\yobin\\Desktop\\Ecommerce Purchases.csv'\nsep = input('enter the seperater ')\ndf = pd.read_csv(data,sep)\n#get only the numeric values of dataframe\npp=df._get_numeric_data()\n\n\npp2=pp.describe()\npp2.to_csv(\"new.csv\")\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
philbull/VisModel
[ "ccdad81064082efced9d4ba940cb42873d9326c9" ]
[ "VisModel/gains.py" ]
[ "\nimport numpy as np\nimport copy\n\nclass BaseGainModel(object):\n \n def __init__(self, uvd):\n \"\"\"\n Per-antenna complex gain model.\n \n Parameters\n ----------\n uvd : UVData object\n UVData object used to define metadata etc. \n \"\"\"\n # Collect information about the shape of the data\n # (the order of these lists is used to determine parameter order)\n self.freqs = np.unique(uvd.freq_array)\n self.times = np.unique(uvd.time_array)\n self.antpairs = sorted(uvd.get_antpairs())\n self.ants = sorted(uvd.ants())\n self.pols = sorted(uvd.get_pols())\n \n # Set-up empty parameter dictionary\n self.params = {}\n for pol in self.pols:\n self.params[pol] = {}\n for ant in self.ants:\n self.params[pol][ant] = None\n \n \n def set_params(self, params):\n \"\"\"\n Set gain model parameter values per polarization and antenna.\n \n Parameters\n ----------\n params : dict\n Dict of parameters to assign, of the form `params[pol][ant]`.\n \"\"\"\n for pol in params.keys():\n assert pol in self.pols, \\\n \"Polarization '%s' not found.\" % pol\n for ant in params[pol].keys():\n assert ant in self.ants, \\\n \"Antenna '%s' not found for polarization '%s'.\" % (ant, pol)\n self.params[pol][ant] = params[pol][ant]\n \n \n def model(self, freqs, times, params=None):\n \"\"\"\n Complex gain model, as a function of frequency, time, and a set of \n parameters.\n \n The model function should have a well-defined behaviour if `params=None`.\n \n Parameters\n ----------\n freqs : array_like\n 1D array of frequency values, in Hz.\n \n times : array_like\n 1D array of time values.\n \"\"\"\n # Basic complex gain model (g = 1 = const.)\n g = np.ones((freqs.size, times.size)) + 0.j\n return g\n \n \n def apply_gains(self, uvd_in, params=None, mode='multiply', inplace=False, \n check_order=True):\n \"\"\"\n Apply gains to a UVData object.\n \n Parameters\n ----------\n uvd_in : UVData\n UVData object to apply the gains to.\n \n params : dict, optional\n Dict containing gain model parameters for each polarization \n and antenna. Structure: `params[pol][ant]`.\n \n mode : str, optional\n How to apply the gains. 'multiply' means that the \n data will be multiplied by the gains. 'calibrate' \n means that the data will be divided by the gains.\n \n inplace : bool, optional\n Whether to apply the gains to the input data in-place, or to \n return a new copy of the object. 
Default: False.\n \n check_order : bool, optional\n Whether to explicitly check that the ordering of the times in the \n UVData object's `data_array` matches what is expected.\n \"\"\"\n # Check inputs\n assert mode in ['multiply', 'calibrate'], \\\n \"mode must be 'multiply' or 'calibrate'\"\n \n # Check whether to apply operation in-place\n if inplace:\n uvd = uvd_in\n else:\n uvd = copy.deepcopy(uvd_in)\n \n # Get frequencies and times\n freqs = np.unique(uvd.freq_array)\n times = np.unique(uvd.time_array)\n \n # Loop over known antennas and polarisations and compute the gain model\n gains = {}\n for pol in self.pols:\n for ant in self.ants:\n gains[(ant, pol)] = self.model(freqs, times, self.params[pol][ant])\n \n # Loop over antenna pairs and polarisations in the UVData object\n for ant1, ant2, pol in uvd.get_antpair_pols():\n assert ant1 in self.ants, \"Unexpected antenna %d in uvd_in\" % ant1\n assert ant2 in self.ants, \"Unexpected antenna %d in uvd_in\" % ant2\n assert pol in self.pols, \"Unexpected polarization %s in uvd_in\" % pol\n \n # Get gains\n g1 = gains[(ant1, pol)]\n g2 = gains[(ant2, pol)]\n \n # Find correct elements of the data array\n # uvd.data_array shape: (Nblts, 1, Nfreqs, Npols)\n idxs = uvd.antpair2ind(ant1, ant2) # blts index\n ipol = self.pols.index(pol) # polarization index (self.pols is a plain list)\n \n # Explicitly check the order of the times\n if check_order:\n assert np.allclose(uvd.time_array[idxs], times), \\\n \"Times in the UVData object do not match expected ordering\"\n \n # Apply gains\n if mode == 'multiply':\n uvd.data_array[idxs, 0, :, ipol] *= g1 * g2.conj()\n else:\n uvd.data_array[idxs, 0, :, ipol] /= (g1 * g2.conj())\n \n return uvd\n\n\n\nclass FactorizedFourierGainModel(BaseGainModel):\n \n def __init__(self, uvd, freq_range, time_range, freq_modes, time_modes):\n \"\"\"\n Per-antenna complex gain model with factorisable complex Fourier \n series in frequency and time.\n \n Parameters\n ----------\n uvd : UVData object\n UVData object used to define metadata etc.\n \n freq_range, time_range : tuple of float\n The frequencies and times to consider as the minimum and maximum \n of the range, e.g. `freq_range = (freq_min, freq_max)`. The model \n is allowed to extend outside this range.\n \n freq_modes, time_modes : int or array_like\n If specified as an integer ``n``, the first ``n`` complex Fourier \n modes will be used to define the model, starting with the zero mode.\n \n If specified as an array, each element gives the order of a Fourier \n mode to include in the model, e.g. 
``freq_modes = [0, 1, 5]`` would \n include the n=0, 1, and 5 Fourier modes in the model only.\n \"\"\"\n # Initialise superclass\n super().__init__(uvd)\n \n # Check inputs\n assert len(freq_range) == 2, \"freq_range must be a tuple: (freq_min, freq_max)\"\n assert len(time_range) == 2, \"time_range must be a tuple: (time_min, time_max)\"\n assert freq_range[1] > freq_range[0]\n assert time_range[1] > time_range[0]\n \n # Calculate period etc.\n self.freq_min, self.freq_max = freq_range\n self.time_min, self.time_max = time_range\n self.freq_period = self.freq_max - self.freq_min\n self.time_period = self.time_max - self.time_min\n \n # Specify which Fourier modes to include in the model\n if isinstance(freq_modes, (int, np.integer)):\n self.freq_modes = np.arange(freq_modes, dtype=int)\n else:\n self.freq_modes = np.array(freq_modes)\n \n if isinstance(time_modes, (int, np.integer)):\n self.time_modes = np.arange(time_modes, dtype=int)\n else:\n self.time_modes = np.array(time_modes)\n \n \n def model(self, freqs, times, params=None):\n \"\"\"\n Complex gain model, as a function of frequency, time, and a set of \n parameters.\n \n Parameters\n ----------\n freqs : array_like\n 1D array of frequency values, in Hz.\n \n times : array_like\n 1D array of time values.\n \n params : tuple, optional\n Complex coefficients of the Fourier series, assumed here to be a \n tuple `(freq_coeffs, time_coeffs)` of arrays corresponding to each \n mode in the lists `self.freq_modes` and `self.time_modes`. If \n `None`, only the zero mode contributes, with unit coefficient.\n \"\"\"\n # Fourier coefficients per mode; assumed structure: (freq_coeffs, time_coeffs)\n if params is None:\n cf = np.zeros(self.freq_modes.size, dtype=complex)\n cf[self.freq_modes == 0] = 1.\n ct = np.zeros(self.time_modes.size, dtype=complex)\n ct[self.time_modes == 0] = 1.\n else:\n cf, ct = params\n \n # Phase factors for the Fourier modes\n freq_fac = 1.j * 2.*np.pi * (freqs - self.freq_min) / self.freq_period\n time_fac = 1.j * 2.*np.pi * (times - self.time_min) / self.time_period\n \n # Frequency modes\n gf = 0\n for i, n in enumerate(self.freq_modes):\n gf += cf[i] * np.exp(n * freq_fac)\n \n # Time modes\n gt = 0\n for i, n in enumerate(self.time_modes):\n gt += ct[i] * np.exp(n * time_fac)\n \n # Return total gain model\n return gf[np.newaxis,:] * gt[:,np.newaxis] # (Ntimes, Nfreqs)\n" ]
[ [ "numpy.unique", "numpy.arange", "numpy.almost_equal", "numpy.ones", "numpy.exp", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
archman/phantasy
[ "ac362cd3a80f7d1cfc68c0722f8a4aad504d1edd" ]
[ "phantasy/library/lattice/lattice.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Create high-level lattice object from machine configuration files.\n\"\"\"\nimport json\nimport logging\nimport numpy as np\nimport os\nimport shelve\nimport sys\nimport tempfile\nimport time\n\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom flame import Machine\nfrom fnmatch import fnmatch\nfrom math import log10\n\nfrom phantasy.library.layout import Layout\nfrom phantasy.library.layout import build_layout\nfrom phantasy.library.misc import bisect_index\nfrom phantasy.library.misc import flatten\nfrom phantasy.library.misc import get_intersection\nfrom phantasy.library.misc import parse_dt\nfrom phantasy.library.misc import pattern_filter\nfrom phantasy.library.misc import epoch2human\nfrom phantasy.library.misc import truncate_number\nfrom phantasy.library.misc import create_tempfile\nfrom phantasy.library.misc import create_tempdir\nfrom phantasy.library.model import BeamState\nfrom phantasy.library.model import ModelFlame\nfrom phantasy.library.parser import Configuration\nfrom phantasy.library.settings import Settings\nfrom phantasy.library.settings import build_flame_settings\nfrom phantasy.library.physics import get_orbit\nfrom phantasy.library.physics import inverse_matrix\nfrom .element import BaseElement\nfrom .element import CaElement\nfrom .flame import FlameLatticeFactory\nfrom .impact import LatticeFactory as ImpactLatticeFactory\nfrom .impact import run_lattice as run_impact_lattice\n\n_LOGGER = logging.getLogger(__name__)\nTS_FMT = \"%Y-%m-%d %H:%M:%S\"\n\n\nclass Lattice(object):\n \"\"\"Machine high-level lattice object, all elements inside this lattice\n has an unique name.\n\n Parameters\n ----------\n name : str\n Lattice name.\n\n Keyword Arguments\n -----------------\n s_begin : float\n Longitudinal position at the beginning of current lattice layout, [m].\n s_end : float\n Longitudinal position at the end of current lattice layout, [m].\n mname : str\n Name of loaded machine, from which lattice itself is loaded.\n mpath : str\n Path name of machine configurations.\n mconf : obj\n Machine configuration object.\n mtype : int\n Machine type, 0 for linear (default), 1 for a ring.\n source : str\n Source of PV data, URL of channel finder service, file name of SQLite\n database or csv spreadsheet.\n length : float\n Total length of lattice (layout), if 'mtype' is 1, refers to circumference.\n model : str\n Model type (case insensitive), or code name for simulation, 'FLAME'\n or 'IMPACT', the former is the default one.\n data_dir: str\n Path of directory to host data generated from model, including input\n lattice files, output files and other related files, if not defined,\n random folder will be created in system temporary directory,\n e.g.'/tmp/model_hGe1sq'.\n config :\n Lattice configuration object.\n layout :\n Lattice layout object.\n settings :\n Lattice settings object.\n model_factory :\n Lattice factory object for online model.\n trace : str\n If 'on', history of PV set actions could be traced back, or ('off')\n set action cannot be reverted, by default, trace feature is on.\n group : dict\n Initial group configuration.\n\n Note\n ----\n :class:`~phantasy.library.operation.create_lattice` could be used to\n set up lattice created by this class by provided information like: PV data,\n lattice layout, configuration and settings, etc.\n\n See Also\n --------\n :func:`~phantasy.library.operation.lattice.create_lattice`\n Create high-level lattice 
object.\n \"\"\"\n\n def __init__(self, name, **kws):\n self.name = name\n self._model_factory = None\n self.source = kws.get('source', None)\n self.s_begin = kws.get('s_begin', None)\n self.s_end = kws.get('s_end', None)\n self.mname = kws.get('mname', None)\n self.mpath = kws.get('mpath', None)\n self.mconf = kws.get('mconf', None)\n self.mtype = kws.get('mtype', None)\n self.length = kws.get('length', None)\n self.model = kws.get('model', None)\n self.data_dir = kws.get('data_dir', None)\n self.layout = kws.get('layout', None)\n self.config = kws.get('config', None)\n self.settings = kws.get('settings', None)\n self.model_factory = kws.get('model_factory', None)\n self.group = kws.get('group', None)\n\n self._viewer_settings = OrderedDict()\n self._trace_history = None\n self.trace = kws.get('trace', None)\n self._elements = []\n self._orm = None\n\n # name:element, assume no duplicated element.\n self._name_element_map = {}\n\n # clean up the following parameters\n self.isring = bool(self.mtype)\n self.latticemodelmap = None\n\n @property\n def group(self):\n \"\"\"dict: Group configuration.\"\"\"\n return self._group\n\n @group.setter\n def group(self, g):\n if g is None:\n self._group = dict()\n elif isinstance(g, dict):\n self._group = g\n else:\n _LOGGER.warning(\"'group' attribute is always initialized with {}.\")\n\n @property\n def trace(self):\n \"\"\"str: Keep/revert history feature flag for PV set actions.\"\"\"\n return self._trace\n\n @trace.setter\n def trace(self, trace):\n if trace is None or trace == 'on':\n self._trace = 'on'\n if self._trace_history is None:\n self._trace_history = []\n else:\n self._trace = 'off'\n if self._trace_history is not None:\n self._trace_history = None\n\n @property\n def config(self):\n \"\"\"Obj: Lattice configuration object.\"\"\"\n return self._config\n\n @config.setter\n def config(self, config):\n if config is not None and isinstance(config, Configuration):\n self._config = config\n else:\n self._config = self._get_default_config()\n\n @property\n def layout(self):\n \"\"\"Obj: Accelerator layout object.\"\"\"\n return self._layout\n\n @layout.setter\n def layout(self, layout):\n if layout is not None and isinstance(layout, Layout):\n self._layout = layout\n else:\n self._layout = self._get_default_layout()\n\n @property\n def settings(self):\n \"\"\"Settings: Object of lattice model settings.\"\"\"\n return self._settings\n\n @settings.setter\n def settings(self, settings):\n if settings is not None and isinstance(settings, Settings):\n self._settings = settings\n else:\n self._settings = self._get_default_settings()\n # update model factory\n if self._model_factory is not None:\n self._model_factory.settings = settings\n _LOGGER.info(\"Updating model settings.\")\n\n @property\n def model_factory(self):\n \"\"\"Obj: Lattice factory of defined model type.\"\"\"\n return self._model_factory\n\n @model_factory.setter\n def model_factory(self, mf):\n if mf is None:\n self._model_factory = self._set_model_factory()\n elif self.model == \"FLAME\" and isinstance(mf, FlameLatticeFactory):\n self._model_factory = mf\n elif self.model == \"IMPACT\" and isinstance(mf, ImpactLatticeFactory):\n self._model_factory = mf\n else:\n raise TypeError(\"Wrong input model factory.\")\n\n @property\n def s_begin(self):\n \"\"\"float: Longitudinal position at the beginning of current lattice layout, [m].\"\"\"\n return self._s_begin\n\n @s_begin.setter\n def s_begin(self, s):\n if s is None:\n self._s_begin = 0.0\n else:\n self._s_begin = s\n\n 
@property\n def s_end(self):\n \"\"\"float: Longitudinal position at the end of current lattice layout, [m].\"\"\"\n return self._s_end\n\n @s_end.setter\n def s_end(self, s):\n if s is None:\n self._s_end = sys.float_info.max\n else:\n self._s_end = s\n\n @property\n def mname(self):\n \"\"\"str: Name of loaded machine, from which lattice itself is loaded.\"\"\"\n return self._mname\n\n @mname.setter\n def mname(self, name):\n if name is None:\n from phantasy.library.dconf import _DEMO_MNAME\n self._mname = _DEMO_MNAME\n else:\n self._mname = name\n\n @property\n def mpath(self):\n \"\"\"str: Path name of machine configurations.\"\"\"\n return self._mpath\n\n @mpath.setter\n def mpath(self, path):\n if path is None:\n from phantasy.library.dconf import _DEMO_MPATH\n self._mpath = _DEMO_MPATH\n else:\n self._mpath = path\n\n @property\n def mconf(self):\n \"\"\"Obj: Machine configuration object.\"\"\"\n return self._mconf\n\n @mconf.setter\n def mconf(self, config):\n if isinstance(config, Configuration):\n self._mconf = config\n else:\n from phantasy.library.dconf import _DEMO_MCONF\n self._mconf = _DEMO_MCONF\n\n @property\n def mtype(self):\n \"\"\"int: Machine type, linear (0) or circular (1).\"\"\"\n return self._mtype\n\n @mtype.setter\n def mtype(self, i):\n if i is None:\n self._mtype = 0\n else:\n self._mtype = i\n\n @property\n def source(self):\n \"\"\"str: Source of PV data.\"\"\"\n return self._source\n\n @source.setter\n def source(self, src):\n if src is None:\n self._source = None\n else:\n self._source = src\n\n @property\n def length(self):\n \"\"\"Total length of lattice, if 'mtype' is 1, refers to circumference.\"\"\"\n return self._length\n\n @length.setter\n def length(self, s):\n if s is None:\n self._length = 0.0\n else:\n self._length = s\n\n @property\n def model(self):\n \"\"\"str: Code name to simulate online model type, *FLAME* by default.\"\"\"\n return self._model\n\n @model.setter\n def model(self, code):\n if code is None:\n self._model = \"FLAME\"\n else:\n self._model = code.upper()\n\n @property\n def data_dir(self):\n \"\"\"str: Path of directory to host data generated from model.\"\"\"\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, path):\n if path is None:\n systmp = '/tmp'\n self._data_dir = create_tempdir(prefix='data_', dir=systmp)\n else:\n self._data_dir = path\n\n def _get_default_config(self):\n try:\n if self.mconf.has_option(self.name, \"config_file\"):\n configfile = self.mconf.getabspath(self.name, \"config_file\")\n config = Configuration(configfile)\n else:\n from phantasy.library.dconf import _DEMO_MCONFIG\n config = _DEMO_MCONFIG\n except:\n config = Configuration()\n return config\n\n def _get_default_settings(self):\n try:\n if self.mconf.has_option(self.name, \"settings_file\"):\n settingfile = self.mconf.getabspath(self.name, \"settings_file\")\n settings = Settings(settingfile)\n _LOGGER.debug(\"Apply settings file from machine configs.\")\n else:\n settings = Settings()\n except:\n settings = Settings()\n return settings\n\n def _get_default_layout(self):\n try:\n if self.mconf.has_option(self.name, \"layout_file\"):\n layoutfile = self.mconf.getabspath(self.name, \"layout_file\")\n layout = build_layout(layoutfile)\n else:\n layout = None\n except:\n layout = None\n return layout\n\n def _set_model_factory(self):\n if self.model == \"IMPACT\":\n mf = ImpactLatticeFactory(self.layout, config=self.config,\n settings=self.settings)\n elif self.model == \"FLAME\":\n mf = FlameLatticeFactory(self.layout, 
config=self.config,\n settings=self.settings)\n else:\n raise RuntimeError(\n f\"Lattice: Model '{self.model}' not supported\")\n return mf\n\n def get_model_settings(self, only_physics=False):\n \"\"\"Get settings from 'model' environment, if *only_physics* is `True`,\n only return physics field settings, otherwise return both engineering\n and physics settings.\n\n Parameters\n ----------\n only_physics : bool\n If `True`, only return physics settings, the same as `settings`\n attribute, the default value is `False`.\n\n Returns\n -------\n r :\n Settings object.\n\n See Also\n --------\n :func:`~phantasy.library.settings.common.generate_settings`\n \"\"\"\n if only_physics:\n return self.settings\n else:\n all_settings = Settings()\n for e_name, e_phyconf in self.settings.items():\n elem = self._find_exact_element(e_name)\n if elem is None:\n continue\n all_settings.update(OrderedDict({e_name: e_phyconf}))\n for phy_fld_name in set(e_phyconf).intersection(\n elem.get_phy_fields()):\n eng_fields = elem.get_eng_fields()\n if len(eng_fields) == 1:\n all_settings[e_name].update(\n {eng_fields[0]:\n elem._unicorn_p2e(e_phyconf[phy_fld_name])})\n return all_settings\n\n def set(self, elem, value, field=None, **kws):\n \"\"\"Set the value of a lattice element field, if element only has one\n field, parameter *field* is optional, or *field* must be specified.\n\n Parameters\n ----------\n elem : str or CaElement object\n Element name string or CaElement object.\n value :\n Value of the field, type should be valid w.r.t *field*.\n field : str\n Field name (case insensitive) of element to be assigned, optional\n if element has only one field, *value* will be assigned to.\n\n Keyword Arguments\n -----------------\n source : str\n Three options available: 'all', 'control' and 'model', by default\n 'all', i.e. 
update both 'control' and 'model' environment.\n\n Returns\n -------\n ret :\n None if failed, or 0.\n \"\"\"\n elems = self._get_element_list(elem)\n if len(elems) != 1:\n raise RuntimeError(\n \"Lattice: Multiple elements found with the specified name.\")\n _elem = elems[0]\n\n all_fields = _elem.fields\n\n if len(all_fields) > 1:\n if field is None:\n print(\"Please specify field from [{}]\".format(\n ','.join(all_fields)))\n return None\n elif field not in all_fields:\n print(\"Wrong field.\")\n return None\n elif len(all_fields) == 1:\n field = all_fields[0]\n else:\n print(\"Element does not have the defined field.\")\n return None\n\n source = kws.get('source', 'all')\n if source == 'all':\n self._set_control_field(_elem, field, value)\n self._set_model_field(_elem, field, value)\n elif source == 'control':\n self._set_control_field(_elem, field, value)\n elif source == 'model':\n self._set_model_field(_elem, field, value)\n else:\n raise RuntimeError(\"Invalid source.\")\n\n return 0\n\n def _set_control_field(self, elem, field, value):\n \"\"\"Set value to element field onto 'control' environment.\n \"\"\"\n value0 = elem.last_settings.get(field)\n if value0 is None:\n value0 = getattr(elem, field)\n if elem.family == \"CAV\" and field in {'PHA', 'PHASE'}:\n value = _normalize_phase(value)\n setattr(elem, field, value)\n self._log_trace('control', element=elem.name,\n field=field, value0=value0, value=value)\n\n def _set_model_field(self, elem, field, value):\n \"\"\"Set *value* to *elem* *field* in 'model' environment.\n \"\"\"\n if isinstance(elem, CaElement):\n elem_name = elem.name\n else:\n elem_name = elem\n if elem_name not in self.settings:\n _LOGGER.warning(\n f\"Element:{elem_name} to set not found in lattice model.\")\n elif field not in self.settings[elem_name]:\n _LOGGER.warning(\n f\"Field: {field} to set not found in element: {elem_name}.\")\n else:\n value0 = self.settings[elem_name][field]\n self.settings[elem_name][field] = value\n self.model_factory.settings[elem_name][field] = value\n _LOGGER.debug(\n \"Updated field: {0:s} of element: {1:s} with value: {2:f}.\".format(\n field, elem_name, value))\n self._log_trace('model', element=elem_name, field=field,\n value=value, value0=value0)\n\n def _log_trace(self, type, **kws):\n \"\"\"Add set log entry into trace history.\n\n Parameters\n ----------\n type : str\n Set type according to environment source, 'control' or 'model'.\n timestamp\n element\n field\n value\n value0\n \"\"\"\n if self._trace == 'on':\n name = kws.get('element')\n field = kws.get('field')\n value = kws.get('value')\n value0 = kws.get('value0')\n log_entry = OrderedDict((\n ('timestamp', time.time()),\n ('type', type),\n ('element', name),\n ('field', field),\n ('value0', value0),\n ('value', value),\n ))\n self._trace_history.append(log_entry)\n else:\n pass\n\n def get(self, elem, field=None, **kws):\n \"\"\"Get the value of a lattice element field.\n\n Parameters\n ----------\n elem : str or CaElement object\n Element name string or CaElement object.\n field : str\n Field name (case insensitive) of element, if not defined, all\n field names will be selected.\n\n Keyword Arguments\n -----------------\n source : str\n Two options available: 'control' and 'model', by default 'control'\n i.e. 
only get from 'control' environment.\n mstate : bool\n If True, return BeamState instance, False by default, only\n valid for viewer elements and ``source='model'``.\n\n Note\n ----\n If ``source`` is defined as ``'model'``, settings will be retrieved\n from the model environment; there are two categories: devices that\n accept new value settings, e.g. corrector, cavity, etc., and devices\n that can only show readings, e.g. BPM, PM, etc. (so-called\n *viewer elements*). For *viewer elements*, a ``BeamState`` can be\n obtained after ``run()``, for the FLAME model.\n\n Returns\n -------\n ret : dict\n Field value, {field: value}.\n \"\"\"\n elems = self._get_element_list(elem)\n if len(elems) != 1:\n raise RuntimeError(\n \"Lattice: Multiple elements found with the specified name.\")\n _elem = elems[0]\n all_fields = _elem.fields\n if field is None:\n field = all_fields\n elif field not in all_fields:\n print(\"Wrong field.\")\n return None\n\n source = kws.get('source', 'control')\n\n if source == 'control':\n retval = _get_control_field(_elem, field)\n elif source == 'model':\n mstate_flag = kws.get('mstate', False)\n retval = self._get_model_field(_elem, field, mstate=mstate_flag)\n else:\n raise RuntimeError(\"Invalid source.\")\n\n return retval\n\n def _get_model_field(self, elem, field, **kws):\n \"\"\"Get field value(s) from element.\n\n Keyword Arguments\n -----------------\n mstate : bool\n If True, return BeamState instance, False by default.\n \"\"\"\n if not isinstance(field, (list, tuple)):\n field = field,\n elem_name = elem.name\n if _is_viewer(elem):\n _settings = self._viewer_settings\n else:\n _settings = self.settings\n retval = {k: v for k, v in _settings[elem_name].items() if k in field}\n if kws.get('mstate', False) and _is_viewer(elem):\n retval['mstate'] = self._viewer_settings[elem_name]['mstate']\n return retval\n\n def trace_history(self, rtype='human', **kws):\n \"\"\"Inspect the trace history of Lattice set actions; return the data\n in a human-friendly or raw format.\n\n Parameters\n ----------\n rtype : str\n 'human' or 'raw'; the default option 'human' will return formatted\n human-readable strings that could be printed out to streams;\n 'raw' will return history entries meeting filters defined by\n keyword arguments.\n\n Note\n ----\n Data structure of every traced log entry:\n\n +---------------+--------------------------------------+\n | key name | value example |\n +---------------+--------------------------------------+\n | *timestamp* | ``1485275869`` |\n +---------------+--------------------------------------+\n | *type* | ``control`` |\n +---------------+--------------------------------------+\n | *element* | ``LS1_CA01:CAV1_D1127`` |\n +---------------+--------------------------------------+\n | *field* | ``PHA`` |\n +---------------+--------------------------------------+\n | *value* | ``30`` |\n +---------------+--------------------------------------+\n | *value0* | ``325`` |\n +---------------+--------------------------------------+\n\n Keyword Arguments\n -----------------\n element : str\n Unix shell pattern of element name.\n field : str\n Unix shell pattern of element field name.\n type : str\n Log entry type: 'control' or 'model', could be Unix shell pattern.\n value : number or list of numbers\n From which, lower and upper limit values will be extracted.\n \"\"\"\n if self._trace_history is None:\n return None\n\n _history = self._filter_trace(self._trace_history, **kws)\n\n if rtype == 'human':\n retval = []\n for log_entry in _history:\n type = 
log_entry['type']\n ts = log_entry['timestamp']\n value = log_entry['value']\n value0 = log_entry['value0']\n name = log_entry['element']\n field = log_entry['field']\n log_str = \"{ts} [{type:^7s}] Set {name:<22s} TO {value:<10.3f} [{value0:^10.3f}]\".format(\n ts=datetime.fromtimestamp(ts).strftime(\n '%Y-%m-%d %H:%M:%S'),\n type=type, name=\"{0} [{1}]\".format(name, field),\n value=value, value0=value0)\n retval.append(log_str)\n print(log_str)\n return \"\\n\".join(retval)\n else:\n return _history\n\n @staticmethod\n def _filter_trace(data, **kws):\n \"\"\"Apply filters on trace history data, return list of valid entries.\n \"\"\"\n # filters\n elem_name = kws.get('element', '*')\n pv_name = kws.get('pv', None)\n entry_type = kws.get('type', '*')\n field_name = kws.get('field', '*')\n value = kws.get('value', None)\n if value is not None:\n if isinstance(value, (int, float)):\n value = [value]\n elif not isinstance(value, (list, tuple)):\n raise RuntimeError(\"Invalid value argument.\")\n val_min, val_max = min(value), max(value)\n else:\n val_min, val_max = -1e10, 1e10\n\n retval = []\n for d in data:\n _elem_name = d.get('element')\n _pv_name = d.get('pv', None)\n _entry_type = d.get('type')\n _field_name = d.get('field')\n _value = d.get('value')\n if Lattice._fnmatch(_pv_name, pv_name) \\\n and fnmatch(_elem_name, elem_name) \\\n and fnmatch(_entry_type, entry_type) \\\n and fnmatch(_field_name, field_name) \\\n and val_min <= _value <= val_max:\n retval.append(d)\n\n return retval\n\n @staticmethod\n def _fnmatch(name, pattern):\n if pattern is None: # pv pattern is not defined\n return True\n else: # pv pattern is defined\n if name is None:\n return False\n else:\n return fnmatch(name, pattern)\n\n def roll_back(self, setting=None, type=None, retroaction=None):\n \"\"\"Roll back PV setpoints by providing *setting* or log entries from\n the trace history, which instructs the high-level lattice object to\n perform set actions that roll it back into previous states.\n\n Parameters\n ----------\n setting : dict or list of dict\n Element setting, could be a trace_history entry; if not defined,\n use the last entry with the type 'control'.\n type : str\n Type of environment to roll back, 'control' or 'model', 'control'\n by default.\n retroaction : time\n Timestamp of the trace history; the machine state will be rolled\n back to its state at *retroaction*.\n\n Note\n ----\n Possible input of the *setting* parameter:\n - (default) the last trace history entry;\n - a trace history entry or a list of entries.\n\n Note\n ----\n About the *retroaction* parameter, the following input types are supported:\n\n - Absolute timestamp indicated by a float number, i.e. time in seconds\n since Epoch: entries with logging timestamp newer (bigger) than\n *retroaction* will be rolled back;\n - Relative timestamp w.r.t. the current time; available units: *years*,\n *months*, *weeks*, *days*, *hours*, *minutes*, *seconds*,\n *microseconds*, and some unit aliases: *year*, *month*, *week*, *day*,\n *hour*, *minute*, *second*, *microsecond*, *mins*, *secs*, *msecs*,\n *min*, *sec*, *msec*, could be linked by string 'and' or ',',\n ended with 'ago', e.g. 
'5 mins ago', '1 hour and 30 mins ago'.\n\n Warning\n -------\n If a valid *retroaction* parameter is defined, *setting* will be\n disregarded.\n\n See Also\n --------\n trace_history : Log history of set actions.\n \"\"\"\n stype = 'control' if type is None else type\n _history = self.trace_history(rtype='raw', type=stype)\n if setting is None:\n if _history != []:\n setting = _history[-1]\n\n if retroaction is not None:\n setting = _get_retroactive_trace_history(_history, retroaction)\n\n if not isinstance(setting, (list, tuple)):\n setting = setting,\n\n for entry in setting:\n _elem_name = entry.get('element')\n _entry_type = entry.get('type')\n _field_name = entry.get('field')\n _value = entry.get('value0')\n self.set(_elem_name, _value, _field_name, source=_entry_type)\n\n def update_model_settings(self, model_lattice, **kws):\n \"\"\"Update model lattice settings with external lattice file, prefer\n keyword argument *sdict* or *sjson* if any one is defined.\n\n Parameters\n ----------\n model_lattice : path\n External model lattice file.\n\n Keyword Arguments\n -----------------\n sdict : dict\n Dict of model lattice settings.\n sjson : path\n JSON file of model lattice settings.\n \"\"\"\n sdict = kws.get('sdict', None)\n sjson = kws.get('sjson', None)\n if isinstance(sdict, dict):\n settings = sdict\n elif sjson is not None:\n with open(sjson, 'r') as fp:\n settings = json.load(fp)\n else:\n # read settings from lattice file\n if self.model == 'FLAME':\n settings = build_flame_settings(model_lattice)\n elif self.model == 'IMPACT':\n raise NotImplementedError\n\n # apply settings\n for e_name, e_setting in settings.items():\n if e_name in self.settings:\n for field, value in e_setting.items():\n self._set_model_field(e_name, field, value)\n _LOGGER.debug(\n f\"Update model: {e_name}:{field} to be {value}.\")\n else:\n _LOGGER.debug(\n f'Model settings does not have element: {e_name}.')\n\n def sync_settings(self, data_source=None):\n \"\"\"Synchronize lattice settings between 'model' and 'control'\n environment.\n\n Parameters\n ----------\n data_source : str\n Data source for settings synchronization. If 'model' is defined,\n the settings of 'control' environment will be updated with the\n settings from 'model'; if 'control' is defined, 'model' settings\n will be updated with the data from 'control'. 
*data_source* is\n 'control' by default.\n \"\"\"\n data_source = 'control' if data_source is None else data_source\n\n if data_source == 'control':\n _LOGGER.info(\"Sync settings from 'control' to 'model'.\")\n model_settings = self.settings\n for elem in self._get_element_list('*'):\n if elem.name in model_settings:\n if not self._skip_elements(elem.name):\n for field, value in self.get(elem=elem,\n source='control').items():\n if field in model_settings[elem.name]:\n self._set_model_field(elem, field, value)\n else:\n _LOGGER.debug(\n f'Model settings does not have field: {elem.name}:{field}.')\n else:\n _LOGGER.debug(\n f'Model settings does not have element: {elem.name}.')\n elif data_source == 'model':\n _LOGGER.info(\"Sync settings from 'model' to 'control'.\")\n for e_name, e_setting in self.settings.items():\n elem = self._get_element_list(e_name)\n if elem == []:\n _LOGGER.debug(\n f'Control settings does not have element {e_name}.')\n continue\n for field, value in e_setting.items():\n if not self._skip_elements(elem[0].name):\n if field in elem[0].fields:\n self._set_control_field(elem[0], field, value)\n\n def load_settings(self, settings=None, stype='design'):\n \"\"\"Initializing design settings of elements from *settings*.\n\n Parameters\n ----------\n settings :\n Settings object.\n stype : str\n Setting type, 'design' or 'last', to set `design_settings` or\n `last_settings`, respectively.\n \"\"\"\n settings = self.settings if settings is None and \\\n self.settings is not None else settings\n if settings is None:\n _LOGGER.warning(\"Cannot load settings from None.\")\n return 0\n for k, v in settings.items():\n el = self._find_exact_element(k)\n if el is not None:\n if stype == 'design':\n el.design_settings.update(dict(v))\n elif stype == 'last':\n el.last_settings.update(dict(v))\n _LOGGER.debug(\n f\"Updated {el.name:<20}: {stype}_settings.\")\n\n def _skip_elements(self, name):\n \"\"\"Presently, element should skip: SEXT\n \"\"\"\n SKIP_TYPES = ['SEXT']\n elements = self.model_factory._accel.elements\n for e in elements:\n if e.name == name:\n return e.ETYPE in SKIP_TYPES\n\n def run(self, init_beam_conf=None):\n \"\"\"Run machine with defined model, e.g. 
'FLAME' or 'IMPACT',\n update model settings, but not control settings.\n\n Parameters\n ----------\n init_beam_conf : dict\n Initial beam condition; only FLAME is supported now.\n\n Returns\n -------\n ret : tuple\n tuple of (path, model), path of the model data directory and model\n object.\n \"\"\"\n if self.model == \"IMPACT\":\n lat = self.model_factory.build()\n config = self.model_factory.config\n work_dir = run_impact_lattice(lat, config=config,\n work_dir=self.data_dir)\n if self.latticemodelmap is None:\n self.createLatticeModelMap(os.path.join(work_dir, \"model.map\"))\n return work_dir, None\n elif self.model == \"FLAME\":\n lat = self.model_factory.build()\n latpath = create_tempfile(prefix='model_', suffix='.lat',\n dir=self.data_dir)\n with open(latpath, 'w') as f:\n lat.write(f)\n fm = self._flame_model(latconf=lat.conf())\n if init_beam_conf is not None:\n fm.configure(init_beam_conf)\n _LOGGER.info(\"Applied user-customized initial beam condition.\")\n return latpath, fm\n else:\n raise RuntimeError(\n f\"Lattice: Simulation code '{self.model}' not supported\")\n\n def _flame_model(self, **kws):\n \"\"\"Create a new flame model\n \"\"\"\n latconf = kws.get('latconf', None)\n latfile = kws.get('latfile', None)\n\n if latconf is not None:\n m = Machine(latconf)\n elif latfile is not None:\n m = Machine(open(latfile, 'r'))\n ms = BeamState(machine=m)\n fm = ModelFlame()\n fm.bmstate, fm.machine = ms, m\n obs = fm.get_index_by_type(type='bpm')['bpm']\n r, s = fm.run(monitor=obs)\n self._update_viewer_settings(fm, r)\n return fm\n\n def _update_viewer_settings(self, fm, r):\n \"\"\"Initially, all viewer settings are {}; after ``run()``,\n new key-values will be added in.\n\n field : model environment\n key : flame model\n +---------+----------+-----------+\n | Family | field | key |\n +---------+----------+-----------+\n | BPM | X [m] | x0 [mm] |\n | BPM | Y [m] | y0 [mm] |\n +---------+----------+-----------+\n \"\"\"\n for i, res in r:\n elem_name = fm.get_element(index=i)[0]['properties']['name']\n readings = {field: getattr(res, k)[0] * 1e-3 for field, k in\n zip(['X', 'Y'], ['x0', 'y0'])}\n readings['mstate'] = res\n self._viewer_settings[elem_name] = readings\n\n def __getitem__(self, i):\n if isinstance(i, str):\n return self._find_exact_element(i)\n else:\n return self._elements[i]\n\n def __len__(self):\n return len(self._elements)\n\n def _find_exact_element(self, name):\n \"\"\"Return element object if *name* is fully matched, or return None.\n \"\"\"\n if isinstance(name, BaseElement):\n name = name.name\n return self._name_element_map.get(name, None)\n\n def get_elements(self, name=None, type=None, srange=None, **kws):\n \"\"\"Get element(s) with defined filter rules.\n\n Parameters\n ----------\n name : str or list[str]\n (List of) Element names or Unix shell style patterns.\n type : str or list[str]\n (List of) Element type/group/family, or Unix shell style patterns.\n srange : tuple\n Start and end points (tuple of float) of elements' longitudinal\n position.\n\n Keyword Arguments\n -----------------\n sort_key : str\n Ascendingly sort key of the returned list, ``name`` or ``pos``,\n ``pos`` by default, or other attributes valid for ``CaElement``.\n\n Returns\n -------\n ret : List\n List of elements (``CaElement``), excluding virtual elements.\n\n Note\n ----\n 1. The pattern used here is Unix shell style, slightly different\n from regex, e.g. pattern 'BP' matches 'BPM' in regex, but matches\n nothing in Unix shell style, 'BP*' works;\n 2. 
If more than one positional parameters (*name*, *type*, *srange*)\n are defined, return elements that meet all definitions;\n 3. By default, the returned elements are ascendingly sorted according\n to element position values.\n\n Examples\n --------\n 1. Define *name* with an invalid name:\n\n >>> get_elements(name='NOEXISTS')\n []\n\n 2. Define *name* with name or name patterns:\n\n >>> get_elements(name='FS1_BMS:DCV_D2662')\n [FS1_BMS:DCV_D2662 [VCOR] @ sb=153.794690]\n >>> get_elements(name=['FS1_B?*D266?', 'LS1_B*DCV*'])\n [LS1_BTS:DCV_D1937 [VCOR] @ sb=81.365954,\n LS1_BTS:DCV_D1964 [VCOR] @ sb=84.013954,\n LS1_BTS:DCV_D1997 [VCOR] @ sb=87.348954,\n LS1_BTS:DCV_D2024 [VCOR] @ sb=90.055166,\n LS1_BTS:DCV_D2061 [VCOR] @ sb=93.710487,\n LS1_BTS:DCV_D2114 [VCOR] @ sb=98.985556,\n FS1_BMS:DCV_D2662 [VCOR] @ sb=153.794690,\n FS1_BMS:DCH_D2662 [HCOR] @ sb=153.794690,\n FS1_BMS:BPM_D2664 [BPM] @ sb=153.963690,\n FS1_BMS:QH_D2666 [QUAD] @ sb=154.144690]\n\n 3. Filter BPMs from the above result:\n\n >>> get_elements(name=['FS1_B?*D266?', 'LS1_B*DCV*'], type='BPM')\n [FS1_BMS:BPM_D2664 [BPM] @ sb=153.963690]\n >>> # type='BPM' also could be pattern\n\n 4. Filter hybrid types:\n\n >>> get_elements(name=['FS1_B?*D266?', 'LS1_B*DCV*'],\n >>> type=['BPM', 'QUAD'])\n [FS1_BMS:BPM_D2664 [BPM] @ sb=153.963690,\n FS1_BMS:QH_D2666 [QUAD] @ sb=154.144690]\n\n 5. Get subsection from lattice according to s-position range:\n\n >>> get_elements(srange=(10, 11))\n [LS1_CB01:CAV1_D1229 [CAV] @ sb=10.366596,\n LS1_CB01:BPM_D1231 [BPM] @ sb=10.762191,\n LS1_CB01:SOL1_D1235 [SOL] @ sb=10.894207]\n\n 6. Continue filter with *srange* parameter\n\n >>> get_elements(name=['FS1_B?*D266?', 'LS1_B*DCV*'],\n >>> type=['BPM', 'QUAD'], srange=(154, 155))\n [FS1_BMS:QH_D2666 [QUAD] @ sb=154.144690]\n\n Note\n ----\n Select subsection by ``srange`` parameter is realized by new approach,\n other than ``~phantasy.library.Lattice.getLine()``, e.g. 
the result of\n ``getLine((10,11))`` contains an element before the start range: i.e.\n ``LS1_WA03:PM_D1223:PM @ sb=9.929284``, which is beyond the range.\n\n See Also\n --------\n :func:`get_virtual_elements`\n Get virtual elements.\n :func:`next_elements`\n Get neighborhood of reference element.\n :class:`~phantasy.library.lattice.element.CaElement`\n Element class.\n \"\"\"\n valid_types = self.get_all_types(virtual=False)\n\n # name\n if isinstance(name, str):\n ele_names = self._get_element_list(name)\n elif isinstance(name, (list, tuple)):\n ele_names = flatten(self._get_element_list(n) for n in name)\n else:\n ele_names = []\n\n # group\n if type is not None:\n if isinstance(type, str):\n type = type,\n _type_list = flatten(pattern_filter(valid_types, p) for p in type)\n ele_types = flatten(self._get_element_list(t) for t in _type_list)\n else:\n ele_types = []\n\n # srange\n if isinstance(srange, (list, tuple)):\n pos_start, pos_end = srange[0], srange[1]\n # by default elems is sorted, if not, sort it before using.\n elems = self._get_element_list('*', virtual=False)\n s = [e.sb for e in elems]\n index0 = bisect_index(s, pos_start)\n index1 = bisect_index(s, pos_end)\n ele_srange = elems[index0:index1]\n else:\n ele_srange = []\n\n ret_elems = get_intersection(ele_names, ele_types, ele_srange)\n\n sk = kws.get('sort_key', 'sb')\n if sk == 'pos':\n sk = 'sb'\n return sorted([e for e in ret_elems if not e.virtual],\n key=lambda e: getattr(e, sk))\n\n def next_elements(self, ref_elem, count=1, **kws):\n \"\"\"Get elements w.r.t. the reference element, according to the defined\n confinement.\n\n Parameters\n ----------\n ref_elem :\n ``CaElement`` object, reference element.\n count : int\n Skip element number after *ref_elem*, negative input means before,\n e.g. ``count=1`` will locate the next one of *ref_elem* in the\n investigated lattice, if keyword parameter *type* is given, will\n locate the next one element of the defined type; ``count=-1`` will\n locate in the opposite direction.\n\n Keyword Arguments\n -----------------\n type : str or list(str)\n (List of) Element type/group/family; if *type* is a list of more\n than one element type, the *next* parameter will apply on each\n type.\n range : str\n String of format ``start:stop:step``, to slice the output list,\n e.g. return 50 BPMs after *ref_elem* (``count=50``), but only get\n every two elements, simply by setting ``range=0::2``.\n ref_include : True or False\n Include *ref_elem* in the returned list or not, False by default.\n\n Returns\n -------\n ret : List\n List of next located elements, ascendingly sorted by position; by\n default, only one element (for each *type*) that meets the\n confinement is returned, return more by assigning the *range*\n keyword parameter.\n\n Examples\n --------\n 1. 
Select an element as reference element:\n\n >>> print(all_e)\n [LS1_CA01:CAV1_D1127 [CAV] @ sb=0.207064,\n LS1_CA01:BPM_D1129 [BPM] @ sb=0.511327,\n LS1_CA01:SOL1_D1131 [SOL] @ sb=0.643330,\n LS1_CA01:DCV_D1131 [VCOR] @ sb=0.743330,\n LS1_CA01:DCH_D1131 [HCOR] @ sb=0.743330,\n LS1_CA01:CAV2_D1135 [CAV] @ sb=0.986724,\n LS1_CA01:CAV3_D1143 [CAV] @ sb=1.766370,\n LS1_CA01:BPM_D1144 [BPM] @ sb=2.070634,\n LS1_CA01:SOL2_D1147 [SOL] @ sb=2.202637,\n LS1_CA01:DCV_D1147 [VCOR] @ sb=2.302637,\n LS1_CA01:DCH_D1147 [HCOR] @ sb=2.302637,\n LS1_CA01:CAV4_D1150 [CAV] @ sb=2.546031,\n LS1_WA01:BPM_D1155 [BPM] @ sb=3.109095,\n LS1_CA02:CAV1_D1161 [CAV] @ sb=3.580158,\n LS1_CA02:BPM_D1163 [BPM] @ sb=3.884422,\n LS1_CA02:SOL1_D1165 [SOL] @ sb=4.016425,\n LS1_CA02:DCV_D1165 [VCOR] @ sb=4.116425,\n LS1_CA02:DCH_D1165 [HCOR] @ sb=4.116425,\n LS1_CA02:CAV2_D1169 [CAV] @ sb=4.359819,\n LS1_CA02:CAV3_D1176 [CAV] @ sb=5.139465,\n LS1_CA02:BPM_D1178 [BPM] @ sb=5.443728]\n >>> ref_elem = all_e[5]\n\n 2. Get next element of *ref_elem*:\n\n >>> lat.next_elements(ref_elem)\n [LS1_CA01:CAV3_D1143 [CAV] @ sb=1.766370]\n\n 3. Get last of the next two element:\n\n >>> lat.next_elements(ref_elem, count=2)\n [LS1_CA01:BPM_D1144 [BPM] @ sb=2.070634]\n\n 4. Get all of the next two elements:\n\n >>> lat.next_elements(ref_elem, count=2, range='0::1')\n [LS1_CA01:CAV3_D1143 [CAV] @ sb=1.766370,\n LS1_CA01:BPM_D1144 [BPM] @ sb=2.070634]\n\n 5. Get all of the two elements before *ref_elem*:\n\n >>> lat.next_elements(ref_elem, count=-2, range='0::1')\n [LS1_CA01:DCV_D1131 [VCOR] @ sb=0.743330,\n LS1_CA01:DCH_D1131 [HCOR] @ sb=0.743330]\n\n 6. Get next two BPM elements after *ref_elem*, including itself:\n\n >>> lat.next_elements(ref_elem, count=2, type=['BPM'],\n >>> ref_include=True, range='0::1')\n [LS1_CA01:CAV2_D1135:CAV @ sb=0.986724,\n LS1_CA01:BPM_D1144 [BPM] @ sb=2.070634,\n LS1_WA01:BPM_D1155 [BPM] @ sb=3.109095]\n\n 7. 
Get with hybrid types:\n\n >>> lat.next_elements(ref_elem, count=2, type=['BPM', 'CAV'],\n >>> range='0::1')\n [LS1_CA01:CAV3_D1143 [CAV] @ sb=1.766370,\n LS1_CA01:BPM_D1144 [BPM] @ sb=2.070634,\n LS1_CA01:CAV4_D1150 [CAV] @ sb=2.546031,\n LS1_WA01:BPM_D1155 [BPM] @ sb=3.109095]\n \"\"\"\n ref_include_flag = kws.get('ref_include', False)\n if not isinstance(ref_elem, CaElement):\n _LOGGER.warning(f\"{str(ref_elem)} is not a valid CaElement.\")\n if ref_include_flag:\n return [ref_elem]\n else:\n return []\n\n if count == 0:\n return [ref_elem]\n\n count_is_positive = True if count > 0 else False\n if count_is_positive:\n eslice = kws.get('range', '-1::1')\n else:\n eslice = kws.get('range', '0:1:1')\n slice_tuple = [int(i) if i != '' else None for i in eslice.split(':')]\n eslice = slice(*slice_tuple)\n\n etype = kws.get('type', None)\n\n elem_sorted = sorted([e for e in self._elements if e.virtual == 0],\n key=lambda e: e.sb)\n spos_list = [e.sb for e in elem_sorted]\n ref_idx = spos_list.index(ref_elem.sb)\n if count_is_positive:\n eslice0 = slice(ref_idx + 1, ref_idx + count + 1, 1)\n else:\n eslice0 = slice(ref_idx + count, ref_idx, 1)\n\n if etype is None:\n ret = elem_sorted[eslice0][eslice]\n else:\n if isinstance(etype, str):\n etype = etype,\n if count_is_positive:\n ret = flatten([e for e in elem_sorted[ref_idx + 1:]\n if e.family == t][:count]\n for t in etype)\n else:\n ret = flatten([e for e in elem_sorted[:ref_idx]\n if e.family == t][count:]\n for t in etype)\n if ref_include_flag:\n ret.append(ref_elem)\n return sorted(ret, key=lambda e: e.sb)\n\n def get_all_types(self, virtual=False, **kws):\n \"\"\"Get names of element types (groups/families).\n\n Parameters\n ----------\n virtual : True or False\n Return virtual group or not, ``False`` by default.\n\n Returns\n -------\n ret : List(str)\n List of type names.\n\n See Also\n --------\n lattice_names : Names of all loaded lattices.\n get_all_names : Get all element names from given lattice.\n \"\"\"\n all_groups = self.get_groups('*', empty=True)\n if virtual is True:\n return all_groups\n else:\n return [g for g in all_groups if g != 'HLA:VIRTUAL']\n\n def get_all_names(self, virtual=False, **kws):\n \"\"\"Get names of all elements from given lattice.\n\n Parameters\n ----------\n virtual : True or False\n Return virtual elements or not, ``False`` by default.\n\n Returns\n -------\n ret : List(str)\n List of element names.\n\n See Also\n --------\n lattice_names : Names of all loaded lattices.\n get_all_types : Get all element types from given lattice.\n \"\"\"\n return [e.name for e in self._get_element_list('*', virtual=virtual)]\n\n def has_element(self, name):\n \"\"\"If lattice has element or not.\n\n Parameters\n ----------\n name : str or CaElement\n Name of element or element itself.\n\n Returns\n -------\n ret : True or False\n True if lattice has element, or False.\n \"\"\"\n if self._find_exact_element(name):\n return True\n else:\n return False\n\n def insert(self, elem, i=None, groups=None, **kws):\n \"\"\"Ascendingly insert element regarding s-position, if *i* is defined,\n insert at *i*-th place.\n\n If *groups* is defined, add element into each group.\n\n Parameters\n ----------\n elem : str or CaElement\n CaElement object or element name.\n i : int\n Index to insert, append if None.\n groups : list or str\n Group name(s) the element belongs to.\n\n Keyword Arguments\n -----------------\n trust : True or False\n Trust input *elem* if True, else test first, False by default.\n\n See Also\n --------\n append 
: Add element at the end of lattice.\n :class:`~phantasy.library.lattice.CaElement`\n \"\"\"\n #if not kws.get('trust', False):\n #elem = self._find_exact_element(elem)\n #if elem is None:\n # _LOGGER.warning(\"insert: not a valid element.\")\n # return\n\n if i is not None:\n self._elements.insert(i, elem)\n else:\n if len(self._elements) == 0:\n self._elements.append(elem)\n else:\n _inplace_order_insert(elem, self._elements)\n\n self.update_name_element_map(elem)\n\n if isinstance(groups, str):\n groups = groups,\n if groups is not None:\n for g in groups:\n if g in self._group:\n self._group[g].append(elem)\n else:\n self._group[g] = [elem]\n\n def append(self, elem):\n \"\"\"Append new element to lattice.\n\n Parameters\n ----------\n elem : CaElement\n Element object.\n\n Returns:\n r : bool\n True if appended, otherwise False.\n \"\"\"\n if not self.has_element(elem.name):\n self._elements.append(elem)\n self.update_name_element_map(elem)\n return True\n else:\n return False\n\n def update_name_element_map(self, elem):\n \"\"\"Update internal name element mapping and settings.\n \"\"\"\n ename = elem.name\n self._name_element_map[ename] = elem\n if ename not in self.settings:\n self.settings.update(\n elem.get_current_settings(only_physics=True))\n\n def sort(self, elements=None, **kws):\n \"\"\"Return sorted list of elements with defined key.\n\n Parameters\n ----------\n elements : List\n List of elements, could be returned from\n func:`~phantasy.library.lattice.Lattice.get_elements`, if not\n defined, entire lattice will be sorted.\n\n Keyword Arguments\n -----------------\n sort_key : str\n Ascendingly sort key for element list, ``name`` or ``pos``,\n ``pos`` by default, or other attributes valid for ``CaElement``.\n inplace : True or False\n If *inplace* is True, the original element list will be replaced\n with sorted one, False by default.\n\n Warning\n -------\n Inplace sort only supports the case of ``elements=None``.\n\n Returns\n -------\n ret : List\n Sorted list of elements.\n \"\"\"\n if elements is None:\n elem0 = self._elements\n else:\n elem0 = elements\n if not isinstance(elem0, list):\n _LOGGER.warning(\"'elements' is not a list.\")\n return []\n\n sk = kws.get('sort_key', 'sb')\n if sk == 'pos':\n sk = 'sb'\n sorted_elemlist = sorted([em for em in elem0],\n key=lambda e: getattr(e, sk))\n\n if kws.get('inplace', False):\n if elements is None:\n self._elements = sorted_elemlist\n else:\n _LOGGER.warning(\n \"'inplace' sort is only valid when 'elements=None'.\"\n )\n\n return sorted_elemlist\n\n def size(self):\n \"\"\"Total number of elements.\"\"\"\n return len(self._elements)\n\n def remove(self, name):\n \"\"\"Remove element with *name*, or return None.\n\n Parameters\n ----------\n name : str\n Name of element.\n\n Returns\n -------\n ret :\n Element if success or None.\n \"\"\"\n for i, e in enumerate(self._elements):\n if e.name != name:\n continue\n return self._elements.pop(i)\n return None\n\n def update_groups(self):\n \"\"\"Update group attribute by iterating over all elements.\n\n Returns\n -------\n ret : dict\n Dict of groups, with group names as keys and group members as\n values.\n \"\"\"\n for e in self._elements:\n for g in e.group:\n g_lst = self._group.setdefault(g, [])\n if e not in g_lst:\n g_lst.append(e)\n\n def add_group(self, name):\n \"\"\"Create a new group.\n\n Parameters\n ----------\n name : str\n Group name.\n \"\"\"\n if name not in self._group:\n self._group[name] = []\n else:\n raise ValueError(f\"Group '{name}' exists.\")\n\n 
def remove_group(self, name, **kws):\n        \"\"\"Remove group defined by *name*, by default only remove empty group.\n\n        Parameters\n        ----------\n        name : str\n            Group name.\n\n        Keyword Arguments\n        -----------------\n        empty_only : True or False\n            Remove empty group only if True, True by default.\n        \"\"\"\n        if name not in self._group:\n            raise ValueError(f\"Group '{name}' does not exist.\")\n\n        empty_only = kws.get('empty_only', True)\n        if len(self._group[name]) > 0:\n            if empty_only:\n                raise ValueError(f\"Cannot remove non-empty group '{name}'.\")\n            else:\n                print(\"Warning: Group to remove is not empty.\")\n        self._group.pop(name)\n\n    def add_group_member(self, group, member, **kws):\n        \"\"\"Add a *member* to *group*; if *group* does not exist yet, create\n        it and update *member*'s group attribute only when *new* is True.\n\n        Parameters\n        ----------\n        group : str\n            Group name.\n        member :\n            CaElement.\n\n        Keyword Arguments\n        -----------------\n        new : True or False\n            If *group* is new, add and update when *new* is True, or ignore.\n        \"\"\"\n        new = kws.get('new', True)\n        elem = self._find_exact_element(member)\n        if elem is None:\n            raise ValueError(f\"Invalid element '{member}'.\")\n\n        if group in self._group:\n            if elem in self._group[group]:\n                msg = \"'{0}' is already in group: '{1}'.\".format(\n                        elem.name, group)\n                print(\"Warning: {0}\".format(msg))\n                _LOGGER.warning(msg)\n                return\n            else:\n                elem.group.add(group)\n                _inplace_order_insert(elem, self._group[group])\n                msg = \"Add '{0}' into group '{1}'.\".format(\n                        elem.name, group)\n                _LOGGER.info(msg)\n        elif new:\n            self._group[group] = [elem]\n            elem.group.add(group)\n            msg = \"Add '{0}' into new group '{1}'.\".format(\n                    elem.name, group)\n            _LOGGER.info(msg)\n        else:\n            raise ValueError(\n                \"Group {} does not exist, use 'new=True' to add it.\".format(\n                    group))\n\n    def has_group(self, name):\n        \"\"\"Check if group exists or not.\n\n        Parameters\n        ----------\n        name : str\n            Group name.\n\n        Returns\n        -------\n        ret : True or False\n            True if has group *name* or False.\n        \"\"\"\n        return name in self._group\n\n    def remove_group_member(self, group, member):\n        \"\"\"Remove a *member* from *group*.\n\n        Parameters\n        ----------\n        group : str\n            Group name.\n        member :\n            CaElement.\n        \"\"\"\n        if group not in self._group:\n            raise ValueError(\n                \"Remove error: group '{}' does not exist.\".format(group))\n        if member in self._group[group]:\n            self._group[group].remove(member)\n        else:\n            raise ValueError(\n                \"Remove error: '{}' not in group '{}'.\".format(\n                    member, group))\n\n    def get_groups(self, name=None, element=None, **kws):\n        \"\"\"Get groups filtered by *name*; if *element* is given, a list of\n        groups that *element* belongs to is returned.\n\n        Parameters\n        ----------\n        name : str\n            Group name string, could be Unix shell style pattern.\n        element : str\n            Element name.\n\n        Keyword Arguments\n        -----------------\n        empty : True or False\n            If *empty* is True, also return names of the empty groups, else\n            exclude them, True by default.\n\n        Returns\n        -------\n        ret : list\n            List of group names.\n        \"\"\"\n        if element is None:\n            if kws.get('empty', True):\n                g = [k for k, v in self._group.items() if fnmatch(k, name)]\n            else:\n                g = [k for k, v in self._group.items() if fnmatch(k, name)\n                     and v != []]\n            return g\n        else:\n            return [k for k, v in self._group.items()\n                    if fnmatch(k, name) and element in [el.name for el in v]]\n\n    def get_group_members(self, group, **kws):\n        \"\"\"Return element members by applying the proper filtering operation\n        on each group from *group*; the filtering operation could be defined\n        by keyword argument 
*op*.\n\n        Parameters\n        ----------\n        group : str or list\n            Group name string or list[str], could be Unix shell style pattern.\n\n        Keyword Arguments\n        -----------------\n        op : str\n            Valid options: ``and``, ``or``.\n\n        Returns\n        -------\n        ret : list\n            List of elements.\n        \"\"\"\n        op = kws.get('op', 'and')\n        if isinstance(group, str):\n            group = group,\n        group_list = flatten(\n            [[g for g in self._group if fnmatch(g, gi)] for gi in group]\n        )\n        elem_dict = {g: self._group[g] for g in group_list}\n\n        if op == 'and':\n            return get_intersection(**elem_dict)\n        else:  # op = 'or'\n            return list(set(flatten(elem_dict.values())))\n\n    @property\n    def orm(self):\n        \"\"\"Array: Orbit response matrix.\n\n        See Also\n        --------\n        :func:`~phantasy.library.physics.orm.get_orm`\n            Calculate orbit response matrix.\n        \"\"\"\n        return self._orm\n\n    @orm.setter\n    def orm(self, m):\n        self._orm = m\n\n    def correct_orbit(self, correctors, bpms, **kws):\n        \"\"\"Correct orbit by using ORM.\n\n        Parameters\n        ----------\n        correctors : list\n            List of corrector elements.\n        bpms : list\n            List of BPM elements.\n\n        Keyword Arguments\n        -----------------\n        cor_field : str\n            Field name for correctors, ``'ANG'`` by default.\n        orb_field : tuple[str]\n            Field names for monitors to retrieve orbit data, ``('X', 'Y')`` for\n            *x* and *y* directions by default.\n        xoy : str\n            'x'('y') for monitoring 'x'('y') direction, 'xy' for both (default).\n        damping_factor : float\n            Factor to correct orbit, default is 0.05, which would decrease beam\n            orbit (BPM readings) by 5% for every correction.\n        iteration : int\n            Iteration numbers of correction, default is 1.\n        wait : float\n            Wait time after set value, in *sec*, 1.0 by default.\n        echo : bool\n            Print out message or not, default is True.\n        msg_queue : Queue\n            A queue that keeps log messages.\n        mode : str\n            If running under 'interactive' mode or not.\n        cor_min : float\n            Lower limit for corrector settings.\n        cor_max : float\n            Upper limit for corrector settings.\n\n        Returns\n        -------\n        r : bool\n            True if no errors happen.\n\n        See Also\n        --------\n        get_settings_from_orm : calculate COR settings from ORM for orbit\n            correction.\n        apply_settings_from_orm : apply COR settings from ORM to do orbit\n            correction.\n        \"\"\"\n        itern = kws.get('iteration', 1)\n        cor_field = kws.get('cor_field', 'ANG')\n        damp_fac = kws.get('damping_factor', 0.05)\n        wait = kws.get('wait', 1.0)\n        echo = kws.get('echo', True)\n        q_msg = kws.get('msg_queue', None)\n        mode = kws.get('mode', 'interactive')\n        upper_limit_cor = kws.get('cor_max', 5.0)  # A\n        lower_limit_cor = kws.get('cor_min', -5.0)  # A\n\n        if self._orm is None:\n            _LOGGER.error(\"correct_orbit: ORM is not available, set ORM first.\")\n            raise RuntimeError(\"INVALID ORM data.\")\n        m = self._orm\n        m_inv = inverse_matrix(m)\n\n        n_cor = len(correctors)\n        for i in range(1, itern + 1):\n            bpm_readings = get_orbit(bpms, **kws)\n            delt_cor = np.dot(m_inv, -bpm_readings * damp_fac)\n            for ic, (e, v) in enumerate(zip(correctors, delt_cor)):\n                v0 = getattr(e, cor_field)\n                v_to_set = limit_input(v0 + v,\n                                       lower_limit_cor, upper_limit_cor)\n                setattr(e, cor_field, v_to_set)\n                time.sleep(wait)\n                msg = \"[{0}] #[{1}]/[{2}] Set [{3:02d}] {4} [{5}]: {6:>10.6g}.\".format(\n                        epoch2human(time.time(), fmt=TS_FMT),\n                        i, itern, ic + 1, e.name, cor_field, v_to_set)\n                if q_msg is not None:\n                    q_msg.put(((ic + (i - 1) * n_cor) * 100.0 / n_cor / itern, msg))\n                if echo:\n                    print(msg)\n            if i + 1 > itern:\n                break\n            if mode != 'interactive':\n                next_iter = 'Y'\n            else:\n                next_iter = input(\n                    \"Continue correction 
iteration: {0}/{1}? ([Y]/N)\".format(i + 1,\n                                                                         itern)\n                )\n            if next_iter.upper() in ['Y', '']:\n                continue\n            else:\n                break\n        return True\n\n    def apply_settings_from_orm(self, settings, **kws):\n        \"\"\"Apply corrector settings calculated from ORM to do orbit correction.\n\n        Parameters\n        ----------\n        settings : list\n            List of tuple of (CaElement, field, setting, setting_limited).\n\n        Keyword Arguments\n        -----------------\n        iteration : int\n            Iteration numbers of correction, default is 1.\n        wait : float\n            Wait time after set value, in *sec*, 1.0 by default.\n        echo : bool\n            Print out message or not, default is True.\n        msg_queue : Queue\n            A queue that keeps log messages.\n        mode : str\n            If running under 'interactive' mode or not.\n        cor_min : float\n            Lower limit for corrector settings.\n        cor_max : float\n            Upper limit for corrector settings.\n\n        See Also\n        --------\n        get_settings_from_orm : calculate COR settings from ORM for orbit\n            correction.\n        correct_orbit : calculate and apply COR settings from ORM to do orbit\n            correction.\n        \"\"\"\n        itern = kws.get('iteration', 1)\n        wait = kws.get('wait', 1.0)\n        echo = kws.get('echo', True)\n        q_msg = kws.get('msg_queue', None)\n        mode = kws.get('mode', 'interactive')\n        upper_limit_cor = kws.get('cor_max', 5.0)  # A, unused in this method\n        lower_limit_cor = kws.get('cor_min', -5.0)  # A\n\n        n_cor = len(settings)\n        for i in range(1, itern + 1):\n            for ic, (cor, cor_field, v, v_limited) in enumerate(settings):\n                #v_to_set = limit_input(v, lower_limit_cor, upper_limit_cor)\n                v_to_set = v_limited\n                setattr(cor, cor_field, v_to_set)\n                time.sleep(wait)\n                msg = \"[{0}] #[{1}]/[{2}] Set [{3:02d}] {4} [{5}]: {6:>10.6g}.\".format(\n                        epoch2human(time.time(), fmt=TS_FMT),\n                        i, itern, ic + 1, cor.name, cor_field, v_to_set)\n                if q_msg is not None:\n                    q_msg.put(((ic + (i - 1) * n_cor) * 100.0 / n_cor / itern, msg))\n                if echo:\n                    print(msg)\n            if i + 1 > itern:\n                break\n            if mode != 'interactive':\n                next_iter = 'Y'\n            else:\n                next_iter = input(\n                    \"Continue correction iteration: {0}/{1}? 
([Y]/N)\".format(i + 1,\n                                                                         itern)\n                )\n            if next_iter.upper() in ['Y', '']:\n                continue\n            else:\n                break\n        return True\n\n    def apply_setting(self, setting, **kws):\n        \"\"\"Apply setting for one corrector.\n\n        Parameters\n        ----------\n        setting : tuple\n            Tuple of corrector setting:\n            (CaElement, field, setpoint, setpoint_limited).\n\n        Keyword Arguments\n        -----------------\n        wait : float\n            Wait time after set value, in *sec*, 1.0 by default.\n        msg_queue : Queue\n            A queue that keeps log messages.\n        idx : int\n            Index of selected corrector of all selected ones.\n        ncor : int\n            Total number of selected correctors.\n        ndigits : int\n            Number of effective digits to keep for a float number.\n        \"\"\"\n        wait = kws.get('wait', 1.0)\n        idx = kws.get('idx', 0)  # index of correctors; an int, the log format below uses {1:02d}\n        n = kws.get('ncor', 1)  # total number of correctors\n        q_msg = kws.get('msg_queue', None)\n        n_trun = kws.get('ndigits', 6)\n\n        cor, cor_field, v, v_limited = setting\n        v_truncated = truncate_number(v_limited, n_trun)\n        setattr(cor, cor_field, v_truncated)\n        time.sleep(wait)\n\n        msg = \"[{0}] Set [{1:02d}] {2} [{3}]: {4:>10.6f} (RD: {5:>10.6f})\".format(\n            epoch2human(time.time(), fmt=TS_FMT), idx + 1, cor.name,\n            cor_field, v_truncated, getattr(cor, cor_field))\n        if q_msg is not None:\n            q_msg.put((idx * 100.0 / n, msg))\n        print(msg)\n\n    def get_settings_from_orm(self, correctors, bpms, **kws):\n        \"\"\"Return corrector settings from ORM.\n\n        Parameters\n        ----------\n        correctors : list\n            List of corrector elements.\n        bpms : list\n            List of BPM elements.\n\n        Keyword Arguments\n        -----------------\n        cor_field : str\n            Field name for correctors, ``'ANG'`` by default.\n        orb_field : tuple[str]\n            Field names for monitors to retrieve orbit data, ``('X', 'Y')`` for\n            *x* and *y* directions by default.\n        damping_factor : float\n            Factor to correct orbit, default is 0.05, which would decrease beam\n            orbit (BPM readings) by 5% for every correction.\n        cor_min : float\n            Lower limit for corrector settings.\n        cor_max : float\n            Upper limit for corrector settings.\n        sf : float\n            Scaling factor multiplied on settings, default is 1.0.\n\n        Returns\n        -------\n        r : list\n            List of tuple of (CaElement, field, setting, setting_limited).\n\n        See Also\n        --------\n        apply_settings_from_orm : apply COR settings from ORM to do orbit\n            correction.\n        correct_orbit : calculate and apply COR settings from ORM to do orbit\n            correction.\n        \"\"\"\n        damp_fac = kws.get('damping_factor', 0.05)\n        cor_field = kws.get('cor_field', 'ANG')\n        upper_limit_cor = kws.get('cor_max', 5.0)  # A\n        lower_limit_cor = kws.get('cor_min', -5.0)  # A\n        sf = kws.get('sf', 1.0)\n\n        if self._orm is None:\n            _LOGGER.error(\"get_settings_from_orm: ORM is not available, set ORM first.\")\n            raise RuntimeError(\"INVALID ORM data.\")\n        m = self._orm\n        m_inv = inverse_matrix(m)\n\n        settings = []\n        n_cor = len(correctors)\n        bpm_readings = get_orbit(bpms, **kws)\n        delt_cor = np.dot(m_inv, -bpm_readings * damp_fac)\n        for ic, (e, v) in enumerate(zip(correctors, delt_cor)):\n            v0 = getattr(e, cor_field)\n            v_to_set = (v0 + v) * sf\n            v_to_set_limited = limit_input(v_to_set, lower_limit_cor, upper_limit_cor)\n            settings.append((e, cor_field, v_to_set, v_to_set_limited))\n\n        return settings\n\n    def measure_orm(self):\n        pass\n\n    def refresh_with_layout_info(self):\n        \"\"\"Update every element of current lattice with layout info, which is\n        an accel Element instance.\n        \"\"\"\n        if self.layout is None:\n            _LOGGER.warning(\"Layout does not exist.\")\n            return\n\n        for i in self._elements:\n            i.layout = self.layout[i.name]\n            # pass 
alignment data\n            try:\n                self.layout[i.name].alignment = i.alignment\n            except AttributeError:\n                _LOGGER.warning(f\"{i.name} is not in layout.\")\n\n    def get_layout_length(self):\n        \"\"\"Return the length of current lattice layout, as well as starting\n        and ending positions.\n\n        Returns\n        -------\n        r : tuple\n            Tuple of s_begin, s_end and length.\n        \"\"\"\n        if self.layout is None:\n            _LOGGER.warning(\"Layout does not exist.\")\n            return 0.0, 0.0, 0.0\n\n        le0, le1 = self.layout[0], self.layout[-1]\n        z0 = le0.z - le0.length / 2.0\n        z1 = le1.z + le1.length / 2.0\n        length = z1 - z0\n        return z0, z1, length\n\n    ###############################################################################\n    def createLatticeModelMap(self, mapfile):\n        \"\"\"Create a mapping between lattice layout and model output from a file.\n\n        Parameters\n        ----------\n        mapfile : str\n            File name which has mapping information.\n        \"\"\"\n        mapping = np.loadtxt(mapfile, dtype=str)\n        if self.latticemodelmap is not None:\n            self.latticemodelmap.clear()\n        else:\n            self.latticemodelmap = {}\n        for idx, mp in enumerate(mapping):\n            if mp[0] == \"NONE\":\n                continue\n            if mp[0] not in self.latticemodelmap:\n                self.latticemodelmap[mp[0]] = {}\n            if mp[1] not in self.latticemodelmap[mp[0]]:\n                self.latticemodelmap[mp[0]][mp[1]] = []\n            self.latticemodelmap[mp[0]][mp[1]].append(idx)\n\n    def _get_element_list(self, group, **kwargs):\n        \"\"\"Get a list of element objects.\n\n        Parameters\n        ----------\n        group : str or list\n            Element name, pattern or name list.\n            When it is a str, first search for an element with that exact\n            name, then for a group with name *group*, and finally treat it\n            as a pattern to match the element names.\n            When the input *group* is a list, each string in this list will\n            be treated as exact string instead of pattern.\n\n        Keyword Arguments\n        -----------------\n        virtual : bool\n            Including virtual element or not, False by default.\n\n        Returns\n        -------\n        ret : list\n            List of element objects.\n        \"\"\"\n        virtual = kwargs.get('virtual', False)\n        # do exact element name match first\n        elem = self._find_exact_element(group)\n        if elem is not None:\n            return [elem]\n\n        # do exact group name match\n        if group in self._group:\n            return self._group[group][:]\n\n        if isinstance(group, str):\n            # do pattern match on element name\n            ret, names = [], []\n            for e in self._elements:\n                if e.name in names:\n                    continue\n                if not virtual and e.virtual:\n                    continue\n                if fnmatch(e.name, group):\n                    ret.append(e)\n                    names.append(e.name)\n            return ret\n        elif isinstance(group, list):\n            # exact one-by-one match, None if not found\n            return [self._find_exact_element(e) for e in group]\n        # group is neither a name pattern nor a list of names\n        return []\n\n    def __add__(self, other):\n        # elements\n        # settings\n        lat = Lattice('{}_{}'.format(self.name, other.name))\n        for i in self._elements + other._elements:\n            lat.insert(i)\n        lat.update_groups()\n        return lat\n\n    def __repr__(self):\n        return str(self)\n\n    def _repr_html_(self):\n        curdir = os.path.dirname(__file__)\n        with open(os.path.join(curdir, 'style.css'), 'r') as fp:\n            style = fp.read()\n\n        return \"\"\"{1}{0}\"\"\".format(\n                self.to_html(), style)\n\n    def to_html(self):\n        t = []\n        t.append('''\n  <thead>\n    <tr>\n      <th width=\"10%\"></th>\n      <th>Name</th>\n      <th>Family</th>\n      <th>Position</th>\n      <th>Length</th>\n    </tr>\n  </thead>''')\n        tbody = ['<tbody>']\n        fmt = '''\n      <th>{idx}</th>\n      <td>{name}</td>\n      <td>{family}</td>\n      <td>{pos}</td>\n      <td>{len}</td>'''\n        for i, e in enumerate(self._elements):\n            if e.virtual:\n                continue\n            row = fmt.format(idx=i, name=e.name, family=e.family,\n                             pos=e.sb, len=e.length)\n            
tbody.append('<tr>{}</tr>'.format(row))\n tbody.append('</tbody>')\n t.append('\\n'.join(tbody))\n return '''<html><body>\n <table class=\"mystyle\">{}</table>\n </body></html>'''.format('\\n'.join(t))\n\n def __str__(self):\n return \"Lattice({}) of {}, [{}] elements.\".format(\n self.name, self.mname, self.size())\n# s0 = \"Segment: '{}' | Machine: '{}': {} elements\".format(\n# self.name, self.mname, len(self._elements))\n# s1 = \"Length unit: [m]\"\n# s2 = '{0:<4s} {1:^20s} {2:<6s} {3:>10s} {4:>10s}'.format(\n# 'IDX', 'NAME', 'FAMILY', 'POSITION', 'LENGTH'\n# )\n# s3 = '{0:<4s}-{1:^20s}-{2:<6s}-{3:>10s}-{4:>10s}'.format(\n# '-'*4, '-'*20, '-'*6, '-'*10, '-'*10\n# )\n# ret = [s3, s0, s1, s3, s2, s3]\n#\n# fmt = \"{{idx:<{wi}d}} {{name:<{wn}s}} {{family:<{wf}s}} {{pos:>10.4f}} {{len:>10.4f}}\".format(\n# wi=4, wn=20, wf=6)\n# for i, e in enumerate(self._elements):\n# if e.virtual:\n# continue\n# ret.append(fmt.format(idx=i, name=e.name, family=e.family,\n# pos=e.sb, len=e.length))\n# return '\\n'.join(ret)\n\n def get_settings(self, only_physics=False):\n \"\"\"Return lattice element settings, only include physics settings if\n *only_physics* is True, otherwise return engineering settings as well.\n \"\"\"\n s = deepcopy(self.settings)\n if only_physics:\n return s\n\n # if B=v1, then I=elem.convert(from_field='B', value=v1)\n for ename, phy_conf in self.settings.items():\n elem = self[ename]\n if elem is None:\n print(\"{} does not have physics settings.\".format(ename))\n continue\n phy_flds = elem.get_phy_fields()\n eng_flds = elem.get_eng_fields()\n for phy_fld, eng_fld in zip(phy_flds, eng_flds):\n if phy_fld not in phy_conf:\n continue\n eng_val = elem.convert(from_field=phy_fld, value=phy_conf[phy_fld])\n s[ename].update({eng_fld: eng_val})\n return s\n\n def get_settings_from_element_list(self, elem_list=None, data_source='model',\n only_physics=True):\n \"\"\"Get settings from a list of CaElement, for both engineering and\n physics fields (based on *only_physics* parameter).\n\n Note\n ----\n 1. 
FREQ of CAV should be removed from Settings.\n\n Parameters\n ----------\n elem_list : list\n List of CaElement, if not defined, use the whole lattice.\n data_source : str\n 'model' or 'control', get element settings from MODEL environment if\n *data_source* is 'model', otherwise get live settings from controls\n network.\n only_physics : bool\n If True, only get physics settings, otherwise, get engineering\n settings as well.\n\n Returns\n -------\n s : Settings\n Settings object.\n\n See Also\n --------\n :class:`~phantasy.library.settings.common.Settings`\n \"\"\"\n lat_settings = self.settings\n s = Settings()\n elems = self if elem_list is None else elem_list\n for elem in elems:\n ename = elem.name\n if ename not in lat_settings:\n print(\"{} is not in lattice settings.\".format(ename))\n continue\n m_settings = lat_settings[ename]\n elem_settings = OrderedDict()\n phy_flds = elem.get_phy_fields()\n if only_physics:\n for phy_fld in phy_flds:\n if phy_fld not in m_settings: continue\n if data_source == 'model':\n phy_val = m_settings[phy_fld]\n else:\n phy_val = getattr(elem, phy_fld)\n elem_settings.update([(phy_fld, phy_val)])\n else:\n eng_flds = elem.get_eng_fields()\n for phy_fld, eng_fld in zip(phy_flds, eng_flds):\n if phy_fld not in m_settings: continue\n if data_source == 'model':\n phy_val = m_settings[phy_fld]\n else:\n phy_val = getattr(elem, phy_fld)\n eng_val = elem.convert(from_field=phy_fld, value=phy_val)\n elem_settings.update([(phy_fld, phy_val),\n (eng_fld, eng_val)])\n s.update([(ename, elem_settings)])\n\n return s\n\n def reset_settings(self):\n \"\"\"Reset settings.\n \"\"\"\n self.settings = Settings()\n _LOGGER.info(\"Reset settings.\")\n\n def reset_elements(self):\n \"\"\"Reset elements.\n \"\"\"\n self._elements = []\n self._name_element_map = {}\n _LOGGER.info(\"Reset elements and mapping.\")\n\n\ndef _inplace_order_insert(elem, lat):\n k = 0\n for ielem in lat:\n if ielem.sb < elem.sb:\n k += 1\n continue\n else:\n break\n lat.insert(k, elem)\n\n\ndef _normalize_phase(x):\n while x >= 360.0:\n x -= 360.0\n while x < 0.0:\n x += 360.0\n return x\n\n\ndef _is_viewer(elem):\n \"\"\"Test if elem is viewer, e.g. BPM, PM, ...\n \"\"\"\n return elem.family in ['BPM']\n\n\ndef _get_retroactive_trace_history(trace_history_data, retroaction):\n data = trace_history_data\n if isinstance(retroaction, (float, int)):\n # absolute time\n retval = [_entry for _entry in data\n if _entry['timestamp'] >= retroaction]\n else:\n # relative time\n retro_datetime = parse_dt(retroaction, datetime.now(), epoch=True)\n retval = [_entry for _entry in data\n if _entry['timestamp'] >= retro_datetime]\n return retval\n\n\ndef _get_control_field(elem, field):\n \"\"\"Get field value(s) from element, data source is 'control' environment.\n\n TODO: support get setpoint values\n \"\"\"\n if not isinstance(field, (list, tuple)):\n field = field,\n return {f: getattr(elem, f) for f in field}\n\n\ndef limit_input(x, lower, upper):\n # limit the input *x* within [lower, upper].\n if x <= lower:\n return lower\n if x >= upper:\n return upper\n return x\n" ]
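The orbit-correction methods above (`correct_orbit`, `get_settings_from_orm`) share one linear-algebra step: invert the orbit response matrix, apply it to the damped, negated BPM readings, and limit each corrector increment. A small numerical sketch with made-up values; `np.linalg.pinv` stands in for the `inverse_matrix` helper imported elsewhere in this module:

# Numerical sketch of the ORM correction step; all values are illustrative.
import numpy as np

m = np.array([[2.0, 0.5],
              [0.3, 1.5]])            # orbit response matrix (BPM x COR)
m_inv = np.linalg.pinv(m)             # stands in for inverse_matrix(m)
bpm_readings = np.array([0.8, -0.4])  # measured orbit at the BPMs
damping_factor = 0.05                 # remove 5% of the orbit per pass

delt_cor = np.dot(m_inv, -bpm_readings * damping_factor)
# each increment is clipped into [cor_min, cor_max], like limit_input()
cor_min, cor_max = -5.0, 5.0
print(np.clip(delt_cor, cor_min, cor_max))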
[ [ "numpy.dot", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Rajathbharadwaj/pytorch-lightning
[ "89d0064b33a8a8e60177ccca4fc176333941db4d" ]
[ "pytorch_lightning/trainer/trainer.py" ]
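Before the trainer source itself, a minimal usage sketch of the main entry point it defines; `model` and `train_loader` are hypothetical stand-ins the caller must supply:

# Hypothetical usage of the Trainer defined below; `model` must be a
# LightningModule and `train_loader` a torch DataLoader.
import pytorch_lightning as pl

def run(model: "pl.LightningModule", train_loader) -> None:
    trainer = pl.Trainer(max_epochs=3, enable_progress_bar=True)
    trainer.fit(model, train_dataloaders=train_loader)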
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trainer to automate the training.\"\"\"\nimport inspect\nimport logging\nimport os\nimport traceback\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Union\nfrom weakref import proxy\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.accelerators import Accelerator, IPUAccelerator\nfrom pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase\nfrom pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter\nfrom pytorch_lightning.core.datamodule import LightningDataModule\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.loggers.base import DummyLogger, LoggerCollection\nfrom pytorch_lightning.loggers.tensorboard import TensorBoardLogger\nfrom pytorch_lightning.loops import PredictionLoop, TrainingBatchLoop, TrainingEpochLoop\nfrom pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop\nfrom pytorch_lightning.loops.fit_loop import FitLoop\nfrom pytorch_lightning.plugins import DDPSpawnPlugin, ParallelPlugin, PLUGIN_INPUT, PrecisionPlugin, TrainingTypePlugin\nfrom pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment\nfrom pytorch_lightning.profiler import (\n AdvancedProfiler,\n BaseProfiler,\n PassThroughProfiler,\n PyTorchProfiler,\n SimpleProfiler,\n XLAProfiler,\n)\nfrom pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin\nfrom pytorch_lightning.trainer.configuration_validator import verify_loop_configurations\nfrom pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector\nfrom pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector\nfrom pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector\nfrom pytorch_lightning.trainer.connectors.data_connector import DataConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection\nfrom pytorch_lightning.trainer.connectors.signal_connector import SignalConnector\nfrom pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin\nfrom pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin\nfrom pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus\nfrom pytorch_lightning.tuner.lr_finder import _LRFinder\nfrom pytorch_lightning.tuner.tuning import Tuner\nfrom pytorch_lightning.utilities import (\n _IPU_AVAILABLE,\n _StrategyType,\n _TPU_AVAILABLE,\n device_parser,\n DeviceType,\n GradClipAlgorithmType,\n parsing,\n rank_zero_deprecation,\n rank_zero_info,\n 
rank_zero_warn,\n)\nfrom pytorch_lightning.utilities.argparse import (\n _defaults_from_env_vars,\n add_argparse_args,\n from_argparse_args,\n parse_argparser,\n parse_env_variables,\n)\nfrom pytorch_lightning.utilities.cloud_io import get_filesystem\nfrom pytorch_lightning.utilities.distributed import distributed_available\nfrom pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _fault_tolerant_training\nfrom pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.seed import reset_seed\nfrom pytorch_lightning.utilities.types import (\n _EVALUATE_OUTPUT,\n _PATH,\n _PREDICT_OUTPUT,\n EVAL_DATALOADERS,\n LRSchedulerTypeUnion,\n TRAIN_DATALOADERS,\n)\n\nlog = logging.getLogger(__name__)\n# warnings to ignore in trainer\nwarnings.filterwarnings(\n \"ignore\", message=\"torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead\"\n)\n\n\nclass Trainer(\n TrainerCallbackHookMixin,\n TrainerOptimizersMixin,\n TrainerDataLoadingMixin,\n):\n # Needed because of LightningOptimizer\n _lightning_optimizers = None\n\n @_defaults_from_env_vars\n def __init__(\n self,\n logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,\n checkpoint_callback: Optional[bool] = None,\n enable_checkpointing: bool = True,\n callbacks: Optional[Union[List[Callback], Callback]] = None,\n default_root_dir: Optional[str] = None,\n gradient_clip_val: Optional[Union[int, float]] = None,\n gradient_clip_algorithm: Optional[str] = None,\n process_position: int = 0,\n num_nodes: int = 1,\n num_processes: int = 1,\n devices: Optional[Union[List[int], str, int]] = None,\n gpus: Optional[Union[List[int], str, int]] = None,\n auto_select_gpus: bool = False,\n tpu_cores: Optional[Union[List[int], str, int]] = None,\n ipus: Optional[int] = None,\n log_gpu_memory: Optional[str] = None, # TODO: Remove in 1.7\n progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7\n enable_progress_bar: bool = True,\n overfit_batches: Union[int, float] = 0.0,\n track_grad_norm: Union[int, float, str] = -1,\n check_val_every_n_epoch: int = 1,\n fast_dev_run: Union[int, bool] = False,\n accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,\n max_epochs: Optional[int] = None,\n min_epochs: Optional[int] = None,\n max_steps: int = -1,\n min_steps: Optional[int] = None,\n max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,\n limit_train_batches: Union[int, float] = 1.0,\n limit_val_batches: Union[int, float] = 1.0,\n limit_test_batches: Union[int, float] = 1.0,\n limit_predict_batches: Union[int, float] = 1.0,\n val_check_interval: Union[int, float] = 1.0,\n flush_logs_every_n_steps: Optional[int] = None,\n log_every_n_steps: int = 50,\n accelerator: Optional[Union[str, Accelerator]] = None,\n strategy: Optional[Union[str, TrainingTypePlugin]] = None,\n sync_batchnorm: bool = False,\n precision: Union[int, str] = 32,\n enable_model_summary: bool = True,\n weights_summary: Optional[str] = \"top\",\n weights_save_path: Optional[str] = None,\n num_sanity_val_steps: int = 2,\n resume_from_checkpoint: Optional[Union[Path, str]] = None,\n profiler: Optional[Union[BaseProfiler, str]] = None,\n benchmark: bool = False,\n deterministic: bool = False,\n reload_dataloaders_every_n_epochs: int = 0,\n auto_lr_find: Union[bool, str] = False,\n 
replace_sampler_ddp: bool = True,\n detect_anomaly: bool = False,\n auto_scale_batch_size: Union[str, bool] = False,\n prepare_data_per_node: Optional[bool] = None,\n plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,\n amp_backend: str = \"native\",\n amp_level: Optional[str] = None,\n move_metrics_to_cpu: bool = False,\n multiple_trainloader_mode: str = \"max_size_cycle\",\n stochastic_weight_avg: bool = False,\n terminate_on_nan: Optional[bool] = None,\n ):\n r\"\"\"\n Customize every aspect of training via flags.\n\n Args:\n\n accelerator: Supports passing different accelerator types (\"cpu\", \"gpu\", \"tpu\", \"ipu\", \"auto\")\n as well as custom accelerator instances.\n\n .. deprecated:: v1.5\n Passing training strategies (e.g., 'ddp') to ``accelerator`` has been deprecated in v1.5.0\n and will be removed in v1.7.0. Please use the ``strategy`` argument instead.\n\n accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.\n\n amp_backend: The mixed precision backend to use (\"native\" or \"apex\").\n\n amp_level: The optimization level to use (O1, O2, etc...). By default it will be set to \"O2\"\n if ``amp_backend`` is set to \"apex\".\n\n auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,\n trying to optimize initial learning for faster convergence. trainer.tune() method will\n set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.\n To use a different key set a string instead of True with the key name.\n\n auto_scale_batch_size: If set to True, will `initially` run a batch size\n finder trying to find the largest batch size that fits into memory.\n The result will be stored in self.batch_size in the LightningModule.\n Additionally, can be set to either `power` that estimates the batch size through\n a power search or `binsearch` that estimates the batch size through a binary search.\n\n auto_select_gpus: If enabled and ``gpus`` is an integer, pick available\n gpus automatically. This is especially useful when\n GPUs are configured to be in \"exclusive mode\", such\n that only one process at a time can access them.\n\n benchmark: If true enables cudnn.benchmark.\n\n callbacks: Add a callback or list of callbacks.\n\n checkpoint_callback: If ``True``, enable checkpointing.\n\n .. deprecated:: v1.5\n ``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.\n Please consider using ``enable_checkpointing`` instead.\n\n enable_checkpointing: If ``True``, enable checkpointing.\n It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in\n :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.\n\n check_val_every_n_epoch: Check val every n train epochs.\n\n default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.\n Default: ``os.getcwd()``.\n Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n\n detect_anomaly: Enable anomaly detection for the autograd engine.\n\n deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.\n Default: ``False``.\n\n devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,\n based on the accelerator type.\n\n fast_dev_run: Runs n if set to ``n`` (int) else 1 if set to ``True`` batch(es)\n of train, val and test to find any bugs (ie: a sort of unit test).\n\n flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).\n\n .. 
deprecated:: v1.5\n ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.\n Please configure flushing directly in the logger instead.\n\n gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node\n\n gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables\n gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.\n\n gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm=\"value\"``\n to clip by value, and ``gradient_clip_algorithm=\"norm\"`` to clip by norm. By default it will\n be set to ``\"norm\"``.\n\n limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).\n\n limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).\n\n limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).\n\n limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).\n\n logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses\n the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are\n provided and the `save_dir` property of that logger is not set, local files (checkpoints,\n profiler traces, etc.) are saved in ``default_root_dir`` rather than in the ``log_dir`` of any\n of the individual loggers.\n\n log_gpu_memory: None, 'min_max', 'all'. Might slow performance.\n\n .. deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please use the ``DeviceStatsMonitor`` callback directly instead.\n\n log_every_n_steps: How often to log within steps (defaults to every 50 steps).\n\n prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.\n Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data\n\n .. deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please set ``prepare_data_per_node`` in LightningDataModule or LightningModule directly instead.\n\n process_position: Orders the progress bar when running multiple models on same machine.\n\n .. deprecated:: v1.5\n ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``\n directly to the Trainer's ``callbacks`` argument instead.\n\n progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.\n Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means\n a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).\n\n .. deprecated:: v1.5\n ``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``refresh_rate``\n directly to the Trainer's ``callbacks`` argument instead. 
To disable the progress bar,\n                pass ``enable_progress_bar = False`` to the Trainer.\n\n            enable_progress_bar: Whether to enable the progress bar by default.\n\n            profiler: To profile individual steps during training and assist in identifying bottlenecks.\n\n            overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).\n\n            plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.\n\n            precision: Double precision (64), full precision (32), half precision (16) or bfloat16 precision (bf16).\n                Can be used on CPU, GPU or TPUs.\n\n            max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).\n                If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.\n                To enable infinite training, set ``max_epochs = -1``.\n\n            min_epochs: Force training for at least this many epochs. Disabled by default (None).\n                If both min_epochs and min_steps are not specified, defaults to ``min_epochs = 1``.\n\n            max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``\n                and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set\n                ``max_epochs`` to ``-1``.\n\n            min_steps: Force training for at least this number of steps. Disabled by default (None).\n\n            max_time: Stop training after this amount of time has passed. Disabled by default (None).\n                The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes, seconds), as a\n                :class:`datetime.timedelta`, or a dictionary with keys that will be passed to\n                :class:`datetime.timedelta`.\n\n            num_nodes: Number of GPU nodes for distributed training.\n\n            num_processes: Number of processes for distributed training with ``accelerator=\"cpu\"``.\n\n            num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.\n                Set it to `-1` to run all batches in all validation dataloaders.\n\n            reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.\n\n            replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this\n                will be toggled automatically when DDP is used. By default it will add ``shuffle=True`` for\n                train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,\n                you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.\n\n            resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is\n                no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,\n                training will start from the beginning of the next epoch.\n\n                .. deprecated:: v1.5\n                    ``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v1.7.\n                    Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.\n\n            strategy: Supports different training strategies with aliases\n                as well as custom training type plugins.\n\n            sync_batchnorm: Synchronize batch norm layers between process groups/whole world.\n\n            terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the\n                end of each training batch, if any of the parameters or the loss are NaN or +/-inf.\n\n                .. 
deprecated:: v1.5\n                    Trainer argument ``terminate_on_nan`` was deprecated in v1.5 and will be removed in 1.7.\n                    Please use ``detect_anomaly`` instead.\n\n            detect_anomaly: Enable anomaly detection for the autograd engine.\n\n            tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on [1]\n\n            ipus: How many IPUs to train on.\n\n            track_grad_norm: -1 no tracking. Otherwise tracks that p-norm. May be set to 'inf' infinity-norm. If using\n                Automatic Mixed Precision (AMP), the gradients will be unscaled before logging them.\n\n            val_check_interval: How often to check the validation set. Use float to check within a training epoch,\n                use int to check every n steps (batches).\n\n            enable_model_summary: Whether to enable model summarization by default.\n\n            weights_summary: Prints a summary of the weights when training begins.\n\n                .. deprecated:: v1.5\n                    ``weights_summary`` has been deprecated in v1.5 and will be removed in v1.7.\n                    To disable the summary, pass ``enable_model_summary = False`` to the Trainer.\n                    To customize the summary, pass :class:`~pytorch_lightning.callbacks.model_summary.ModelSummary`\n                    directly to the Trainer's ``callbacks`` argument.\n\n            weights_save_path: Where to save weights if specified. Will override default_root_dir\n                for checkpoints only. Use this if for whatever reason you need the checkpoints\n                stored in a different place than the logs written in `default_root_dir`.\n                Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n                Defaults to `default_root_dir`.\n\n            move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.\n                This can save some gpu memory, but can make training slower. Use with caution.\n\n            multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.\n                In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,\n                and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets\n                reload when reaching the minimum length of datasets.\n\n            stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)\n                <https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_.\n\n                .. 
deprecated:: v1.5\n ``stochastic_weight_avg`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`\n directly to the Trainer's ``callbacks`` argument instead.\n \"\"\"\n super().__init__()\n Trainer._log_api_event(\"init\")\n self.state = TrainerState()\n\n gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n # init connectors\n self._data_connector = DataConnector(self, multiple_trainloader_mode)\n\n self._accelerator_connector = AcceleratorConnector(\n num_processes,\n devices,\n tpu_cores,\n ipus,\n accelerator,\n strategy,\n gpus,\n gpu_ids,\n num_nodes,\n sync_batchnorm,\n benchmark,\n replace_sampler_ddp,\n deterministic,\n precision,\n amp_backend,\n amp_level,\n plugins,\n )\n self.logger_connector = LoggerConnector(self, log_gpu_memory)\n self._callback_connector = CallbackConnector(self)\n self.checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)\n self.signal_connector = SignalConnector(self)\n self.tuner = Tuner(self)\n\n fit_loop = FitLoop(\n min_epochs=(1 if (min_epochs is None and min_steps is None and max_time is None) else min_epochs),\n max_epochs=(\n max_epochs if max_epochs is not None else (1000 if (max_steps == -1 and max_time is None) else -1)\n ),\n )\n training_epoch_loop = TrainingEpochLoop(min_steps, max_steps)\n training_batch_loop = TrainingBatchLoop()\n training_validation_loop = EvaluationLoop()\n training_epoch_loop.connect(batch_loop=training_batch_loop, val_loop=training_validation_loop)\n fit_loop.connect(epoch_loop=training_epoch_loop)\n\n # default .fit() loop\n self.fit_loop = fit_loop\n\n # default .validate() loop\n self.validate_loop = EvaluationLoop()\n\n # default .test() loop\n self.test_loop = EvaluationLoop()\n\n # default .predict() loop\n self.predict_loop = PredictionLoop()\n\n # Needed because of LightningOptimizer\n self._lightning_optimizers = None\n\n # .validate() and .test() set this when they load a checkpoint\n self.validated_ckpt_path: Optional[str] = None\n self.tested_ckpt_path: Optional[str] = None\n self.predicted_ckpt_path: Optional[str] = None\n\n # todo: remove in v1.7\n self._weights_summary: Optional[str] = None\n\n # init callbacks\n # Declare attributes to be set in _callback_connector on_trainer_init\n self._callback_connector.on_trainer_init(\n callbacks,\n checkpoint_callback,\n enable_checkpointing,\n enable_progress_bar,\n progress_bar_refresh_rate,\n process_position,\n default_root_dir,\n weights_save_path,\n enable_model_summary,\n weights_summary,\n stochastic_weight_avg,\n max_time,\n accumulate_grad_batches,\n )\n\n # hook\n self.on_init_start()\n\n # init optimizer + lr scheduler related flags\n self.lr_schedulers = []\n self.optimizers = []\n self.optimizer_frequencies = []\n\n # init data flags\n self._data_connector.on_trainer_init(\n check_val_every_n_epoch,\n reload_dataloaders_every_n_epochs,\n prepare_data_per_node,\n )\n\n if terminate_on_nan is not None:\n rank_zero_deprecation(\n \"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7.\"\n \" Please use `Trainer(detect_anomaly=True)` instead.\"\n )\n if not isinstance(terminate_on_nan, bool):\n raise TypeError(f\"`terminate_on_nan` should be a bool, got {terminate_on_nan}.\")\n\n # gradient clipping\n if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):\n raise TypeError(f\"`gradient_clip_val` should be an int or a float. 
Got {gradient_clip_val}.\")\n\n if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(\n gradient_clip_algorithm.lower()\n ):\n raise MisconfigurationException(\n f\"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. \"\n f\"Allowed algorithms: {GradClipAlgorithmType.supported_types()}.\"\n )\n\n # gradient norm tracking\n if track_grad_norm != -1 and not (\n (isinstance(track_grad_norm, (int, float)) or track_grad_norm == \"inf\") and float(track_grad_norm) > 0\n ):\n raise MisconfigurationException(\n f\"`track_grad_norm` must be a positive number or 'inf' (infinity norm). Got {track_grad_norm}.\"\n )\n\n self._terminate_on_nan = terminate_on_nan\n self.gradient_clip_val = gradient_clip_val\n self.gradient_clip_algorithm = (\n GradClipAlgorithmType(gradient_clip_algorithm.lower())\n if gradient_clip_algorithm is not None\n else gradient_clip_algorithm\n )\n self.track_grad_norm: float = float(track_grad_norm)\n\n self._detect_anomaly: bool = detect_anomaly\n self._setup_on_init(num_sanity_val_steps)\n\n # configure tuner\n self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)\n\n # configure profiler\n self.__init_profiler(profiler)\n\n # init logger flags\n self.logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)\n\n # init debugging flags\n self._init_debugging_flags(\n limit_train_batches,\n limit_val_batches,\n limit_test_batches,\n limit_predict_batches,\n val_check_interval,\n overfit_batches,\n fast_dev_run,\n )\n\n # Callback system\n self.on_init_end()\n\n def _init_debugging_flags(\n self,\n limit_train_batches,\n limit_val_batches,\n limit_test_batches,\n limit_predict_batches,\n val_check_interval,\n overfit_batches,\n fast_dev_run,\n ):\n if isinstance(fast_dev_run, int) and (fast_dev_run < 0):\n raise MisconfigurationException(\n f\"fast_dev_run={fast_dev_run} is not a valid configuration. 
It should be >= 0.\"\n )\n\n self.fast_dev_run = fast_dev_run\n\n # set fast_dev_run=True when it is 1, used while logging\n if fast_dev_run == 1:\n self.fast_dev_run = True\n\n if fast_dev_run:\n num_batches = int(fast_dev_run)\n limit_train_batches = num_batches\n limit_val_batches = num_batches\n limit_test_batches = num_batches\n limit_predict_batches = num_batches\n self.fit_loop.max_steps = num_batches\n self.num_sanity_val_steps = 0\n self.fit_loop.max_epochs = 1\n val_check_interval = 1.0\n self.check_val_every_n_epoch = 1\n self.logger = DummyLogger() if self.logger is not None else None\n\n rank_zero_info(\n \"Running in fast_dev_run mode: will run a full train,\"\n f\" val, test and prediction loop using {num_batches} batch(es).\"\n )\n\n self.limit_train_batches = _determine_batch_limits(limit_train_batches, \"limit_train_batches\")\n self.limit_val_batches = _determine_batch_limits(limit_val_batches, \"limit_val_batches\")\n self.limit_test_batches = _determine_batch_limits(limit_test_batches, \"limit_test_batches\")\n self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, \"limit_predict_batches\")\n self.val_check_interval = _determine_batch_limits(val_check_interval, \"val_check_interval\")\n self.overfit_batches = _determine_batch_limits(overfit_batches, \"overfit_batches\")\n self._determine_data_use_amount(self.overfit_batches)\n\n def _determine_data_use_amount(self, overfit_batches: float) -> None:\n \"\"\"Use less data for debugging purposes.\"\"\"\n if overfit_batches > 0:\n self.limit_train_batches = overfit_batches\n self.limit_val_batches = overfit_batches\n self.limit_test_batches = overfit_batches\n\n def _setup_on_init(self, num_sanity_val_steps: int) -> None:\n self._log_device_info()\n\n self.should_stop = False\n self.state = TrainerState()\n self.num_training_batches = float(\"inf\")\n self.train_dataloader = None\n\n if num_sanity_val_steps == -1:\n self.num_sanity_val_steps = float(\"inf\")\n else:\n self.num_sanity_val_steps = num_sanity_val_steps\n\n self.num_sanity_val_batches = []\n self.num_test_batches = []\n self.num_val_batches = []\n self.test_dataloaders = None\n self.val_dataloaders = None\n\n # when true, print evaluation results in .validate() and .test()\n self.verbose_evaluate = True\n\n self.num_predict_batches = []\n\n def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:\n r\"\"\"\n Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)\n as all errors should funnel through them\n\n Args:\n trainer_fn: one of (fit, validate, test, predict)\n *args: positional arguments to be passed to the `trainer_fn`\n **kwargs: keyword arguments to be passed to `trainer_fn`\n \"\"\"\n try:\n return trainer_fn(*args, **kwargs)\n # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7\n except KeyboardInterrupt as exception:\n rank_zero_warn(\"Detected KeyboardInterrupt, attempting graceful shutdown...\")\n # user could press Ctrl+c many times... 
only shut down once\n            if not self.interrupted:\n                self.state.status = TrainerStatus.INTERRUPTED\n                self.on_keyboard_interrupt()\n                self.on_exception(exception)\n        except BaseException as exception:\n            self.state.status = TrainerStatus.INTERRUPTED\n            if distributed_available() and self.world_size > 1:\n                # try syncing remaining processes, kill otherwise\n                self.training_type_plugin.reconciliate_processes(traceback.format_exc())\n            self._on_exception()\n            # reset bookkeeping\n            self.state.stage = None\n            self.on_exception(exception)\n            # shutdown workers\n            self._data_connector.teardown()\n            raise\n\n    def fit(\n        self,\n        model: \"pl.LightningModule\",\n        train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n        val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n        datamodule: Optional[LightningDataModule] = None,\n        ckpt_path: Optional[str] = None,\n    ) -> None:\n        r\"\"\"\n        Runs the full optimization routine.\n\n        Args:\n            model: Model to fit.\n\n            train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n                :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n                In the case of multiple dataloaders, please see this :ref:`page <multiple-training-dataloaders>`.\n\n            val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.\n\n            ckpt_path: Path/URL of the checkpoint from which training is resumed. If there is\n                no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,\n                training will start from the beginning of the next epoch.\n\n            datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n        \"\"\"\n        self._call_and_handle_interrupt(\n            self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path\n        )\n\n    def _fit_impl(\n        self,\n        model: \"pl.LightningModule\",\n        train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n        val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n        datamodule: Optional[LightningDataModule] = None,\n        ckpt_path: Optional[str] = None,\n    ) -> None:\n        Trainer._log_api_event(\"fit\")\n\n        self.state.fn = TrainerFn.FITTING\n        self.state.status = TrainerStatus.RUNNING\n        self.training = True\n\n        # if a datamodule comes in as the second arg, then fix it for the user\n        if isinstance(train_dataloaders, LightningDataModule):\n            datamodule = train_dataloaders\n            train_dataloaders = None\n        # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n        if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n            raise MisconfigurationException(\n                \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`\"\n            )\n\n        # links data to the trainer\n        self._data_connector.attach_data(\n            model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n        )\n\n        # TODO: ckpt_path only in v1.7\n        ckpt_path = ckpt_path or self.resume_from_checkpoint\n        self._run(model, ckpt_path=ckpt_path)\n\n        assert self.state.stopped\n        self.training = False\n\n    def validate(\n        self,\n        model: Optional[\"pl.LightningModule\"] = None,\n        dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n        ckpt_path: Optional[str] = None,\n        verbose: bool = True,\n        datamodule: Optional[LightningDataModule] = None,\n    ) -> _EVALUATE_OUTPUT:\n        r\"\"\"\n        Perform one evaluation epoch over the validation set.\n\n        Args:\n            model: The model to validate.\n\n            dataloaders: A 
:class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the validation results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end`, etc.\n The length of the list corresponds to the number of validation dataloaders used.\n \"\"\"\n return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, datamodule)\n\n def _validate_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"validate\")\n self.verbose_evaluate = verbose\n\n self.state.fn = TrainerFn.VALIDATING\n self.state.status = TrainerStatus.RUNNING\n self.validating = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply val_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)\n\n self.validated_ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n # run validate\n results = self._run(model, ckpt_path=self.validated_ckpt_path)\n\n assert self.state.stopped\n self.validating = False\n\n return results\n\n def test(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n r\"\"\"\n Perform one evaluation epoch over the test set.\n It's separated from fit to make sure you never run on your test set until you want to.\n\n Args:\n model: The model to test.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to test.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the 
previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the test results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.test_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`, etc.\n The length of the list corresponds to the number of test dataloaders used.\n \"\"\"\n return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)\n\n def _test_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"test\")\n self.verbose_evaluate = verbose\n\n self.state.fn = TrainerFn.TESTING\n self.state.status = TrainerStatus.RUNNING\n self.testing = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply test_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.test(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)\n\n self.tested_ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n # run test\n results = self._run(model, ckpt_path=self.tested_ckpt_path)\n\n assert self.state.stopped\n self.testing = False\n\n return results\n\n def predict(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n r\"\"\"\n Run inference on your data.\n This will call the model forward function to compute predictions. Useful to perform distributed\n and batched predictions. 
Logging is disabled in the predict hooks.\n\n Args:\n model: The model to predict with.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples.\n\n datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders.\n\n return_predictions: Whether to return predictions.\n ``True`` by default except when an accelerator that spawns processes is used (not supported).\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to predict.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n Returns:\n Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.\n \"\"\"\n return self._call_and_handle_interrupt(\n self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path\n )\n\n def _predict_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"predict\")\n\n self.state.fn = TrainerFn.PREDICTING\n self.state.status = TrainerStatus.RUNNING\n self.predicting = True\n\n self.predict_loop.return_predictions = return_predictions\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)\n\n self.predicted_ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n results = self._run(model, ckpt_path=self.predicted_ckpt_path)\n\n assert self.state.stopped\n self.predicting = False\n\n return results\n\n def tune(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,\n lr_find_kwargs: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Optional[Union[int, _LRFinder]]]:\n r\"\"\"\n Runs routines to tune hyperparameters before training.\n\n Args:\n model: Model to tune.\n\n train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n In the case of multiple dataloaders, please see this :ref:`page <multiple-training-dataloaders>`.\n\n val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation 
samples.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n scale_batch_size_kwargs: Arguments for :func:`~pytorch_lightning.tuner.batch_size_scaling.scale_batch_size`\n\n lr_find_kwargs: Arguments for :func:`~pytorch_lightning.tuner.lr_finder.lr_find`\n \"\"\"\n Trainer._log_api_event(\"tune\")\n\n self.state.fn = TrainerFn.TUNING\n self.state.status = TrainerStatus.RUNNING\n self.tuning = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(train_dataloaders, LightningDataModule):\n datamodule = train_dataloaders\n train_dataloaders = None\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n raise MisconfigurationException(\n \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(\n model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n )\n\n result = self.tuner._tune(model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs)\n\n assert self.state.stopped\n self.tuning = False\n\n return result\n\n def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:\n # restore modules after setup\n self.checkpoint_connector.resume_start(checkpoint_path)\n self.checkpoint_connector.restore_model()\n self.checkpoint_connector.restore_datamodule()\n if self.state.fn == TrainerFn.FITTING:\n # restore callback states\n self.checkpoint_connector.restore_callbacks()\n\n def _run(\n self, model: \"pl.LightningModule\", ckpt_path: Optional[str] = None\n ) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n verify_loop_configurations(self, model)\n\n # attach model log function to callback\n self._callback_connector.attach_model_logging_functions(model)\n\n # attach model to the training type plugin\n self.training_type_plugin.connect(model)\n\n # hook\n self._data_connector.prepare_data()\n self._callback_connector._attach_model_callbacks()\n\n # ----------------------------\n # SET UP TRAINING\n # ----------------------------\n self.call_hook(\"on_before_accelerator_backend_setup\")\n self.accelerator.setup_environment()\n self._call_setup_hook() # allow user to setup lightning_module in accelerator environment\n\n # check if we should delay restoring checkpoint till later\n if not self.training_type_plugin.restore_checkpoint_after_pre_dispatch:\n self._restore_modules_and_callbacks(ckpt_path)\n\n self._call_configure_sharded_model() # allow user to setup in model sharded environment\n self.accelerator.setup(self)\n\n # ----------------------------\n # INSPECT THE CORE LOOPS\n # ----------------------------\n fr\"\"\"\n Lightning internal flow looks like this:\n {Trainer.fit} or {Trainer.test} or {Trainer.predict} ||\n | ||\n create accelerator ||\n | ||\n {self._dispatch} ||\n | || LIGHTNING\n {self.training_type_plugin.start_training} ||\n or {self.training_type_plugin.start_evaluating} ||\n or {self.training_type_plugin.start_predicting} || FLOW\n | ||\n {self.run_stage} ||\n | || DIRECTION\n {self._run_train} ||\n or {self._run_evaluate} ||\n or {self._run_predict} ||\n | ||\n results \\/\n This is used to guide readers to the core loops: train, test, predict.\n 
{self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)\n        Search for `start_training` or `start_evaluating` or `start_predicting` in\n        `pytorch_lightning/plugins/training_type_plugin` to find accelerator dispatch functions.\n        \"\"\"\n\n        # ----------------------------\n        # TRAIN\n        # ----------------------------\n\n        # reset logger connector\n        self.logger_connector.reset_results()\n        self.logger_connector.reset_metrics()\n\n        # hook\n        if self.state.fn == TrainerFn.FITTING:\n            self.call_hook(\"on_fit_start\")\n\n        # plugin will set up fitting (e.g. ddp will launch child processes)\n        self._pre_dispatch()\n\n        if self.training_type_plugin.restore_checkpoint_after_pre_dispatch:\n            self._restore_modules_and_callbacks(ckpt_path)\n\n        # restore optimizers, etc.\n        self.checkpoint_connector.restore_training_state()\n\n        self.checkpoint_connector.resume_end()\n\n        # dispatch `start_training` or `start_evaluating` or `start_predicting`\n        self._dispatch()\n\n        # plugin will finalize fitting (e.g. ddp_spawn will load trained model)\n        self._post_dispatch()\n\n        # ----------------------------\n        # POST-Training CLEAN UP\n        # ----------------------------\n        # hook\n        if self.state.fn == TrainerFn.FITTING:\n            self.call_hook(\"on_fit_end\")\n\n        # teardown if necessary (similar calls for spawn plugins are excluded as they have\n        # been included at the end of `new_process` functions)\n        if not isinstance(self.training_type_plugin, DDPSpawnPlugin):\n            self._call_teardown_hook()\n\n        if self.state.status != TrainerStatus.INTERRUPTED:\n            self.state.status = TrainerStatus.FINISHED\n            self.state.stage = None\n\n        return self.training_type_plugin.results\n\n    def _pre_dispatch(self):\n        self.accelerator.pre_dispatch(self)\n        self._log_hyperparams()\n\n    def _log_hyperparams(self) -> None:\n        # log hyper-parameters\n        hparams_initial = None\n\n        if self.logger is not None:\n            # save exp to get started (this is where the first experiment logs are written)\n            datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False\n\n            if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:\n                datamodule_hparams = self.datamodule.hparams_initial\n                lightning_hparams = self.lightning_module.hparams_initial\n                inconsistent_keys = []\n                for key in lightning_hparams.keys() & datamodule_hparams.keys():\n                    lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]\n                    if type(lm_val) != type(dm_val):\n                        inconsistent_keys.append(key)\n                    elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):\n                        inconsistent_keys.append(key)\n                    elif lm_val != dm_val:\n                        inconsistent_keys.append(key)\n                if inconsistent_keys:\n                    raise MisconfigurationException(\n                        f\"Error while merging hparams: the keys {inconsistent_keys} are present \"\n                        \"in both the LightningModule's and LightningDataModule's hparams \"\n                        \"but have different values.\"\n                    )\n                hparams_initial = {**lightning_hparams, **datamodule_hparams}\n            elif self.lightning_module._log_hyperparams:\n                hparams_initial = self.lightning_module.hparams_initial\n            elif datamodule_log_hyperparams:\n                hparams_initial = self.datamodule.hparams_initial\n\n            if hparams_initial is not None:\n                self.logger.log_hyperparams(hparams_initial)\n                self.logger.log_graph(self.lightning_module)\n                self.logger.save()\n\n    def _post_dispatch(self):\n        self.accelerator.post_dispatch(self)\n        # these `teardown` calls are here instead of in `_call_teardown_hook` since they are internal teardowns\n        # which need to happen before.\n        self.accelerator.teardown()\n        
self._data_connector.teardown()\n        self._active_loop.teardown()\n        self.logger_connector.teardown()\n\n    def _dispatch(self):\n        if self.evaluating:\n            self.training_type_plugin.start_evaluating(self)\n        elif self.predicting:\n            self.training_type_plugin.start_predicting(self)\n        else:\n            self.training_type_plugin.start_training(self)\n\n    def run_stage(self):\n        self.accelerator.dispatch(self)\n        self.__setup_profiler()\n\n        if self.evaluating:\n            return self._run_evaluate()\n        if self.predicting:\n            return self._run_predict()\n        return self._run_train()\n\n    def _pre_training_routine(self):\n        # wait for all to join if on distributed\n        self.training_type_plugin.barrier(\"setup_training\")\n\n        # register signals\n        self.signal_connector.register_signal_handlers()\n\n        # --------------------------\n        # Pre-train\n        # --------------------------\n        self.call_hook(\"on_pretrain_routine_start\")\n\n        self.call_hook(\"on_pretrain_routine_end\")\n\n    def _run_train(self) -> None:\n        self._pre_training_routine()\n\n        if not self.is_global_zero and self.progress_bar_callback is not None:\n            self.progress_bar_callback.disable()\n\n        self._run_sanity_check(self.lightning_module)\n\n        # enable train mode\n        self.model.train()\n        torch.set_grad_enabled(True)\n\n        self.fit_loop.trainer = self\n        with torch.autograd.set_detect_anomaly(self._detect_anomaly):\n            self.fit_loop.run()\n\n    def _run_evaluate(self) -> _EVALUATE_OUTPUT:\n        if not self.is_global_zero and self.progress_bar_callback is not None:\n            self.progress_bar_callback.disable()\n\n        assert self.evaluating\n\n        # reload dataloaders\n        self._evaluation_loop._reload_evaluation_dataloaders()\n\n        # reset trainer on this loop and all child loops in case user connected a custom loop\n        self._evaluation_loop.trainer = self\n\n        with self.profiler.profile(f\"run_{self.state.stage}_evaluation\"), torch.no_grad():\n            eval_loop_results = self._evaluation_loop.run()\n\n        # remove the tensors from the eval results\n        for result in eval_loop_results:\n            if isinstance(result, dict):\n                for k, v in result.items():\n                    if isinstance(v, torch.Tensor):\n                        result[k] = v.cpu().item()\n\n        return eval_loop_results\n\n    def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:\n        self.reset_predict_dataloader(self.lightning_module)\n        # reset trainer on this loop and all child loops in case user connected a custom loop\n        self.predict_loop.trainer = self\n        with torch.no_grad():\n            return self.predict_loop.run()\n\n    def _run_sanity_check(self, ref_model):\n        using_val_step = self._data_connector._val_dataloader_source.is_defined() and is_overridden(\n            \"validation_step\", ref_model\n        )\n        should_sanity_check = using_val_step and self.num_sanity_val_steps > 0 and self.limit_val_batches > 0\n\n        # run tiny validation (if validation defined)\n        # to make sure program won't crash during val\n        if should_sanity_check:\n            stage = self.state.stage\n            self.sanity_checking = True\n\n            # reset logger connector\n            self.logger_connector.reset_results()\n            self.logger_connector.reset_metrics()\n\n            self.call_hook(\"on_sanity_check_start\")\n\n            # reload dataloaders\n            self._evaluation_loop._reload_evaluation_dataloaders()\n\n            # run eval step\n            with torch.no_grad():\n                self._evaluation_loop.run()\n\n            self.call_hook(\"on_sanity_check_end\")\n\n            # reset logger connector\n            self.logger_connector.reset_results()\n            self.logger_connector.reset_metrics()\n\n            # reset the seed to what it was before sanity check\n            # prevents the sanity check from affecting random sampling in training\n            reset_seed()\n\n            # restore the previous stage when the sanity check is finished\n            
self.state.stage = stage\n\n    def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:\n        if model_provided and ckpt_path is None:\n            # use passed model to function without loading weights\n            return\n\n        fn = self.state.fn.value\n\n        if model_connected and ckpt_path is None:\n            rank_zero_warn(\n                f\"`.{fn}(ckpt_path=None)` was called without a model.\"\n                \" The best model of the previous `fit` call will be used.\"\n                f\" You can pass `{fn}(ckpt_path='best')` to use the best model\"\n                \" checkpoint and avoid this warning or\"\n                \" `ckpt_path=trainer.model_checkpoint.last_model_path` to use the last model.\"\n            )\n            ckpt_path = \"best\"\n\n        if ckpt_path == \"best\":\n            # if user requests the best checkpoint but we don't have it, error\n            if not self.checkpoint_callback:\n                raise MisconfigurationException(\n                    f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured.'\n                )\n            if not self.checkpoint_callback.best_model_path:\n                if self.fast_dev_run:\n                    raise MisconfigurationException(\n                        f\"You cannot execute `.{fn}()` with `fast_dev_run=True` unless you do\"\n                        f\" `.{fn}(ckpt_path=PATH)` as no checkpoint path was generated during fitting.\"\n                    )\n                raise MisconfigurationException(\n                    f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured to save the best model.'\n                )\n            # load best weights\n            ckpt_path = self.checkpoint_callback.best_model_path\n\n        if not ckpt_path:\n            raise MisconfigurationException(\n                f\"`.{fn}()` found no path for the best weights: {ckpt_path!r}. Please\"\n                f\" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`\"\n            )\n        return ckpt_path\n\n    def _call_setup_hook(self) -> None:\n        fn = self.state.fn._setup_fn\n\n        self.training_type_plugin.barrier(\"pre_setup\")\n\n        if self.datamodule is not None:\n            self.datamodule.setup(stage=fn)\n        self.call_hook(\"setup\", stage=fn)\n\n        self.training_type_plugin.barrier(\"post_setup\")\n\n    def _call_configure_sharded_model(self) -> None:\n        with self.accelerator.model_sharded_context():\n            self._handle_meta_model()\n            self.call_hook(\"configure_sharded_model\")\n            self.call_hook(\"on_configure_sharded_model\")\n\n    def _handle_meta_model(self) -> None:\n        if not is_on_meta_device(self.lightning_module):\n            return\n\n        if isinstance(self.training_type_plugin, DDPSpawnPlugin):\n            raise MisconfigurationException(\"LightningModule on meta device isn't supported with spawn.\")\n\n        materialize_module(self.lightning_module)\n        # the trainer reference is lost during materialization\n        self.lightning_module.trainer = proxy(self)\n\n    def _call_teardown_hook(self) -> None:\n        fn = self.state.fn._setup_fn\n\n        if self.datamodule is not None:\n            self.datamodule.teardown(stage=fn)\n\n        self.call_hook(\"teardown\", stage=fn)\n\n        self.lightning_module._current_fx_name = None\n        self.lightning_module._current_dataloader_idx = None\n        # these could have become stale if metrics are defined in `setup`\n        self.lightning_module._metric_attributes = None\n\n        # todo: TPU 8 cores hangs in flush with TensorBoard. 
Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu kill loggers.\n if self.logger is not None:\n self.logger.finalize(\"success\")\n\n # summarize profile results\n self.profiler.describe()\n\n def call_hook(\n self, hook_name: str, *args: Any, pl_module: Optional[\"pl.LightningModule\"] = None, **kwargs: Any\n ) -> Any:\n pl_module = self.lightning_module or pl_module\n if pl_module:\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n # always profile hooks\n with self.profiler.profile(hook_name):\n\n # first call trainer hook\n callback_fx = getattr(self, hook_name, None)\n if callable(callback_fx):\n callback_fx(*args, **kwargs)\n\n # next call hook in lightningModule\n output = None\n model_fx = getattr(pl_module, hook_name, None)\n if callable(model_fx):\n output = model_fx(*args, **kwargs)\n\n # *Bad code alert*\n # The `Accelerator` mostly calls the `TrainingTypePlugin` but some of those calls are deprecated.\n # The following logic selectively chooses which hooks are called on each object.\n # In the case of `setup` and `teardown`, the hooks on the `LightningModule` should not call the hooks of the\n # same name in these objects as they are meant to be managed outside of the `LightningModule` lifecycle.\n # All of this should be fixed by #8506\n\n # call the accelerator hook\n if hook_name in (\"on_train_start\",) and hasattr(self.accelerator, hook_name):\n accelerator_hook = getattr(self.accelerator, hook_name)\n accelerator_output = accelerator_hook(*args, **kwargs)\n # Rely on the accelerator output if lightningModule hook returns nothing\n # Required for cases such as DataParallel where we reduce the output for the user\n # todo: move this data parallel logic into the data parallel plugin\n output = accelerator_output if output is None else output\n\n # call the ttp hook\n if hook_name not in (\"setup\", \"teardown\", \"on_train_start\") and hasattr(\n self.training_type_plugin, hook_name\n ):\n ttp_hook = getattr(self.training_type_plugin, hook_name)\n ttp_output = ttp_hook(*args, **kwargs)\n output = ttp_output if output is None else output\n\n if pl_module:\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n @staticmethod\n def _parse_devices(\n gpus: Optional[Union[List[int], str, int]],\n auto_select_gpus: bool,\n tpu_cores: Optional[Union[List[int], str, int]],\n ) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:\n return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n @staticmethod\n def _log_api_event(event: str) -> None:\n torch._C._log_api_usage_once(\"lightning.trainer.\" + event)\n\n def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:\n if isinstance(profiler, str):\n PROFILERS = {\n \"simple\": SimpleProfiler,\n \"advanced\": AdvancedProfiler,\n \"pytorch\": PyTorchProfiler,\n \"xla\": XLAProfiler,\n }\n profiler = profiler.lower()\n if profiler not in PROFILERS:\n raise MisconfigurationException(\n \"When passing string value for the `profiler` parameter of `Trainer`,\"\n f\" it can only be one of {list(PROFILERS.keys())}\"\n )\n profiler_class = PROFILERS[profiler]\n profiler = profiler_class()\n self.profiler: BaseProfiler = profiler or PassThroughProfiler()\n\n def __setup_profiler(self) -> None:\n local_rank = self.local_rank if self.world_size > 1 else None\n self.profiler._lightning_module = proxy(self.lightning_module)\n 
self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)\n\n def _log_device_info(self) -> None:\n rank_zero_info(f\"GPU available: {torch.cuda.is_available()}, used: {self._device_type == DeviceType.GPU}\")\n\n num_tpu_cores = self.tpu_cores if self.tpu_cores is not None and self._device_type == DeviceType.TPU else 0\n rank_zero_info(f\"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores\")\n\n num_ipus = self.ipus if self.ipus is not None else 0\n rank_zero_info(f\"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs\")\n\n if torch.cuda.is_available() and self._device_type != DeviceType.GPU:\n rank_zero_warn(\n \"GPU available but not used. Set the gpus flag in your trainer `Trainer(gpus=1)` or script `--gpus=1`.\"\n )\n\n if _TPU_AVAILABLE and self._device_type != DeviceType.TPU:\n rank_zero_warn(\n \"TPU available but not used. Set the `tpu_cores` flag in your trainer\"\n \" `Trainer(tpu_cores=8)` or script `--tpu_cores=8`.\"\n )\n\n if _IPU_AVAILABLE and self._device_type != DeviceType.IPU and not isinstance(self.accelerator, IPUAccelerator):\n rank_zero_warn(\n \"IPU available but not used. Set the `ipus` flag in your trainer\"\n \" `Trainer(ipus=8)` or script `--ipus=8`.\"\n )\n\n def _on_exception(self):\n if not _fault_tolerant_training():\n return\n # save a checkpoint for fault tolerant training. we don't use `log_dir` to minimize the chances of failure.\n file_path = os.path.join(self.default_root_dir, \".pl_auto_save.ckpt\")\n self.save_checkpoint(file_path)\n\n \"\"\"\n Accelerator properties\n \"\"\"\n\n @property\n def accelerator(self) -> Accelerator:\n return self._accelerator_connector.accelerator\n\n @property\n def training_type_plugin(self) -> TrainingTypePlugin:\n return self.accelerator.training_type_plugin\n\n @property\n def precision_plugin(self) -> PrecisionPlugin:\n return self.training_type_plugin.precision_plugin\n\n @property\n def global_rank(self) -> int:\n return self.training_type_plugin.global_rank\n\n @property\n def local_rank(self) -> int:\n # some training types define a local rank\n return getattr(self.training_type_plugin, \"local_rank\", 0)\n\n @property\n def node_rank(self) -> int:\n # some training types define a node rank\n return getattr(self.training_type_plugin, \"node_rank\", 0)\n\n @property\n def world_size(self) -> int:\n # some training types define a world size\n return getattr(self.training_type_plugin, \"world_size\", 1)\n\n @property\n def should_rank_save_checkpoint(self) -> bool:\n return self.training_type_plugin.should_rank_save_checkpoint\n\n @property\n def _distrib_type(self) -> _StrategyType:\n return self._accelerator_connector._distrib_type\n\n @property\n def _device_type(self) -> DeviceType:\n return self._accelerator_connector._device_type\n\n @property\n def num_nodes(self) -> int:\n return self._accelerator_connector.num_nodes\n\n @property\n def num_processes(self) -> int:\n return self._accelerator_connector.num_processes\n\n @property\n def root_gpu(self) -> Optional[int]:\n return self._accelerator_connector.root_gpu\n\n @property\n def tpu_cores(self) -> int:\n return self._accelerator_connector.tpu_cores\n\n @property\n def ipus(self) -> int:\n return self._accelerator_connector.num_ipus\n\n @property\n def num_gpus(self) -> int:\n return self._accelerator_connector.num_gpus\n\n @property\n def devices(self) -> Optional[Union[List[int], str, int]]:\n return self._accelerator_connector.devices\n\n @property\n def data_parallel_device_ids(self) -> 
Optional[List[int]]:\n return self._accelerator_connector.parallel_device_ids\n\n @property\n def lightning_module(self) -> \"pl.LightningModule\":\n return self.accelerator.lightning_module\n\n @property\n def optimizers(self) -> List[Optimizer]:\n return self.accelerator.optimizers\n\n @optimizers.setter\n def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:\n # Necessary to rewrap optimizers to lightning\n # They will be re-created when accessing\n # the `lightning_optimizers` trainer property\n self._lightning_optimizers = None\n\n self.accelerator.optimizers = new_optims\n\n @property\n def lr_schedulers(self) -> List[LRSchedulerTypeUnion]:\n return self.accelerator.lr_schedulers\n\n @lr_schedulers.setter\n def lr_schedulers(self, new_schedulers: List[LRSchedulerTypeUnion]) -> None:\n self.accelerator.lr_schedulers = new_schedulers\n\n @property\n def optimizer_frequencies(self) -> list:\n return self.accelerator.optimizer_frequencies\n\n @optimizer_frequencies.setter\n def optimizer_frequencies(self, new_freqs: list) -> None:\n self.accelerator.optimizer_frequencies = new_freqs\n\n @property\n def amp_backend(self) -> Optional[str]:\n return self.accelerator.amp_backend\n\n @property\n def precision(self) -> Union[str, int]:\n return self.training_type_plugin.precision_plugin.precision\n\n @property\n def scaler(self):\n return self.accelerator.scaler\n\n @property\n def gpus(self) -> Optional[Union[List[int], str, int]]:\n return self._accelerator_connector.gpus\n\n @property\n def model(self) -> torch.nn.Module:\n \"\"\"The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.\n\n To access the pure LightningModule, use\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead.\n \"\"\"\n return self.accelerator.model\n\n @model.setter\n def model(self, model: torch.nn.Module) -> None:\n \"\"\"Setter for the model, pass-through to accelerator and plugin where the model reference is stored. 
Used\n by the Tuner to reset the state of Trainer and Accelerator.\n\n Args:\n model: The LightningModule, possibly wrapped into DataParallel or DistributedDataParallel, depending\n on the backend.\n \"\"\"\n self.accelerator.model = model\n\n \"\"\"\n General properties\n \"\"\"\n\n @property\n def log_dir(self) -> Optional[str]:\n if self.logger is None:\n dirpath = self.default_root_dir\n elif isinstance(self.logger, TensorBoardLogger):\n dirpath = self.logger.log_dir\n elif isinstance(self.logger, LoggerCollection):\n dirpath = self.default_root_dir\n else:\n dirpath = self.logger.save_dir\n\n dirpath = self.training_type_plugin.broadcast(dirpath)\n return dirpath\n\n @property\n def use_amp(self) -> bool:\n return self.precision == 16\n\n @property\n def is_global_zero(self) -> bool:\n return self.global_rank == 0\n\n @property\n def slurm_job_id(self) -> Optional[int]:\n rank_zero_deprecation(\"Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.\")\n return SLURMEnvironment.job_id()\n\n @property\n def lightning_optimizers(self) -> List[LightningOptimizer]:\n if self._lightning_optimizers is None:\n self.convert_to_lightning_optimizers()\n return self._lightning_optimizers\n\n @property\n def distributed_sampler_kwargs(self) -> Optional[dict]:\n if isinstance(self.training_type_plugin, ParallelPlugin):\n return self.training_type_plugin.distributed_sampler_kwargs\n\n @property\n def data_parallel(self) -> bool:\n return self._distrib_type in (\n _StrategyType.DP,\n _StrategyType.DDP,\n _StrategyType.DDP_SPAWN,\n _StrategyType.DDP2,\n )\n\n @property\n def progress_bar_dict(self) -> dict:\n \"\"\"Read-only for progress bar metrics.\"\"\"\n rank_zero_deprecation(\n \"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7.\"\n \" Use `ProgressBarBase.get_metrics` instead.\"\n )\n ref_model = self.lightning_module\n ref_model = cast(pl.LightningModule, ref_model)\n if self.progress_bar_callback:\n return self.progress_bar_callback.get_metrics(self, ref_model)\n return self.progress_bar_metrics\n\n @property\n def _should_reload_dl_epoch(self) -> bool:\n \"\"\"Check if dataloader should be reloaded in the current epoch.\"\"\"\n n_epochs = self.reload_dataloaders_every_n_epochs\n return n_epochs and (not self.current_epoch % n_epochs)\n\n @property\n def enable_validation(self) -> bool:\n \"\"\"Check if we should run validation during training.\"\"\"\n model_ref = self.lightning_module\n val_loop_enabled = is_overridden(\"validation_step\", model_ref) and self.limit_val_batches > 0\n return val_loop_enabled\n\n @property\n def default_root_dir(self) -> str:\n \"\"\"The default location to save artifacts of loggers, checkpoints etc.\n\n It is used as a fallback if logger or checkpoint callback do not define specific save paths.\n \"\"\"\n if get_filesystem(self._default_root_dir).protocol == \"file\":\n return os.path.normpath(self._default_root_dir)\n return self._default_root_dir\n\n @property\n def weights_save_path(self) -> str:\n \"\"\"\n The default root location to save weights (checkpoints), e.g., when the\n :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.\n \"\"\"\n if get_filesystem(self._weights_save_path).protocol == \"file\":\n return os.path.normpath(self._weights_save_path)\n return self._weights_save_path\n\n @property\n def early_stopping_callback(self) -> Optional[EarlyStopping]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in 
the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.early_stopping_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def early_stopping_callbacks(self) -> List[EarlyStopping]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in\n the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, EarlyStopping)]\n\n @property\n def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`\n found in the Trainer.callbacks list.\"\"\"\n return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]\n\n @property\n def checkpoint_callback(self) -> Optional[ModelCheckpoint]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.checkpoint_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def checkpoint_callbacks(self) -> List[ModelCheckpoint]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found\n in the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]\n\n @property\n def progress_bar_callback(self) -> Optional[ProgressBarBase]:\n \"\"\"An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the\n Trainer.callbacks list, or ``None`` if one doesn't exist.\"\"\"\n for c in self.callbacks:\n if isinstance(c, ProgressBarBase):\n return c\n return None\n\n @property\n def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:\n resume_from_checkpoint = self.checkpoint_connector.resume_from_checkpoint_fit_path\n if resume_from_checkpoint is not None:\n rank_zero_deprecation(\n \"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v1.7.\"\n \" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.\"\n )\n\n return resume_from_checkpoint\n\n def save_checkpoint(self, filepath: _PATH, weights_only: bool = False) -> None:\n self.checkpoint_connector.save_checkpoint(filepath, weights_only)\n\n \"\"\"\n Parsing properties\n \"\"\"\n\n @classmethod\n def default_attributes(cls) -> dict:\n init_signature = inspect.signature(cls)\n return {k: v.default for k, v in init_signature.parameters.items()}\n\n @classmethod\n def get_deprecated_arg_names(cls) -> List:\n \"\"\"Returns a list with deprecated Trainer arguments.\"\"\"\n depr_arg_names = []\n for name, val in cls.__dict__.items():\n if name.startswith(\"DEPRECATED\") and isinstance(val, (tuple, list)):\n depr_arg_names.extend(val)\n return depr_arg_names\n\n @classmethod\n def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:\n return from_argparse_args(cls, args, **kwargs)\n\n @classmethod\n def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:\n return parse_argparser(cls, arg_parser)\n\n @classmethod\n def match_env_arguments(cls) -> Namespace:\n return parse_env_variables(cls)\n\n @classmethod\n def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n return add_argparse_args(cls, parent_parser, **kwargs)\n\n \"\"\"\n State properties\n \"\"\"\n\n @property\n def interrupted(self) -> bool:\n return self.state.status == 
TrainerStatus.INTERRUPTED\n\n    @property\n    def training(self) -> bool:\n        return self.state.stage == RunningStage.TRAINING\n\n    @training.setter\n    def training(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.TRAINING\n        elif self.training:\n            self.state.stage = None\n\n    @property\n    def testing(self) -> bool:\n        return self.state.stage == RunningStage.TESTING\n\n    @testing.setter\n    def testing(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.TESTING\n        elif self.testing:\n            self.state.stage = None\n\n    @property\n    def predicting(self) -> bool:\n        return self.state.stage == RunningStage.PREDICTING\n\n    @predicting.setter\n    def predicting(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.PREDICTING\n        elif self.predicting:\n            self.state.stage = None\n\n    @property\n    def tuning(self) -> bool:\n        return self.state.stage == RunningStage.TUNING\n\n    @tuning.setter\n    def tuning(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.TUNING\n        elif self.tuning:\n            self.state.stage = None\n\n    @property\n    def validating(self) -> bool:\n        return self.state.stage == RunningStage.VALIDATING\n\n    @validating.setter\n    def validating(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.VALIDATING\n        elif self.validating:\n            self.state.stage = None\n\n    @property\n    def evaluating(self) -> bool:\n        return self.state.stage and self.state.stage.evaluating\n\n    @property\n    def sanity_checking(self) -> bool:\n        return self.state.stage == RunningStage.SANITY_CHECKING\n\n    @sanity_checking.setter\n    def sanity_checking(self, val: bool) -> None:\n        if val:\n            self.state.stage = RunningStage.SANITY_CHECKING\n        elif self.sanity_checking:\n            self.state.stage = None\n\n    \"\"\"\n    Loop properties\n    \"\"\"\n\n    @property\n    def global_step(self) -> int:\n        return self.fit_loop.global_step\n\n    @property\n    def current_epoch(self) -> int:\n        return self.fit_loop.current_epoch\n\n    @property\n    def max_epochs(self) -> int:\n        return self.fit_loop.max_epochs\n\n    @property\n    def min_epochs(self) -> Optional[int]:\n        return self.fit_loop.min_epochs\n\n    @property\n    def max_steps(self) -> int:\n        return self.fit_loop.max_steps\n\n    @property\n    def min_steps(self) -> Optional[int]:\n        return self.fit_loop.min_steps\n\n    @property\n    def is_last_batch(self) -> bool:\n        return self.fit_loop.epoch_loop.batch_progress.is_last_batch\n\n    @property\n    def fit_loop(self) -> FitLoop:\n        return self._fit_loop\n\n    @fit_loop.setter\n    def fit_loop(self, loop: FitLoop):\n        \"\"\"Attach a custom fit loop to this Trainer.\n\n        It will run with\n        :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`.\n        \"\"\"\n        loop.trainer = self\n        self._fit_loop = loop\n\n    @property\n    def validate_loop(self) -> EvaluationLoop:\n        return self._validate_loop\n\n    @validate_loop.setter\n    def validate_loop(self, loop: EvaluationLoop):\n        \"\"\"Attach a custom validation loop to this Trainer.\n\n        It will run with\n        :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. 
Note that this loop is different from the one\n running during training inside the :meth:`pytorch_lightning.trainer.trainer.Trainer.fit` call.\n \"\"\"\n loop.trainer = self\n self._validate_loop = loop\n\n @property\n def test_loop(self) -> EvaluationLoop:\n return self._test_loop\n\n @test_loop.setter\n def test_loop(self, loop: EvaluationLoop):\n \"\"\"Attach a custom test loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.\n \"\"\"\n loop.trainer = self\n self._test_loop = loop\n\n @property\n def predict_loop(self) -> PredictionLoop:\n return self._predict_loop\n\n @predict_loop.setter\n def predict_loop(self, loop: PredictionLoop):\n \"\"\"Attach a custom prediction loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.\n \"\"\"\n loop.trainer = self\n self._predict_loop = loop\n\n @property\n def _evaluation_loop(self) -> EvaluationLoop:\n if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):\n return self.fit_loop.epoch_loop.val_loop\n if self.state.fn == TrainerFn.VALIDATING:\n return self.validate_loop\n if self.state.fn == TrainerFn.TESTING:\n return self.test_loop\n raise RuntimeError(\"The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope\")\n\n @property\n def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:\n if self.training:\n return self.fit_loop\n if self.sanity_checking or self.evaluating:\n return self._evaluation_loop\n if self.predicting:\n return self.predict_loop\n\n \"\"\"\n Logging properties\n \"\"\"\n\n @property\n def callback_metrics(self) -> dict:\n return self.logger_connector.callback_metrics\n\n @property\n def logged_metrics(self) -> dict:\n return self.logger_connector.logged_metrics\n\n @property\n def progress_bar_metrics(self) -> dict:\n return self.logger_connector.progress_bar_metrics\n\n @property\n def _results(self) -> Optional[ResultCollection]:\n active_loop = self._active_loop\n if active_loop is not None:\n return active_loop._results\n\n def _exit_gracefully_on_signal(self) -> None:\n if _fault_tolerant_training() and self._terminate_gracefully:\n caller = inspect.stack()[1]\n class_name = caller[0].f_locals[\"self\"].__class__.__name__\n raise ExitGracefullyException(f\"Exiting gracefully on {class_name}:{caller.function}\")\n\n @property\n def weights_summary(self) -> Optional[str]:\n rank_zero_deprecation(\"`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n return self._weights_summary\n\n @weights_summary.setter\n def weights_summary(self, val: Optional[str]) -> None:\n rank_zero_deprecation(\"Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n self._weights_summary = val\n\n \"\"\"\n Other\n \"\"\"\n\n # TODO: refactor this so that it can be done in LightningOptimizer\n def __getstate__(self):\n # remove lightning_optimizers\n self._lightning_optimizers = None\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__ = state\n\n @property\n def terminate_on_nan(self) -> bool:\n rank_zero_deprecation(\"`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.\")\n return self._terminate_on_nan\n\n @terminate_on_nan.setter\n def terminate_on_nan(self, val: bool) -> None:\n rank_zero_deprecation(\n f\"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7.\"\n f\" Please set `Trainer(detect_anomaly={val})` instead.\"\n )\n 
self._terminate_on_nan = val\n\n\ndef _determine_batch_limits(batches: Union[int, float], name: str) -> Union[int, float]:\n    if 0 <= batches <= 1:\n        return batches\n    if batches > 1 and batches % 1.0 == 0:\n        return int(batches)\n    raise MisconfigurationException(\n        f\"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int.\"\n    )\n" ]
[ [ "torch.autograd.set_detect_anomaly", "torch._C._log_api_usage_once", "torch.set_grad_enabled", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
d29parkar/pyrsgis
[ "394ed2dc3f5c08c06e1b258bb12a71d30caf0966" ]
[ "pyrsgis/beta/__init__.py" ]
[ "# pyrsgis/beta\r\n\r\n#Importing all the necessary libraries\r\nimport os, glob, datetime\r\n# add exception for deprecated version of gdal\r\ntry:\r\n import gdal\r\nexcept:\r\n from osgeo import gdal\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nimport numpy as np\r\nimport warnings, shutil\r\nimport tarfile, tempfile\r\nfrom ..raster import read, export, _create_ds\r\n\r\ntry:\r\n from matplotlib_scalebar.scalebar import ScaleBar\r\nexcept:\r\n pass\r\n \r\n#Disabling annoying warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n#This creates a class for raster, and based on the format of input\r\n#it decides whether input is stacked raster data or .tar.gz file\r\nclass readtar():\r\n oldDir = os.getcwd()\r\n\r\n def __init__(self, name):\r\n self.oldDir = os.getcwd()\r\n self.name = name\r\n self.fileName, self.ext = os.path.splitext(self.name)\r\n self.initiated = False\r\n if (self.ext == \".gz\") or (self.ext == \".GZ\"):\r\n self.type = \"TARfile\"\r\n self.nbands = self.initiateTAR()\r\n self.band = self.getband(1)\r\n self.band = None\r\n self.satellite = self.sensor()\r\n self.bandIndex = self.checkBandIndex()\r\n else:\r\n self.type = \"Unidentified data format\"\r\n print(\"Warning! Expected file type is .tar.gz\")\r\n\r\n #This method reads the TAR file and returns the number of TIFF files in it \r\n def initiateTAR(self):\r\n global filesList\r\n global tarTifList\r\n self.initiate = True\r\n self.tarTifList = []\r\n with tarfile.open(self.name, 'r:gz') as self.tar:\r\n self.filesList = self.tar.getnames()\r\n for self.files in self.filesList:\r\n if (self.files[-4:] == '.TIF') or (self.files[-4:] == '.tif'):\r\n self.tarTifList.append(self.files)\r\n return(len(self.tarTifList)-1)\r\n\r\n #This method creates a temporary directory to extract .tar.gz files\r\n #This section only executes if input is a tar file\r\n def createTempDirectory(self):\r\n try:\r\n self.tempDirPath = None\r\n self.tempDirPath = tempfile.TemporaryDirectory('pyrsgis')\r\n self.tempDir = str(self.tempDirPath).split(\" \")[1]\r\n self.tempDir = self.tempDir[1:-2]\r\n self.tempDir = self.tempDir.split(\"\\\\\\\\\")\r\n self.tempDirPath = self.tempDir\r\n self.tempDir[-1] = 'pyrsgis'\r\n self.tempDir = \"\\\\\\\\\".join(self.tempDir)\r\n self.tempDirPath.pop()\r\n self.tempDirPath = \"\\\\\\\\\".join(self.tempDirPath)\r\n## print(\"Temporary directory created in \\n%s\" % self.tempDir)\r\n except FileNotFoundError:\r\n pass\r\n\r\n #This method acts as a generator if the input is a tarfile\r\n def generatorTAR(self, nBand):\r\n global createdFile\r\n self.createdFile = []\r\n self.nBand = str(nBand)\r\n self.nCreated = 0\r\n for self.tarinfo in self.tar:\r\n self.folder = os.path.split(self.tarinfo.name)[0]\r\n self.fileName = os.path.splitext(self.tarinfo.name)[0]\r\n self.bandName = self.fileName.split('_')[-1]\r\n self.ext = os.path.splitext(self.tarinfo.name)[1]\r\n if (self.ext == \".tif\") or (self.ext == \".TIF\"):\r\n if (self.bandName == (\"B\"+str(self.nBand))):\r\n self.createdFile.append(self.tarinfo.name)\r\n if self.nCreated > 0:\r\n print(\"Creating temporary file %s\" % self.tarinfo.name)\r\n self.nCreated += 1\r\n yield self.tarinfo\r\n\r\n #This method returns the band in the form of an array\r\n def getband(self, nBand):\r\n if self.type == \"TARfile\":\r\n with tarfile.open(self.name, 'r:gz') as self.tar:\r\n self.createTempDirectory()\r\n if not os.path.exists(self.tempDir):\r\n os.mkdir(self.tempDir)\r\n os.chdir(self.tempDir)\r\n 
self.tar.extractall(members=self.generatorTAR(nBand))\r\n                self.ds, self.band = read(self.createdFile[0], bands=1)\r\n                if self.initiated == False:\r\n                    self.rows = self.ds.RasterYSize\r\n                    self.cols = self.ds.RasterXSize\r\n                    self.projection = self.ds.GetProjection()\r\n                    self.geotransform = self.ds.GetGeoTransform()\r\n                    self.initiated = True\r\n                self.ds = _create_ds(self.ds)\r\n        # Goes back to the old directory and deletes the temporary directory\r\n        os.chdir(self.oldDir)\r\n        self.clearMemory()\r\n        return(self.band)\r\n\r\n    def extractBand(self, bands='All', filename='pyrsgisExtractedBands.tif'):\r\n        if type(bands)==type(str()):\r\n            tempArray = np.random.randint(1, size=(self.nbands, self.rows, self.cols))\r\n            for n in range(0, self.nbands):\r\n                tempArray[n,:,:] = self.getband(n+1)\r\n        elif type(bands) == type(list()):\r\n            tempArray = np.random.randint(1, size=(len(bands), self.rows, self.cols))\r\n            for n, index in enumerate(bands):\r\n                tempArray[n,:,:] = self.getband(index)\r\n        export(tempArray, self.ds, filename, bands='All')\r\n\r\n    #This method calculates the normalised difference of any two given bands \r\n    def nordif(self, band2, band1):\r\n        self.band1 = self.getband(band1)\r\n        self.band2 = self.getband(band2)\r\n        return((self.band2-self.band1)/(self.band2+self.band1))\r\n\r\n    #This method saves the processed image in the drive \r\n    def export(self, array, outfile='pyrsgisRaster.tif', dtype='int'):\r\n        export(array, self.ds, filename=outfile, dtype=dtype)\r\n\r\n    #This method clears everything stored in the virtual memory to reduce load \r\n    def clearMemory(self):\r\n        os.chdir(self.tempDirPath)\r\n        for folder in glob.glob(\"*pyrsgis\"):\r\n            shutil.rmtree(folder)\r\n        os.chdir(self.oldDir)\r\n        \r\n    #This method decides the index of the bands depending on the sensor type\r\n    def checkBandIndex(self):\r\n        if self.satellite == 'Landsat - 4/5 TM':\r\n            return({'blue':1,\r\n                    'green':2,\r\n                    'red':3,\r\n                    'nir':4,\r\n                    'swir1':5,\r\n                    'thermal':6,\r\n                    'swir':7})\r\n        elif self.satellite == 'Landsat - 7':\r\n            return({'blue':1,\r\n                    'green':2,\r\n                    'red':3,\r\n                    'nir':4,\r\n                    'swir1':5,\r\n                    'thermal1':6,\r\n                    'thermal2':7,\r\n                    'swir2':8,\r\n                    'panchromatic':9})\r\n        elif self.satellite == 'Landsat - 8':\r\n            return({'aerosol':1,\r\n                    'blue':2,\r\n                    'green':3,\r\n                    'red':4,\r\n                    'nir':5,\r\n                    'swir1':6,\r\n                    'swir2':7,\r\n                    'panchromatic':8,\r\n                    'cirrus':9,\r\n                    'tirs1':10,\r\n                    'tirs2':11})\r\n\r\n    #This method decides the satellite sensor, depending on the number of bands\r\n    def sensor(self):\r\n        try:\r\n            if (self.type == \"TARfile\") and (self.nbands == 7):\r\n                return('Landsat - 4/5 TM')\r\n            elif (self.type == \"TARfile\") and (self.nbands == 9):\r\n                return('Landsat - 7')\r\n            elif (self.type == \"TARfile\") and (self.nbands) == 11:\r\n                return('Landsat - 8')\r\n        except:\r\n            print('Warning! 
Input data has no match in the inventory') \r\n        \r\n    #This method returns the NDVI of the input file\r\n    def ndvi(self):\r\n        try:\r\n            self.redband = self.getband(self.bandIndex['red'])\r\n            self.nirband = self.getband(self.bandIndex['nir'])\r\n            self.ndviband = ((self.nirband-self.redband)/(self.nirband+self.redband))\r\n        except KeyError:\r\n            print('One of the required bands was not found.')\r\n        self.redband = None\r\n        self.nirband = None\r\n        return(self.ndviband)\r\n\r\nclass readtif():\r\n    oldDir = os.getcwd()\r\n\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.fileName, self.ext = os.path.splitext(self.name)\r\n        self.initiated = False\r\n        self.type = \"TIFFfile\"\r\n        self.band = self.getband(1)\r\n        self.band = None\r\n        self.satellite = self.sensor()\r\n        self.bandIndex = self.checkBandIndex()\r\n\r\n    #This method returns the band in the form of an array\r\n    def getband(self, nBand, datatype='int'):\r\n        self.ds, self.band = read(self.name, bands=nBand)\r\n        if datatype == 'float':\r\n            self.band = self.band.astype(float)\r\n        if self.initiated == False:\r\n            self.rows = self.ds.RasterYSize\r\n            self.cols = self.ds.RasterXSize\r\n            self.nbands = self.ds.RasterCount\r\n            self.projection = self.ds.GetProjection()\r\n            self.geotransform = self.ds.GetGeoTransform()\r\n            self.initiated = True\r\n        return(self.band)\r\n\r\n    #This method calculates the normalised difference of any two given bands \r\n    def nordif(self, band2, band1):\r\n        self.band1 = self.getband(band1)\r\n        self.band1 = self.band1.astype(float)\r\n        self.band2 = self.getband(band2)\r\n        self.band2 = self.band2.astype(float)\r\n        return((self.band2-self.band1)/(self.band2+self.band1))\r\n\r\n    #This method saves the processed image in the drive \r\n    def export(self, array, outfile='pyrsgisRaster.tif', datatype='int'):\r\n        export(array, self.ds, filename=outfile, dtype=datatype)\r\n        \r\n    #This method clears everything stored in the virtual memory to reduce load \r\n    def clearMemory(self):\r\n        self.band = None\r\n        self.ds = None\r\n        \r\n    #This method decides the index of the bands depending on the sensor type\r\n    def checkBandIndex(self):\r\n        if self.satellite == 'Landsat - 4/5 TM':\r\n            return({'blue':1,\r\n                    'green':2,\r\n                    'red':3,\r\n                    'nir':4,\r\n                    'swir1':5,\r\n                    'thermal':6,\r\n                    'swir':7})\r\n        elif self.satellite == 'Landsat - 7':\r\n            return({'blue':1,\r\n                    'green':2,\r\n                    'red':3,\r\n                    'nir':4,\r\n                    'swir1':5,\r\n                    'thermal1':6,\r\n                    'thermal2':7,\r\n                    'swir2':8,\r\n                    'panchromatic':9})\r\n        elif self.satellite == 'Landsat - 8':\r\n            return({'aerosol':1,\r\n                    'blue':2,\r\n                    'green':3,\r\n                    'red':4,\r\n                    'nir':5,\r\n                    'swir1':6,\r\n                    'swir2':7,\r\n                    'panchromatic':8,\r\n                    'cirrus':9,\r\n                    'tirs1':10,\r\n                    'tirs2':11})\r\n\r\n    #This method decides the satellite sensor, depending on the number of bands\r\n    def sensor(self):\r\n        try:\r\n            if (self.nbands == 7):\r\n                return('Landsat - 4/5 TM')\r\n            elif (self.nbands == 8):\r\n                return('Landsat - 7')\r\n            elif (self.nbands) == 11:\r\n                return('Landsat - 8')\r\n            elif (self.nbands) == 1:\r\n                return('Panchromatic data') \r\n        except:\r\n            print('Warning! Input data has no match in the inventory') \r\n        \r\n    #This method returns the NDVI of the input file\r\n    def ndvi(self):\r\n        try:\r\n            self.redband = self.getband(self.bandIndex['red'])\r\n            self.nirband = self.getband(self.bandIndex['nir'])\r\n            self.ndviband = ((self.nirband-self.redband)/(self.nirband+self.redband))\r\n        except KeyError:\r\n            print('ERROR! 
One of the required bands was not found.')\r\n        self.redband = None\r\n        self.nirband = None\r\n        return(self.ndviband)\r\n\r\ndef radioCorrection(band, maxVal=255):\r\n    band = np.nan_to_num(band)\r\n    return((band-band.min())/(band.max()-band.min())*maxVal)\r\n\r\n#This method shows the band using matplotlib\r\ndef display(band, maptitle = 'Pyrsgis Raster', cmap='PRGn'):\r\n    plt.title(maptitle, fontsize=20)\r\n    legend = cm.ScalarMappable(cmap=cmap)\r\n    legend.set_array(np.array([band.min(), band.min()+band.max()/2, band.max()]))\r\n    plt.colorbar(legend)\r\n    plt.imshow(band, cmap=cmap)\r\n    try:\r\n        scalebar = ScaleBar(30)\r\n    except:\r\n        raise ModuleNotFoundError(\"Please install matplotlib_scalebar library to use this feature.\")\r\n    plt.gca().add_artist(scalebar)\r\n    plt.show()\r\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "numpy.nan_to_num", "matplotlib.pyplot.colorbar", "matplotlib.cm.ScalarMappable", "matplotlib.pyplot.show", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ekkobit/avapi
[ "0dc83e7932ceda230dcb1b124366b5de4943832e" ]
[ "avapi/data.py" ]
[ "'''The data module contains fuctions to get data from Alpha Vantage into\npython. Data may be saved as csv files or loaded as Pandas data frames.\n'''\n\n__authors__ = \"Ole Olaussen, Xuan Ling\"\n__email__ = \"[email protected], [email protected]\"\n\n\nimport requests\nimport json\nimport pandas as pd\nimport os\n\n\ndef get_data(save_to=None, **kwargs):\n r'''Downloads a json file from Alpha Vantage.\n\n :param save_to: Default None. Where to save csv file if\n ``datatype=\"csv\"`` is provided.\n :type save_to: ``str`` or ``None``\n :param \\**kwargs:\n See below\n\n :Keyword Arguments:\n * *function* (``str``) --\n Any of the Alpha Vantage function types. \\\n Such as ``\"TIME_SERIES_INTRADAY\"``. \\\n See their `documentation \\\n <https://www.alphavantage.co/documentation/>`_\n * *symbol* (``str``) --\n Company ticker symbol, such as ``\"GOOGL\"``.\n * *interval* (``str``) --\n ``\"1min\"``, ``\"5min\"``, ``\"15min\"``, ``\"30min\"`` or ``\"60min\"``. \\\n Also ``\"daily\"``, ``\"weekly\"``, ``\"monthly\"``\n * *outputsize* (``str``) --\n ``\"compact\"`` (default) or ``\"full\"``\n * *datatype* (``str``) --\n ``\"json\"`` (default) or ``\"csv\"``\n * *apikey* (``str``) --\n You need to get a free API key from `Alpha Vantage \\\n <https://www.alphavantage.co/>`_\n\n The above list is not exhaustive. Please see `Alpha Vantage docs\n <https://www.alphavantage.co/documentation/>`_ for\n complete listing and what fuction requires which keyword arguments.\n\n :returns: If datatype is not set to ``\"csv\"``, a dictionary is returned\n :rtype: ``dict`` [``str``, ``float``]\n '''\n\n url = 'https://www.alphavantage.co/query?'\n for key, value in kwargs.items():\n url += key + '=' + str(value) + '&'\n url = url[:-1]\n\n csv = 'datatype' in kwargs and kwargs['datatype'] == 'csv'\n\n if csv:\n url += '&datatype=csv'\n\n r = requests.get(url)\n content = r.content\n\n path = os.path.abspath(__file__)\n dir_path = os.path.dirname(path)\n\n with open(dir_path + '/response', 'wb') as file:\n file.write(content)\n\n if csv:\n if save_to is None:\n print(\"Missing keyword argument save_to='...'\")\n else:\n with open(save_to, 'wb') as file:\n file.write(content)\n else:\n dic = json.loads(content)\n return dic\n\n\ndef to_df(dic):\n '''Converts data dictionary, downloaded from Alpha Vantage, to pandas\n dataframes.\n\n :param dic: Python dictionary of Alpha Vantage time series data\n :type dic: ``dict`` [``str``, ``float``]\n :returns: Returns the converted dictionary as a Pandas data frame\n :rtype: ``pandas.DataFrame()``\n '''\n\n # Get outer dictionaries\n outer = [key for key, value in dic.items()]\n\n # Get timestamps\n dates = [key for key, value in dic[outer[1]].items()]\n\n # Get columns\n columns = [key for key, value in dic[outer[1]][dates[0]].items()]\n\n data = {'date': dates}\n\n # Populate dictionary with column names (keys) and empty lists (values)\n for column in columns:\n data[column] = []\n\n # Fill lists with corresponding data\n for date in dates:\n for column in columns:\n data[column].append(dic[outer[1]][date][column])\n\n df = pd.DataFrame(data) # Convert dict to pandas data frame\n df.set_index('date', inplace=True)\n\n return df\n\n\ndef response():\n '''Opens and reads last response from Alpha Vantage server.\n\n :returns: Content of response.\n :rtype: ``str``\n '''\n\n path = os.path.abspath(__file__)\n dir_path = os.path.dirname(path)\n\n with open(dir_path + '/response', 'rb') as file:\n reader = file.read()\n\n return reader\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
leeesangwon/CNN_sentence.pytorch
[ "512a1fe1de359f01557b6441a0571d7d98335716" ]
[ "src/data/__init__.py" ]
[ "import os\n\nimport torch\n\nimport constants as const\n\nfrom .cross_val_dataset import CrossValDataset, _CrossValDataset\nfrom .sst import SSTDataset\nfrom .trec import TRECDataset\n\nDATASETS = ('MR', 'Subj', 'CR', 'MPQA', 'SST1', 'SST2', 'TREC')\n\n\ndef get_datasets(dataset_type, dataset_folder, batch_size):\n if dataset_type not in DATASETS:\n raise ValueError(\"Invalid dataset: %s\" % dataset_type)\n\n train_datasets = []\n val_datasets = []\n test_datasets = []\n\n if dataset_type in ['MR', 'Subj', 'CR', 'MPQA']:\n dataset_file = os.path.join(dataset_folder, const.DATASET_FILENAME[dataset_type])\n dataset = _CrossValDataset(dataset_file, CrossValDataset.cv, random_seed=const.RANDOM_SEED)\n for test_cv in range(CrossValDataset.cv):\n train_datasets.append(CrossValDataset(dataset, batch_size, test_cv, type='train'))\n val_datasets.append(CrossValDataset(dataset, batch_size, test_cv, type='val'))\n test_datasets.append(CrossValDataset(dataset, batch_size, test_cv, type='test'))\n\n elif dataset_type in ['SST1', 'SST2']:\n datasets = []\n for dataset_filename in const.DATASET_FILENAME[dataset_type]:\n dataset_file = os.path.join(dataset_folder, dataset_filename)\n datasets.append(SSTDataset(dataset_file))\n\n train_datasets.append(datasets[0])\n val_datasets.append(datasets[1])\n test_datasets.append(datasets[2])\n\n elif dataset_type in ['TREC']:\n train_dataset_file = os.path.join(dataset_folder, const.DATASET_FILENAME[dataset_type][0])\n train_datasets = [TRECDataset(train_dataset_file, batch_size, type='train')]\n val_datasets = [TRECDataset(train_dataset_file, batch_size, type='val')]\n\n test_dataset_file = os.path.join(dataset_folder, const.DATASET_FILENAME[dataset_type][2])\n test_datasets = [TRECDataset(test_dataset_file, batch_size, type='test')]\n\n return train_datasets, val_datasets, test_datasets\n\n\ndef sentence_collate_fn(batch):\n data = [item[0] for item in batch]\n target = [item[1] for item in batch]\n target = torch.LongTensor(target)\n return data, target\n" ]
[ [ "torch.LongTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
deep-diver/great_expectations
[ "9d85b7e0543d170e73df99dcc985c649a0f395bb" ]
[ "great_expectations/dataset/sparkdf_dataset.py" ]
[ "import copy\nimport inspect\nimport json\nimport logging\nimport warnings\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom functools import reduce, wraps\nfrom typing import List\n\nimport jsonschema\nimport numpy as np\nimport pandas as pd\nfrom dateutil.parser import parse\n\nfrom great_expectations.data_asset import DataAsset\nfrom great_expectations.data_asset.util import DocInherit, parse_result_format\n\nfrom .dataset import Dataset\nfrom .pandas_dataset import PandasDataset\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pyspark.sql.types as sparktypes\n from pyspark.ml.feature import Bucketizer\n from pyspark.sql import SQLContext, Window\n from pyspark.sql.functions import (\n array,\n col,\n count,\n countDistinct,\n datediff,\n desc,\n expr,\n isnan,\n lag,\n )\n from pyspark.sql.functions import length as length_\n from pyspark.sql.functions import (\n lit,\n monotonically_increasing_id,\n stddev_samp,\n struct,\n udf,\n when,\n year,\n )\nexcept ImportError as e:\n logger.debug(str(e))\n logger.debug(\n \"Unable to load spark context; install optional spark dependency for support.\"\n )\n\n\nclass MetaSparkDFDataset(Dataset):\n \"\"\"MetaSparkDFDataset is a thin layer between Dataset and SparkDFDataset.\n This two-layer inheritance is required to make @classmethod decorators work.\n Practically speaking, that means that MetaSparkDFDataset implements \\\n expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \\\n and SparkDFDataset implements the expectation methods themselves.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @classmethod\n def column_map_expectation(cls, func):\n \"\"\"Constructs an expectation using column-map semantics.\n\n\n The MetaSparkDFDataset implementation replaces the \"column\" parameter supplied by the user with a Spark Dataframe\n with the actual column data. The current approach for functions implementing expectation logic is to append\n a column named \"__success\" to this dataframe and return to this decorator.\n\n See :func:`column_map_expectation <great_expectations.Dataset.base.Dataset.column_map_expectation>` \\\n for full documentation of this function.\n \"\"\"\n argspec = inspect.getfullargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(\n self, column, mostly=None, result_format=None, *args, **kwargs,\n ):\n \"\"\"\n This whole decorator is pending a re-write. Currently there is are huge performance issues\n when the # of unexpected elements gets large (10s of millions). Additionally, there is likely\n easy optimization opportunities by coupling result_format with how many different transformations\n are done on the dataset, as is done in sqlalchemy_dataset.\n \"\"\"\n\n # Rename column so we only have to handle dot notation here\n eval_col = \"__eval_col_\" + column.replace(\".\", \"__\").replace(\"`\", \"_\")\n self.spark_df = self.spark_df.withColumn(eval_col, col(column))\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n result_format = parse_result_format(result_format)\n\n # this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very\n # large number of unexpected results could hang for a long time. 
we should either call this out in docs\n # or put a limit on it\n if result_format[\"result_format\"] == \"COMPLETE\":\n unexpected_count_limit = None\n else:\n unexpected_count_limit = result_format[\"partial_unexpected_count\"]\n\n col_df = self.spark_df.select(col(eval_col)) # pyspark.sql.DataFrame\n\n # a couple of tests indicate that caching here helps performance\n col_df.persist()\n element_count = self.get_row_count()\n\n # FIXME temporary fix for missing/ignored value\n if func.__name__ not in [\n \"expect_column_values_to_not_be_null\",\n \"expect_column_values_to_be_null\",\n ]:\n col_df = col_df.filter(col_df[0].isNotNull())\n # these nonnull_counts are cached by SparkDFDataset\n nonnull_count = self.get_column_nonnull_count(eval_col)\n else:\n nonnull_count = element_count\n\n # success_df will have columns [column, '__success']\n # this feels a little hacky, so might want to change\n success_df = func(self, col_df, *args, **kwargs)\n success_count = success_df.filter(\"__success = True\").count()\n\n unexpected_count = nonnull_count - success_count\n\n if unexpected_count == 0:\n # save some computation time if no unexpected items\n maybe_limited_unexpected_list = []\n else:\n # here's an example of a place where we could do optimizations if we knew result format: see\n # comment block below\n unexpected_df = success_df.filter(\"__success = False\")\n if unexpected_count_limit:\n unexpected_df = unexpected_df.limit(unexpected_count_limit)\n maybe_limited_unexpected_list = [\n row[eval_col] for row in unexpected_df.collect()\n ]\n\n if \"output_strftime_format\" in kwargs:\n output_strftime_format = kwargs[\"output_strftime_format\"]\n parsed_maybe_limited_unexpected_list = []\n for val in maybe_limited_unexpected_list:\n if val is None:\n parsed_maybe_limited_unexpected_list.append(val)\n else:\n if isinstance(val, str):\n val = parse(val)\n parsed_maybe_limited_unexpected_list.append(\n datetime.strftime(val, output_strftime_format)\n )\n maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly\n )\n\n # Currently the abstraction of \"result_format\" that _format_column_map_output provides\n # limits some possible optimizations within the column-map decorator. It seems that either\n # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator\n # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.\n # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement\n # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).\n # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and\n # with the optimizations based on result_format. A side benefit would be implementing an interface for the user\n # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for\n # tablular datasets maybe). 
However, unclear if this is worth it or if it would conflict with optimizations being done\n # in other dataset implementations.\n return_obj = self._format_map_output(\n result_format,\n success,\n element_count,\n nonnull_count,\n unexpected_count,\n maybe_limited_unexpected_list,\n unexpected_index_list=None,\n )\n\n # FIXME Temp fix for result format\n if func.__name__ in [\n \"expect_column_values_to_not_be_null\",\n \"expect_column_values_to_be_null\",\n ]:\n del return_obj[\"result\"][\"unexpected_percent_nonmissing\"]\n del return_obj[\"result\"][\"missing_count\"]\n del return_obj[\"result\"][\"missing_percent\"]\n try:\n del return_obj[\"result\"][\"partial_unexpected_counts\"]\n except KeyError:\n pass\n\n col_df.unpersist()\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n\n return inner_wrapper\n\n @classmethod\n def column_pair_map_expectation(cls, func):\n \"\"\"\n The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating\n truthiness of some condition on a per row basis across a pair of columns.\n \"\"\"\n argspec = inspect.getfullargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(\n self,\n column_A,\n column_B,\n mostly=None,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None,\n *args,\n **kwargs,\n ):\n # Rename column so we only have to handle dot notation here\n eval_col_A = \"__eval_col_A_\" + column_A.replace(\".\", \"__\").replace(\"`\", \"_\")\n eval_col_B = \"__eval_col_B_\" + column_B.replace(\".\", \"__\").replace(\"`\", \"_\")\n\n self.spark_df = self.spark_df.withColumn(\n eval_col_A, col(column_A)\n ).withColumn(eval_col_B, col(column_B))\n\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n result_format = parse_result_format(result_format)\n\n # this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very\n # large number of unexpected results could hang for a long time. 
we should either call this out in docs\n # or put a limit on it\n if result_format[\"result_format\"] == \"COMPLETE\":\n unexpected_count_limit = None\n else:\n unexpected_count_limit = result_format[\"partial_unexpected_count\"]\n\n cols_df = self.spark_df.select(eval_col_A, eval_col_B).withColumn(\n \"__row\", monotonically_increasing_id()\n ) # pyspark.sql.DataFrame\n\n # a couple of tests indicate that caching here helps performance\n cols_df.cache()\n element_count = self.get_row_count()\n\n if ignore_row_if == \"both_values_are_missing\":\n boolean_mapped_null_values = cols_df.selectExpr(\n \"`__row`\",\n \"`{0}` AS `A_{0}`\".format(eval_col_A),\n \"`{0}` AS `B_{0}`\".format(eval_col_B),\n \"ISNULL(`{}`) AND ISNULL(`{}`) AS `__null_val`\".format(\n eval_col_A, eval_col_B\n ),\n )\n elif ignore_row_if == \"either_value_is_missing\":\n boolean_mapped_null_values = cols_df.selectExpr(\n \"`__row`\",\n \"`{0}` AS `A_{0}`\".format(eval_col_A),\n \"`{0}` AS `B_{0}`\".format(eval_col_B),\n \"ISNULL(`{}`) OR ISNULL(`{}`) AS `__null_val`\".format(\n eval_col_A, eval_col_B\n ),\n )\n elif ignore_row_if == \"never\":\n boolean_mapped_null_values = cols_df.selectExpr(\n \"`__row`\",\n \"`{0}` AS `A_{0}`\".format(eval_col_A),\n \"`{0}` AS `B_{0}`\".format(eval_col_B),\n lit(False).alias(\"__null_val\"),\n )\n else:\n raise ValueError(\"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n # since pyspark guaranteed each columns selected has the same number of rows, no need to do assert as in pandas\n # assert series_A.count() == (\n # series_B.count()), \"Series A and B must be the same length\"\n\n nonnull_df = boolean_mapped_null_values.filter(\"__null_val = False\")\n nonnull_count = nonnull_df.count()\n\n col_A_df = nonnull_df.select(\"__row\", \"`A_{}`\".format(eval_col_A))\n col_B_df = nonnull_df.select(\"__row\", \"`B_{}`\".format(eval_col_B))\n\n success_df = func(self, col_A_df, col_B_df, *args, **kwargs)\n success_count = success_df.filter(\"__success = True\").count()\n\n unexpected_count = nonnull_count - success_count\n if unexpected_count == 0:\n # save some computation time if no unexpected items\n maybe_limited_unexpected_list = []\n else:\n # here's an example of a place where we could do optimizations if we knew result format: see\n # comment block below\n unexpected_df = success_df.filter(\"__success = False\")\n if unexpected_count_limit:\n unexpected_df = unexpected_df.limit(unexpected_count_limit)\n maybe_limited_unexpected_list = [\n (row[\"A_{}\".format(eval_col_A)], row[\"B_{}\".format(eval_col_B)],)\n for row in unexpected_df.collect()\n ]\n\n if \"output_strftime_format\" in kwargs:\n output_strftime_format = kwargs[\"output_strftime_format\"]\n parsed_maybe_limited_unexpected_list = []\n for val in maybe_limited_unexpected_list:\n if val is None or (val[0] is None or val[1] is None):\n parsed_maybe_limited_unexpected_list.append(val)\n else:\n if isinstance(val[0], str) and isinstance(val[1], str):\n val = (parse(val[0]), parse(val[1]))\n parsed_maybe_limited_unexpected_list.append(\n (\n datetime.strftime(val[0], output_strftime_format),\n datetime.strftime(val[1], output_strftime_format),\n )\n )\n maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly\n )\n\n # Currently the abstraction of \"result_format\" that _format_column_map_output provides\n # limits some possible optimizations within the column-map decorator. 
It seems that either\n # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator\n # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.\n # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement\n # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).\n # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and\n # with the optimizations based on result_format. A side benefit would be implementing an interface for the user\n # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for\n # tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done\n # in other dataset implementations.\n return_obj = self._format_map_output(\n result_format,\n success,\n element_count,\n nonnull_count,\n unexpected_count,\n maybe_limited_unexpected_list,\n unexpected_index_list=None,\n )\n\n # # FIXME Temp fix for result format\n # if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:\n # del return_obj['result']['unexpected_percent_nonmissing']\n # del return_obj['result']['missing_count']\n # del return_obj['result']['missing_percent']\n # try:\n # del return_obj['result']['partial_unexpected_counts']\n # except KeyError:\n # pass\n\n cols_df.unpersist()\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n\n return inner_wrapper\n\n @classmethod\n def multicolumn_map_expectation(cls, func):\n \"\"\"\n The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of\n evaluating truthiness of some condition on a per row basis across a set of columns.\n \"\"\"\n argspec = inspect.getfullargspec(func)[0][1:]\n\n @cls.expectation(argspec)\n @wraps(func)\n def inner_wrapper(\n self,\n column_list,\n mostly=None,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None,\n *args,\n **kwargs,\n ):\n # Rename column so we only have to handle dot notation here\n eval_cols = []\n for col_name in column_list:\n eval_col = \"__eval_col_\" + col_name.replace(\".\", \"__\").replace(\"`\", \"_\")\n eval_cols.append(eval_col)\n self.spark_df = self.spark_df.withColumn(eval_col, col(col_name))\n if result_format is None:\n result_format = self.default_expectation_args[\"result_format\"]\n\n result_format = parse_result_format(result_format)\n\n # this is a little dangerous: expectations that specify \"COMPLETE\" result format and have a very\n # large number of unexpected results could hang for a long time. 
we should either call this out in docs\n # or put a limit on it\n if result_format[\"result_format\"] == \"COMPLETE\":\n unexpected_count_limit = None\n else:\n unexpected_count_limit = result_format[\"partial_unexpected_count\"]\n\n temp_df = self.spark_df.select(*eval_cols) # pyspark.sql.DataFrame\n\n # a couple of tests indicate that caching here helps performance\n temp_df.cache()\n element_count = self.get_row_count()\n\n if ignore_row_if == \"all_values_are_missing\":\n boolean_mapped_skip_values = temp_df.select(\n [\n *eval_cols,\n reduce(\n lambda a, b: a & b, [col(c).isNull() for c in eval_cols]\n ).alias(\"__null_val\"),\n ]\n )\n elif ignore_row_if == \"any_value_is_missing\":\n boolean_mapped_skip_values = temp_df.select(\n [\n *eval_cols,\n reduce(\n lambda a, b: a | b, [col(c).isNull() for c in eval_cols]\n ).alias(\"__null_val\"),\n ]\n )\n elif ignore_row_if == \"never\":\n boolean_mapped_skip_values = temp_df.select(\n [*eval_cols, lit(False).alias(\"__null_val\")]\n )\n else:\n raise ValueError(\"Unknown value of ignore_row_if: %s\", (ignore_row_if,))\n\n nonnull_df = boolean_mapped_skip_values.filter(\"__null_val = False\")\n nonnull_count = nonnull_df.count()\n\n cols_df = nonnull_df.select(*eval_cols)\n\n success_df = func(self, cols_df, *args, **kwargs)\n success_count = success_df.filter(\"__success = True\").count()\n\n unexpected_count = nonnull_count - success_count\n if unexpected_count == 0:\n maybe_limited_unexpected_list = []\n else:\n # here's an example of a place where we could do optimizations if we knew result format: see\n # comment block below\n unexpected_df = success_df.filter(\"__success = False\")\n if unexpected_count_limit:\n unexpected_df = unexpected_df.limit(unexpected_count_limit)\n maybe_limited_unexpected_list = [\n OrderedDict(\n (col_name, row[eval_col_name])\n for (col_name, eval_col_name) in zip(column_list, eval_cols)\n )\n for row in unexpected_df.collect()\n ]\n\n if \"output_strftime_format\" in kwargs:\n output_strftime_format = kwargs[\"output_strftime_format\"]\n parsed_maybe_limited_unexpected_list = []\n for val in maybe_limited_unexpected_list:\n if val is None or not all(v for k, v in val):\n parsed_maybe_limited_unexpected_list.append(val)\n else:\n if all(isinstance(v, str) for k, v in val):\n val = OrderedDict((k, parse(v)) for k, v in val)\n parsed_maybe_limited_unexpected_list.append(\n OrderedDict(\n (k, datetime.strftime(v, output_strftime_format))\n for k, v in val\n )\n )\n maybe_limited_unexpected_list = parsed_maybe_limited_unexpected_list\n\n success, percent_success = self._calc_map_expectation_success(\n success_count, nonnull_count, mostly\n )\n\n # Currently the abstraction of \"result_format\" that _format_column_map_output provides\n # limits some possible optimizations within the column-map decorator. It seems that either\n # this logic should be completely rolled into the processing done in the column_map decorator, or that the decorator\n # should do a minimal amount of computation agnostic of result_format, and then delegate the rest to this method.\n # In the first approach, it could make sense to put all of this decorator logic in Dataset, and then implement\n # properties that require dataset-type-dependent implementations (as is done with SparkDFDataset.row_count currently).\n # Then a new dataset type could just implement these properties/hooks and Dataset could deal with caching these and\n # with the optimizations based on result_format. 
A side benefit would be implementing an interface for the user\n # to get basic info about a dataset in a standardized way, e.g. my_dataset.row_count, my_dataset.columns (only for\n # tablular datasets maybe). However, unclear if this is worth it or if it would conflict with optimizations being done\n # in other dataset implementations.\n return_obj = self._format_map_output(\n result_format,\n success,\n element_count,\n nonnull_count,\n unexpected_count,\n maybe_limited_unexpected_list,\n unexpected_index_list=None,\n )\n\n temp_df.unpersist()\n\n return return_obj\n\n inner_wrapper.__name__ = func.__name__\n inner_wrapper.__doc__ = func.__doc__\n\n return inner_wrapper\n\n\nclass SparkDFDataset(MetaSparkDFDataset):\n \"\"\"\nThis class holds an attribute `spark_df` which is a spark.sql.DataFrame.\n\n--ge-feature-maturity-info--\n\n id: validation_engine_pyspark_self_managed\n title: Validation Engine - pyspark - Self-Managed\n icon:\n short_description: Use Spark DataFrame to validate data\n description: Use Spark DataFrame to validate data\n how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_spark_dataframe_as_a_batch.html\n maturity: Production\n maturity_details:\n api_stability: Stable\n implementation_completeness: Moderate\n unit_test_coverage: Complete\n integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation\n documentation_completeness: Complete\n bug_risk: Low/Moderate\n expectation_completeness: Moderate\n\n id: validation_engine_databricks\n title: Validation Engine - Databricks\n icon:\n short_description: Use Spark DataFrame in a Databricks cluster to validate data\n description: Use Spark DataFrame in a Databricks cluster to validate data\n how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_spark_dataframe_as_a_batch.html\n maturity: Beta\n maturity_details:\n api_stability: Stable\n implementation_completeness: Low (dbfs-specific handling)\n unit_test_coverage: N/A -> implementation not different\n integration_infrastructure_test_coverage: Minimal (we've tested a bit, know others have used it)\n documentation_completeness: Moderate (need docs on managing project configuration via dbfs/etc.)\n bug_risk: Low/Moderate\n expectation_completeness: Moderate\n\n id: validation_engine_emr_spark\n title: Validation Engine - EMR - Spark\n icon:\n short_description: Use Spark DataFrame in an EMR cluster to validate data\n description: Use Spark DataFrame in an EMR cluster to validate data\n how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_spark_dataframe_as_a_batch.html\n maturity: Experimental\n maturity_details:\n api_stability: Stable\n implementation_completeness: Low (need to provide guidance on \"known good\" paths, and we know there are many \"knobs\" to tune that we have not explored/tested)\n unit_test_coverage: N/A -> implementation not different\n integration_infrastructure_test_coverage: Unknown\n documentation_completeness: Low (must install specific/latest version but do not have docs to that effect or of known useful paths)\n bug_risk: Low/Moderate\n expectation_completeness: Moderate\n\n id: validation_engine_spark_other\n title: Validation Engine - Spark - Other\n icon:\n short_description: Use Spark DataFrame to validate data\n description: Use Spark DataFrame to validate data\n how_to_guide_url: 
https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_spark_dataframe_as_a_batch.html\n maturity: Experimental\n maturity_details:\n api_stability: Stable\n implementation_completeness: Other (we haven't tested possibility, known glue deployment)\n unit_test_coverage: N/A -> implementation not different\n integration_infrastructure_test_coverage: Unknown\n documentation_completeness: Low (must install specific/latest version but do not have docs to that effect or of known useful paths)\n bug_risk: Low/Moderate\n expectation_completeness: Moderate\n\n--ge-feature-maturity-info--\n \"\"\"\n\n @classmethod\n def from_dataset(cls, dataset=None):\n if isinstance(dataset, SparkDFDataset):\n return cls(spark_df=dataset.spark_df)\n else:\n raise ValueError(\"from_dataset requires a SparkDFDataset dataset\")\n\n def __init__(self, spark_df, *args, **kwargs):\n # Creation of the Spark DataFrame is done outside this class\n self.spark_df = spark_df\n self._persist = kwargs.pop(\"persist\", True)\n if self._persist:\n self.spark_df.persist()\n super().__init__(*args, **kwargs)\n\n def head(self, n=5):\n \"\"\"Returns a *PandasDataset* with the first *n* rows of the given Dataset\"\"\"\n return PandasDataset(\n self.spark_df.limit(n).toPandas(),\n expectation_suite=self.get_expectation_suite(\n discard_failed_expectations=False,\n discard_result_format_kwargs=False,\n discard_catch_exceptions_kwargs=False,\n discard_include_config_kwargs=False,\n ),\n )\n\n def get_row_count(self):\n return self.spark_df.count()\n\n def get_column_count(self):\n return len(self.spark_df.columns)\n\n def get_table_columns(self) -> List[str]:\n return self.spark_df.columns\n\n def get_column_nonnull_count(self, column):\n return self.spark_df.filter(col(column).isNotNull()).count()\n\n def get_column_mean(self, column):\n # TODO need to apply this logic to other such methods?\n types = dict(self.spark_df.dtypes)\n if types[column] not in (\"int\", \"float\", \"double\", \"bigint\"):\n raise TypeError(\"Expected numeric column type for function mean()\")\n result = self.spark_df.select(column).groupBy().mean().collect()[0]\n return result[0] if len(result) > 0 else None\n\n def get_column_sum(self, column):\n return self.spark_df.select(column).groupBy().sum().collect()[0][0]\n\n # TODO: consider getting all basic statistics in one go:\n def _describe_column(self, column):\n # temp_column = self.spark_df.select(column).where(col(column).isNotNull())\n # return self.spark_df.select(\n # [\n # count(temp_column),\n # mean(temp_column),\n # stddev(temp_column),\n # min(temp_column),\n # max(temp_column)\n # ]\n # )\n pass\n\n def get_column_max(self, column, parse_strings_as_datetimes=False):\n temp_column = self.spark_df.select(column).where(col(column).isNotNull())\n if parse_strings_as_datetimes:\n temp_column = self._apply_dateutil_parse(temp_column)\n result = temp_column.agg({column: \"max\"}).collect()\n if not result or not result[0]:\n return None\n return result[0][0]\n\n def get_column_min(self, column, parse_strings_as_datetimes=False):\n temp_column = self.spark_df.select(column).where(col(column).isNotNull())\n if parse_strings_as_datetimes:\n temp_column = self._apply_dateutil_parse(temp_column)\n result = temp_column.agg({column: \"min\"}).collect()\n if not result or not result[0]:\n return None\n return result[0][0]\n\n def get_column_value_counts(self, column, sort=\"value\", collate=None):\n if sort not in [\"value\", \"count\", \"none\"]:\n raise ValueError(\"sort must 
be either 'value', 'count', or 'none'\")\n if collate is not None:\n raise ValueError(\"collate parameter is not supported in SparkDFDataset\")\n value_counts = (\n self.spark_df.select(column)\n .where(col(column).isNotNull())\n .groupBy(column)\n .count()\n )\n if sort == \"value\":\n value_counts = value_counts.orderBy(column)\n elif sort == \"count\":\n value_counts = value_counts.orderBy(desc(\"count\"))\n value_counts = value_counts.collect()\n series = pd.Series(\n [row[\"count\"] for row in value_counts],\n index=pd.Index(data=[row[column] for row in value_counts], name=\"value\"),\n name=\"count\",\n )\n return series\n\n def get_column_unique_count(self, column):\n return self.spark_df.agg(countDistinct(column)).collect()[0][0]\n\n def get_column_modes(self, column):\n \"\"\"leverages computation done in _get_column_value_counts\"\"\"\n s = self.get_column_value_counts(column)\n return list(s[s == s.max()].index)\n\n def get_column_median(self, column):\n # We will get the two middle values by choosing an epsilon to add\n # to the 50th percentile such that we always get exactly the middle two values\n # (i.e. 0 < epsilon < 1 / (2 * values))\n\n # Note that this can be an expensive computation; we are not exposing\n # spark's ability to estimate.\n # We add two to 2 * n_values to maintain a legitimate quantile\n # in the degnerate case when n_values = 0\n result = self.spark_df.approxQuantile(\n column, [0.5, 0.5 + (1 / (2 + (2 * self.get_row_count())))], 0\n )\n return np.mean(result)\n\n def get_column_quantiles(self, column, quantiles, allow_relative_error=False):\n if allow_relative_error is False:\n allow_relative_error = 0.0\n if (\n not isinstance(allow_relative_error, float)\n or allow_relative_error < 0\n or allow_relative_error > 1\n ):\n raise ValueError(\n \"SparkDFDataset requires relative error to be False or to be a float between 0 and 1.\"\n )\n return self.spark_df.approxQuantile(\n column, list(quantiles), allow_relative_error\n )\n\n def get_column_stdev(self, column):\n return self.spark_df.select(stddev_samp(col(column))).collect()[0][0]\n\n def get_column_hist(self, column, bins):\n \"\"\"return a list of counts corresponding to bins\"\"\"\n bins = list(\n copy.deepcopy(bins)\n ) # take a copy since we are inserting and popping\n if bins[0] == -np.inf or bins[0] == -float(\"inf\"):\n added_min = False\n bins[0] = -float(\"inf\")\n else:\n added_min = True\n bins.insert(0, -float(\"inf\"))\n\n if bins[-1] == np.inf or bins[-1] == float(\"inf\"):\n added_max = False\n bins[-1] = float(\"inf\")\n else:\n added_max = True\n bins.append(float(\"inf\"))\n\n temp_column = self.spark_df.select(column).where(col(column).isNotNull())\n bucketizer = Bucketizer(splits=bins, inputCol=column, outputCol=\"buckets\")\n bucketed = bucketizer.setHandleInvalid(\"skip\").transform(temp_column)\n\n # This is painful to do, but: bucketizer cannot handle values outside of a range\n # (hence adding -/+ infinity above)\n\n # Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound\n # for all but the last bin\n\n # But, since the last bin in our case will often be +infinity, we need to\n # find the number of values exactly equal to the upper bound to add those\n\n # We'll try for an optimization by asking for it at the same time\n if added_max:\n upper_bound_count = (\n temp_column.select(column).filter(col(column) == bins[-2]).count()\n )\n else:\n upper_bound_count = 0\n\n hist_rows = bucketed.groupBy(\"buckets\").count().collect()\n # Spark only 
returns buckets that have nonzero counts.\n hist = [0] * (len(bins) - 1)\n for row in hist_rows:\n hist[int(row[\"buckets\"])] = row[\"count\"]\n\n hist[-2] += upper_bound_count\n\n if added_min:\n below_bins = hist.pop(0)\n bins.pop(0)\n if below_bins > 0:\n logger.warning(\"Discarding histogram values below lowest bin.\")\n\n if added_max:\n above_bins = hist.pop(-1)\n bins.pop(-1)\n if above_bins > 0:\n logger.warning(\"Discarding histogram values above highest bin.\")\n\n return hist\n\n def get_column_count_in_range(\n self, column, min_val=None, max_val=None, strict_min=False, strict_max=True\n ):\n if min_val is None and max_val is None:\n raise ValueError(\"Must specify either min or max value\")\n if min_val is not None and max_val is not None and min_val > max_val:\n raise ValueError(\"Min value must be <= to max value\")\n\n result = self.spark_df.select(column)\n if min_val is not None:\n if strict_min:\n result = result.filter(col(column) > min_val)\n else:\n result = result.filter(col(column) >= min_val)\n if max_val is not None:\n if strict_max:\n result = result.filter(col(column) < max_val)\n else:\n result = result.filter(col(column) <= max_val)\n return result.count()\n\n # Utils\n @staticmethod\n def _apply_dateutil_parse(column):\n assert len(column.columns) == 1, \"Expected DataFrame with 1 column\"\n col_name = column.columns[0]\n _udf = udf(parse, sparktypes.TimestampType())\n return column.withColumn(col_name, _udf(col_name))\n\n # Expectations\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_in_set(\n self,\n column, # pyspark.sql.DataFrame\n value_set, # List[Any]\n mostly=None,\n parse_strings_as_datetimes=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n if value_set is None:\n # vacuously true\n return column.withColumn(\"__success\", lit(True))\n if parse_strings_as_datetimes:\n column = self._apply_dateutil_parse(column)\n value_set = [\n parse(value) if isinstance(value, str) else value for value in value_set\n ]\n if None in value_set:\n # spark isin returns None when any value is compared to None\n logger.error(\n \"expect_column_values_to_be_in_set cannot support a None in the value_set in spark\"\n )\n raise ValueError(\n \"expect_column_values_to_be_in_set cannot support a None in the value_set in spark\"\n )\n return column.withColumn(\"__success\", column[0].isin(value_set))\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_not_be_in_set(\n self,\n column, # pyspark.sql.DataFrame\n value_set, # List[Any]\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n if None in value_set:\n # spark isin returns None when any value is compared to None\n logger.error(\n \"expect_column_values_to_not_be_in_set cannot support a None in the value_set in spark\"\n )\n raise ValueError(\n \"expect_column_values_to_not_be_in_set cannot support a None in the value_set in spark\"\n )\n return column.withColumn(\"__success\", ~column[0].isin(value_set))\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_between(\n self,\n column,\n min_value=None,\n max_value=None,\n strict_min=False,\n strict_max=False,\n parse_strings_as_datetimes=None,\n output_strftime_format=None,\n allow_cross_type_comparisons=None,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # NOTE: This function is implemented using 
native functions instead of UDFs, which is a faster\n # implementation. Please ensure new spark implementations migrate to the new style where possible\n if allow_cross_type_comparisons:\n raise ValueError(\"Cross-type comparisons are not valid for SparkDFDataset\")\n\n if parse_strings_as_datetimes:\n min_value = parse(min_value)\n max_value = parse(max_value)\n\n if min_value is None and max_value is None:\n raise ValueError(\"min_value and max_value cannot both be None\")\n elif min_value is None:\n if strict_max:\n return column.withColumn(\n \"__success\",\n when(column[0] < max_value, lit(True)).otherwise(lit(False)),\n )\n else:\n return column.withColumn(\n \"__success\",\n when(column[0] <= max_value, lit(True)).otherwise(lit(False)),\n )\n elif max_value is None:\n if strict_min:\n return column.withColumn(\n \"__success\",\n when(column[0] > min_value, lit(True)).otherwise(lit(False)),\n )\n else:\n return column.withColumn(\n \"__success\",\n when(column[0] >= min_value, lit(True)).otherwise(lit(False)),\n )\n else:\n if min_value > max_value:\n raise ValueError(\"minvalue cannot be greater than max_value\")\n if strict_min and strict_max:\n return column.withColumn(\n \"__success\",\n when(\n (min_value < column[0]) & (column[0] < max_value), lit(True)\n ).otherwise(lit(False)),\n )\n elif strict_min:\n return column.withColumn(\n \"__success\",\n when(\n (min_value < column[0]) & (column[0] <= max_value), lit(True)\n ).otherwise(lit(False)),\n )\n elif strict_max:\n return column.withColumn(\n \"__success\",\n when(\n (min_value <= column[0]) & (column[0] < max_value), lit(True)\n ).otherwise(lit(False)),\n )\n else:\n return column.withColumn(\n \"__success\",\n when(\n (min_value <= column[0]) & (column[0] <= max_value), lit(True)\n ).otherwise(lit(False)),\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_value_lengths_to_be_between(\n self,\n column,\n min_value=None,\n max_value=None,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n if min_value is None and max_value is None:\n return column.withColumn(\"__success\", lit(True))\n elif min_value is None:\n return column.withColumn(\n \"__success\",\n when(length_(column[0]) <= max_value, lit(True)).otherwise(lit(False)),\n )\n elif max_value is None:\n return column.withColumn(\n \"__success\",\n when(length_(column[0]) >= min_value, lit(True)).otherwise(lit(False)),\n )\n # FIXME: whether the below condition is enforced seems to be somewhat inconsistent\n\n # else:\n # if min_value > max_value:\n # raise ValueError(\"minvalue cannot be greater than max_value\")\n\n return column.withColumn(\n \"__success\",\n when(\n (min_value <= length_(column[0])) & (length_(column[0]) <= max_value),\n lit(True),\n ).otherwise(lit(False)),\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_unique(\n self,\n column,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\n \"__success\", count(lit(1)).over(Window.partitionBy(column[0])) <= 1\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_value_lengths_to_equal(\n self,\n column,\n value, # int\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\n \"__success\",\n when(length_(column[0]) == value, lit(True)).otherwise(lit(False)),\n )\n\n @DocInherit\n 
@MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_match_strftime_format(\n self,\n column,\n strftime_format, # str\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # Below is a simple validation that the provided format can both format and parse a datetime object.\n # %D is an example of a format that can format but not parse, e.g.\n try:\n datetime.strptime(\n datetime.strftime(datetime.now(), strftime_format), strftime_format\n )\n except ValueError as e:\n raise ValueError(\"Unable to use provided strftime_format. \" + e.message)\n\n def is_parseable_by_format(val):\n try:\n datetime.strptime(val, strftime_format)\n return True\n except TypeError:\n raise TypeError(\n \"Values passed to expect_column_values_to_match_strftime_format must be of type string.\\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.\"\n )\n except ValueError:\n return False\n\n success_udf = udf(is_parseable_by_format)\n return column.withColumn(\"__success\", success_udf(column[0]))\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_not_be_null(\n self,\n column,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\"__success\", column[0].isNotNull())\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_null(\n self,\n column,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\"__success\", column[0].isNull())\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_match_json_schema(\n self,\n column,\n json_schema,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n def matches_json_schema(val):\n try:\n val_json = json.loads(val)\n jsonschema.validate(val_json, json_schema)\n # jsonschema.validate raises an error if validation fails.\n # So if we make it this far, we know that the validation succeeded.\n return True\n except jsonschema.ValidationError:\n return False\n except jsonschema.SchemaError:\n raise\n except:\n raise\n\n matches_json_schema_udf = udf(matches_json_schema, sparktypes.StringType())\n\n return column.withColumn(\"__success\", matches_json_schema_udf(column[0]))\n\n @DocInherit\n @DataAsset.expectation([\"column\", \"type_\", \"mostly\"])\n def expect_column_values_to_be_of_type(\n self,\n column,\n type_,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # Rename column so we only have to handle dot notation here\n eval_col = \"__eval_col_\" + column.replace(\".\", \"__\").replace(\"`\", \"_\")\n self.spark_df = self.spark_df.withColumn(eval_col, col(column))\n if mostly is not None:\n raise ValueError(\n \"SparkDFDataset does not support column map semantics for column types\"\n )\n\n try:\n col_df = self.spark_df.select(eval_col)\n col_data = [f for f in col_df.schema.fields if f.name == eval_col][0]\n col_type = type(col_data.dataType)\n except IndexError:\n raise ValueError(\"Unrecognized column: %s\" % column)\n except KeyError:\n raise ValueError(\"No type data available for column: %s\" % column)\n\n try:\n if type_ is None:\n # vacuously true\n success = True\n else:\n success = issubclass(col_type, getattr(sparktypes, type_))\n\n return {\"success\": 
success, \"result\": {\"observed_value\": col_type.__name__}}\n\n except AttributeError:\n raise ValueError(\"Unrecognized spark type: %s\" % type_)\n\n @DocInherit\n @DataAsset.expectation([\"column\", \"type_list\", \"mostly\"])\n def expect_column_values_to_be_in_type_list(\n self,\n column,\n type_list: List[str],\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # Rename column so we only have to handle dot notation here\n eval_col = \"__eval_col_\" + column.replace(\".\", \"__\").replace(\"`\", \"_\")\n self.spark_df = self.spark_df.withColumn(eval_col, col(column))\n\n if mostly is not None:\n raise ValueError(\n \"SparkDFDataset does not support column map semantics for column types\"\n )\n\n try:\n col_df = self.spark_df.select(eval_col)\n col_data = [f for f in col_df.schema.fields if f.name == eval_col][0]\n col_type = type(col_data.dataType)\n except IndexError:\n raise ValueError(\"Unrecognized column: %s\" % column)\n except KeyError:\n raise ValueError(\"No database type data available for column: %s\" % column)\n\n if type_list is None:\n success = True\n else:\n types = []\n for type_ in type_list:\n try:\n type_class = getattr(sparktypes, type_)\n types.append(type_class)\n except AttributeError:\n logger.debug(\"Unrecognized type: %s\" % type_)\n if len(types) == 0:\n raise ValueError(\"No recognized spark types in type_list\")\n types = tuple(types)\n success = issubclass(col_type, types)\n return {\"success\": success, \"result\": {\"observed_value\": col_type.__name__}}\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_match_regex(\n self,\n column,\n regex,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\"__success\", column[0].rlike(regex))\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_not_match_regex(\n self,\n column,\n regex,\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n return column.withColumn(\"__success\", ~column[0].rlike(regex))\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_match_regex_list(\n self,\n column,\n regex_list,\n match_on=\"any\",\n mostly=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n if match_on == \"any\":\n return column.withColumn(\"__success\", column[0].rlike(\"|\".join(regex_list)))\n elif match_on == \"all\":\n formatted_regex_list = [\"(?={})\".format(regex) for regex in regex_list]\n return column.withColumn(\n \"__success\", column[0].rlike(\"\".join(formatted_regex_list))\n )\n else:\n raise ValueError(\"match_on must be either 'any' or 'all'\")\n\n @DocInherit\n @MetaSparkDFDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_equal(\n self,\n column_A,\n column_B,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n column_A_name = column_A.schema.names[1]\n column_B_name = column_B.schema.names[1]\n join_df = column_A.join(\n column_B, column_A[\"__row\"] == column_B[\"__row\"], how=\"inner\"\n )\n return join_df.withColumn(\n \"__success\",\n when(col(column_A_name) == col(column_B_name), True).otherwise(False),\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_pair_map_expectation\n def expect_column_pair_values_A_to_be_greater_than_B(\n self,\n column_A,\n 
column_B,\n or_equal=None,\n parse_strings_as_datetimes=None,\n allow_cross_type_comparisons=None,\n ignore_row_if=\"both_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # FIXME\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n column_A_name = column_A.schema.names[1]\n column_B_name = column_B.schema.names[1]\n\n if parse_strings_as_datetimes:\n _udf = udf(parse, sparktypes.TimestampType())\n # Create new columns for comparison without replacing original values.\n (timestamp_column_A, timestamp_column_B) = (\n \"__ts_{}\".format(column_A_name),\n \"__ts_{}\".format(column_B_name),\n )\n temp_column_A = column_A.withColumn(timestamp_column_A, _udf(column_A_name))\n temp_column_B = column_B.withColumn(timestamp_column_B, _udf(column_B_name))\n # Use the new columns to compare instead of original columns.\n (column_A_name, column_B_name) = (timestamp_column_A, timestamp_column_B)\n\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n join_df = temp_column_A.join(\n temp_column_B, temp_column_A[\"__row\"] == temp_column_B[\"__row\"], how=\"inner\"\n )\n\n if or_equal:\n return join_df.withColumn(\n \"__success\",\n when(col(column_A_name) >= col(column_B_name), True).otherwise(False),\n )\n else:\n return join_df.withColumn(\n \"__success\",\n when(col(column_A_name) > col(column_B_name), True).otherwise(False),\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_pair_map_expectation\n def expect_column_pair_values_to_be_in_set(\n self,\n column_A,\n column_B,\n value_pairs_set, # List[List]\n ignore_row_if=\"both_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n column_A_name = column_A.schema.names[1]\n column_B_name = column_B.schema.names[1]\n\n join_df = column_A.join(\n column_B, column_A[\"__row\"] == column_B[\"__row\"], how=\"inner\"\n )\n\n join_df = join_df.withColumn(\n \"combine_AB\", array(col(column_A_name), col(column_B_name))\n )\n\n value_set_df = (\n SQLContext(self.spark_df._sc)\n .createDataFrame(value_pairs_set, [\"col_A\", \"col_B\"])\n .select(array(\"col_A\", \"col_B\").alias(\"set_AB\"))\n )\n\n return join_df.join(\n value_set_df, join_df[\"combine_AB\"] == value_set_df[\"set_AB\"], \"left\"\n ).withColumn(\n \"__success\", when(col(\"set_AB\").isNull(), lit(False)).otherwise(lit(True))\n )\n\n def expect_multicolumn_values_to_be_unique(\n self,\n column_list, # pyspark.sql.DataFrame\n mostly=None,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n deprecation_warning = (\n \"expect_multicolumn_values_to_be_unique is being deprecated. 
Please use \"\n \"expect_select_column_values_to_be_unique_within_record instead.\"\n )\n warnings.warn(\n deprecation_warning, DeprecationWarning,\n )\n\n return self.expect_select_column_values_to_be_unique_within_record(\n column_list=column_list,\n mostly=mostly,\n ignore_row_if=ignore_row_if,\n result_format=result_format,\n include_config=include_config,\n catch_exceptions=catch_exceptions,\n meta=meta,\n )\n\n @DocInherit\n @MetaSparkDFDataset.multicolumn_map_expectation\n def expect_select_column_values_to_be_unique_within_record(\n self,\n column_list, # pyspark.sql.DataFrame\n mostly=None,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # Might want to throw an exception if only 1 column is passed\n column_names = column_list.schema.names[:]\n conditions = []\n for i in range(0, len(column_names) - 1):\n # Negate the `eqNullSafe` result and append to the conditions.\n conditions.append(\n ~(col(column_names[i]).eqNullSafe(col(column_names[i + 1])))\n )\n\n return column_list.withColumn(\n \"__success\", reduce(lambda a, b: a & b, conditions)\n )\n\n @DocInherit\n @MetaSparkDFDataset.multicolumn_map_expectation\n def expect_compound_columns_to_be_unique(\n self,\n column_list, # pyspark.sql.DataFrame\n mostly=None,\n ignore_row_if=\"all_values_are_missing\",\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n\n # Might want to throw an exception if only 1 column is passed\n column_names = column_list.schema.names[:]\n return column_list.withColumn(\n \"__success\",\n count(lit(1)).over(Window.partitionBy(struct(*column_names))) <= 1,\n )\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_increasing(\n self,\n column, # pyspark.sql.DataFrame\n strictly=False,\n mostly=None,\n parse_strings_as_datetimes=None,\n output_strftime_format=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # string column name\n column_name = column.schema.names[0]\n # check if column is any type that could have na (numeric types)\n na_types = [\n isinstance(column.schema[column_name].dataType, typ)\n for typ in [\n sparktypes.LongType,\n sparktypes.DoubleType,\n sparktypes.IntegerType,\n ]\n ]\n\n # if column is any type that could have NA values, remove them (not filtered by .isNotNull())\n if any(na_types):\n column = column.filter(~isnan(column[0]))\n\n if parse_strings_as_datetimes:\n # convert column to timestamp format\n column = self._apply_dateutil_parse(column)\n # create constant column to order by in window function to preserve order of original df\n column = column.withColumn(\"constant\", lit(\"constant\")).withColumn(\n \"lag\", lag(column[0]).over(Window.orderBy(col(\"constant\")))\n )\n\n column = column.withColumn(\"diff\", datediff(col(column_name), col(\"lag\")))\n\n else:\n column = (\n column.withColumn(\"constant\", lit(\"constant\"))\n .withColumn(\"lag\", lag(column[0]).over(Window.orderBy(col(\"constant\"))))\n .withColumn(\"diff\", column[0] - col(\"lag\"))\n )\n\n # replace lag first row null with 1 so that it is not flagged as fail\n column = column.withColumn(\n \"diff\", when(col(\"diff\").isNull(), 1).otherwise(col(\"diff\"))\n )\n\n if strictly:\n return column.withColumn(\n \"__success\", when(col(\"diff\") >= 1, lit(True)).otherwise(lit(False))\n )\n\n else:\n return column.withColumn(\n \"__success\", when(col(\"diff\") >= 0, lit(True)).otherwise(lit(False))\n 
)\n\n @DocInherit\n @MetaSparkDFDataset.column_map_expectation\n def expect_column_values_to_be_decreasing(\n self,\n column, # pyspark.sql.DataFrame\n strictly=False,\n mostly=None,\n parse_strings_as_datetimes=None,\n output_strftime_format=None,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n # string column name\n column_name = column.schema.names[0]\n # check if column is any type that could have na (numeric types)\n na_types = [\n isinstance(column.schema[column_name].dataType, typ)\n for typ in [\n sparktypes.LongType,\n sparktypes.DoubleType,\n sparktypes.IntegerType,\n ]\n ]\n\n # if column is any type that could have NA values, remove them (not filtered by .isNotNull())\n if any(na_types):\n column = column.filter(~isnan(column[0]))\n\n if parse_strings_as_datetimes:\n # convert column to timestamp format\n column = self._apply_dateutil_parse(column)\n # create constant column to order by in window function to preserve order of original df\n column = column.withColumn(\"constant\", lit(\"constant\")).withColumn(\n \"lag\", lag(column[0]).over(Window.orderBy(col(\"constant\")))\n )\n\n column = column.withColumn(\"diff\", datediff(col(column_name), col(\"lag\")))\n\n else:\n column = (\n column.withColumn(\"constant\", lit(\"constant\"))\n .withColumn(\"lag\", lag(column[0]).over(Window.orderBy(col(\"constant\"))))\n .withColumn(\"diff\", column[0] - col(\"lag\"))\n )\n\n # replace lag first row null with -1 so that it is not flagged as fail\n column = column.withColumn(\n \"diff\", when(col(\"diff\").isNull(), -1).otherwise(col(\"diff\"))\n )\n\n if strictly:\n return column.withColumn(\n \"__success\", when(col(\"diff\") <= -1, lit(True)).otherwise(lit(False))\n )\n\n else:\n return column.withColumn(\n \"__success\", when(col(\"diff\") <= 0, lit(True)).otherwise(lit(False))\n )\n\n @DocInherit\n @MetaSparkDFDataset.multicolumn_map_expectation\n def expect_multicolumn_sum_to_equal(\n self,\n column_list,\n sum_total,\n result_format=None,\n include_config=True,\n catch_exceptions=None,\n meta=None,\n ):\n \"\"\" Multi-Column Map Expectation\n\n Expects that sum of all rows for a set of columns is equal to a specific value\n\n Args:\n column_list (List[str]): \\\n Set of columns to be checked\n sum_total (int): \\\n expected sum of columns\n \"\"\"\n expression = \"+\".join(\n [\"COALESCE({}, 0)\".format(col) for col in column_list.columns]\n )\n column_list = column_list.withColumn(\"actual_total\", expr(expression))\n return column_list.withColumn(\n \"__success\",\n when(col(\"actual_total\") == sum_total, lit(True)).otherwise(lit(False)),\n )\n" ]
[ [ "numpy.mean", "pandas.Index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
pyccel/spl
[ "e3a799b6791807e71660dac72324b22593804cb6" ]
[ "psydac/core/tests/test_kernels.py" ]
[ "import numpy as np\nimport pytest\nimport os\nimport itertools as it\n\nfrom sympde.topology import Domain, ScalarFunctionSpace, Square, Cube\nfrom psydac.api.discretization import discretize\nfrom psydac.fem.basic import FemField\nfrom psydac.mapping.discrete import NurbsMapping\nfrom psydac.core.bsplines import cell_index, basis_ders_on_irregular_grid, breakpoints, elements_spans, basis_ders_on_quad_grid\n\nfrom psydac.core.kernels import (eval_fields_2d_no_weights, eval_fields_3d_no_weights,\n eval_fields_2d_irregular_no_weights, eval_fields_3d_irregular_no_weights,\n eval_fields_2d_weighted, eval_fields_3d_weighted,\n eval_fields_2d_irregular_weighted, eval_fields_3d_irregular_weighted, \n eval_jacobians_2d, eval_jacobians_3d,\n eval_jacobians_irregular_2d, eval_jacobians_irregular_3d,\n eval_jacobians_2d_weights, eval_jacobians_3d_weights,\n eval_jacobians_irregular_2d_weights, eval_jacobians_irregular_3d_weights,\n eval_jacobians_inv_2d, eval_jacobians_inv_3d,\n eval_jacobians_inv_irregular_2d, eval_jacobians_inv_irregular_3d,\n eval_jacobians_inv_2d_weights, eval_jacobians_inv_3d_weights,\n eval_jacobians_inv_irregular_2d_weights, eval_jacobians_inv_irregular_3d_weights,\n eval_jac_det_2d, eval_jac_det_3d,\n eval_jac_det_irregular_2d, eval_jac_det_irregular_3d,\n eval_jac_det_2d_weights, eval_jac_det_3d_weights,\n eval_jac_det_irregular_2d_weights, eval_jac_det_irregular_3d_weights,\n pushforward_2d_l2, pushforward_3d_l2,\n pushforward_2d_hdiv, pushforward_3d_hdiv,\n pushforward_2d_hcurl, pushforward_3d_hcurl)\n \n\n\n# Get mesh directory\ntry:\n mesh_dir = os.environ['PSYDAC_MESH_DIR']\nexcept KeyError:\n base_dir = os.path.dirname(os.path.realpath(__file__))\n base_dir = os.path.join(base_dir, '..', '..', '..')\n mesh_dir = os.path.join(base_dir, 'mesh')\n\n\n# Tolerance for testing float equality\nRTOL = 1e-14\nATOL = 1e-12\n\[email protected]('geometry', ('identity_2d.h5', 'identity_3d.h5', 'bent_pipe.h5',\n 'collela_2d.h5', 'collela_3d.h5'))\[email protected]('npts_per_cell', [2, 3, 4])\ndef test_regular_jacobians(geometry, npts_per_cell):\n filename = os.path.join(mesh_dir, geometry)\n domain = Domain.from_file(filename)\n\n # Discretization\n domainh = discretize(domain, filename=filename)\n mapping = list(domainh.mappings.values())[0]\n ldim = mapping.ldim\n space_h = mapping.space\n # Preprocessing\n is_nurbs = isinstance(mapping, NurbsMapping)\n\n ncells = tuple(len(space_h.breaks[i]) - 1 for i in range(ldim))\n regular_grid = [np.concatenate(\n [np.random.random(size=npts_per_cell) * (\n space_h.breaks[i][j + 1] \n - space_h.breaks[i][j]\n ) \n + space_h.breaks[i][j]\n for j in range(ncells[i])\n ]\n ) \n for i in range(ldim)]\n\n # Direct API\n if ldim == 2:\n jacobian_matrix_direct = np.array([[mapping.jac_mat(e1, e2) for e2 in regular_grid[1]] for e1 in regular_grid[0]])\n\n if ldim == 3:\n jacobian_matrix_direct = np.array([[[mapping.jac_mat(e1, e2, e3)\n for e3 in regular_grid[2]]\n for e2 in regular_grid[1]]\n for e1 in regular_grid[0]])\n\n # Mapping related quantities through kernel functions\n degree = space_h.degree\n knots = [space_h.spaces[i].knots for i in range(ldim)]\n \n global_basis = [basis_ders_on_quad_grid(knots[i], \n degree[i], \n np.reshape(regular_grid[i], (ncells[i], npts_per_cell)),\n 1, \n space_h.spaces[i].basis) for i in range(ldim)\n ]\n v = space_h.vector_space\n global_spans = [elements_spans(knots[i], degree[i]) - v.starts[i] + v.shifts[i] * v.pads[i] for i in range(ldim)]\n\n shape_grid = tuple(ncells[i] * npts_per_cell for i in 
range(ldim))\n n_eval_points = (npts_per_cell,) * ldim\n jac_mats = np.zeros(shape_grid + (ldim, ldim))\n inv_jac_mats = np.zeros(shape_grid + (ldim, ldim))\n jac_dets = np.zeros(shape_grid)\n\n if is_nurbs:\n global_arr_weights = mapping._weights_field.coeffs._data\n\n if ldim == 2:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n\n # Compute the jacobians\n eval_jacobians_2d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_weights, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_2d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n global_arr_weights, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_2d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n global_arr_weights, jac_dets)\n\n if ldim == 3:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n global_arr_z = mapping._fields[2].coeffs._data\n\n # Compute the jacobians\n eval_jacobians_3d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n global_arr_weights, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_3d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n global_arr_weights, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_3d_weights(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n global_arr_weights, jac_dets)\n else:\n if ldim == 2:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n # Compute the jacobians\n eval_jacobians_2d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_2d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_2d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n jac_dets)\n\n if ldim == 3:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n global_arr_z = mapping._fields[2].coeffs._data\n\n # Compute the jacobians\n eval_jacobians_3d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_3d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_3d(*ncells, *degree, *n_eval_points, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n jac_dets)\n print(np.max(jacobian_matrix_direct - jac_mats))\n assert np.allclose(jacobian_matrix_direct, jac_mats, atol=ATOL, rtol=RTOL)\n\n if ldim == 2:\n for i, j in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1])):\n # Assert that the computed inverse is the inverse.\n assert np.allclose(np.dot(jac_mats[i, j], inv_jac_mats[i, j]), np.eye(ldim), atol=ATOL, rtol=RTOL)\n # Assert that the computed Jacobian determinant is the Jacobian determinant\n assert 
np.allclose(np.linalg.det(jac_mats[i, j]), jac_dets[i, j], atol=ATOL, rtol=RTOL)\n\n if ldim == 3:\n for i, j, k in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1]), range(jac_mats.shape[2])):\n # Assert that the computed inverse is the inverse.\n assert np.allclose(np.dot(jac_mats[i, j, k], inv_jac_mats[i, j, k]), np.eye(ldim), atol=ATOL, rtol=RTOL)\n # Assert that the computed Jacobian determinant is the Jacobian determinant\n assert np.allclose(np.linalg.det(jac_mats[i, j, k]), jac_dets[i, j, k], atol=ATOL, rtol=RTOL)\n\n\n@pytest.mark.parametrize('geometry', ('identity_2d.h5', 'identity_3d.h5', 'bent_pipe.h5',\n 'collela_2d.h5', 'collela_3d.h5'))\n@pytest.mark.parametrize('npts', [2, 10, 20])\ndef test_irregular_jacobians(geometry, npts):\n filename = os.path.join(mesh_dir, geometry)\n domain = Domain.from_file(filename)\n\n # Discretization\n domainh = discretize(domain, filename=filename)\n mapping = list(domainh.mappings.values())[0]\n ldim = mapping.ldim\n space_h = mapping.space\n # Preprocessing\n is_nurbs = isinstance(mapping, NurbsMapping)\n\n irregular_grid = [np.random.random(npts) for i in range(ldim)]\n\n # Direct API\n if ldim == 2:\n jacobian_matrix_direct = np.array([[mapping.jac_mat(e1, e2) for e2 in irregular_grid[1]] for e1 in irregular_grid[0]])\n\n if ldim == 3:\n jacobian_matrix_direct = np.array([[[mapping.jac_mat(e1, e2, e3)\n for e3 in irregular_grid[2]]\n for e2 in irregular_grid[1]]\n for e1 in irregular_grid[0]])\n\n # Mapping related quantities through kernel functions\n degree = space_h.degree\n knots = [space_h.spaces[i].knots for i in range(ldim)]\n cell_indexes = [cell_index(space_h.breaks[i], irregular_grid[i]) for i in range(ldim)]\n global_basis = [basis_ders_on_irregular_grid(knots[i], \n degree[i],\n irregular_grid[i],\n cell_indexes[i],\n 1, \n space_h.spaces[i].basis) for i in range(ldim)\n ]\n v = space_h.vector_space\n global_spans = [elements_spans(knots[i], degree[i]) - v.starts[i] + v.shifts[i] * v.pads[i] for i in range(ldim)]\n\n npts = (npts,) * ldim \n\n shape_grid = npts\n jac_mats = np.zeros(shape_grid + (ldim, ldim))\n inv_jac_mats = np.zeros(shape_grid + (ldim, ldim))\n jac_dets = np.zeros(shape_grid)\n\n if is_nurbs:\n global_arr_weights = mapping._weights_field.coeffs._data\n\n if ldim == 2:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n\n # Compute the jacobians\n eval_jacobians_irregular_2d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_weights, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_irregular_2d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n global_arr_weights, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_irregular_2d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n global_arr_weights, jac_dets)\n\n if ldim == 3:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n global_arr_z = mapping._fields[2].coeffs._data\n\n # Compute the jacobians (irregular variant: this test passes cell_indexes, not n_eval_points)\n eval_jacobians_irregular_3d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n global_arr_weights, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_irregular_3d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n 
global_arr_weights, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_irregular_3d_weights(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n global_arr_weights, jac_dets)\n else:\n if ldim == 2:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n # Compute the jacobians\n eval_jacobians_irregular_2d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_irregular_2d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_irregular_2d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y,\n jac_dets)\n\n if ldim == 3:\n global_arr_x = mapping._fields[0].coeffs._data\n global_arr_y = mapping._fields[1].coeffs._data\n global_arr_z = mapping._fields[2].coeffs._data\n\n # Compute the jacobians\n eval_jacobians_irregular_3d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z, jac_mats)\n\n # Compute the inverses of the jacobians\n eval_jacobians_inv_irregular_3d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z, inv_jac_mats)\n\n # Compute the determinant of the jacobians\n eval_jac_det_irregular_3d(*npts, *degree, *cell_indexes, *global_basis, *global_spans,\n global_arr_x, global_arr_y, global_arr_z,\n jac_dets)\n print(np.max(jacobian_matrix_direct - jac_mats))\n assert np.allclose(jacobian_matrix_direct, jac_mats, atol=ATOL, rtol=RTOL)\n\n if ldim == 2:\n for i, j in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1])):\n # Assert that the computed inverse is the inverse.\n assert np.allclose(np.dot(jac_mats[i, j], inv_jac_mats[i, j]), np.eye(ldim), atol=ATOL, rtol=RTOL)\n # Assert that the computed Jacobian determinant is the Jacobian determinant\n assert np.allclose(np.linalg.det(jac_mats[i, j]), jac_dets[i, j], atol=ATOL, rtol=RTOL)\n\n if ldim == 3:\n for i, j, k in it.product(range(jac_mats.shape[0]), range(jac_mats.shape[1]), range(jac_mats.shape[2])):\n # Assert that the computed inverse is the inverse.\n assert np.allclose(np.dot(jac_mats[i, j, k], inv_jac_mats[i, j, k]), np.eye(ldim), atol=ATOL, rtol=RTOL)\n # Assert that the computed Jacobian determinant is the Jacobian determinant\n assert np.allclose(np.linalg.det(jac_mats[i, j, k]), jac_dets[i, j, k], atol=ATOL, rtol=RTOL)\n\n\n@pytest.mark.parametrize(\"knots, ldim, degree\", \n [([np.sort(np.concatenate((np.zeros(3), np.random.random(9), np.ones(3)))) for i in range(2)], 2, [2] * 2),\n ([np.sort(np.concatenate((np.zeros(4), np.random.random(9), np.ones(4)))) for i in range(2)], 2, [3] * 2),\n ([np.sort(np.concatenate((np.zeros(3), np.random.random(9), np.ones(3)))) for i in range(3)], 3, [2] * 3),\n ([np.sort(np.concatenate((np.zeros(4), np.random.random(9), np.ones(4)))) for i in range(3)], 3, [3] * 3),\n ([np.array([0.0] * 3 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 3)] * 2, 2, [2] * 2),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)] * 2, 2, [3] * 2),\n ([np.array([0.0] * 3 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 3)] * 3, 3, [2] * 3),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)] * 3, 3, [3] * 3),\n ([np.array([0.0] * 4 + [0.1, 
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3)], \n 2, \n [3, 2]),\n ([np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)], \n 2, \n [2, 3]),\n ([np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)],\n 3, \n [2, 3, 3]),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)],\n 3, \n [3, 2, 3]),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3)],\n 3, \n [3, 3, 2]),\n ]\n)\n@pytest.mark.parametrize(\"npts_per_cell\", [2, 3, 4])\ndef test_regular_evaluations(knots, ldim, degree, npts_per_cell):\n if ldim == 2:\n domain = Square()\n else:\n domain = Cube()\n space = ScalarFunctionSpace('space', domain)\n\n ncells = [len(breakpoints(knots[i], degree[i])) - 1 for i in range(ldim)]\n\n domain_h = discretize(domain, ncells=ncells)\n\n space_h = discretize(space, domain_h, knots=knots, degree=degree)\n\n field = FemField(space_h)\n weight = FemField(space_h)\n\n field.coeffs._data[:] = np.random.random(field.coeffs._data.shape)\n weight.coeffs._data[:] = np.random.random(weight.coeffs._data.shape)\n\n regular_grid = [np.concatenate(\n [np.random.random(size=npts_per_cell) * (\n space_h.breaks[i][j + 1] \n - space_h.breaks[i][j]\n ) \n + space_h.breaks[i][j]\n for j in range(ncells[i])\n ]\n ) \n for i in range(ldim)]\n\n # Direct eval\n if ldim == 2:\n # No weights\n f_direct = np.array([[space_h.eval_fields([e1, e2], field) for e2 in regular_grid[1]] for e1 in regular_grid[0]])\n\n # Weighted\n f_direct_w = np.array([[np.array(space_h.eval_fields([e1, e2], field, weights=weight))\n / np.array(space_h.eval_fields([e1, e2], weight))\n for e2 in regular_grid[1]]\n for e1 in regular_grid[0]])\n\n if ldim == 3:\n # No weights\n f_direct = np.array([[[space_h.eval_fields([e1, e2, e3], field)\n for e3 in regular_grid[2]]\n for e2 in regular_grid[1]]\n for e1 in regular_grid[0]])\n\n # Weighted\n f_direct_w = np.array([[[np.array(space_h.eval_fields([e1, e2, e3], field, weights=weight))\n / np.array(space_h.eval_fields([e1, e2, e3], weight))\n for e3 in regular_grid[2]]\n for e2 in regular_grid[1]]\n for e1 in regular_grid[0]])\n \n global_basis = [basis_ders_on_quad_grid(knots[i], \n degree[i], \n np.reshape(regular_grid[i], (ncells[i], npts_per_cell)),\n 0, \n space_h.spaces[i].basis) for i in range(ldim)\n ]\n v = space_h.vector_space\n global_spans = [elements_spans(knots[i], degree[i]) - v.starts[i] + v.shifts[i] * v.pads[i] for i in range(ldim)]\n\n n_eval_points = (npts_per_cell,) * ldim\n out_field = np.zeros(tuple(ncells[i] * n_eval_points[i] for i in range(ldim)) + (1,))\n out_field_w = np.zeros_like(out_field)\n\n global_arr_field = field.coeffs._data.reshape(field.coeffs._data.shape + (1,))\n global_arr_w = weight.coeffs._data\n\n if ldim == 2:\n # No weights\n eval_fields_2d_no_weights(*ncells, *degree, *n_eval_points, *global_basis,\n *global_spans, global_arr_field, out_field)\n \n # Weighted\n eval_fields_2d_weighted(*ncells, *degree, *n_eval_points, *global_basis,\n *global_spans, global_arr_field, global_arr_w, out_field_w)\n \n if ldim == 3:\n # No weights\n 
eval_fields_3d_no_weights(*ncells, *degree, *n_eval_points, *global_basis,\n *global_spans, global_arr_field, out_field)\n \n # Weighted\n eval_fields_3d_weighted(*ncells, *degree, *n_eval_points, *global_basis,\n *global_spans, global_arr_field, global_arr_w, out_field_w)\n \n assert np.allclose(out_field, f_direct, atol=ATOL, rtol=RTOL)\n assert np.allclose(out_field_w, f_direct_w, atol=ATOL, rtol=RTOL)\n\n\n@pytest.mark.parametrize(\"knots, ldim, degree\", \n [([np.sort(np.concatenate((np.zeros(3), np.random.random(9), np.ones(3)))) for i in range(2)], 2, [2] * 2),\n ([np.sort(np.concatenate((np.zeros(4), np.random.random(9), np.ones(4)))) for i in range(2)], 2, [3] * 2),\n ([np.sort(np.concatenate((np.zeros(3), np.random.random(9), np.ones(3)))) for i in range(3)], 3, [2] * 3),\n ([np.sort(np.concatenate((np.zeros(4), np.random.random(9), np.ones(4)))) for i in range(3)], 3, [3] * 3),\n ([np.array([0.0] * 3 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 3)] * 2, 2, [2] * 2),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)] * 2, 2, [3] * 2),\n ([np.array([0.0] * 3 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 3)] * 3, 3, [2] * 3),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)] * 3, 3, [3] * 3),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3)], \n 2, \n [3, 2]),\n ([np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)], \n 2, \n [2, 3]),\n ([np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)],\n 3, \n [2, 3, 3]),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4)],\n 3, \n [3, 2, 3]),\n ([np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 4 + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + [1.0] * 4),\n np.array([0.0] * 3 + [1.0] * 3)],\n 3, \n [3, 3, 2]),\n ]\n)\n@pytest.mark.parametrize('npts', [2, 10, 20])\ndef test_irregular_evaluations(knots, ldim, degree, npts):\n if ldim == 2:\n domain = Square()\n else:\n domain = Cube()\n space = ScalarFunctionSpace('space', domain)\n\n ncells = [len(breakpoints(knots[i], degree[i])) - 1 for i in range(ldim)]\n\n domain_h = discretize(domain, ncells=ncells)\n\n space_h = discretize(space, domain_h, knots=knots, degree=degree)\n\n field = FemField(space_h)\n weight = FemField(space_h)\n\n field.coeffs._data[:] = np.random.random(field.coeffs._data.shape)\n weight.coeffs._data[:] = np.random.random(weight.coeffs._data.shape)\n\n irregular_grid = [np.random.random(npts) for i in range(ldim)]\n \n for i in range(ldim):\n j_left = np.random.randint(low=0, high=len(irregular_grid[i]))\n j_right = np.random.randint(low=0, high=len(irregular_grid[i]))\n j_interior = np.random.randint(low=0, high=len(irregular_grid[i]) - 1)\n\n # left boundary inserted at j_left\n irregular_grid[i][j_left] = space_h.breaks[i][0]\n # right boundary inserted at j_right\n irregular_grid[i][j_right] = space_h.breaks[i][-1]\n\n try:\n j_bk = np.random.randint(low=1, high=len(space_h.breaks[i]) - 1)\n # random interior breakpoint inserted at j_interior and j_interior + 1\n irregular_grid[i][j_interior:j_interior+2] = 
space_h.breaks[i][j_bk]\n except ValueError:\n pass\n \n # Direct eval\n if ldim == 2:\n # No weights\n f_direct = np.array([[space_h.eval_fields([e1, e2], field) for e2 in irregular_grid[1]] for e1 in irregular_grid[0]])\n\n # Weighted\n f_direct_w = np.array([[np.array(space_h.eval_fields([e1, e2], field, weights=weight))\n / np.array(space_h.eval_fields([e1, e2], weight))\n for e2 in irregular_grid[1]]\n for e1 in irregular_grid[0]])\n\n if ldim == 3:\n # No weights\n f_direct = np.array([[[space_h.eval_fields([e1, e2, e3], field)\n for e3 in irregular_grid[2]]\n for e2 in irregular_grid[1]]\n for e1 in irregular_grid[0]])\n\n # Weighted\n f_direct_w = np.array([[[np.array(space_h.eval_fields([e1, e2, e3], field, weights=weight))\n / np.array(space_h.eval_fields([e1, e2, e3], weight))\n for e3 in irregular_grid[2]]\n for e2 in irregular_grid[1]]\n for e1 in irregular_grid[0]])\n \n cell_indexes = [cell_index(space_h.breaks[i], irregular_grid[i]) for i in range(ldim)]\n global_basis = [basis_ders_on_irregular_grid(knots[i], \n degree[i], \n irregular_grid[i], \n cell_indexes[i], \n 0, \n space_h.spaces[i].basis) for i in range(ldim)\n ]\n v = space_h.vector_space\n global_spans = [elements_spans(knots[i], degree[i]) - v.starts[i] + v.shifts[i] * v.pads[i] for i in range(ldim)]\n\n npts = (npts,) * ldim\n\n out_field = np.zeros(npts + (1,))\n out_field_w = np.zeros_like(out_field)\n\n global_arr_field = field.coeffs._data.reshape(field.coeffs._data.shape + (1,))\n global_arr_w = weight.coeffs._data\n\n if ldim == 2:\n # No weights\n eval_fields_2d_irregular_no_weights(*npts, *degree, *cell_indexes, *global_basis,\n *global_spans, global_arr_field, out_field)\n \n # Weighted\n eval_fields_2d_irregular_weighted(*npts, *degree, *cell_indexes, *global_basis,\n *global_spans, global_arr_field, global_arr_w, out_field_w)\n \n if ldim == 3:\n # No weights\n eval_fields_3d_irregular_no_weights(*npts, *degree, *cell_indexes, *global_basis,\n *global_spans, global_arr_field, out_field)\n \n # Weighted\n eval_fields_3d_irregular_weighted(*npts, *degree, *cell_indexes, *global_basis,\n *global_spans, global_arr_field, global_arr_w, out_field_w)\n \n assert np.allclose(out_field, f_direct, atol=ATOL, rtol=RTOL)\n assert np.allclose(out_field_w, f_direct_w, atol=ATOL, rtol=RTOL)\n \n\n@pytest.mark.parametrize('jac_det, ldim, field_to_push', [(np.ones((5, 5)), 2, np.ones((5, 5, 1))),\n (np.ones((5, 5, 5)), 3, np.ones((5, 5, 5, 1))),\n (np.random.rand(5, 5), 2, np.random.rand(5, 5, 1)),\n (np.random.rand(5, 5, 5), 3, np.random.rand(5, 5, 5, 1))])\ndef test_pushforwards_l2(ldim, jac_det, field_to_push):\n expected = field_to_push[..., 0] / jac_det\n out = np.zeros_like(field_to_push)\n if ldim == 2:\n pushforward_2d_l2(field_to_push, jac_det, out)\n if ldim == 3:\n pushforward_3d_l2(field_to_push, jac_det, out)\n\n assert np.allclose(expected, out[..., 0], atol=ATOL, rtol=RTOL)\n\n\n@pytest.mark.parametrize('ldim', (2, 3))\ndef test_pushforwards_hdiv(ldim):\n jacobians = np.full((5,) * ldim + (ldim, ldim), np.eye(ldim))\n field_to_push = np.random.rand(ldim, *((5, ) * ldim), 1)\n expected = np.moveaxis(field_to_push, -1, 0)\n out = np.zeros(expected.shape)\n if ldim == 2:\n pushforward_2d_hdiv(field_to_push, jacobians, out)\n if ldim == 3:\n pushforward_3d_hdiv(field_to_push, jacobians, out)\n\n assert np.allclose(expected, out, atol=ATOL, rtol=RTOL)\n\n\n@pytest.mark.parametrize('ldim', (2, 3))\ndef test_pushforwards_hcurl(ldim):\n inv_jacobians = np.full((5,) * ldim + (ldim, ldim), np.eye(ldim))\n field_to_push = 
np.random.rand(ldim, *((5, ) * ldim), 1)\n expected = np.moveaxis(field_to_push, -1, 0)\n out = np.zeros(expected.shape)\n\n if ldim == 2:\n pushforward_2d_hcurl(field_to_push, inv_jacobians, out)\n if ldim == 3:\n pushforward_3d_hcurl(field_to_push, inv_jacobians, out)\n\n assert np.allclose(expected, out, atol=ATOL, rtol=RTOL)\n" ]
[ [ "numpy.dot", "numpy.random.random", "numpy.allclose", "numpy.reshape", "numpy.eye", "numpy.ones", "numpy.linalg.det", "numpy.max", "numpy.zeros_like", "numpy.random.rand", "numpy.moveaxis", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
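Editor's note (not part of the dataset record): the psydac test entry above checks, at every evaluation point, that the kernel Jacobian matches the mapping's direct jac_mat and that the companion kernels satisfy J @ inv(J) = I and det(J) = jac_det. The sketch below isolates the per-cell random grid construction and those two invariants in plain NumPy, with hypothetical breakpoints; it is illustrative only.

import numpy as np

# Hypothetical breakpoints and points-per-cell, mirroring the test setup above.
breaks = np.array([0.0, 0.25, 0.5, 1.0])
npts_per_cell = 3

# "Regular" grid: cell j contributes npts_per_cell random points drawn inside
# [breaks[j], breaks[j+1]), concatenated cell by cell as in test_regular_jacobians.
grid = np.concatenate([
    np.random.random(npts_per_cell) * (breaks[j + 1] - breaks[j]) + breaks[j]
    for j in range(len(breaks) - 1)
])
assert grid.shape == ((len(breaks) - 1) * npts_per_cell,)

# The per-point invariants the tests assert, on a stand-in 2x2 Jacobian:
J = np.random.rand(2, 2) + 2 * np.eye(2)       # well conditioned by construction
J_inv = np.linalg.inv(J)                       # the role of eval_jacobians_inv_*
det = J[0, 0] * J[1, 1] - J[0, 1] * J[1, 0]    # closed form, the role of eval_jac_det_*
assert np.allclose(J @ J_inv, np.eye(2), atol=1e-12)
assert np.allclose(det, np.linalg.det(J), atol=1e-12)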
MLVPRASAD/KaggleProjects
[ "379e062cf58d83ff57a456552bb956df68381fdd" ]
[ "11 microsoft software prediction/lightgbm-baseline-model-using-sparse-matrix.py" ]
[ "import pandas as pd\nimport numpy as np\nimport lightgbm as lgb\n#import xgboost as xgb\nfrom scipy.sparse import vstack, csr_matrix, save_npz, load_npz\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import StratifiedKFold\n#from sklearn.metrics import roc_auc_score\nimport gc\ngc.enable()\n\ndtypes = {\n 'MachineIdentifier': 'category',\n 'ProductName': 'category',\n 'EngineVersion': 'category',\n 'AppVersion': 'category',\n 'AvSigVersion': 'category',\n 'IsBeta': 'int8',\n 'RtpStateBitfield': 'float16',\n 'IsSxsPassiveMode': 'int8',\n 'DefaultBrowsersIdentifier': 'float16',\n 'AVProductStatesIdentifier': 'float32',\n 'AVProductsInstalled': 'float16',\n 'AVProductsEnabled': 'float16',\n 'HasTpm': 'int8',\n 'CountryIdentifier': 'int16',\n 'CityIdentifier': 'float32',\n 'OrganizationIdentifier': 'float16',\n 'GeoNameIdentifier': 'float16',\n 'LocaleEnglishNameIdentifier': 'int8',\n 'Platform': 'category',\n 'Processor': 'category',\n 'OsVer': 'category',\n 'OsBuild': 'int16',\n 'OsSuite': 'int16',\n 'OsPlatformSubRelease': 'category',\n 'OsBuildLab': 'category',\n 'SkuEdition': 'category',\n 'IsProtected': 'float16',\n 'AutoSampleOptIn': 'int8',\n 'PuaMode': 'category',\n 'SMode': 'float16',\n 'IeVerIdentifier': 'float16',\n 'SmartScreen': 'category',\n 'Firewall': 'float16',\n 'UacLuaenable': 'float32',\n 'Census_MDC2FormFactor': 'category',\n 'Census_DeviceFamily': 'category',\n 'Census_OEMNameIdentifier': 'float16',\n 'Census_OEMModelIdentifier': 'float32',\n 'Census_ProcessorCoreCount': 'float16',\n 'Census_ProcessorManufacturerIdentifier': 'float16',\n 'Census_ProcessorModelIdentifier': 'float16',\n 'Census_ProcessorClass': 'category',\n 'Census_PrimaryDiskTotalCapacity': 'float32',\n 'Census_PrimaryDiskTypeName': 'category',\n 'Census_SystemVolumeTotalCapacity': 'float32',\n 'Census_HasOpticalDiskDrive': 'int8',\n 'Census_TotalPhysicalRAM': 'float32',\n 'Census_ChassisTypeName': 'category',\n 'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',\n 'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',\n 'Census_InternalPrimaryDisplayResolutionVertical': 'float16',\n 'Census_PowerPlatformRoleName': 'category',\n 'Census_InternalBatteryType': 'category',\n 'Census_InternalBatteryNumberOfCharges': 'float32',\n 'Census_OSVersion': 'category',\n 'Census_OSArchitecture': 'category',\n 'Census_OSBranch': 'category',\n 'Census_OSBuildNumber': 'int16',\n 'Census_OSBuildRevision': 'int32',\n 'Census_OSEdition': 'category',\n 'Census_OSSkuName': 'category',\n 'Census_OSInstallTypeName': 'category',\n 'Census_OSInstallLanguageIdentifier': 'float16',\n 'Census_OSUILocaleIdentifier': 'int16',\n 'Census_OSWUAutoUpdateOptionsName': 'category',\n 'Census_IsPortableOperatingSystem': 'int8',\n 'Census_GenuineStateName': 'category',\n 'Census_ActivationChannel': 'category',\n 'Census_IsFlightingInternal': 'float16',\n 'Census_IsFlightsDisabled': 'float16',\n 'Census_FlightRing': 'category',\n 'Census_ThresholdOptIn': 'float16',\n 'Census_FirmwareManufacturerIdentifier': 'float16',\n 'Census_FirmwareVersionIdentifier': 'float32',\n 'Census_IsSecureBootEnabled': 'int8',\n 'Census_IsWIMBootEnabled': 'float16',\n 'Census_IsVirtualDevice': 'float16',\n 'Census_IsTouchEnabled': 'int8',\n 'Census_IsPenCapable': 'int8',\n 'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',\n 'Wdft_IsGamer': 'float16',\n 'Wdft_RegionIdentifier': 'float16',\n 'HasDetections': 'int8'\n }\n\nprint('Download Train and Test Data.\\n')\ntrain = 
pd.read_csv('../input/train.csv', dtype=dtypes, low_memory=True)\ntrain['MachineIdentifier'] = train.index.astype('uint32')\ntest = pd.read_csv('../input/test.csv', dtype=dtypes, low_memory=True)\ntest['MachineIdentifier'] = test.index.astype('uint32')\n\ngc.collect()\n\nprint('Transform all features to category.\\n')\nfor usecol in train.columns.tolist()[1:-1]:\n\n    train[usecol] = train[usecol].astype('str')\n    test[usecol] = test[usecol].astype('str')\n \n    #Fit LabelEncoder\n    le = LabelEncoder().fit(\n np.unique(train[usecol].unique().tolist()+\n test[usecol].unique().tolist()))\n\n    #At the end 0 will be used for dropped values\n    train[usecol] = le.transform(train[usecol])+1\n    test[usecol] = le.transform(test[usecol])+1\n\n    agg_tr = (train\n .groupby([usecol])\n .aggregate({'MachineIdentifier':'count'})\n .reset_index()\n .rename({'MachineIdentifier':'Train'}, axis=1))\n    agg_te = (test\n .groupby([usecol])\n .aggregate({'MachineIdentifier':'count'})\n .reset_index()\n .rename({'MachineIdentifier':'Test'}, axis=1))\n\n    agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)\n    #Select values with more than 1000 observations\n    agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)\n    agg['Total'] = agg['Train'] + agg['Test']\n    #Drop unbalanced values\n    agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]\n    agg[usecol+'Copy'] = agg[usecol]\n\n    train[usecol] = (pd.merge(train[[usecol]], \n agg[[usecol, usecol+'Copy']], \n on=usecol, how='left')[usecol+'Copy']\n .replace(np.nan, 0).astype('int').astype('category'))\n\n    test[usecol] = (pd.merge(test[[usecol]], \n agg[[usecol, usecol+'Copy']], \n on=usecol, how='left')[usecol+'Copy']\n .replace(np.nan, 0).astype('int').astype('category'))\n\n    del le, agg_tr, agg_te, agg, usecol\n    gc.collect()\n \ny_train = np.array(train['HasDetections'])\ntrain_ids = train.index\ntest_ids = test.index\n\ndel train['HasDetections'], train['MachineIdentifier'], test['MachineIdentifier']\ngc.collect()\n\nprint(\"If you don't want to use a Sparse Matrix, choose Kernel Version 2 for a simpler solution.\\n\")\n\nprint('--------------------------------------------------------------------------------------------------------')\nprint('Transform Data to Sparse Matrix.')\nprint('A Sparse Matrix can be used to fit a lot of models, e.g. 
XGBoost, LightGBM, Random Forest, K-Means, etc.')\nprint('To concatenate Sparse Matrices by column use hstack()')\nprint('Read more about Sparse Matrix https://docs.scipy.org/doc/scipy/reference/sparse.html')\nprint('Good Luck!')\nprint('--------------------------------------------------------------------------------------------------------')\n\n#Fit OneHotEncoder\nohe = OneHotEncoder(categories='auto', sparse=True, dtype='uint8').fit(train)\n\n#Transform data using small groups to reduce memory usage\nm = 100000\ntrain = vstack([ohe.transform(train[i*m:(i+1)*m]) for i in range(train.shape[0] // m + 1)])\ntest = vstack([ohe.transform(test[i*m:(i+1)*m]) for i in range(test.shape[0] // m + 1)])\nsave_npz('train.npz', train, compressed=True)\nsave_npz('test.npz', test, compressed=True)\n\ndel ohe, train, test\ngc.collect()\n\nskf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\nskf.get_n_splits(train_ids, y_train)\n\nlgb_test_result = np.zeros(test_ids.shape[0])\n#lgb_train_result = np.zeros(train_ids.shape[0])\n#xgb_test_result = np.zeros(test_ids.shape[0])\n#xgb_train_result = np.zeros(train_ids.shape[0])\ncounter = 0\n\nprint('\\nLightGBM\\n')\n\nfor train_index, test_index in skf.split(train_ids, y_train):\n \n    print('Fold {}\\n'.format(counter + 1))\n \n    train = load_npz('train.npz')\n    X_fit = vstack([train[train_index[i*m:(i+1)*m]] for i in range(train_index.shape[0] // m + 1)])\n    X_val = vstack([train[test_index[i*m:(i+1)*m]] for i in range(test_index.shape[0] // m + 1)])\n    X_fit, X_val = csr_matrix(X_fit, dtype='float32'), csr_matrix(X_val, dtype='float32')\n    y_fit, y_val = y_train[train_index], y_train[test_index]\n \n    del train\n    gc.collect()\n\n    lgb_model = lgb.LGBMClassifier(max_depth=-1,\n n_estimators=30000,\n learning_rate=0.05,\n num_leaves=2**12-1,\n colsample_bytree=0.28,\n objective='binary', \n n_jobs=-1)\n \n    #xgb_model = xgb.XGBClassifier(max_depth=6,\n # n_estimators=30000,\n # colsample_bytree=0.2,\n # learning_rate=0.1,\n # objective='binary:logistic', \n # n_jobs=-1)\n \n \n    lgb_model.fit(X_fit, y_fit, eval_metric='auc', \n eval_set=[(X_val, y_val)], \n verbose=100, early_stopping_rounds=100)\n \n    #xgb_model.fit(X_fit, y_fit, eval_metric='auc', \n # eval_set=[(X_val, y_val)], \n # verbose=1000, early_stopping_rounds=300)\n\n    #lgb_train_result[test_index] += lgb_model.predict_proba(X_val)[:,1]\n    #xgb_train_result[test_index] += xgb_model.predict_proba(X_val)[:,1]\n \n    del X_fit, X_val, y_fit, y_val, train_index, test_index\n    gc.collect()\n \n    test = load_npz('test.npz')\n    test = csr_matrix(test, dtype='float32')\n    lgb_test_result += lgb_model.predict_proba(test)[:,1]\n    #xgb_test_result += xgb_model.predict_proba(test)[:,1]\n    counter += 1\n \n    del test\n    gc.collect()\n \n    #Stop fitting to prevent time limit error\n    #if counter == 3 : break\n\n#print('\\nLigthGBM VAL AUC Score: {}'.format(roc_auc_score(y_train, lgb_train_result)))\n#print('\\nXGBoost VAL AUC Score: {}'.format(roc_auc_score(y_train, xgb_train_result)))\n\nsubmission = pd.read_csv('../input/sample_submission.csv')\nsubmission['HasDetections'] = lgb_test_result / counter\nsubmission.to_csv('lgb_submission.csv', index=False)\n#submission['HasDetections'] = xgb_test_result / counter\n#submission.to_csv('xgb_submission.csv', index=False)\n#submission['HasDetections'] = 0.5 * lgb_test_result / counter + 0.5 * xgb_test_result / counter \n##submission.to_csv('lgb_xgb_submission.csv', index=False)\n\nprint('\\nDone.')" ]
[ [ "pandas.merge", "pandas.read_csv", "scipy.sparse.load_npz", "sklearn.preprocessing.OneHotEncoder", "sklearn.model_selection.StratifiedKFold", "scipy.sparse.csr_matrix", "sklearn.preprocessing.LabelEncoder", "scipy.sparse.save_npz", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] } ]
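Editor's note (not part of the dataset record): the kernel above avoids materializing a dense one-hot matrix for millions of rows by encoding in row chunks and stacking the sparse pieces. A self-contained sketch of that pattern on toy data follows; array sizes and the file name are invented for illustration.

import numpy as np
from scipy.sparse import vstack, save_npz, load_npz
from sklearn.preprocessing import OneHotEncoder

# Toy stand-in for the label-encoded train/test frames (three categorical columns).
X = np.random.randint(0, 5, size=(1050, 3))

ohe = OneHotEncoder(categories='auto', dtype='uint8').fit(X)  # sparse output by default

# Transform m rows at a time and vstack the sparse chunks, the same
# memory-saving trick used before saving train.npz / test.npz above.
m = 100
X_sparse = vstack([ohe.transform(X[i * m:(i + 1) * m]) for i in range(X.shape[0] // m + 1)])
assert X_sparse.shape[0] == X.shape[0]

save_npz('toy.npz', X_sparse, compressed=True)
assert (load_npz('toy.npz') != X_sparse).nnz == 0  # round-trips losslessly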
radiasoft/sirepo
[ "db3d1737bab7a84d39d456c0e8913c88deff3c31" ]
[ "sirepo/template/rs4pi.py" ]
[ "# -*- coding: utf-8 -*-\nu\"\"\"RS4PI execution template.\n\n:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom pykern.pkcollections import PKDict\nfrom pykern import pkio\nfrom pykern import pkjinja\nfrom pykern.pkdebug import pkdc, pkdp\nfrom scipy.ndimage.interpolation import zoom\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common\nimport ctypes\nimport datetime\nimport glob\nimport h5py\nimport numpy as np\nimport os\nimport os.path\nimport py.path\nimport re\nimport sirepo.sim_data\nimport sirepo.util\nimport struct\nimport time\nimport werkzeug\nimport zipfile\ntry:\n # pydicom is changing to pydicom in 1.0\n import pydicom as dicom\nexcept ImportError:\n import dicom\n\n_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()\n\nRTSTRUCT_EXPORT_FILENAME = 'rtstruct.dcm'\nRTDOSE_EXPORT_FILENAME = 'dose.dcm'\nPRESCRIPTION_FILENAME = 'prescription.json'\nWANT_BROWSER_FRAME_CACHE = True\nDOSE_CALC_SH = 'dose_calc.sh'\nDOSE_CALC_OUTPUT = 'Full_Dose.h5'\n_DICOM_CLASS = {\n 'CT_IMAGE': '1.2.840.10008.5.1.4.1.1.2',\n 'RT_DOSE': '1.2.840.10008.5.1.4.1.1.481.2',\n 'RT_STRUCT': '1.2.840.10008.5.1.4.1.1.481.3',\n 'DETATCHED_STUDY': '1.2.840.10008.3.1.2.3.1',\n}\n_DICOM_DIR = 'dicom'\n_DICOM_MAX_VALUE = 1000\n_DICOM_MIN_VALUE = -1000\n_DOSE_DICOM_FILE = RTDOSE_EXPORT_FILENAME\n_DOSE_FILE = 'dose3d.dat'\n_EXPECTED_ORIENTATION = np.array([1, 0, 0, 0, 1, 0])\n# using np.float32 for pixel storage\n_FLOAT_SIZE = 4\n_PIXEL_FILE = 'pixels3d.dat'\n_RADIASOFT_ID = 'RadiaSoft'\n_ROI_FILE_NAME = 'rs4pi-roi-data.json'\n_TMP_INPUT_FILE_FIELD = 'tmpDicomFilePath'\n_TMP_ZIP_DIR = 'tmp-dicom-files'\n_ZIP_FILE_NAME = 'input.zip'\n\n\ndef background_percent_complete(report, run_dir, is_running):\n data_path = run_dir.join(template_common.INPUT_BASE_NAME)\n if not os.path.exists(str(simulation_db.json_filename(data_path))):\n return PKDict(\n percentComplete=0,\n frameCount=0,\n )\n return PKDict(\n percentComplete=100,\n # real frame count depends on the series selected\n frameCount=1,\n )\n\n\ndef copy_related_files(data, source_path, target_path):\n # pixels3d.dat, rs4pi-roi-data.json, dicom/*.json\n for filename in (_PIXEL_FILE, _ROI_FILE_NAME, _DOSE_FILE, RTDOSE_EXPORT_FILENAME):\n f = py.path.local(source_path).join(filename)\n if f.exists():\n f.copy(py.path.local(target_path).join(filename))\n dicom_dir = py.path.local(target_path).join(_DICOM_DIR)\n pkio.mkdir_parent(str(dicom_dir))\n for f in glob.glob(str(py.path.local(source_path).join(_DICOM_DIR, '*'))):\n py.path.local(f).copy(dicom_dir)\n\n\ndef generate_rtdose_file(data, run_dir):\n dose_hd5 = str(run_dir.join(DOSE_CALC_OUTPUT))\n dicom_series = data['models']['dicomSeries']\n frame = PKDict(\n StudyInstanceUID=dicom_series['studyInstanceUID'],\n shape=np.array([\n dicom_series['planes']['t']['frameCount'],\n dicom_series['planes']['c']['frameCount'],\n dicom_series['planes']['s']['frameCount'],\n ]),\n spacing=np.array(_float_list(dicom_series['pixelSpacing'])),\n )\n with h5py.File(dose_hd5, 'r') as f:\n start = f['/dose'].attrs['dicom_start_cm'] * 10\n #TODO(pjm): assumes the size closely matches original dicom when scaled\n # size = f['/dose'].attrs['voxel_size_cm'] * 10\n ds = _create_dicom_dataset(frame['StudyInstanceUID'], 'RT_DOSE', 'RTDOSE')\n pixels = np.array(f['/dose'])\n shape = pixels.shape\n # reshape the pixels in place: z is actually 
first\n pixels.shape = (shape[2], shape[0], shape[1])\n shape = pixels.shape\n pixels = zoom(pixels, zoom=frame['shape']/shape, order=1)\n shape = pixels.shape\n\n ds.ImagePositionPatient = _string_list(start)\n ds.PixelSpacing = _string_list([frame['spacing'][0], frame['spacing'][1]])\n ds.Rows = shape[1]\n ds.Columns = shape[2]\n ds.NumberOfFrames = shape[0]\n ds.DoseUnits = 'GY'\n\n pixels = pixels.flatten()\n v = pixels.max()\n max_int = np.iinfo(np.uint32).max - 1\n scale = v / max_int\n ds.DoseGridScaling = scale\n pixels /= scale\n ds.BitsAllocated = 32\n ds.BitsStored = 32\n ds.HighBit = 31\n ds.PixelRepresentation = 0\n ds.SamplesPerPixel = 1\n ds.PixelData = pixels.astype(np.uint32)\n ds.file_meta.TransferSyntaxUID = '1.2.840.10008.1.2'\n\n # for dicompyler\n ds.PhotometricInterpretation = 'MONOCHROME2'\n ds.DoseType = 'PHYSICAL'\n ds.DoseSummationType = 'PLAN'\n ds.ImageOrientationPatient = _string_list(_EXPECTED_ORIENTATION)\n ds.GridFrameOffsetVector = np.linspace(0.0, frame['spacing'][2] * (shape[0] - 1), shape[0]).tolist()\n ds.save_as(_parent_file(run_dir, _DOSE_DICOM_FILE))\n return _summarize_rt_dose(None, ds, run_dir=run_dir)\n\n\ndef get_application_data(data, **kwargs):\n if data['method'] == 'roi_points':\n return _read_roi_file(data['simulationId'])\n elif data['method'] == 'update_roi_points':\n return _update_roi_file(data['simulationId'], data['editedContours'])\n else:\n raise RuntimeError('{}: unknown application data method'.format(data['method']))\n\n\ndef get_data_file(run_dir, model, frame, **kwargs):\n if model == 'dicomAnimation4':\n filename = _parent_file(run_dir, _DOSE_DICOM_FILE)\n uri = RTDOSE_EXPORT_FILENAME\n with open(filename, mode='rb') as f:\n out = f.read()\n else:\n with simulation_db.tmp_dir() as tmp_dir:\n filename, _ = _generate_rtstruct_file(_parent_dir(run_dir), tmp_dir)\n uri = RTSTRUCT_EXPORT_FILENAME\n with open(filename, mode='rb') as f:\n out = f.read()\n return PKDict(\n content=out,\n uri=uri,\n )\n\n\ndef import_file(req, tmp_dir=None, **kwargs):\n if not pkio.has_file_extension(req.filename, 'zip'):\n raise sirepo.util.UserAlert('unsupported import filename: {}'.format(req.filename))\n #TODO(pjm): writing to simulation lib for now, tmp_dir will get removed after this request\n filepath = str(simulation_db.simulation_lib_dir(SIM_TYPE).join(_ZIP_FILE_NAME))\n pkio.mkdir_parent_only(filepath)\n with open(filepath, 'wb') as f:\n f.write(req.file_stream.read())\n data = simulation_db.default_data(SIM_TYPE)\n data['models']['simulation']['name'] = req.filename\n data['models']['simulation'][_TMP_INPUT_FILE_FIELD] = filepath\n # more processing occurs in prepare_for_client() via:\n # import_file => _save_new_and_reply => api_simulationData => prepare_for_client\n return data\n\n\ndef prepare_for_client(data):\n if _TMP_INPUT_FILE_FIELD in data['models']['simulation']:\n _move_import_file(data)\n return data\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef sim_frame(frame_args):\n frame_index = frame_args.frameIndex\n model_data = frame_args.sim_in\n if frame_args.frameReport.startswith('dicomAnimation'):\n plane = frame_args.dicomPlane\n res = simulation_db.read_json(_dicom_path(model_data['models']['simulation'], plane, frame_index))\n res['pixel_array'] = _read_pixel_plane(plane, frame_index, model_data)\n return res\n if frame_args.frameReport == 'dicomDose':\n return {\n 'dose_array': _read_dose_frame(frame_index, model_data)\n }\n assert False, '{}: unknown simulation frame model'.format(frame_args.frameReport)\n\n\ndef 
write_parameters(data, run_dir, is_parallel):\n rtfile = py.path.local(_parent_file(run_dir, RTSTRUCT_EXPORT_FILENAME))\n if data['report'] == 'dvhReport' and rtfile.exists():\n return\n if data['report'] in ('doseCalculation', 'dvhReport'):\n _, roi_models = _generate_rtstruct_file(_parent_dir(run_dir), _parent_dir(run_dir))\n if data['report'] == 'doseCalculation':\n dose_calc = data.models.doseCalculation\n roi_data = roi_models['regionsOfInterest']\n ptv_name = ''\n oar_names = []\n for roi_number in roi_data:\n if roi_number == dose_calc.selectedPTV:\n ptv_name = roi_data[roi_number]['name']\n elif roi_number in dose_calc.selectedOARs:\n oar_names.append(roi_data[roi_number]['name'])\n prescription = run_dir.join(PRESCRIPTION_FILENAME)\n simulation_db.write_json(\n prescription,\n {\n 'ptv': ptv_name,\n 'oar': oar_names,\n })\n pkjinja.render_file(\n _SIM_DATA.resource_path(f'{DOSE_CALC_SH}.jinja'),\n {\n 'prescription': prescription,\n 'beamlist': run_dir.join(_SIM_DATA.RS4PI_BEAMLIST_FILENAME),\n 'dicom_zip': _sim_file(data['simulationId'], _ZIP_FILE_NAME),\n },\n output=run_dir.join(DOSE_CALC_SH),\n strict_undefined=True,\n )\n\n\ndef _calculate_domain(frame):\n position = _float_list(frame['ImagePositionPatient'])\n spacing = frame['PixelSpacing']\n shape = frame['shape']\n return [\n [\n position[0] - spacing[0] / 2,\n position[1] - spacing[1] / 2,\n position[2],\n ],\n [\n position[0] + spacing[0] * shape[1] - spacing[0] / 2,\n position[1] + spacing[1] * shape[0] - spacing[1] / 2,\n position[2],\n ],\n ]\n\n\ndef _compute_histogram(simulation, frames):\n pixels = []\n for frame in frames:\n pixels.append(frame['pixels'])\n histogram = _histogram_from_pixels(pixels)\n filename = _roi_file(simulation['simulationId'])\n if os.path.exists(filename):\n roi_data = _read_roi_file(simulation['simulationId'])\n else:\n roi_data = {\n 'models': {\n 'regionsOfInterest': {},\n },\n }\n roi_data['models']['dicomHistogram'] = histogram\n roi_data['models']['dicomFrames'] = _summarize_frames(frames)\n simulation_db.write_json(filename, roi_data)\n\n\ndef _create_dicom_dataset(study_uid, dicom_class, modality):\n sop_uid = dicom.uid.generate_uid()\n\n file_meta = dicom.dataset.Dataset()\n file_meta.MediaStorageSOPClassUID = _DICOM_CLASS[dicom_class]\n file_meta.MediaStorageSOPInstanceUID = sop_uid\n #TODO(pjm): need proper implementation uid\n file_meta.ImplementationClassUID = \"1.2.3.4\"\n file_meta.ImplementationVersionName = 'dcm4che-2.0'\n\n ds = dicom.dataset.FileDataset('', {}, file_meta=file_meta, preamble=b\"\\0\" * 128)\n now = datetime.datetime.now()\n ds.InstanceCreationDate = now.strftime('%Y%m%d')\n ds.InstanceCreationTime = now.strftime('%H%M%S.%f')\n ds.SOPClassUID = _DICOM_CLASS[dicom_class]\n ds.SOPInstanceUID = sop_uid\n ds.StudyDate = ''\n ds.StudyTime = ''\n ds.AccessionNumber = ''\n ds.Modality = modality\n ds.Manufacturer = _RADIASOFT_ID\n ds.ReferringPhysiciansName = ''\n ds.ManufacturersModelName = _RADIASOFT_ID\n ds.PatientsName = _RADIASOFT_ID\n ds.PatientID = _RADIASOFT_ID\n ds.PatientsBirthDate = ''\n ds.PatientsSex = ''\n ds.StudyInstanceUID = study_uid\n ds.SeriesInstanceUID = dicom.uid.generate_uid()\n ds.StudyID = ''\n ds.SeriesNumber = ''\n return ds\n\n\ndef _dicom_path(simulation, plane, idx):\n return str(py.path.local(_sim_file(simulation['simulationId'], _DICOM_DIR)).join(_frame_file_name(plane, idx)))\n\n\ndef _dose_dicom_filename(simulation):\n return _sim_file(simulation['simulationId'], _DOSE_DICOM_FILE)\n\n\ndef _dose_filename(simulation):\n return 
_sim_file(simulation['simulationId'], _DOSE_FILE)\n\n\ndef _extract_series_frames(simulation, dicom_dir):\n #TODO(pjm): give user a choice between multiple study/series if present\n selected_series = None\n frames = {}\n dicom_dose = None\n rt_struct_path = None\n res = {\n 'description': '',\n }\n for path in pkio.walk_tree(dicom_dir):\n if pkio.has_file_extension(str(path), 'dcm'):\n plan = dicom.read_file(str(path))\n if plan.SOPClassUID == _DICOM_CLASS['RT_STRUCT']:\n rt_struct_path = str(path)\n elif plan.SOPClassUID == _DICOM_CLASS['RT_DOSE']:\n res['dicom_dose'] = _summarize_rt_dose(simulation, plan)\n plan.save_as(_dose_dicom_filename(simulation))\n if plan.SOPClassUID != _DICOM_CLASS['CT_IMAGE']:\n continue\n orientation = _float_list(plan.ImageOrientationPatient)\n if not (_EXPECTED_ORIENTATION == orientation).all():\n continue\n if not selected_series:\n selected_series = plan.SeriesInstanceUID\n res['StudyInstanceUID'] = plan.StudyInstanceUID\n res['PixelSpacing'] = plan.PixelSpacing\n if hasattr(plan, 'SeriesDescription'):\n res['description'] = plan.SeriesDescription\n if selected_series != plan.SeriesInstanceUID:\n continue\n info = {\n 'pixels': np.float32(plan.pixel_array),\n 'shape': plan.pixel_array.shape,\n 'ImagePositionPatient': _string_list(plan.ImagePositionPatient),\n 'ImageOrientationPatient': _float_list(plan.ImageOrientationPatient),\n 'PixelSpacing': _float_list(plan.PixelSpacing),\n }\n for f in ('FrameOfReferenceUID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID'):\n info[f] = getattr(plan, f)\n z = _frame_id(info['ImagePositionPatient'][2])\n info['frameId'] = z\n if z in frames:\n raise RuntimeError('duplicate frame with z coord: {}'.format(z))\n _scale_pixel_data(plan, info['pixels'])\n frames[z] = info\n if not selected_series:\n raise RuntimeError('No series found with {} orientation'.format(_EXPECTED_ORIENTATION))\n if rt_struct_path:\n res['regionsOfInterest'] = _summarize_rt_structure(simulation, dicom.read_file(rt_struct_path), frames.keys())\n sorted_frames = []\n res['frames'] = sorted_frames\n for z in sorted(_float_list(frames.keys())):\n sorted_frames.append(frames[_frame_id(z)])\n return res\n\n\ndef _frame_id(v):\n # normalize on float's string format, ex 2 --> '2.0'\n return str(float(v))\n\n\ndef _frame_info(count):\n return {\n 'frameIndex': int(count / 2),\n 'frameCount': count,\n }\n\n\ndef _float_list(ar):\n return [float(x) for x in ar]\n\n\ndef _frame_file_name(plane, index):\n return plane + str(index).zfill(5)\n\n\ndef _generate_dicom_reference_frame_info(plan, frame_data):\n ref_ds = dicom.dataset.Dataset()\n ref_ds.FrameOfReferenceUID = frame_data['FrameOfReferenceUID']\n study_ds = dicom.dataset.Dataset()\n study_ds.ReferencedSOPClassUID = _DICOM_CLASS['DETATCHED_STUDY']\n study_ds.ReferencedSOPInstanceUID = frame_data['StudyInstanceUID']\n series_ds = dicom.dataset.Dataset()\n series_ds.SeriesInstanceUID = frame_data['SeriesInstanceUID']\n series_ds.ContourImageSequence = []\n for uid in frame_data['SOPInstanceUID']:\n instance_ds = dicom.dataset.Dataset()\n instance_ds.ReferencedSOPClassUID = _DICOM_CLASS['CT_IMAGE']\n instance_ds.ReferencedSOPInstanceUID = uid\n series_ds.ContourImageSequence.append(instance_ds)\n study_ds.RTReferencedSeriesSequence = [series_ds]\n ref_ds.RTReferencedStudySequence = [study_ds]\n plan.ReferencedFrameOfReferenceSequence = [ref_ds]\n\n\ndef _generate_dicom_roi_info(plan, frame_data, roi_data):\n plan.StructureSetROISequence = []\n plan.ROIContourSequence = []\n\n for roi_number in 
sorted(roi_data.keys()):\n roi = roi_data[roi_number]\n roi_ds = dicom.dataset.Dataset()\n roi_ds.ROINumber = roi_number\n roi_ds.ROIName = roi['name']\n roi_ds.ReferencedFrameOfReferenceUID = frame_data['FrameOfReferenceUID']\n plan.StructureSetROISequence.append(roi_ds)\n\n contour_ds = dicom.dataset.Dataset()\n contour_ds.ReferencedROINumber = roi_number\n contour_ds.ROIDisplayColor = _string_list(roi['color'])\n contour_ds.ContourSequence = []\n image_num = 1\n\n for frame_id in sorted(_float_list(roi['contour'].keys())):\n for points in roi['contour'][str(frame_id)]:\n image_ds = dicom.dataset.Dataset()\n image_ds.ContourGeometricType = 'CLOSED_PLANAR'\n image_ds.ContourNumber = str(image_num)\n image_num += 1\n image_ds.ContourData = []\n for i in range(0, len(points), 2):\n image_ds.ContourData.append(str(points[i]))\n image_ds.ContourData.append(str(points[i + 1]))\n image_ds.ContourData.append(str(frame_id))\n image_ds.NumberOfContourPoints = str(int(len(image_ds.ContourData) / 3))\n contour_ds.ContourSequence.append(image_ds)\n plan.ROIContourSequence.append(contour_ds)\n\n\ndef _generate_rtstruct_file(sim_dir, target_dir):\n models = simulation_db.read_json(sim_dir.join(_ROI_FILE_NAME))['models']\n frame_data = models['dicomFrames']\n roi_data = models['regionsOfInterest']\n plan = _create_dicom_dataset(frame_data['StudyInstanceUID'], 'RT_STRUCT', 'RTSTRUCT')\n plan.StructureSetLabel = '{} Exported'.format(_RADIASOFT_ID)\n plan.StructureSetDate = plan.InstanceCreationDate\n plan.StructureSetTime = plan.InstanceCreationTime\n _generate_dicom_reference_frame_info(plan, frame_data)\n _generate_dicom_roi_info(plan, frame_data, roi_data)\n filename = str(target_dir.join(RTSTRUCT_EXPORT_FILENAME))\n plan.save_as(filename)\n return filename, models\n\n\ndef _histogram_from_pixels(pixels):\n m = 50\n extent = [np.array(pixels).min(), np.array(pixels).max()]\n if extent[0] < _DICOM_MIN_VALUE:\n extent[0] = _DICOM_MIN_VALUE\n if extent[1] > _DICOM_MAX_VALUE:\n extent[1] = _DICOM_MAX_VALUE\n span = extent[1] - extent[0]\n step = np.power(10, np.floor(np.log(span / m) / np.log(10)))\n err = float(m) / span * step\n if err <= .15:\n step *= 10\n elif err <= .35:\n step *= 5\n elif err <= .75:\n step *= 2\n e = [\n np.ceil(extent[0] / step) * step,\n np.floor(extent[1] / step) * step + step * .5,\n step,\n ]\n bins = int(np.ceil((e[1] - e[0]) / e[2]))\n hist, edges = np.histogram(pixels, bins=bins, range=[e[0], e[0] + (bins - 1) * step])\n if hist[0] == hist.max():\n v = hist[0]\n hist[0] = 0\n if v > hist.max() * 2:\n hist[0] = hist.max() * 2\n else:\n hist[0] = v\n return {\n 'histogram': hist.tolist(),\n 'extent': [edges[0].item(), edges[-1].item(), bins],\n }\n\n\ndef _move_import_file(data):\n sim = data['models']['simulation']\n path = sim[_TMP_INPUT_FILE_FIELD]\n del sim[_TMP_INPUT_FILE_FIELD]\n if os.path.exists(path):\n zip_path = _sim_file(sim['simulationId'], _ZIP_FILE_NAME)\n os.rename(path, zip_path)\n tmp_dir = _sim_file(sim['simulationId'], _TMP_ZIP_DIR)\n zipfile.ZipFile(zip_path).extractall(tmp_dir)\n _summarize_dicom_files(data, tmp_dir)\n pkio.unchecked_remove(tmp_dir)\n simulation_db.save_simulation_json(data)\n\n\ndef _parent_dir(child_dir):\n return child_dir.join('..')\n\n\ndef _parent_file(child_dir, filename):\n return str(_parent_dir(child_dir).join(filename))\n\n\ndef _pixel_filename(simulation):\n return _sim_file(simulation['simulationId'], _PIXEL_FILE)\n\n\ndef _read_dose_frame(idx, data):\n res = []\n if 'dicomDose' not in data['models']:\n return res\n 
dicom_dose = data['models']['dicomDose']\n if idx >= dicom_dose['frameCount']:\n return res\n shape = dicom_dose['shape']\n with open (_dose_filename(data['models']['simulation']), 'rb') as f:\n f.seek(idx * _FLOAT_SIZE * shape[0] * shape[1], 1)\n for r in range(shape[0]):\n row = []\n res.append(row)\n for c in range(shape[1]):\n row.append(struct.unpack('f', f.read(_FLOAT_SIZE))[0])\n return res\n\n\ndef _read_pixel_plane(plane, idx, data):\n plane_info = data['models']['dicomSeries']['planes']\n size = [plane_info['c']['frameCount'], plane_info['s']['frameCount'], plane_info['t']['frameCount']]\n frame = []\n # pixels = np.array(all_frame_pixels)[:, idx]\n # pixels = np.array(all_frame_pixels)[:, :, idx]\n with open (_pixel_filename(data['models']['simulation']), 'rb') as f:\n if plane == 't':\n if idx > 0:\n f.seek(idx * _FLOAT_SIZE * size[0] * size[1], 1)\n for r in range(size[1]):\n row = []\n frame.append(row)\n for v in range(size[0]):\n row.append(struct.unpack('f', f.read(_FLOAT_SIZE))[0])\n elif plane == 'c':\n if idx > 0:\n f.seek(idx * _FLOAT_SIZE * size[0], 1)\n for r in range(size[2]):\n row = []\n frame.append(row)\n for v in range(size[0]):\n row.append(struct.unpack('f', f.read(_FLOAT_SIZE))[0])\n f.seek(_FLOAT_SIZE * (size[0] - 1) * size[1], 1)\n frame = np.flipud(frame).tolist()\n elif plane == 's':\n if idx > 0:\n f.seek(idx * _FLOAT_SIZE, 1)\n for r in range(size[2]):\n row = []\n frame.append(row)\n for v in range(size[1]):\n row.append(struct.unpack('f', f.read(_FLOAT_SIZE))[0])\n f.seek(_FLOAT_SIZE * (size[0] - 1), 1)\n frame = np.flipud(frame).tolist()\n else:\n raise RuntimeError('plane not supported: {}'.format(plane))\n return frame\n\n\ndef _read_roi_file(sim_id):\n return simulation_db.read_json(_roi_file(sim_id))\n\n\ndef _roi_file(sim_id):\n return _sim_file(sim_id, _ROI_FILE_NAME)\n\n\ndef _scale_pixel_data(plan, pixels):\n scale_required = False\n slope = 1\n offset = 0\n if 'RescaleSlope' in plan and plan.RescaleSlope != slope:\n slope = plan.RescaleSlope\n scale_required = True\n if 'RescaleIntercept' in plan and plan.RescaleIntercept != offset:\n offset = plan.RescaleIntercept\n scale_required = True\n if scale_required:\n pixels *= float(slope)\n pixels += float(offset)\n\n\ndef _sim_file(sim_id, filename):\n return str(simulation_db.simulation_dir(SIM_TYPE, sim_id).join(filename))\n\n\ndef _string_list(ar):\n return [str(x) for x in ar]\n\n\ndef _summarize_dicom_files(data, dicom_dir):\n simulation = data['models']['simulation']\n info = _extract_series_frames(simulation, dicom_dir)\n frames = info['frames']\n info['pixelSpacing'] = _summarize_dicom_series(simulation, frames)\n with open (_pixel_filename(simulation), 'wb') as f:\n for frame in frames:\n frame['pixels'].tofile(f)\n data['models']['dicomSeries'] = {\n 'description': info['description'],\n 'pixelSpacing': info['pixelSpacing'],\n 'studyInstanceUID': info['StudyInstanceUID'],\n 'planes': {\n 't': _frame_info(len(frames)),\n 's': _frame_info(len(frames[0]['pixels'])),\n 'c': _frame_info(len(frames[0]['pixels'][0])),\n }\n }\n time_stamp = int(time.time())\n for m in ('dicomAnimation', 'dicomAnimation2', 'dicomAnimation3', 'dicomAnimation4'):\n data['models'][m]['startTime'] = time_stamp\n if 'regionsOfInterest' in info:\n dose_calc = data['models']['doseCalculation']\n selectedPTV = None\n dose_calc['selectedOARs'] = []\n for roi_number in sorted(info['regionsOfInterest']):\n roi = info['regionsOfInterest'][roi_number]\n if not selectedPTV or re.search(r'\\bptv\\b', roi['name'], 
re.IGNORECASE):\n selectedPTV = str(roi_number)\n dose_calc['selectedOARs'].append(str(roi_number))\n if selectedPTV:\n dose_calc['selectedPTV'] = selectedPTV\n data['models']['dvhReport']['roiNumbers'] = [selectedPTV]\n if 'dicom_dose' in info:\n data['models']['dicomDose'] = info['dicom_dose']\n _compute_histogram(simulation, frames)\n\n\ndef _summarize_dicom_series(simulation, frames):\n idx = 0\n z_space = abs(float(frames[0]['ImagePositionPatient'][2]) - float(frames[1]['ImagePositionPatient'][2]))\n os.mkdir(_sim_file(simulation['simulationId'], _DICOM_DIR))\n for frame in frames:\n res = {\n 'shape': frame['shape'],\n 'ImagePositionPatient': frame['ImagePositionPatient'],\n 'PixelSpacing': frame['PixelSpacing'],\n 'domain': _calculate_domain(frame),\n 'frameId': frame['frameId'],\n }\n filename = _dicom_path(simulation, 't', idx)\n simulation_db.write_json(filename, res)\n idx += 1\n\n frame0 = frames[0]\n shape = [\n len(frames),\n len(frame0['pixels'][0]),\n ]\n res = {\n 'shape': shape,\n 'ImagePositionPatient': [\n frame0['ImagePositionPatient'][0],\n frame0['ImagePositionPatient'][2],\n frame0['ImagePositionPatient'][1],\n ],\n 'PixelSpacing': [\n frame0['PixelSpacing'][0],\n z_space,\n ],\n }\n for idx in range(len(frame0['pixels'][0])):\n res['ImagePositionPatient'][2] = str(float(frame0['ImagePositionPatient'][1]) + idx * float(frame0['PixelSpacing'][0]))\n res['domain'] = _calculate_domain(res)\n filename = _dicom_path(simulation, 'c', idx)\n simulation_db.write_json(filename, res)\n\n shape = [\n len(frames),\n len(frame0['pixels'][1]),\n ]\n res = {\n 'shape': shape,\n 'ImagePositionPatient': [\n frame0['ImagePositionPatient'][1],\n frame0['ImagePositionPatient'][2],\n frame0['ImagePositionPatient'][0],\n ],\n 'PixelSpacing': [\n frame0['PixelSpacing'][0],\n z_space,\n ],\n }\n for idx in range(len(frame0['pixels'][0])):\n res['ImagePositionPatient'][2] = str(float(frame0['ImagePositionPatient'][0]) + idx * float(frame0['PixelSpacing'][1]))\n res['domain'] = _calculate_domain(res)\n filename = _dicom_path(simulation, 's', idx)\n simulation_db.write_json(filename, res)\n spacing = frame0['PixelSpacing']\n return _string_list([spacing[0], spacing[1], z_space])\n\n\ndef _summarize_frames(frames):\n res = {}\n frame0 = frames[0]\n for n in ('FrameOfReferenceUID', 'StudyInstanceUID', 'SeriesInstanceUID'):\n res[n] = frame0[n]\n res['SOPInstanceUID'] = []\n for frame in frames:\n res['SOPInstanceUID'].append(frame['SOPInstanceUID'])\n return res\n\n\ndef _summarize_rt_dose(simulation, plan, run_dir=None):\n pixels = np.float32(plan.pixel_array)\n if plan.DoseGridScaling:\n pixels *= float(plan.DoseGridScaling)\n fn = _parent_file(run_dir, _DOSE_FILE) if run_dir else _dose_filename(simulation)\n with open (fn, 'wb') as f:\n pixels.tofile(f)\n #TODO(pjm): assuming frame start matches dicom frame start\n res = {\n 'frameCount': int(plan.NumberOfFrames),\n 'units': plan.DoseUnits,\n 'min': float(np.min(pixels)),\n 'max': float(np.max(pixels)),\n 'shape': [plan.Rows, plan.Columns],\n 'ImagePositionPatient': _string_list(plan.ImagePositionPatient),\n 'PixelSpacing': _float_list(plan.PixelSpacing),\n 'startTime': int(time.time()),\n }\n res['domain'] = _calculate_domain(res)\n return res\n\n\ndef _summarize_rt_structure(simulation, plan, frame_ids):\n rois = {}\n for roi in plan.StructureSetROISequence:\n rois[roi.ROINumber] = {\n 'name': roi.ROIName,\n }\n res = {}\n for roi_contour in plan.ROIContourSequence:\n roi = rois[roi_contour.ReferencedROINumber]\n if 'contour' in roi:\n 
raise RuntimeError('duplicate contour sequence for roi')\n if not hasattr(roi_contour, 'ContourSequence'):\n continue\n roi['contour'] = {}\n for contour in roi_contour.ContourSequence:\n if contour.ContourGeometricType != 'CLOSED_PLANAR':\n continue\n if contour.ContourData:\n # the z index is the key\n ct_id = _frame_id(contour.ContourData[2])\n if ct_id not in frame_ids:\n raise RuntimeError('contour z not in frames: {}'.format(ct_id))\n contour_data = _float_list(contour.ContourData)\n if len(contour_data) > 3 and ct_id != _frame_id(contour_data[5]):\n raise RuntimeError('expected contour data z to be equal')\n del contour_data[2::3]\n if ct_id not in roi['contour']:\n roi['contour'][ct_id] = []\n roi['contour'][ct_id].append(contour_data)\n if roi['contour']:\n roi['color'] = _string_list(roi_contour.ROIDisplayColor)\n res[roi_contour.ReferencedROINumber] = roi\n simulation_db.write_json(_roi_file(simulation['simulationId']), {\n 'models': {\n 'regionsOfInterest': res,\n },\n })\n return res\n\n\ndef _update_roi_file(sim_id, contours):\n data = _read_roi_file(sim_id)\n rois = data['models']['regionsOfInterest']\n for roi_number in contours:\n if roi_number not in rois:\n rois[roi_number] = contours[roi_number]\n else:\n for frame_id in contours[roi_number]:\n points = contours[roi_number][frame_id]\n rois[roi_number]['contour'][frame_id] = points\n #TODO(pjm): file locking or atomic update\n simulation_db.write_json(_roi_file(sim_id), data)\n return {}\n" ]
[ [ "numpy.log", "numpy.histogram", "numpy.linspace", "numpy.min", "numpy.flipud", "numpy.ceil", "numpy.max", "numpy.iinfo", "numpy.float32", "numpy.floor", "numpy.array", "scipy.ndimage.interpolation.zoom" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
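Editor's note (not part of the dataset record): in generate_rtdose_file above, float doses are stored as uint32 PixelData together with a DoseGridScaling factor, so a reader recovers dose = pixel * scale. A minimal NumPy round-trip of that quantization, on a hypothetical dose grid, looks like this:

import numpy as np

dose = np.random.rand(4, 4, 4) * 70.0            # hypothetical dose grid in Gy

max_int = np.iinfo(np.uint32).max - 1
scale = dose.max() / max_int                     # ds.DoseGridScaling in the entry
stored = (dose / scale).astype(np.uint32)        # what goes into ds.PixelData

recovered = stored.astype(np.float64) * scale
assert np.allclose(recovered, dose, atol=scale)  # exact up to one quantization step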
pepealessio/cr_pepper_shoppingbot
[ "8d1f500ef09b1064a03de2697886dbe37f921980" ]
[ "fp_audio/scripts/reidentification.py" ]
[ "#!/usr/bin/python3\n\nfrom config import *\nfrom datetime import datetime\nfrom fp_audio.srv import GetEmbedding, GetEmbeddingResponse, SetEmbedding, SetEmbeddingResponse, GetLabel, GetLabelResponse, NextLabel, NextLabelResponse\nfrom identification.deep_speaker.audio import get_mfcc\nfrom identification.deep_speaker.model import get_deep_speaker\nfrom identification.utils import batch_cosine_similarity, dist2id\nimport numpy as np\nimport os\nimport pickle\nimport rospy\nfrom std_msgs.msg import Float32MultiArray, String, Int16\nfrom threading import Lock\n\n\nclass EmbeddingManager(object):\n\n def __init__(self):\n \"\"\"Init an embedding manager node who has 3 service: get embeddng of an audio,\n set embedding associated with a label of an audio and get a label based on a \n similarity distance between the various embeddings.\n \"\"\"\n self._model = get_deep_speaker(os.path.join(REF_PATH, 'audio_embedding_model', 'deep_speaker.h5'))\n\n self._mutex_data = Lock()\n self._mutex_net = Lock()\n\n def _load_data(self):\n \"\"\"Try to load saved data and if there not exist, create a new empty data structure.\n\n Data is structured like:\n {\n 'X' : [emb1, emb2, ...],\n 'y' : [1, 2, ...],\n 'y2name : {\n 1 : 'alessio',\n 2 : 'teresa,\n }\n }\n\n \n Returns:\n Dict: the dictionary with the data.\n \"\"\"\n try:\n with open(os.path.join(REF_PATH, DATA_FILENAME), 'rb') as fh:\n data = pickle.load(fh)\n except Exception as e:\n data = dict()\n data['X'] = list()\n data['y'] = list()\n data['y2name'] = dict()\n return data\n \n def _store_data(self, data):\n \"\"\"Save the data in a file.\n\n\n Args:\n data (dict): The dictionary containing all the data.\n \"\"\"\n try:\n with open(os.path.join(REF_PATH, DATA_FILENAME), 'wb') as fh:\n pickle.dump(data, fh)\n except:\n print(\"[RE-IDENTIFICATION] Can't save the state.\")\n\n def start(self):\n \"\"\"Initialize the node and start the services provided by this node.\n \"\"\"\n rospy.init_node('reidentification_node', anonymous=True)\n rospy.Service('getEmbedding', GetEmbedding, self._handle_get_embedding)\n rospy.Service('setEmbedding', SetEmbedding, self._handle_set_embedding)\n rospy.Service('getLabel', GetLabel, self._handle_get_label)\n rospy.Service('nextLabel', NextLabel, self._handle_next_label)\n rospy.spin()\n\n def _handle_get_embedding(self, req):\n \"\"\"Callback function of GetEmbedding Service. That recive an audio, compute \n the embedding using a NN and return that.\n This method is thread-safe.\n\n\n Args:\n req: The input of the request.\n \"\"\"\n int_audio = np.array(req.input.data, dtype=np.int16)\n audio_data = int_audio.astype(np.float32, order='C') / 32768.0 # to float32\n \n mfcc = get_mfcc(audio_data, RATE)\n\n self._mutex_net.acquire()\n embedding = self._model.predict(np.expand_dims(mfcc, 0))\n self._mutex_net.release()\n\n embedding = embedding[0].tolist()\n\n to_return = Float32MultiArray()\n to_return.data = embedding\n\n return GetEmbeddingResponse(to_return)\n \n def _handle_set_embedding(self, req):\n \"\"\"Callback function of SetEmbedding Service. 
Receives an embedding and a label \n and saves the association.\n This method is thread-safe.\n \n\n Args:\n req: The input of the request.\n \"\"\"\n # Get embedding, name and label from the request\n embedding = np.array(req.in_embedding.data)\n name = req.in_name.data\n label = req.in_label.data\n\n # Load data from file\n self._mutex_data.acquire()\n data = self._load_data()\n\n # If the label exists use it, otherwise create a new label and\n # associate the name with it.\n unique, counts = np.unique(data['y'], return_counts=True)\n label_count = dict(zip(unique, counts))\n\n if label not in data['y2name']:\n label = len(unique) + 1\n data['y2name'][label] = name\n\n if (label not in label_count or label_count[label] <= MAX_EMBEDDING_PER_LABEL):\n data['X'].append(embedding)\n data['y'].append(label)\n self._store_data(data)\n \n self._mutex_data.release()\n\n # print(f\"[Re-Identification] Setting an embedding with label {label} .\")\n\n return SetEmbeddingResponse(Int16(label))\n\n def _handle_get_label(self, req):\n \"\"\"Callback function of the GetLabel service. Receives an embedding and returns a label \n if the voice is known (or an empty label otherwise).\n This method is thread-safe.\n\n\n Args:\n req: The input of the request.\n \"\"\"\n embedding = np.array([req.in_embedding.data], dtype=np.float32)\n\n self._mutex_data.acquire()\n data = self._load_data()\n self._mutex_data.release()\n\n if len(data['X']) > 0:\n # Distance between the sample and the support set\n rep_embedding = np.repeat(embedding, len(data['X']), 0)\n cosine_dist = batch_cosine_similarity(np.array(data['X']), rep_embedding)\n # Matching\n label = dist2id(cosine_dist, data['y'], REID_TH, mode='avg')\n \n if len(data['X']) == 0 or label is None:\n label = -1\n name = ''\n else:\n name = data['y2name'][label]\n\n return GetLabelResponse(Int16(label), String(name))\n\n def _handle_next_label(self, req):\n \"\"\"Callback function for the NextLabel service. Returns the label that will be assigned\n to the next identity.\n This method is thread-safe.\n \"\"\"\n self._mutex_data.acquire()\n data = self._load_data()\n self._mutex_data.release()\n\n unique = np.unique(data['y'])\n next_label = len(unique) + 1\n \n return NextLabelResponse(Int16(next_label))\n\n\nif __name__ == '__main__':\n try:\n node = EmbeddingManager()\n node.start()\n except rospy.ROSInterruptException:\n pass\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mfzhang1/Optical-Flow-in-the-Dark
[ "4449519730f284b832884fd1fe5f304e2e07ae70" ]
[ "VBOF_dataset/raw_to_rgb.py" ]
[ "from __future__ import absolute_import, division, print_function\nimport argparse,glob,rawpy,tqdm,os,cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser(description='Process RAW files in VBOF to RGB')\n\nparser.add_argument('--input_raw_path', default='./VBOF_rawdata/', help='The path of RAW files folder')\nparser.add_argument('--output_rgb_path', default='./raw2jpg_output/', help='The path of output RGB image folder')\n\nargs = parser.parse_args()\n\ninput_dir = args.input_raw_path\noutput_dir = args.output_rgb_path\nif not os.path.exists(input_dir):\n print('Please download VBOF_raw dataset first.')\n exit()\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\ndef raw_info(addr):\n \"\"\"\n input a string of raw address\\n\n print raw_pattern, black_level, shot_wb, shape, min, mean, max\n \"\"\"\n print(addr)\n raw = rawpy.imread(addr)\n im = raw.raw_image_visible.astype(np.float32)\n print('raw pattern:')\n print(raw.color_desc)\n print(raw.raw_pattern)\n print('black level: ', raw.black_level_per_channel)\n print('shot white balance: ', raw.camera_whitebalance)\n print('shape: ', im.shape)\n print('min: %f, mean: %f, max: %f' % (np.min(im),np.mean(im),np.max(im)))\n\ndef raw_read(addr, rmin=2047, rmax=16383, orig=False):\n \"\"\"\n input a string of raw address\\n\n consult `raw_info()` before you decide the min and max\\n\n min should be slightly bigger than np.min(all_im). In most cases the black level works\\n\n max should be slightly bigger than np.max(all_im). In mosr cases 16383 works\\n\n try differnet values until the contrast seems fine\\n\n if orig then return the original im array [0,2^n]\\n\n else return (im2d-min)/max [0,1]\n \"\"\"\n raw = rawpy.imread(addr)\n im = raw.raw_image_visible.astype(np.float32)\n if orig: return im\n\n im = im - rmin\n im = im / (rmax-rmin)\n\n im = np.maximum(im,0.)\n im = np.minimum(im,1.)\n return im\n\ndef pack_bayer(bayer_2d):\n \"\"\"\n input an 2d image (H,W) from a bayer raw\\n\n return a 4d image (H/2,W/2,4)\\n\n a b\\n\n c d\\n\n im[x,x,0]-a, im[x,x,1]-b, im[x,x,2]-d, im[x,x,3]-c\\n\n \n normally,\\n\n im[x,x,0]-R, im[x,x,1]-G, im[x,x,2]-B, im[x,x,3]-G\n \"\"\"\n import numpy as np\n im = bayer_2d\n im = np.expand_dims(im, axis=2)\n \n out = np.concatenate((im[0::2, 0::2, :], # a\n im[0::2, 1::2, :], # b\n im[1::2, 1::2, :], # d\n im[1::2, 0::2, :] # c\n ), axis=2)\n return out\n\ndef pack_XTrans(bayer_2d):\n \"\"\"\n input a 2d xtrans raw image, pattern like this:\\n\n [0, 2, 1, 2, 0, 1],\\n\n [1, 1, 0, 1, 1, 2],\\n\n [1, 1, 2, 1, 1, 0],\\n\n [2, 0, 1, 0, 2, 1],\\n\n [1, 1, 2, 1, 1, 0],\\n\n [1, 1, 0, 1, 1, 2]\\n\n return a 9d image\\n\n R-0,4d G-1,5,6,7,8d B-1,5d\\n\n This is slightly different from the camerea used in Learning to See in the Dark\\n\n \"\"\"\n import numpy as np\n im = bayer_2d\n\n img_shape = im.shape\n H = (img_shape[0] // 6) * 6\n W = (img_shape[1] // 6) * 6\n\n out = np.zeros((H // 3, W // 3, 9))\n\n # 0 R\n out[0::2, 0::2, 0] = im[5:H:6, 0:W:6]\n out[0::2, 1::2, 0] = im[5:H:6, 4:W:6]\n out[1::2, 0::2, 0] = im[2:H:6, 1:W:6]\n out[1::2, 1::2, 0] = im[2:H:6, 3:W:6]\n\n # 1 G\n out[0::2, 0::2, 1] = im[5:H:6, 2:W:6]\n out[0::2, 1::2, 1] = im[5:H:6, 5:W:6]\n out[1::2, 0::2, 1] = im[2:H:6, 2:W:6]\n out[1::2, 1::2, 1] = im[2:H:6, 5:W:6]\n\n # 1 B\n out[0::2, 0::2, 2] = im[5:H:6, 1:W:6]\n out[0::2, 1::2, 2] = im[5:H:6, 3:W:6]\n out[1::2, 0::2, 2] = im[2:H:6, 0:W:6]\n out[1::2, 1::2, 2] = im[2:H:6, 4:W:6]\n\n # 4 R\n out[0::2, 0::2, 3] = im[0:H:6, 2:W:6]\n 
out[0::2, 1::2, 3] = im[1:H:6, 5:W:6]\n out[1::2, 0::2, 3] = im[4:H:6, 2:W:6]\n out[1::2, 1::2, 3] = im[3:H:6, 5:W:6]\n\n # 4 B\n out[0::2, 0::2, 4] = im[1:H:6, 2:W:6]\n out[0::2, 1::2, 4] = im[0:H:6, 5:W:6]\n out[1::2, 0::2, 4] = im[3:H:6, 2:W:6]\n out[1::2, 1::2, 4] = im[4:H:6, 5:W:6]\n\n out[:, :, 5] = im[0:H:3, 0:W:3]\n out[:, :, 6] = im[0:H:3, 1:W:3]\n out[:, :, 7] = im[1:H:3, 0:W:3]\n out[:, :, 8] = im[1:H:3, 1:W:3]\n return out\n\ndef imcrop(im,to_h,to_w,skip_n=1):\n assert(im.shape[0]>=to_h)\n assert(im.shape[1]>=to_w)\n \n marg_h = im.shape[0] - to_h\n marg_w = im.shape[1] - to_w\n h1=h2=w1=w2=0\n h1 = marg_h/2 if marg_h%2==0 else marg_h/2-0.5\n h2 = marg_h/2 if marg_h%2==0 else marg_h/2+0.5\n w1 = marg_w/2 if marg_w%2==0 else marg_w/2-0.5\n w2 = marg_w/2 if marg_w%2==0 else marg_w/2+0.5\n h1,h2,w1,w2 = int(h1),int(h2),int(w1),int(w2)\n # use explicit end indices so a zero margin keeps the full image (im[0:-0] would be empty)\n newim = im[h1:im.shape[0]-h2:skip_n,w1:im.shape[1]-w2:skip_n,:]\n \n return newim\n\ndef nd_to_3d(ndarray, Rlist, Glist, Blist, G_plus=0):\n \"\"\"\n input an nd image, Rchannels, Gchannels, Bchannels\\n\n increasing G_plus reduces the green intensity\n return a 3d image\n \"\"\"\n out = np.zeros([ndarray.shape[0],ndarray.shape[1],3])\n\n for r in Rlist: out[:,:,0] += ndarray[:,:,r]\n out[:,:,0] /= len(Rlist)\n for g in Glist: out[:,:,1] += ndarray[:,:,g]\n out[:,:,1] /= (len(Glist)+G_plus)\n for b in Blist: out[:,:,2] += ndarray[:,:,b]\n out[:,:,2] /= len(Blist)\n\n return out\n\ndef adjust_br(im, to_mean):\n assert(np.max(im)<=1)\n ratio = to_mean/np.mean(im)\n im = im*ratio\n im = np.maximum(im,0.)\n im = np.minimum(im,1.)\n return im\n\n\n# SONY\nraws_path = glob.glob('%s/1*.ARW'%input_dir)\nfor raw_addr in tqdm.tqdm(raws_path):\n # raw_info(raw_addr)\n # break\n im2d = raw_read(raw_addr,512,16383)\n im4d = pack_bayer(im2d)\n im4d = imcrop(im4d,4000//2,6000//2) # crop the size of the camera-produced raw to the camera-produced jpg\n im = nd_to_3d(im4d,[0],[1,3],[2],2)\n im = adjust_br(im,0.4)\n im = cv2.resize(im,(736,480))\n plt.imsave('%s/%s.png'%(output_dir,os.path.basename(raw_addr)[:-4]),im)\n\n# CANON\nraws_path = glob.glob('%s/2*.CR2'%input_dir)\nfor raw_addr in tqdm.tqdm(raws_path):\n # raw_info(raw_addr)\n # break\n im2d = raw_read(raw_addr,2047,16383)\n im4d = pack_bayer(im2d)\n im4d = imcrop(im4d,4000//2,6000//2) # crop the size of the camera-produced raw to the camera-produced jpg\n im = nd_to_3d(im4d,[0],[1,3],[2],0.2)\n im = adjust_br(im,0.4)\n im = cv2.resize(im,(736,480))\n plt.imsave('%s/%s.png'%(output_dir,os.path.basename(raw_addr)[:-4]),im)\n\n# FUJIFILM\nraws_path = glob.glob('%s/3*.RAF'%input_dir)\nfor raw_addr in tqdm.tqdm(raws_path):\n # raw_info(raw_addr)\n # break\n im2d = raw_read(raw_addr,1022,16383)\n im9d = pack_XTrans(im2d)\n im9d = imcrop(im9d,1333,6000//3) # crop the size of the camera-produced raw to the camera-produced jpg\n im = nd_to_3d(im9d,[0,3],[1,5,6,7,8],[2,4],2)\n im = adjust_br(im,0.4)\n im = cv2.resize(im,(736,480))\n plt.imsave('%s/%s.png'%(output_dir,os.path.basename(raw_addr)[:-4]),im)\n\n# NIKON\nraws_path = glob.glob('%s/4*.NEF'%input_dir)\nfor raw_addr in tqdm.tqdm(raws_path):\n # raw_info(raw_addr)\n # break\n im2d = raw_read(raw_addr,100,16383)\n im4d = pack_bayer(im2d)\n im4d = imcrop(im4d,5504//2,8256//2) # crop the size of the camera-produced raw to the camera-produced jpg\n im = nd_to_3d(im4d,[0],[1,3],[2],1)\n im = adjust_br(im,0.4)\n im = cv2.resize(im,(736,480))\n plt.imsave('%s/%s.png'%(output_dir,os.path.basename(raw_addr)[:-4]),im)\n" ]
[ [ "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "numpy.min", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
intelligent-control-lab/Composable_Agent_Toolbox
[ "39d71cdc0475ae6901cb30b63d181737bea35889" ]
[ "env/base_world/agent.py" ]
[ "import numpy as np\nfrom abc import ABC, abstractmethod\n\nclass Agent(ABC):\n \n def __init__(self, name, spec, collision=True):\n self.name = name\n self._x = spec['init_x']\n self.collision = collision\n self.broadcast = {}\n\n @abstractmethod\n def forward(self):\n pass\n \n @property\n def state(self):\n return self._x\n \n @abstractmethod\n def pos(self):\n pass\n\n @abstractmethod\n def vel(self):\n pass\n \n @property\n def info(self):\n info = {\"state\": self.state, \"pos\":self.pos, \"vel\":self.vel}\n return info\n \nclass BB8Agent(Agent):\n \n def _f(self, x):\n return np.vstack([x[2], x[3], 0, 0])\n\n def _g(self, x):\n B = np.matrix(np.zeros((4,2)))\n B[2,0] = 0.5\n B[3,1] = 0.5\n return B\n\n def forward(self, action, dt):\n # x = [x y dx dy], u = [ax ay]\n u = action['control']\n dot_x = self._f(self._x) + (self._g(self._x)*np.vstack(u))\n self._x = self._x + (dot_x * dt)\n \n self.broadcast = action[\"broadcast\"] if \"broadcast\" in action.keys() else {}\n\n\n @property\n def pos(self):\n return self._x[[0,1]]\n \n @property\n def vel(self):\n return self._x[[2,3]]\n\n\nclass GoalAgent(BB8Agent):\n \"\"\"The goal agent.\n This agent is a virtual agent represents the goal of a real agent.\n This agent only flash to a new place when the real agent reaches it.\n The reason we inheritate it from BB8Agent is to make it possible to be a \n dynamic goal in the future.\n \"\"\"\n def __init__(self, name, hunter, goal_list, reaching_eps, collision=False):\n self.name = name\n self._x = np.zeros((4,1))\n self.goal_list = goal_list\n self.goal_idx = 0\n self.hunter = hunter\n self.reaching_eps = reaching_eps\n self.collision = collision\n self._set_pos()\n self.broadcast = {}\n\n def _set_pos(self):\n self._x[[0,1]] = np.vstack(self.goal_list[self.goal_idx])\n\n def forward(self):\n if np.max(abs(self.pos - self.hunter.pos)) < self.reaching_eps:\n self.goal_idx = min(len(self.goal_list)-1, self.goal_idx+1)\n self._set_pos()\n\n @property\n def info(self):\n info = {\"state\": self.state, \"pos\":self.pos, \"vel\":self.vel, \"count\":self.goal_idx}\n return info\n" ]
[ [ "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leduchuy225/HairNet
[ "2d3f0b82a686d2ccc7fee4429ef5925ffabd8982" ]
[ "src/train.py" ]
[ "import os\nimport time\nimport logging\nimport argparse\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom dataloader import HairNetDataset\nfrom model import Net, MyLoss, CollisionLoss, CurMSE, PosMSE\n\n\nlog = logging.getLogger(\"HairNet\")\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", type=int, default=100)\n parser.add_argument(\"--batch_size\", type=int, default=8) # 32\n parser.add_argument(\"--lr\", type=float, default=0.0001)\n parser.add_argument(\"--lr_step\", type=int, default=10)\n parser.add_argument(\"--save_dir\", type=str, default=\"./weight/\")\n parser.add_argument(\"--data\", type=str, default=\"./\")\n parser.add_argument(\"--weight\", type=str, default=\"\")\n parser.add_argument(\"--test_step\", type=int, default=0)\n return parser.parse_args()\n\n\ndef train(model, dataloader, optimizer, device):\n model.train()\n for i, data in enumerate(dataloader, 0):\n img, convdata, visweight = data\n\n img = img.to(device)\n convdata = convdata.to(device)\n visweight = visweight.to(device)\n # img (bs, 3, 128, 128); convdata (bs, 100, 4, 32, 32); visweight (bs, 100, 32, 32)\n\n optimizer.zero_grad()\n\n output = net(img)\n my_loss = loss(output, convdata, visweight)\n\n my_loss.backward()\n\n optimizer.step()\n\n return my_loss\n\n\ndef test(model, dataloader, device):\n pos_error = PosMSE().to(device) # Position Loss\n cur_error = CurMSE().to(device) # Curvature Loss\n col_error = CollisionLoss().to(device) # Collision Loss\n\n tot_error = MyLoss().to(device)\n\n model.eval()\n for i, data in enumerate(dataloader, 0):\n img, convdata, visweight = data\n\n img = img.to(device)\n convdata = convdata.to(device)\n visweight = visweight.to(device)\n\n output = model(img)\n\n # cal loss\n pos = pos_error(output, convdata, visweight)\n cur = cur_error(output, convdata, visweight)\n col = col_error(output, convdata)\n\n tot = tot_error(output, convdata, visweight)\n\n log.info(\n f\"TESTING Epoch {i+1} | Loss[ Pos | Cur | Col | Total ]: \"\n f\"[ {pos:.8f} | {cur:.8f} | {col:.8f} | {tot:.8f} ]\"\n )\n\n # return pos.item(), cur.item(), col.item(), tot.item()\n\n\nif __name__ == \"__main__\":\n # load parameters\n opt = get_args()\n epochs, bs, lr, lr_step, save_dir, data, weight, test_step = (\n opt.epoch,\n opt.batch_size,\n opt.lr,\n opt.lr_step,\n opt.save_dir,\n opt.data,\n opt.weight,\n opt.test_step,\n )\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n log.info(f\"Training args: {opt}\")\n log.info(f\"Training device: {device}\")\n\n log.info(\"Initializing model and loss function ...\")\n net = Net().to(device)\n loss = MyLoss().to(device)\n\n if weight != \"\":\n log.info(\"Loading model's weight ...\")\n net.load_state_dict(torch.load(weight, map_location=torch.device(device)))\n\n # load data\n log.info(\"Loading data ...\")\n train_data = HairNetDataset(project_dir=data, train_flag=1, noise_flag=1)\n train_loader = DataLoader(dataset=train_data, batch_size=bs)\n log.info(f\"Train dataset: {len(train_data)} data points\")\n\n if test_step != 0:\n test_data = HairNetDataset(project_dir=data, train_flag=0, noise_flag=0)\n test_loader = DataLoader(dataset=test_data, batch_size=bs)\n log.info(f\"Test dataset: {len(test_data)} data points\")\n\n # setup optimizer & lr schedualer\n optimizer = optim.Adam(net.parameters(), lr=lr)\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)\n\n if not 
if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n t = time.localtime()\n save_path = save_dir + time.strftime(\"%H:%M:%S\", t)\n os.mkdir(save_path)\n\n # train\n log.info(\"Training ...\")\n pre_loss = 100000\n for epoch in range(epochs):\n # measure execution time\n # torch.cuda.synchronize()\n since = int(round(time.time() * 1000))\n\n train_loss = train(net, train_loader, optimizer, device)\n scheduler.step()\n\n # torch.cuda.synchronize()\n time_elapsed = int(round(time.time() * 1000)) - since\n\n # Logging\n log.info(\n f\"TRAINING Epoch {epoch+1} | Loss: {train_loss:.8f} | time: {time_elapsed}ms\"\n )\n if test_step != 0 and (epoch + 1) % test_step == 0:\n test(net, test_loader, device)\n\n # Save model by performance\n if train_loss < pre_loss:\n pre_loss = train_loss\n torch.save(net.state_dict(), save_path + \"/weight.pt\")\n" ]
[ [ "torch.device", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nolanstr/bingo_nolan_fork
[ "fad1547105d66fe91c58fb1c771af57cb26126c1" ]
[ "tests/performance_benchmarking/island_benchmarks.py" ]
[ "import timeit\n\nimport numpy as np\n\nfrom bingo.symbolic_regression.agraph.crossover import AGraphCrossover\nfrom bingo.symbolic_regression.agraph.mutation import AGraphMutation\nfrom bingo.symbolic_regression.agraph.generator import AGraphGenerator\nfrom bingo.symbolic_regression.agraph.component_generator \\\n import ComponentGenerator\nfrom bingo.symbolic_regression.explicit_regression import ExplicitRegression, \\\n ExplicitTrainingData\nfrom bingo.evolutionary_algorithms.age_fitness import AgeFitnessEA\nfrom bingo.evaluation.evaluation import Evaluation\nfrom bingo.evolutionary_optimizers.island import Island\nfrom bingo.local_optimizers.continuous_local_opt \\\n import ContinuousLocalOptimization\nfrom benchmark_data import StatsPrinter\n\nPOP_SIZE = 128\nSTACK_SIZE = 64\nMUTATION_PROBABILITY = 0.4\nCROSSOVER_PROBABILITY = 0.4\nNUM_POINTS = 100\nSTART = -10\nSTOP = 10\nERROR_TOLERANCE = 10e-9\nSEED = 20\n\n\ndef init_x_vals(start, stop, num_points):\n return np.linspace(start, stop, num_points).reshape([-1, 1])\n\n\ndef equation_eval(x):\n return x**2 + 3.5*x**3\n\n\ndef init_island():\n np.random.seed(15)\n x = init_x_vals(START, STOP, NUM_POINTS)\n y = equation_eval(x)\n training_data = ExplicitTrainingData(x, y)\n\n component_generator = ComponentGenerator(x.shape[1])\n component_generator.add_operator(2)\n component_generator.add_operator(3)\n component_generator.add_operator(4)\n\n crossover = AGraphCrossover(component_generator)\n mutation = AGraphMutation(component_generator)\n\n agraph_generator = AGraphGenerator(STACK_SIZE, component_generator)\n\n fitness = ExplicitRegression(training_data=training_data)\n local_opt_fitness = ContinuousLocalOptimization(fitness, algorithm='lm')\n evaluator = Evaluation(local_opt_fitness)\n\n ea_algorithm = AgeFitnessEA(evaluator, agraph_generator, crossover,\n mutation, MUTATION_PROBABILITY,\n CROSSOVER_PROBABILITY, POP_SIZE)\n\n island = Island(ea_algorithm, agraph_generator, POP_SIZE)\n return island\n\n\nTEST_ISLAND = init_island()\n\n\nclass IslandStatsPrinter(StatsPrinter):\n def __init__(self):\n super().__init__()\n self._output = [\"-\"*24+\":::: REGRESSION BENCHMARKS ::::\" + \"-\"*23,\n self._header_format_string.format(\"NAME\", \"MEAN\",\n \"STD\", \"MIN\", \"MAX\"),\n \"-\"*78]\n\n\ndef explicit_regression_benchmark():\n island = init_island()\n while island.get_best_individual().fitness > ERROR_TOLERANCE:\n island._execute_generational_step()\n\n\ndef do_benchmarking():\n printer = IslandStatsPrinter()\n printer.add_stats(\"Explicit Regression\",\n timeit.repeat(explicit_regression_benchmark,\n number=4,\n repeat=4))\n printer.print()\n\n\nif __name__ == \"__main__\":\n do_benchmarking()\n" ]
[ [ "numpy.random.seed", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ksekimoto/tensorflow
[ "d913cf12d0cca9823cf740a3ccfd2decb963f086" ]
[ "tensorflow/python/data/experimental/ops/readers.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrappers for reader Datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport functools\nimport gzip\n\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.data.experimental.ops import error_ops\nfrom tensorflow.python.data.experimental.ops import parsing_ops\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.data.util import convert\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import gen_experimental_dataset_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util.tf_export import tf_export\n\n_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,\n dtypes.int64, dtypes.string)\n\n\ndef _is_valid_int32(str_val):\n try:\n # Checks equality to prevent int32 overflow\n return dtypes.int32.as_numpy_dtype(str_val) == dtypes.int64.as_numpy_dtype(\n str_val)\n except (ValueError, OverflowError):\n return False\n\n\ndef _is_valid_int64(str_val):\n try:\n dtypes.int64.as_numpy_dtype(str_val)\n return True\n except (ValueError, OverflowError):\n return False\n\n\ndef _is_valid_float(str_val, float_dtype):\n try:\n return float_dtype.as_numpy_dtype(str_val) < np.inf\n except ValueError:\n return False\n\n\ndef _infer_type(str_val, na_value, prev_type):\n \"\"\"Given a string, infers its tensor type.\n\n Infers the type of a value by picking the least 'permissive' type possible,\n while still allowing the previous type inference for this column to be valid.\n\n Args:\n str_val: String value to infer the type of.\n na_value: Additional string to recognize as a NA/NaN CSV value.\n prev_type: Type previously inferred based on values of this column that\n we've seen up till now.\n Returns:\n Inferred dtype.\n \"\"\"\n if str_val in (\"\", na_value):\n # If the field is null, it gives no extra information about its type\n return prev_type\n\n type_list = [\n dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string\n ] # list of types to try, ordered from least permissive to most\n\n type_functions = [\n _is_valid_int32,\n _is_valid_int64,\n lambda str_val: _is_valid_float(str_val, dtypes.float32),\n lambda str_val: _is_valid_float(str_val, dtypes.float64),\n lambda str_val: True,\n ] # Corresponding list of validation functions\n\n for i in 
range(len(type_list)):\n validation_fn = type_functions[i]\n if validation_fn(str_val) and (prev_type is None or\n prev_type in type_list[:i + 1]):\n return type_list[i]\n\n\ndef _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,\n file_io_fn):\n \"\"\"Generator that yields rows of CSV file(s) in order.\"\"\"\n for fn in filenames:\n with file_io_fn(fn) as f:\n rdr = csv.reader(\n f,\n delimiter=field_delim,\n quoting=csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE)\n if header:\n next(rdr) # Skip header lines\n\n for csv_row in rdr:\n if len(csv_row) != num_cols:\n raise ValueError(\n \"Problem inferring types: CSV row has different number of fields \"\n \"than expected.\")\n yield csv_row\n\n\ndef _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim,\n na_value, header, num_rows_for_inference,\n select_columns, file_io_fn):\n \"\"\"Infers column types from the first N valid CSV records of files.\"\"\"\n if select_columns is None:\n select_columns = range(num_cols)\n inferred_types = [None] * len(select_columns)\n\n for i, csv_row in enumerate(\n _next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header,\n file_io_fn)):\n if num_rows_for_inference is not None and i >= num_rows_for_inference:\n break\n\n for j, col_index in enumerate(select_columns):\n inferred_types[j] = _infer_type(csv_row[col_index], na_value,\n inferred_types[j])\n\n # Replace None's with a default type\n inferred_types = [t or dtypes.string for t in inferred_types]\n # Default to 0 or '' for null values\n return [\n constant_op.constant([0 if t is not dtypes.string else \"\"], dtype=t)\n for t in inferred_types\n ]\n\n\ndef _infer_column_names(filenames, field_delim, use_quote_delim, file_io_fn):\n \"\"\"Infers column names from first rows of files.\"\"\"\n csv_kwargs = {\n \"delimiter\": field_delim,\n \"quoting\": csv.QUOTE_MINIMAL if use_quote_delim else csv.QUOTE_NONE\n }\n with file_io_fn(filenames[0]) as f:\n try:\n column_names = next(csv.reader(f, **csv_kwargs))\n except StopIteration:\n raise ValueError((\"Received StopIteration when reading the header line \"\n \"of %s. Empty file?\") % filenames[0])\n\n for name in filenames[1:]:\n with file_io_fn(name) as f:\n try:\n if next(csv.reader(f, **csv_kwargs)) != column_names:\n raise ValueError(\n \"Files have different column names in the header row.\")\n except StopIteration:\n raise ValueError((\"Received StopIteration when reading the header line \"\n \"of %s. 
Empty file?\") % filenames[0])\n return column_names\n\n\ndef _get_sorted_col_indices(select_columns, column_names):\n \"\"\"Transforms select_columns argument into sorted column indices.\"\"\"\n names_to_indices = {n: i for i, n in enumerate(column_names)}\n num_cols = len(column_names)\n\n results = []\n for v in select_columns:\n # If value is already an int, check if it's valid.\n if isinstance(v, int):\n if v < 0 or v >= num_cols:\n raise ValueError(\n \"Column index %d specified in select_columns out of valid range.\" %\n v)\n results.append(v)\n # Otherwise, check that it's a valid column name and convert to the\n # the relevant column index.\n elif v not in names_to_indices:\n raise ValueError(\n \"Value '%s' specified in select_columns not a valid column index or \"\n \"name.\" % v)\n else:\n results.append(names_to_indices[v])\n\n # Sort and ensure there are no duplicates\n results = sorted(set(results))\n if len(results) != len(select_columns):\n raise ValueError(\"select_columns contains duplicate columns\")\n return results\n\n\ndef _maybe_shuffle_and_repeat(\n dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed):\n \"\"\"Optionally shuffle and repeat dataset, as requested.\"\"\"\n if shuffle:\n dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)\n if num_epochs != 1:\n dataset = dataset.repeat(num_epochs)\n return dataset\n\n\ndef make_tf_record_dataset(file_pattern,\n batch_size,\n parser_fn=None,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=None,\n shuffle_seed=None,\n prefetch_buffer_size=None,\n num_parallel_reads=None,\n num_parallel_parser_calls=None,\n drop_final_batch=False):\n \"\"\"Reads and optionally parses TFRecord files into a dataset.\n\n Provides common functionality such as batching, optional parsing, shuffling,\n and performant defaults.\n\n Args:\n file_pattern: List of files or patterns of TFRecord file paths.\n See `tf.io.gfile.glob` for pattern rules.\n batch_size: An int representing the number of records to combine\n in a single batch.\n parser_fn: (Optional.) A function accepting string input to parse\n and process the record contents. This function must map records\n to components of a fixed shape, so they may be batched. By\n default, uses the record contents unmodified.\n num_epochs: (Optional.) An int specifying the number of times this\n dataset is repeated. If None (the default), cycles through the\n dataset forever.\n shuffle: (Optional.) A bool that indicates whether the input\n should be shuffled. Defaults to `True`.\n shuffle_buffer_size: (Optional.) Buffer size to use for\n shuffling. A large buffer size ensures better shuffling, but\n increases memory usage and startup time.\n shuffle_seed: (Optional.) Randomization seed to use for shuffling.\n prefetch_buffer_size: (Optional.) An int specifying the number of\n feature batches to prefetch for performance improvement.\n Defaults to auto-tune. Set to 0 to disable prefetching.\n num_parallel_reads: (Optional.) Number of threads used to read\n records from files. By default or if set to a value >1, the\n results will be interleaved. Defaults to `24`.\n num_parallel_parser_calls: (Optional.) Number of parallel\n records to parse in parallel. Defaults to `batch_size`.\n drop_final_batch: (Optional.) 
Whether the last batch should be\n dropped in case its size is smaller than `batch_size`; the\n default behavior is not to drop the smaller batch.\n\n Returns:\n A dataset, where each element matches the output of `parser_fn`\n except it will have an additional leading `batch-size` dimension,\n or a `batch_size`-length 1-D tensor of strings if `parser_fn` is\n unspecified.\n \"\"\"\n if num_parallel_reads is None:\n # NOTE: We considered auto-tuning this value, but there is a concern\n # that this affects the mixing of records from different files, which\n # could affect training convergence/accuracy, so we are defaulting to\n # a constant for now.\n num_parallel_reads = 24\n\n if num_parallel_parser_calls is None:\n # TODO(josh11b): if num_parallel_parser_calls is None, use some function\n # of num cores instead of `batch_size`.\n num_parallel_parser_calls = batch_size\n\n if prefetch_buffer_size is None:\n prefetch_buffer_size = dataset_ops.AUTOTUNE\n\n files = dataset_ops.Dataset.list_files(\n file_pattern, shuffle=shuffle, seed=shuffle_seed)\n\n dataset = core_readers.TFRecordDataset(\n files, num_parallel_reads=num_parallel_reads)\n\n if shuffle_buffer_size is None:\n # TODO(josh11b): Auto-tune this value when not specified\n shuffle_buffer_size = 10000\n dataset = _maybe_shuffle_and_repeat(\n dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)\n\n # NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to\n # improve the shape inference, because it makes the batch dimension static.\n # It is safe to do this because in that case we are repeating the input\n # indefinitely, and all batches will be full-sized.\n drop_final_batch = drop_final_batch or num_epochs is None\n\n if parser_fn is None:\n dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)\n else:\n dataset = dataset.map(\n parser_fn, num_parallel_calls=num_parallel_parser_calls)\n dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)\n\n if prefetch_buffer_size == 0:\n return dataset\n else:\n return dataset.prefetch(buffer_size=prefetch_buffer_size)\n\n\n@tf_export(\"data.experimental.make_csv_dataset\", v1=[])\ndef make_csv_dataset_v2(\n file_pattern,\n batch_size,\n column_names=None,\n column_defaults=None,\n label_name=None,\n select_columns=None,\n field_delim=\",\",\n use_quote_delim=True,\n na_value=\"\",\n header=True,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=None,\n num_parallel_reads=None,\n sloppy=False,\n num_rows_for_inference=100,\n compression_type=None,\n ignore_errors=False,\n):\n \"\"\"Reads CSV files into a dataset.\n\n Reads CSV files into a dataset, where each element of the dataset is a\n (features, labels) tuple that corresponds to a batch of CSV rows. The features\n dictionary maps feature column names to `Tensor`s containing the corresponding\n feature data, and labels is a `Tensor` containing the batch's label data.\n\n By default, the first rows of the CSV files are expected to be headers listing\n the column names. If the first rows are not headers, set `header=False` and\n provide the column names with the `column_names` argument.\n\n By default, the dataset is repeated indefinitely, reshuffling the order each\n time. 
This behavior can be modified by setting the `num_epochs` and `shuffle`\n arguments.\n\n For example, suppose you have a CSV file containing\n\n | Feature_A | Feature_B |\n | --------- | --------- |\n | 1 | \"a\" |\n | 2 | \"b\" |\n | 3 | \"c\" |\n | 4 | \"d\" |\n\n ```\n # No label column specified\n dataset = tf.data.experimental.make_csv_dataset(filename, batch_size=2)\n iterator = ds.as_numpy_iterator()\n print(dict(next(iterator)))\n # prints a dictionary of batched features:\n # OrderedDict([('Feature_A', array([1, 4], dtype=int32)),\n # ('Feature_B', array([b'a', b'd'], dtype=object))])\n ```\n\n ```\n # Set Feature_B as label column\n dataset = tf.data.experimental.make_csv_dataset(\n filename, batch_size=2, label_name=\"Feature_B\")\n iterator = ds.as_numpy_iterator()\n print(next(iterator))\n # prints (features, labels) tuple:\n # (OrderedDict([('Feature_A', array([1, 2], dtype=int32))]),\n # array([b'a', b'b'], dtype=object))\n ```\n\n See the\n [Load CSV data guide](https://www.tensorflow.org/tutorials/load_data/csv) for\n more examples of using `make_csv_dataset` to read CSV data.\n\n Args:\n file_pattern: List of files or patterns of file paths containing CSV\n records. See `tf.io.gfile.glob` for pattern rules.\n batch_size: An int representing the number of records to combine\n in a single batch.\n column_names: An optional list of strings that corresponds to the CSV\n columns, in order. One per column of the input record. If this is not\n provided, infers the column names from the first row of the records.\n These names will be the keys of the features dict of each dataset element.\n column_defaults: A optional list of default values for the CSV fields. One\n item per selected column of the input record. Each item in the list is\n either a valid CSV dtype (float32, float64, int32, int64, or string), or a\n `Tensor` with one of the aforementioned types. The tensor can either be\n a scalar default value (if the column is optional), or an empty tensor (if\n the column is required). If a dtype is provided instead of a tensor, the\n column is also treated as required. If this list is not provided, tries\n to infer types based on reading the first num_rows_for_inference rows of\n files specified, and assumes all columns are optional, defaulting to `0`\n for numeric values and `\"\"` for string values. If both this and\n `select_columns` are specified, these must have the same lengths, and\n `column_defaults` is assumed to be sorted in order of increasing column\n index.\n label_name: A optional string corresponding to the label column. If\n provided, the data for this column is returned as a separate `Tensor` from\n the features dictionary, so that the dataset complies with the format\n expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input\n function.\n select_columns: An optional list of integer indices or string column\n names, that specifies a subset of columns of CSV data to select. If\n column names are provided, these must correspond to names provided in\n `column_names` or inferred from the file header lines. When this argument\n is specified, only a subset of CSV columns will be parsed and returned,\n corresponding to the columns specified. Using this results in faster\n parsing and lower memory usage. If both this and `column_defaults` are\n specified, these must have the same lengths, and `column_defaults` is\n assumed to be sorted in order of increasing column index.\n field_delim: An optional `string`. Defaults to `\",\"`. 
Char delimiter to\n separate fields in a record.\n use_quote_delim: An optional bool. Defaults to `True`. If false, treats\n double quotation marks as regular characters inside of the string fields.\n na_value: Additional string to recognize as NA/NaN.\n header: A bool that indicates whether the first rows of provided CSV files\n correspond to header lines with column names, and should not be included\n in the data.\n num_epochs: An int specifying the number of times this dataset is repeated.\n If None, cycles through the dataset forever.\n shuffle: A bool that indicates whether the input should be shuffled.\n shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size\n ensures better shuffling, but increases memory usage and startup time.\n shuffle_seed: Randomization seed to use for shuffling.\n prefetch_buffer_size: An int specifying the number of feature\n batches to prefetch for performance improvement. Recommended value is the\n number of batches consumed per training step. Defaults to auto-tune.\n num_parallel_reads: Number of threads used to read CSV records from files.\n If >1, the results will be interleaved. Defaults to `1`.\n sloppy: If `True`, reading performance will be improved at\n the cost of non-deterministic ordering. If `False`, the order of elements\n produced is deterministic prior to shuffling (elements are still\n randomized if `shuffle=True`. Note that if the seed is set, then order\n of elements after shuffling is deterministic). Defaults to `False`.\n num_rows_for_inference: Number of rows of a file to use for type inference\n if record_defaults is not provided. If None, reads all the rows of all\n the files. Defaults to 100.\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`. Defaults to no compression.\n ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing,\n such as malformed data or empty lines, and moves on to the next valid\n CSV record. Otherwise, the dataset raises an error and stops processing\n when encountering any invalid records. Defaults to `False`.\n\n Returns:\n A dataset, where each element is a (features, labels) tuple that corresponds\n to a batch of `batch_size` CSV rows. 
The features dictionary maps feature\n column names to `Tensor`s containing the corresponding column data, and\n labels is a `Tensor` containing the column data for the label column\n specified by `label_name`.\n\n Raises:\n ValueError: If any of the arguments is malformed.\n \"\"\"\n if num_parallel_reads is None:\n num_parallel_reads = 1\n\n if prefetch_buffer_size is None:\n prefetch_buffer_size = dataset_ops.AUTOTUNE\n\n # Create dataset of all matching filenames\n filenames = _get_file_names(file_pattern, False)\n dataset = dataset_ops.Dataset.from_tensor_slices(filenames)\n if shuffle:\n dataset = dataset.shuffle(len(filenames), shuffle_seed)\n\n # Clean arguments; figure out column names and defaults\n if column_names is None or column_defaults is None:\n # Find out which io function to open the file\n file_io_fn = lambda filename: file_io.FileIO(filename, \"r\")\n if compression_type is not None:\n compression_type_value = tensor_util.constant_value(compression_type)\n if compression_type_value is None:\n raise ValueError(\"Received unknown compression_type\")\n if compression_type_value == \"GZIP\":\n file_io_fn = lambda filename: gzip.open(filename, \"rt\")\n elif compression_type_value == \"ZLIB\":\n raise ValueError(\n \"compression_type (%s) is not supported for probing columns\" %\n compression_type)\n elif compression_type_value != \"\":\n raise ValueError(\"compression_type (%s) is not supported\" %\n compression_type)\n if column_names is None:\n if not header:\n raise ValueError(\"Cannot infer column names without a header line.\")\n # If column names are not provided, infer from the header lines\n column_names = _infer_column_names(filenames, field_delim, use_quote_delim,\n file_io_fn)\n if len(column_names) != len(set(column_names)):\n raise ValueError(\"Cannot have duplicate column names.\")\n\n if select_columns is not None:\n select_columns = _get_sorted_col_indices(select_columns, column_names)\n\n if column_defaults is not None:\n column_defaults = [\n constant_op.constant([], dtype=x)\n if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x\n for x in column_defaults\n ]\n else:\n # If column defaults are not provided, infer from records at graph\n # construction time\n column_defaults = _infer_column_defaults(filenames, len(column_names),\n field_delim, use_quote_delim,\n na_value, header,\n num_rows_for_inference,\n select_columns, file_io_fn)\n\n if select_columns is not None and len(column_defaults) != len(select_columns):\n raise ValueError(\n \"If specified, column_defaults and select_columns must have same \"\n \"length.\"\n )\n if select_columns is not None and len(column_names) > len(select_columns):\n # Pick the relevant subset of column names\n column_names = [column_names[i] for i in select_columns]\n\n if label_name is not None and label_name not in column_names:\n raise ValueError(\"`label_name` provided must be one of the columns.\")\n\n def filename_to_dataset(filename):\n dataset = CsvDataset(\n filename,\n record_defaults=column_defaults,\n field_delim=field_delim,\n use_quote_delim=use_quote_delim,\n na_value=na_value,\n select_cols=select_columns,\n header=header,\n compression_type=compression_type\n )\n if ignore_errors:\n dataset = dataset.apply(error_ops.ignore_errors())\n return dataset\n\n def map_fn(*columns):\n \"\"\"Organizes columns into a features dictionary.\n\n Args:\n *columns: list of `Tensor`s corresponding to one csv record.\n Returns:\n An OrderedDict of feature names to values for that particular record. 
If\n label_name is provided, extracts the label feature to be returned as the\n second element of the tuple.\n \"\"\"\n features = collections.OrderedDict(zip(column_names, columns))\n if label_name is not None:\n label = features.pop(label_name)\n return features, label\n return features\n\n if num_parallel_reads == dataset_ops.AUTOTUNE:\n dataset = dataset.interleave(\n filename_to_dataset, num_parallel_calls=num_parallel_reads)\n options = dataset_ops.Options()\n options.experimental_deterministic = not sloppy\n dataset = dataset.with_options(options)\n else:\n # Read files sequentially (if num_parallel_reads=1) or in parallel\n def apply_fn(dataset):\n return core_readers.ParallelInterleaveDataset(\n dataset,\n filename_to_dataset,\n cycle_length=num_parallel_reads,\n block_length=1,\n sloppy=sloppy,\n buffer_output_elements=None,\n prefetch_input_elements=None)\n\n dataset = dataset.apply(apply_fn)\n\n dataset = _maybe_shuffle_and_repeat(\n dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)\n\n # Apply batch before map for perf, because map has high overhead relative\n # to the size of the computation in each map.\n # NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to\n # improve the shape inference, because it makes the batch dimension static.\n # It is safe to do this because in that case we are repeating the input\n # indefinitely, and all batches will be full-sized.\n dataset = dataset.batch(batch_size=batch_size,\n drop_remainder=num_epochs is None)\n dataset = dataset_ops.MapDataset(\n dataset, map_fn, use_inter_op_parallelism=False)\n dataset = dataset.prefetch(prefetch_buffer_size)\n\n return dataset\n\n\n@tf_export(v1=[\"data.experimental.make_csv_dataset\"])\ndef make_csv_dataset_v1(\n file_pattern,\n batch_size,\n column_names=None,\n column_defaults=None,\n label_name=None,\n select_columns=None,\n field_delim=\",\",\n use_quote_delim=True,\n na_value=\"\",\n header=True,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=None,\n num_parallel_reads=None,\n sloppy=False,\n num_rows_for_inference=100,\n compression_type=None,\n ignore_errors=False,\n): # pylint: disable=missing-docstring\n return dataset_ops.DatasetV1Adapter(make_csv_dataset_v2(\n file_pattern, batch_size, column_names, column_defaults, label_name,\n select_columns, field_delim, use_quote_delim, na_value, header,\n num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,\n prefetch_buffer_size, num_parallel_reads, sloppy, num_rows_for_inference,\n compression_type, ignore_errors))\nmake_csv_dataset_v1.__doc__ = make_csv_dataset_v2.__doc__\n\n\n_DEFAULT_READER_BUFFER_SIZE_BYTES = 4 * 1024 * 1024 # 4 MB\n\n\n@tf_export(\"data.experimental.CsvDataset\", v1=[])\nclass CsvDatasetV2(dataset_ops.DatasetSource):\n r\"\"\"A Dataset comprising lines from one or more CSV files.\n\n The `tf.data.experimental.CsvDataset` class provides a minimal CSV Dataset\n interface. 
There is also a richer `tf.data.experimental.make_csv_dataset`\n function which provides additional convenience features such as column header\n parsing, column type-inference, automatic shuffling, and file interleaving.\n\n The elements of this dataset correspond to records from the file(s).\n RFC 4180 format is expected for CSV files\n (https://tools.ietf.org/html/rfc4180)\n Note that we allow leading and trailing spaces for int or float fields.\n\n For example, suppose we have a file 'my_file0.csv' with four CSV columns of\n different data types:\n\n >>> with open('/tmp/my_file0.csv', 'w') as f:\n ... f.write('abcdefg,4.28E10,5.55E6,12\\n')\n ... f.write('hijklmn,-5.3E14,,2\\n')\n\n We can construct a CsvDataset from it as follows:\n\n >>> dataset = tf.data.experimental.CsvDataset(\n ... \"/tmp/my_file0.csv\",\n ... [tf.float32, # Required field, use dtype or empty tensor\n ... tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0\n ... tf.int32, # Required field, use dtype or empty tensor\n ... ],\n ... select_cols=[1,2,3] # Only parse last three columns\n ... )\n\n The expected output of its iterations is:\n\n >>> for element in dataset.as_numpy_iterator():\n ... print(element)\n (4.28e10, 5.55e6, 12)\n (-5.3e14, 0.0, 2)\n\n See\n https://www.tensorflow.org/tutorials/load_data/csv#tfdataexperimentalcsvdataset\n for more in-depth example usage.\n \"\"\"\n\n def __init__(self,\n filenames,\n record_defaults,\n compression_type=None,\n buffer_size=None,\n header=False,\n field_delim=\",\",\n use_quote_delim=True,\n na_value=\"\",\n select_cols=None,\n exclude_cols=None):\n \"\"\"Creates a `CsvDataset` by reading and decoding CSV files.\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n record_defaults: A list of default values for the CSV fields. Each item in\n the list is either a valid CSV `DType` (float32, float64, int32, int64,\n string), or a `Tensor` object with one of the above types. One per\n column of CSV data, with either a scalar `Tensor` default value for the\n column if it is optional, or `DType` or empty `Tensor` if required. If\n both this and `select_columns` are specified, these must have the same\n lengths, and `column_defaults` is assumed to be sorted in order of\n increasing column index. If both this and 'exclude_cols' are specified,\n the sum of lengths of record_defaults and exclude_cols should equal\n the total number of columns in the CSV file.\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`. Defaults to no\n compression.\n buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes\n to buffer while reading files. Defaults to 4MB.\n header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)\n have header line(s) that should be skipped when parsing. Defaults to\n `False`.\n field_delim: (Optional.) A `tf.string` scalar containing the delimiter\n character that separates fields in a record. Defaults to `\",\"`.\n use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats\n double quotation marks as regular characters inside of string fields\n (ignoring RFC 4180, Section 2, Bullet 5). Defaults to `True`.\n na_value: (Optional.) A `tf.string` scalar indicating a value that will\n be treated as NA/NaN.\n select_cols: (Optional.) A sorted list of column indices to select from\n the input data. If specified, only this subset of columns will be\n parsed. Defaults to parsing all columns. 
At most one of `select_cols`\n and `exclude_cols` can be specified.\n exclude_cols: (Optional.) A sorted list of column indices to exclude from\n the input data. If specified, only the complement of this set of column\n will be parsed. Defaults to parsing all columns. At most one of\n `select_cols` and `exclude_cols` can be specified.\n\n Raises:\n InvalidArgumentError: If exclude_cols is not None and\n len(exclude_cols) + len(record_defaults) does not match the total\n number of columns in the file(s)\n\n\n \"\"\"\n self._filenames = ops.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n self._compression_type = convert.optional_param_to_tensor(\n \"compression_type\",\n compression_type,\n argument_default=\"\",\n argument_dtype=dtypes.string)\n record_defaults = [\n constant_op.constant([], dtype=x)\n if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x\n for x in record_defaults\n ]\n self._record_defaults = ops.convert_n_to_tensor(\n record_defaults, name=\"record_defaults\")\n self._buffer_size = convert.optional_param_to_tensor(\n \"buffer_size\", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)\n self._header = ops.convert_to_tensor(\n header, dtype=dtypes.bool, name=\"header\")\n self._field_delim = ops.convert_to_tensor(\n field_delim, dtype=dtypes.string, name=\"field_delim\")\n self._use_quote_delim = ops.convert_to_tensor(\n use_quote_delim, dtype=dtypes.bool, name=\"use_quote_delim\")\n self._na_value = ops.convert_to_tensor(\n na_value, dtype=dtypes.string, name=\"na_value\")\n self._select_cols = convert.optional_param_to_tensor(\n \"select_cols\",\n select_cols,\n argument_default=[],\n argument_dtype=dtypes.int64,\n )\n self._exclude_cols = convert.optional_param_to_tensor(\n \"exclude_cols\",\n exclude_cols,\n argument_default=[],\n argument_dtype=dtypes.int64,\n )\n self._element_spec = tuple(\n tensor_spec.TensorSpec([], d.dtype) for d in self._record_defaults)\n variant_tensor = gen_experimental_dataset_ops.csv_dataset_v2(\n filenames=self._filenames,\n record_defaults=self._record_defaults,\n buffer_size=self._buffer_size,\n header=self._header,\n output_shapes=self._flat_shapes,\n field_delim=self._field_delim,\n use_quote_delim=self._use_quote_delim,\n na_value=self._na_value,\n select_cols=self._select_cols,\n exclude_cols=self._exclude_cols,\n compression_type=self._compression_type)\n super(CsvDatasetV2, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\n@tf_export(v1=[\"data.experimental.CsvDataset\"])\nclass CsvDatasetV1(dataset_ops.DatasetV1Adapter):\n \"\"\"A Dataset comprising lines from one or more CSV files.\"\"\"\n\n @functools.wraps(CsvDatasetV2.__init__, (\"__module__\", \"__name__\"))\n def __init__(self,\n filenames,\n record_defaults,\n compression_type=None,\n buffer_size=None,\n header=False,\n field_delim=\",\",\n use_quote_delim=True,\n na_value=\"\",\n select_cols=None):\n \"\"\"Creates a `CsvDataset` by reading and decoding CSV files.\n\n The elements of this dataset correspond to records from the file(s).\n RFC 4180 format is expected for CSV files\n (https://tools.ietf.org/html/rfc4180)\n Note that we allow leading and trailing spaces with int or float field.\n\n\n For example, suppose we have a file 'my_file0.csv' with four CSV columns of\n different data types:\n ```\n abcdefg,4.28E10,5.55E6,12\n hijklmn,-5.3E14,,2\n ```\n\n We can construct a CsvDataset from it as follows:\n\n ```python\n dataset = tf.data.experimental.CsvDataset(\n 
\"my_file*.csv\",\n [tf.float32, # Required field, use dtype or empty tensor\n tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0\n tf.int32, # Required field, use dtype or empty tensor\n ],\n select_cols=[1,2,3] # Only parse last three columns\n )\n ```\n\n The expected output of its iterations is:\n\n ```python\n for element in dataset:\n print(element)\n\n >> (4.28e10, 5.55e6, 12)\n >> (-5.3e14, 0.0, 2)\n ```\n\n Args:\n filenames: A `tf.string` tensor containing one or more filenames.\n record_defaults: A list of default values for the CSV fields. Each item in\n the list is either a valid CSV `DType` (float32, float64, int32, int64,\n string), or a `Tensor` object with one of the above types. One per\n column of CSV data, with either a scalar `Tensor` default value for the\n column if it is optional, or `DType` or empty `Tensor` if required. If\n both this and `select_columns` are specified, these must have the same\n lengths, and `column_defaults` is assumed to be sorted in order of\n increasing column index. If both this and 'exclude_cols' are specified,\n the sum of lengths of record_defaults and exclude_cols should equal the\n total number of columns in the CSV file.\n compression_type: (Optional.) A `tf.string` scalar evaluating to one of\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`. Defaults to no\n compression.\n buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes\n to buffer while reading files. Defaults to 4MB.\n header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)\n have header line(s) that should be skipped when parsing. Defaults to\n `False`.\n field_delim: (Optional.) A `tf.string` scalar containing the delimiter\n character that separates fields in a record. Defaults to `\",\"`.\n use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats double\n quotation marks as regular characters inside of string fields (ignoring\n RFC 4180, Section 2, Bullet 5). Defaults to `True`.\n na_value: (Optional.) A `tf.string` scalar indicating a value that will be\n treated as NA/NaN.\n select_cols: (Optional.) A sorted list of column indices to select from\n the input data. If specified, only this subset of columns will be\n parsed. Defaults to parsing all columns. 
At most one of `select_cols`\n and `exclude_cols` can be specified.\n \"\"\"\n wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,\n buffer_size, header, field_delim, use_quote_delim,\n na_value, select_cols)\n super(CsvDatasetV1, self).__init__(wrapped)\n\n\n@tf_export(\"data.experimental.make_batched_features_dataset\", v1=[])\ndef make_batched_features_dataset_v2(file_pattern,\n batch_size,\n features,\n reader=None,\n label_key=None,\n reader_args=None,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=None,\n reader_num_threads=None,\n parser_num_threads=None,\n sloppy_ordering=False,\n drop_final_batch=False):\n \"\"\"Returns a `Dataset` of feature dictionaries from `Example` protos.\n\n If label_key argument is provided, returns a `Dataset` of tuple\n comprising of feature dictionaries and label.\n\n Example:\n\n ```\n serialized_examples = [\n features {\n feature { key: \"age\" value { int64_list { value: [ 0 ] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"code\", \"art\" ] } } }\n },\n features {\n feature { key: \"age\" value { int64_list { value: [] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"sports\" ] } } }\n }\n ]\n ```\n\n We can use arguments:\n\n ```\n features: {\n \"age\": FixedLenFeature([], dtype=tf.int64, default_value=-1),\n \"gender\": FixedLenFeature([], dtype=tf.string),\n \"kws\": VarLenFeature(dtype=tf.string),\n }\n ```\n\n And the expected output is:\n\n ```python\n {\n \"age\": [[0], [-1]],\n \"gender\": [[\"f\"], [\"f\"]],\n \"kws\": SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0]],\n values=[\"code\", \"art\", \"sports\"]\n dense_shape=[2, 2]),\n }\n ```\n\n Args:\n file_pattern: List of files or patterns of file paths containing\n `Example` records. See `tf.io.gfile.glob` for pattern rules.\n batch_size: An int representing the number of records to combine\n in a single batch.\n features: A `dict` mapping feature keys to `FixedLenFeature` or\n `VarLenFeature` values. See `tf.io.parse_example`.\n reader: A function or class that can be\n called with a `filenames` tensor and (optional) `reader_args` and returns\n a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.\n label_key: (Optional) A string corresponding to the key labels are stored in\n `tf.Examples`. If provided, it must be one of the `features` key,\n otherwise results in `ValueError`.\n reader_args: Additional arguments to pass to the reader class.\n num_epochs: Integer specifying the number of times to read through the\n dataset. If None, cycles through the dataset forever. Defaults to `None`.\n shuffle: A boolean, indicates whether the input should be shuffled. Defaults\n to `True`.\n shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity\n ensures better shuffling but would increase memory usage and startup time.\n shuffle_seed: Randomization seed to use for shuffling.\n prefetch_buffer_size: Number of feature batches to prefetch in order to\n improve performance. Recommended value is the number of batches consumed\n per training step. Defaults to auto-tune.\n reader_num_threads: Number of threads used to read `Example` records. If >1,\n the results will be interleaved. Defaults to `1`.\n parser_num_threads: Number of threads to use for parsing `Example` tensors\n into a dictionary of `Feature` tensors. 
Defaults to `2`.\n sloppy_ordering: If `True`, reading performance will be improved at\n the cost of non-deterministic ordering. If `False`, the order of elements\n produced is deterministic prior to shuffling (elements are still\n randomized if `shuffle=True`. Note that if the seed is set, then order\n of elements after shuffling is deterministic). Defaults to `False`.\n drop_final_batch: If `True`, and the batch size does not evenly divide the\n input dataset size, the final smaller batch will be dropped. Defaults to\n `False`.\n\n Returns:\n A dataset of `dict` elements, (or a tuple of `dict` elements and label).\n Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.\n\n Raises:\n TypeError: If `reader` is of the wrong type.\n ValueError: If `label_key` is not one of the `features` keys.\n \"\"\"\n if reader is None:\n reader = core_readers.TFRecordDataset\n\n if reader_num_threads is None:\n reader_num_threads = 1\n if parser_num_threads is None:\n parser_num_threads = 2\n if prefetch_buffer_size is None:\n prefetch_buffer_size = dataset_ops.AUTOTUNE\n\n # Create dataset of all matching filenames\n dataset = dataset_ops.Dataset.list_files(\n file_pattern, shuffle=shuffle, seed=shuffle_seed)\n\n if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):\n raise TypeError(\"The `reader` argument must return a `Dataset` object. \"\n \"`tf.ReaderBase` subclasses are not supported. For \"\n \"example, pass `tf.data.TFRecordDataset` instead of \"\n \"`tf.TFRecordReader`.\")\n\n # Read `Example` records from files as tensor objects.\n if reader_args is None:\n reader_args = []\n\n if reader_num_threads == dataset_ops.AUTOTUNE:\n dataset = dataset.interleave(\n lambda filename: reader(filename, *reader_args),\n num_parallel_calls=reader_num_threads)\n options = dataset_ops.Options()\n options.experimental_deterministic = not sloppy_ordering\n dataset = dataset.with_options(options)\n else:\n # Read files sequentially (if reader_num_threads=1) or in parallel\n def apply_fn(dataset):\n return core_readers.ParallelInterleaveDataset(\n dataset,\n lambda filename: reader(filename, *reader_args),\n cycle_length=reader_num_threads,\n block_length=1,\n sloppy=sloppy_ordering,\n buffer_output_elements=None,\n prefetch_input_elements=None)\n\n dataset = dataset.apply(apply_fn)\n\n # Extract values if the `Example` tensors are stored as key-value tuples.\n if dataset_ops.get_legacy_output_types(dataset) == (\n dtypes.string, dtypes.string):\n dataset = dataset_ops.MapDataset(\n dataset, lambda _, v: v, use_inter_op_parallelism=False)\n\n # Apply dataset repeat and shuffle transformations.\n dataset = _maybe_shuffle_and_repeat(\n dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)\n\n # NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to\n # improve the shape inference, because it makes the batch dimension static.\n # It is safe to do this because in that case we are repeating the input\n # indefinitely, and all batches will be full-sized.\n dataset = dataset.batch(\n batch_size, drop_remainder=drop_final_batch or num_epochs is None)\n\n # Parse `Example` tensors to a dictionary of `Feature` tensors.\n dataset = dataset.apply(\n parsing_ops.parse_example_dataset(\n features, num_parallel_calls=parser_num_threads))\n\n if label_key:\n if label_key not in features:\n raise ValueError(\n \"The `label_key` provided (%r) must be one of the `features` keys.\" %\n label_key)\n dataset = dataset.map(lambda x: (x, x.pop(label_key)))\n\n dataset = 
dataset.prefetch(prefetch_buffer_size)\n return dataset\n\n\n@tf_export(v1=[\"data.experimental.make_batched_features_dataset\"])\ndef make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring\n batch_size,\n features,\n reader=None,\n label_key=None,\n reader_args=None,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=None,\n reader_num_threads=None,\n parser_num_threads=None,\n sloppy_ordering=False,\n drop_final_batch=False):\n return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2(\n file_pattern, batch_size, features, reader, label_key, reader_args,\n num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,\n prefetch_buffer_size, reader_num_threads, parser_num_threads,\n sloppy_ordering, drop_final_batch))\nmake_batched_features_dataset_v1.__doc__ = (\n make_batched_features_dataset_v2.__doc__)\n\n\ndef _get_file_names(file_pattern, shuffle):\n \"\"\"Parse list of file names from pattern, optionally shuffled.\n\n Args:\n file_pattern: File glob pattern, or list of glob patterns.\n shuffle: Whether to shuffle the order of file names.\n\n Returns:\n List of file names matching `file_pattern`.\n\n Raises:\n ValueError: If `file_pattern` is empty, or pattern matches no files.\n \"\"\"\n if isinstance(file_pattern, list):\n if not file_pattern:\n raise ValueError(\"File pattern is empty.\")\n file_names = []\n for entry in file_pattern:\n file_names.extend(gfile.Glob(entry))\n else:\n file_names = list(gfile.Glob(file_pattern))\n\n if not file_names:\n raise ValueError(\"No files match %s.\" % file_pattern)\n\n # Sort files so it will be deterministic for unit tests.\n if not shuffle:\n file_names = sorted(file_names)\n return file_names\n\n\n@tf_export(\"data.experimental.SqlDataset\", v1=[])\nclass SqlDatasetV2(dataset_ops.DatasetSource):\n \"\"\"A `Dataset` consisting of the results from a SQL query.\n\n `SqlDataset` allows a user to read data from the result set of a SQL query.\n For example:\n\n ```python\n dataset = tf.data.experimental.SqlDataset(\"sqlite\", \"/foo/bar.sqlite3\",\n \"SELECT name, age FROM people\",\n (tf.string, tf.int32))\n # Prints the rows of the result set of the above query.\n for element in dataset:\n print(element)\n ```\n \"\"\"\n\n def __init__(self, driver_name, data_source_name, query, output_types):\n \"\"\"Creates a `SqlDataset`.\n\n Args:\n driver_name: A 0-D `tf.string` tensor containing the database type.\n Currently, the only supported value is 'sqlite'.\n data_source_name: A 0-D `tf.string` tensor containing a connection string\n to connect to the database.\n query: A 0-D `tf.string` tensor containing the SQL query to execute.\n output_types: A tuple of `tf.DType` objects representing the types of the\n columns returned by `query`.\n \"\"\"\n self._driver_name = ops.convert_to_tensor(\n driver_name, dtype=dtypes.string, name=\"driver_name\")\n self._data_source_name = ops.convert_to_tensor(\n data_source_name, dtype=dtypes.string, name=\"data_source_name\")\n self._query = ops.convert_to_tensor(\n query, dtype=dtypes.string, name=\"query\")\n self._element_spec = nest.map_structure(\n lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)\n variant_tensor = gen_experimental_dataset_ops.sql_dataset(\n self._driver_name, self._data_source_name, self._query,\n **self._flat_structure)\n super(SqlDatasetV2, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return 
self._element_spec\n\n\n@tf_export(v1=[\"data.experimental.SqlDataset\"])\nclass SqlDatasetV1(dataset_ops.DatasetV1Adapter):\n \"\"\"A `Dataset` consisting of the results from a SQL query.\"\"\"\n\n @functools.wraps(SqlDatasetV2.__init__)\n def __init__(self, driver_name, data_source_name, query, output_types):\n wrapped = SqlDatasetV2(driver_name, data_source_name, query, output_types)\n super(SqlDatasetV1, self).__init__(wrapped)\n\n\nif tf2.enabled():\n CsvDataset = CsvDatasetV2\n SqlDataset = SqlDatasetV2\n make_batched_features_dataset = make_batched_features_dataset_v2\n make_csv_dataset = make_csv_dataset_v2\nelse:\n CsvDataset = CsvDatasetV1\n SqlDataset = SqlDatasetV1\n make_batched_features_dataset = make_batched_features_dataset_v1\n make_csv_dataset = make_csv_dataset_v1\n" ]
[ [ "tensorflow.python.data.ops.dataset_ops.MapDataset", "tensorflow.python.framework.dtypes.int64.as_numpy_dtype", "tensorflow.python.ops.gen_experimental_dataset_ops.sql_dataset", "tensorflow.python.data.ops.dataset_ops.get_legacy_output_types", "tensorflow.python.framework.dtypes.int32.as_numpy_dtype", "tensorflow.python.data.ops.readers.ParallelInterleaveDataset", "tensorflow.python.data.experimental.ops.parsing_ops.parse_example_dataset", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.gen_experimental_dataset_ops.csv_dataset_v2", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.data.ops.readers.TFRecordDataset", "tensorflow.python.data.util.convert.optional_param_to_tensor", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.ops.dataset_ops.Dataset.list_files", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.python.platform.gfile.Glob", "tensorflow.python.data.ops.dataset_ops.Options", "tensorflow.python.framework.tensor_util.is_tf_type", "tensorflow.python.framework.ops.convert_n_to_tensor", "tensorflow.python.tf2.enabled", "tensorflow.python.data.experimental.ops.error_ops.ignore_errors", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.5", "2.6" ] } ]
mihikagaonkar/book-recommender-system
[ "c171f2e25018ed1c7c45ae9d62846a0823fa147e" ]
[ "database.py" ]
[ "import sqlite3\nimport pandas as pd\n\nconn = sqlite3.connect('books.db')\nc = conn.cursor()\ntiles = pd.read_csv('Titles.csv')\ntiles.to_sql('titles', conn, if_exists='append', index = False, chunksize = 1000)" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Rock-100/MonoDet
[ "fd50fc9f93a37cf435d3395bf85d8af85cdab5c9" ]
[ "detectron2/utils/env.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport importlib\nimport importlib.util\nimport logging\nimport numpy as np\nimport os\nimport random\nimport sys\nfrom datetime import datetime\nimport torch\n\n__all__ = [\"seed_all_rng\"]\n\n\nTORCH_VERSION = tuple(int(x) for x in torch.__version__.split(\".\")[:2])\n\"\"\"\nPyTorch version as a tuple of 2 ints. Useful for comparison.\n\"\"\"\n\n\ndef seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.set_rng_state(torch.manual_seed(seed).get_state())\n random.seed(seed)\n \n\n# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\ndef _import_file(module_name, file_path, make_importable=False):\n spec = importlib.util.spec_from_file_location(module_name, file_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n if make_importable:\n sys.modules[module_name] = module\n return module\n\n\ndef _configure_libraries():\n \"\"\"\n Configurations for some libraries.\n \"\"\"\n # An environment option to disable `import cv2` globally,\n # in case it leads to negative performance impact\n disable_cv2 = int(os.environ.get(\"DETECTRON2_DISABLE_CV2\", False))\n if disable_cv2:\n sys.modules[\"cv2\"] = None\n else:\n # Disable opencl in opencv since its interaction with cuda often has negative effects\n # This envvar is supported after OpenCV 3.4.0\n os.environ[\"OPENCV_OPENCL_RUNTIME\"] = \"disabled\"\n try:\n import cv2\n\n if int(cv2.__version__.split(\".\")[0]) >= 3:\n cv2.ocl.setUseOpenCL(False)\n except ImportError:\n pass\n\n def get_version(module, digit=2):\n return tuple(map(int, module.__version__.split(\".\")[:digit]))\n\n # fmt: off\n assert get_version(torch) >= (1, 4), \"Requires torch>=1.4\"\n import fvcore\n assert get_version(fvcore, 3) >= (0, 1, 1), \"Requires fvcore>=0.1.1\"\n import yaml\n assert get_version(yaml) >= (5, 1), \"Requires pyyaml>=5.1\"\n # fmt: on\n\n\n_ENV_SETUP_DONE = False\n\n\ndef setup_environment():\n \"\"\"Perform environment setup work. 
The default setup is a no-op, but this\n function allows the user to specify a Python source file or a module in\n the $DETECTRON2_ENV_MODULE environment variable, that performs\n custom setup work that may be necessary to their computing environment.\n \"\"\"\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else:\n # The default setup is a no-op\n pass\n\n\ndef setup_custom_environment(custom_module):\n \"\"\"\n Load custom environment setup by importing a Python source file or a\n module, and run the setup function.\n \"\"\"\n if custom_module.endswith(\".py\"):\n module = _import_file(\"detectron2.utils.env.custom_module\", custom_module)\n else:\n module = importlib.import_module(custom_module)\n assert hasattr(module, \"setup_environment\") and callable(module.setup_environment), (\n \"Custom environment module defined in {} does not have the \"\n \"required callable attribute 'setup_environment'.\"\n ).format(custom_module)\n module.setup_environment()\n" ]
[ [ "torch.manual_seed", "torch.__version__.split", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MaxOSmith/benri
[ "ef3333adc8f4c5680b8f5a8cac225eb25bb713fa" ]
[ "benri/gym/observation.py" ]
[ "\"\"\" Utility functions for observation spaces. \"\"\"\nfrom collections import defaultdict\nimport copy\nimport operator\n\nimport numpy as np\n\nimport benri.dict as dict_ops\n\n\ndef select_from_batch(observation, batch_i):\n \"\"\"\n\n :param observations: List of observations. \n :return:\n \"\"\"\n selected = []\n\n for obs in observation:\n selected_from_obs = {}\n\n for key, value in obs.items():\n selected_from_obs[key] = value[batch_i]\n\n selected += [selected_from_obs]\n\n return selected \n\n\ndef merge_batch_dynamic_observations(observations, o_types):\n \"\"\"\n\n :param observations: List of observations. Each observation is a list \n of sensor readings.\n :return:\n \"\"\"\n batch_size = len(observations)\n\n # Start working backward from each batch.\n pointers = np.array([len(o) - 1 for o in observations])\n\n merged = [] \n mask = []\n\n while not np.all(pointers == -1):\n # Look at the pointers, get the most frequent type.\n types = [o_types[i][p] if p != -1 else None for i, p in enumerate(pointers)]\n\n counts = defaultdict(int)\n for t in types:\n if t is not None:\n counts[t] += 1\n o_type = max(counts.items(), key=operator.itemgetter(1))[0]\n\n o_is = [i for i, t in enumerate(types) if t == o_type]\n assert len(o_is) > 0\n\n # If 1: Expand dims\n if counts[o_type] == 1:\n assert len(o_is) == 1\n i = o_is[0]\n o = observations[i][pointers[i]]\n \n # Shift pointer.\n pointers[i] -= 1\n # Record.\n alive = np.zeros([batch_size])\n alive[i] = 1\n merged = [o] + merged\n mask += [alive]\n continue\n \n # Else: > 1. \n \n # Grab one of the observation types, and tile it so that it \n # has B as the leading dimension. Also start an aliveness mask\n # to determine which inputs to process.\n obs = observations[o_is[0]][pointers[o_is[0]]]\n\n alive = np.zeros([batch_size])\n\n for obs_batch_i in o_is:\n pointer = pointers[obs_batch_i]\n o = observations[obs_batch_i][pointer]\n\n # Load the correct data at this index.\n for key in o.keys():\n obs[key][obs_batch_i] = o[key][obs_batch_i]\n\n # Mark this data to be read.\n alive[obs_batch_i] = 1\n pointers[obs_batch_i] -= 1\n\n # Record.\n merged = [o] + merged\n mask += [alive]\n \n # mask = np.concatenate(mask, axis=0)\n return merged, mask\n\n\ndef merge_dynamic_observations(observations, o_types):\n \"\"\"\n\n :param observations: List of observations. 
Each observation is a list \n of sensor readings.\n :return:\n \"\"\"\n batch_size = len(observations)\n\n def _expand_and_tile(x):\n x = np.expand_dims(x, axis=0)\n shape = np.ones_like(x.shape)\n shape[0] = batch_size \n x = np.tile(x, shape)\n return x\n\n # Start working backward from each batch.\n pointers = np.array([len(o) - 1 for o in observations])\n\n merged = [] \n mask = []\n\n while not np.all(pointers == -1):\n # Look at the pointers, get the most frequent type.\n types = [o_types[i][p] if p != -1 else None for i, p in enumerate(pointers)]\n\n counts = defaultdict(int)\n for t in types:\n if t is not None:\n counts[t] += 1\n o_type = max(counts.items(), key=operator.itemgetter(1))[0]\n\n o_is = [i for i, t in enumerate(types) if t == o_type]\n assert len(o_is) > 0\n\n # If 1: Expand dims\n if counts[o_type] == 1:\n assert len(o_is) == 1\n i = o_is[0]\n o = copy.deepcopy(observations[i][pointers[i]])\n o = dict_ops.apply_fn(o, _expand_and_tile)\n \n # Shift pointer.\n pointers[i] -= 1\n # Record.\n alive = np.zeros([batch_size])\n alive[i] = 1\n # o = dict_ops.apply_fn(o, lambda x: np.concatenate(x, axis=0))\n merged = [o] + merged\n mask += [alive]\n continue\n \n # Else: > 1. \n \n # Grab one of the observation types, and tile it so that it \n # has B as the leading dimension. Also start an aliveness mask\n # to determine which inputs to process.\n obs = copy.deepcopy(observations[o_is[0]][pointers[o_is[0]]])\n obs = dict_ops.apply_fn(obs, _expand_and_tile)\n\n alive = np.zeros([batch_size])\n\n for obs_batch_i in o_is:\n pointer = pointers[obs_batch_i]\n o = observations[obs_batch_i][pointer]\n\n # Load the correct data at this index.\n for key in o.keys():\n obs[key][obs_batch_i] = o[key]\n\n # Mark this data to be read.\n alive[obs_batch_i] = 1\n pointers[obs_batch_i] -= 1\n\n # Record.\n # obs = dict_ops.apply_fn(obs, lambda x: np.concatenate(x, axis=0))\n merged = [obs] + merged\n mask += [alive]\n \n # mask = np.concatenate(mask, axis=0)\n return merged, mask\n" ]
[ [ "numpy.expand_dims", "numpy.ones_like", "numpy.tile", "numpy.all", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RuiHirano/distributed-dreamerv2
[ "243b94d1cbecc6fc7cc91bb032d8a5d4e35ddb23" ]
[ "dreamerv2/expl2.py" ]
[ "import tensorflow as tf\nfrom tensorflow_probability import distributions as tfd\n\nimport learner_actor\nimport common\n\n\nclass Random(common.Module):\n\n def __init__(self, config, act_space, wm, reward):\n self.config = config\n self.act_space = self.act_space\n\n def actor(self, feat):\n shape = feat.shape[:-1] + self.act_space.shape\n if self.config.actor.dist == 'onehot':\n return common.OneHotDist(tf.zeros(shape))\n else:\n dist = tfd.Uniform(-tf.ones(shape), tf.ones(shape))\n return tfd.Independent(dist, 1)\n\n def train(self, start, context, data):\n return None, {}\n\n\nclass Plan2Explore(common.Module):\n\n def __init__(self, config, act_space, wm, reward):\n self.config = config\n self.reward = reward\n self.wm = wm\n self.ac = learner_actor.ActorCritic(config, act_space)\n self.actor = self.ac.actor\n stoch_size = config.rssm.stoch\n if config.rssm.discrete:\n stoch_size *= config.rssm.discrete\n size = {\n 'embed': 32 * config.encoder.cnn_depth,\n 'stoch': stoch_size,\n 'deter': config.rssm.deter,\n 'feat': config.rssm.stoch + config.rssm.deter,\n }[self.config.disag_target]\n self._networks = [\n common.MLP(size, **config.expl_head)\n for _ in range(config.disag_models)]\n self.opt = common.Optimizer('expl', **config.expl_opt)\n self.extr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)\n self.intr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)\n\n def train(self, start, context, data):\n metrics = {}\n stoch = start['stoch']\n if self.config.rssm.discrete:\n stoch = tf.reshape(\n stoch, stoch.shape[:-2] + (stoch.shape[-2] * stoch.shape[-1]))\n target = {\n 'embed': context['embed'],\n 'stoch': stoch,\n 'deter': start['deter'],\n 'feat': context['feat'],\n }[self.config.disag_target]\n inputs = context['feat']\n if self.config.disag_action_cond:\n action = tf.cast(data['action'], inputs.dtype)\n inputs = tf.concat([inputs, action], -1)\n metrics.update(self._train_ensemble(inputs, target))\n metrics.update(self.ac.train(\n self.wm, start, data['is_terminal'], self._intr_reward))\n return None, metrics\n\n def _intr_reward(self, seq):\n inputs = seq['feat']\n if self.config.disag_action_cond:\n action = tf.cast(seq['action'], inputs.dtype)\n inputs = tf.concat([inputs, action], -1)\n preds = [head(inputs).mode() for head in self._networks]\n disag = tf.tensor(preds).std(0).mean(-1)\n if self.config.disag_log:\n disag = tf.math.log(disag)\n reward = self.config.expl_intr_scale * self.intr_rewnorm(disag)[0]\n if self.config.expl_extr_scale:\n reward += self.config.expl_extr_scale * self.extr_rewnorm(\n self.reward(seq))[0]\n return reward\n\n def _train_ensemble(self, inputs, targets):\n if self.config.disag_offset:\n targets = targets[:, self.config.disag_offset:]\n inputs = inputs[:, :-self.config.disag_offset]\n targets = tf.stop_gradient(targets)\n inputs = tf.stop_gradient(inputs)\n with tf.GradientTape() as tape:\n preds = [head(inputs) for head in self._networks]\n loss = -sum([pred.log_prob(targets).mean() for pred in preds])\n metrics = self.opt(tape, loss, self._networks)\n return metrics\n\n\nclass ModelLoss(common.Module):\n\n def __init__(self, config, act_space, wm, reward):\n self.config = config\n self.reward = reward\n self.wm = wm\n self.ac = learner_actor.ActorCritic(config, act_space)\n self.actor = self.ac.actor\n self.head = common.MLP([], **self.config.expl_head)\n self.opt = common.Optimizer('expl', **self.config.expl_opt)\n\n def train(self, start, context, data):\n metrics = {}\n target = 
tf.cast(context[self.config.expl_model_loss], tf.float32)\n with tf.GradientTape() as tape:\n loss = -self.head(context['feat']).log_prob(target).mean()\n metrics.update(self.opt(tape, loss, self.head))\n metrics.update(self.ac.train(\n self.wm, start, data['is_terminal'], self._intr_reward))\n return None, metrics\n\n def _intr_reward(self, seq):\n reward = self.config.expl_intr_scale * self.head(seq['feat']).mode()\n if self.config.expl_extr_scale:\n reward += self.config.expl_extr_scale * self.reward(seq)\n return reward\n" ]
[ [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.reshape", "tensorflow.cast", "tensorflow.ones", "tensorflow.math.log", "tensorflow.stop_gradient", "tensorflow.tensor", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
MichiganCOG/Surgical_Hands_RELEASE
[ "a66bdcf4953ef4b03eb8907d833c68b436f07c20" ]
[ "models/flowtrack/flowtrack.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom collections import OrderedDict\n\nimport datasets.preprocessing_transforms as pt\n\nBN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck_CAFFE(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck_CAFFE, self).__init__()\n # add stride to conv1x1\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\n momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = 
self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nresnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\n\nclass FlowTrack(nn.Module):\n\n def __init__(self, **kwargs):\n\n hands_preprocessing = ['Mixed_Hands', 'Manual_Hands', 'Hand_Dets', 'Surgical_Hands', 'Surgical_Hands_v2']\n\n if kwargs['dataset'] in hands_preprocessing:\n self.train_transforms = PreprocessTrainHand(**kwargs)\n self.test_transforms = PreprocessEvalHand(**kwargs)\n else:\n self.train_transforms = PreprocessTrainFlowTrack(**kwargs) \n self.test_transforms = PreprocessEvalFlowTrack(**kwargs)\n\n num_layers = kwargs['num_layers']\n block, layers = resnet_spec[num_layers]\n\n self.inplanes = 64\n self.deconv_with_bias = kwargs['deconv_with_bias']\n\n super(FlowTrack, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n # used for deconv layers\n self.deconv_layers = self._make_deconv_layer(\n kwargs['num_deconv_layers'],\n kwargs['num_deconv_filters'],\n kwargs['num_deconv_kernels'],)\n\n self.final_layer = nn.Conv2d(\n in_channels=kwargs['num_deconv_filters'][-1],\n out_channels=kwargs['num_joints'],\n kernel_size=kwargs['final_conv_kernel'],\n stride=1,\n padding=1 if kwargs['final_conv_kernel'] == 3 else 0\n )\n\n #for saving and/or outputting visual features\n self.save_feat = kwargs['save_feat']\n self.out_feat = kwargs['out_feat'] \n #self.pooling = nn.AdaptiveAvgPool2d((1,1))\n\n #Also try with max pooling instead \n self.pooling = nn.AdaptiveMaxPool2d((1,1))\n\n if isinstance(kwargs['pretrained'], int) and kwargs['pretrained']:\n self.init_weights()\n\n image_height, image_width = kwargs['final_shape']\n self.heatmap_size = kwargs['heatmap_size']\n\n self.network_stride = (image_width/self.heatmap_size[0],\n image_height/self.heatmap_size[1])#effective stride of the entire network\n\n self.num_joints = kwargs['num_joints']\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _get_deconv_cfg(self, deconv_kernel, index):\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is 
different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i], i)\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.inplanes,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n layers.append(nn.ReLU(inplace=True))\n self.inplanes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n outputs = []\n feats = []\n #flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],\n # [9, 10], [11, 12], [13, 14], [15, 16]]\n flip_pairs = [[3,4],[5,6],[7,8],[9,10],[11,12],[13,14],[15,16]]\n \n #expected shape: [batch,3,T,H,W]\n B = x.shape[0]\n T = x.shape[2]\n\n test = False \n for t in range(T):\n output = self.forward_one(x[:,:,t])\n\n if test:\n input_flipped = np.flip(x[:,:,t].cpu().numpy(), 3).copy()\n input_flipped = torch.from_numpy(input_flipped).cuda()\n output_flipped = self.forward_one(input_flipped)\n output_flipped = flip_back(output_flipped.cpu().numpy(),\n flip_pairs)\n output_flipped = torch.from_numpy(output_flipped.copy()).cuda()\n\n #NOTE: Figure out what this means\n # feature is not aligned, shift flipped heatmap for higher accuracy\n output_flipped[:, :, :, 1:] = output_flipped.clone()[:, :, :, 0:-1]\n\n out = (output + output_flipped) * 0.5\n else:\n out = output \n\n if self.save_feat or self.out_feat:\n outputs.append(out[0])\n feats.append(out[1])\n else:\n outputs.append(out)\n\n if self.save_feat or self.out_feat:\n return {'outputs':torch.stack(outputs, dim=1), 'feat':torch.stack(feats, dim=1)}\n else: \n return torch.stack(outputs, dim=1)\n\n def forward_one(self, x):\n #x1_0 = x.cpu()\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n vis = self.pooling(x)\n vis = torch.flatten(vis,1)\n\n x = self.layer1(x)\n #x2_0 = torch.sum(x,dim=1).cpu().numpy()\n x = self.layer2(x)\n #x3_0 = torch.sum(x,dim=1).cpu().numpy()\n x = self.layer3(x)\n #x4_0 = torch.sum(x,dim=1).cpu().numpy()\n x = self.layer4(x)\n #x5_0 = torch.sum(x,dim=1).cpu().numpy()\n\n x = self.deconv_layers(x)\n #x6_0 = torch.sum(x,dim=1).cpu().numpy()\n x = self.final_layer(x)\n\n '''\n import matplotlib.pyplot as plt\n plt.subplot(2,4,1)\n plt.title('rgb image')\n plt.imshow(x1_0[0].permute(1,2,0))\n plt.subplot(2,4,2)\n plt.title('Layer 1')\n plt.imshow(x2_0[0])\n plt.colorbar()\n plt.subplot(2,4,3)\n plt.title('Layer 2')\n plt.imshow(x3_0[0])\n plt.colorbar()\n plt.subplot(2,4,4)\n plt.title('Layer 3')\n plt.imshow(x4_0[0])\n plt.colorbar()\n plt.subplot(2,4,5)\n plt.title('Layer 4')\n plt.imshow(x5_0[0])\n plt.colorbar()\n plt.subplot(2,4,6)\n plt.title('Deconv Layer')\n plt.imshow(x6_0[0])\n plt.colorbar()\n plt.subplot(2,4,7)\n plt.title('final output')\n plt.imshow(torch.sum(x[0], dim=0).cpu().numpy())\n plt.colorbar()\n\n plt.show()\n '''\n\n if self.save_feat or self.out_feat:\n return x, vis\n else:\n return x\n\n def init_weights(self, pretrained='./weights/resnet152-b121ed2d.pth'):\n if os.path.isfile(pretrained):\n logger.info('=> init deconv weights from normal distribution')\n for name, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 
0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n if self.deconv_with_bias:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n logger.info('=> init {}.weight as 1'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n logger.info('=> init final conv weights from normal distribution')\n for name, m in self.final_layer.named_modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))\n logger.info('=> init {}.bias as 0'.format(name))\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0)\n\n pretrained_state_dict = torch.load(pretrained)\n logger.info('=> loading pretrained model {}'.format(pretrained))\n self.load_state_dict(pretrained_state_dict, strict=False)\n print('Loaded pretrained model {}'.format(pretrained))\n\n else:\n logger.error('=> imagenet pretrained model does not exist')\n logger.error('=> please download it first')\n raise ValueError('imagenet pretrained model does not exist')\n\ndef flip_back(output_flipped, matched_parts):\n ''' \n output_flipped: numpy.ndarray(batch_size, num_joints, height, width)\n '''\n assert output_flipped.ndim == 4, \\\n 'output_flipped should be [batch_size, num_joints, height, width]'\n\n output_flipped = output_flipped[:, :, :, ::-1]\n\n for pair in matched_parts:\n tmp = output_flipped[:, pair[0], :, :].copy()\n output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]\n output_flipped[:, pair[1], :, :] = tmp \n\n return output_flipped\n\n#Source: python-openpose repo; Hzzone\ndef padRightDownCorner(img, stride, padValue):\n h = img.shape[0]\n w = img.shape[1]\n\n pad = 4 * [None]\n pad[0] = 0 # up\n pad[1] = 0 # left\n pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down\n pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right\n\n img_padded = img \n pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) \n img_padded = np.concatenate((pad_up, img_padded), axis=0)\n pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) \n img_padded = np.concatenate((pad_left, img_padded), axis=1)\n pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) \n img_padded = np.concatenate((img_padded, pad_down), axis=0)\n pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) \n img_padded = np.concatenate((img_padded, pad_right), axis=1)\n\n return img_padded, pad\n\nclass PreprocessTrainFlowTrack(object):\n \"\"\"\n Container for all transforms used to preprocess clips for training in this dataset.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Initialize preprocessing class for training set\n Args:\n preprocess (String): Keyword to select different preprocessing types \n crop_type (String): Select random or central crop \n\n Return:\n None\n \"\"\"\n\n self.transforms = []\n self.preprocess = kwargs['preprocess']\n crop_type = kwargs['crop_type']\n\n self.transforms.append(pt.SubtractRGBMean(**kwargs))\n self.transforms.append(pt.AffineTransformClip(**kwargs))\n self.transforms.append(pt.RandomFlipClip(direction='h', p=0.5, **kwargs))\n\n self.transforms.append(pt.ToTensorClip(**kwargs))\n \n def __call__(self, input_data, params):\n center = params['center']\n scale = params['scale']\n key_pts = params['key_pts']\n\n out_params = {}\n out_params['trans'] = None \n out_params['inv_trans'] = None \n out_params['flip'] = False\n\n 
for transform in self.transforms:\n if isinstance(transform, pt.AffineTransformClip):\n transform._update_params(center=center, scale=scale)\n out_params['trans'] = transform.trans\n out_params['inv_trans'] = transform.inv_trans\n\n if key_pts == []:\n input_data = transform(input_data)\n else:\n input_data, key_pts = transform(input_data, key_pts)\n\n if isinstance(transform, pt.RandomFlipClip):\n out_params['flip'] = transform.flip \n\n return input_data, key_pts, out_params \n\n\nclass PreprocessEvalFlowTrack(object):\n \"\"\"\n Container for all transforms used to preprocess clips for training in this dataset.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Initialize preprocessing class for training set\n Args:\n preprocess (String): Keyword to select different preprocessing types \n crop_type (String): Select random or central crop \n\n Return:\n None\n \"\"\"\n\n self.transforms = []\n\n self.transforms.append(pt.SubtractRGBMean(**kwargs))\n self.transforms.append(pt.AffineTransformClip(test=True,**kwargs))\n\n self.transforms.append(pt.ToTensorClip(**kwargs))\n\n def __call__(self, input_data, params):\n center = params['center']\n scale = params['scale']\n key_pts = params['key_pts']\n\n out_params = {}\n out_params['trans'] = None \n out_params['inv_trans'] = None \n out_params['flip'] = False\n\n for transform in self.transforms:\n if isinstance(transform, pt.AffineTransformClip):\n transform._update_params(center=center, scale=scale)\n out_params['trans'] = transform.trans\n out_params['inv_trans'] = transform.inv_trans\n\n if key_pts == []:\n input_data = transform(input_data)\n else:\n input_data, key_pts = transform(input_data, key_pts)\n\n return input_data, key_pts, out_params\n\nclass PreprocessTrainHand(object):\n \"\"\"\n Container for all transforms used to preprocess clips for training in this dataset.\n \"\"\"\n def __init__(self, **kwargs):\n crop_type = kwargs['crop_type']\n self.transforms = []\n\n if kwargs['hand_jitter']:\n #Perform this transform first because PIL operations destroy floating point accuracy\n class_kwargs = {'brightness':0.4,'contrast':0.4,'saturation':0.4,'hue':0.4}\n self.transforms.append(pt.ApplyToPIL(transform=torchvision.transforms.ColorJitter, class_kwargs=class_kwargs))\n\n self.transforms.append(pt.SubtractRGBMean(**kwargs))\n\n if crop_type == 'Random':\n self.transforms.append(pt.RandomCropClip(**kwargs))\n elif crop_type=='RandomFrame':\n self.transforms.append(pt.ApplyToClip(transform=torchvision.transforms.RandomCrop(**kwargs)))\n elif crop_type == 'Center':\n self.transforms.append(pt.CenterCropClip(**kwargs))\n elif crop_type == 'CropClip':\n self.transforms.append(pt.CropClip(**kwargs))\n\n if kwargs['hand_scale']:\n min_scale = kwargs['hand_scale_amount'][0]\n max_scale = kwargs['hand_scale_amount'][1]\n self.transforms.append(pt.RandomZoomClip(scale=(min_scale, max_scale)))\n\n if kwargs['hand_rotate']:\n min_deg = kwargs['hand_rotate_amount'][0]\n max_deg = kwargs['hand_rotate_amount'][1]\n self.transforms.append(pt.RandomRotateClip(angles=np.arange(min_deg,max_deg), **kwargs))\n\n if kwargs['hand_translate']:\n max_tx = kwargs['hand_translate_amount'][0]\n max_ty = kwargs['hand_translate_amount'][1]\n self.transforms.append(pt.RandomTranslateClip(translate=(max_tx, max_ty), **kwargs))\n\n self.transforms.append(pt.RandomFlipClip(direction='h', p=0.5, **kwargs))\n\n self.transforms.append(pt.ResizeClip(**kwargs))\n self.transforms.append(pt.ToTensorClip(**kwargs))\n\n def __call__(self, input_data, params):\n \"\"\"\n 
Preprocess the clip and the bbox data accordingly\n Args:\n input_data: List of PIL images containing clip frames \n bbox_data: Numpy array containing bbox coordinates per object per frame \n hand_crop: Region (around hand) to crop from input image\n label: Is left hand \n\n Return:\n input_data: Pytorch tensor containing the processed clip data \n bbox_data: Numpy tensor containing the augmented bbox coordinates\n \"\"\"\n\n bbox_data = params['bbox_data']\n hand_crop = params['hand_crop']\n label = params['label']\n angle = params.get('in_rot', None)\n\n out_params = {}\n out_params['flip'] = False\n\n for transform in self.transforms:\n if isinstance(transform, pt.CropClip):\n transform._update_bbox(hand_crop[0], hand_crop[2], hand_crop[1], hand_crop[3], True)\n\n if isinstance(transform, pt.RandomRotateClip) and angle is not None:\n transform._update_angles([angle])\n\n input_data, bbox_data = transform(input_data, bbox_data)\n\n if isinstance(transform, pt.RandomFlipClip):\n out_params['flip'] = transform.flip \n\n if isinstance(transform, pt.RandomRotateClip):\n out_params['out_rot'] = transform.out_rot\n \n return input_data, bbox_data, out_params\n\nclass PreprocessEvalHand(object):\n \"\"\"\n Container for all transforms used to preprocess clips for evaluation in this dataset.\n \"\"\"\n def __init__(self, **kwargs):\n crop_type = kwargs['crop_type']\n self.transforms = []\n\n self.transforms.append(pt.SubtractRGBMean(**kwargs))\n\n if crop_type == 'Random':\n self.transforms.append(pt.RandomCropClip(**kwargs))\n elif crop_type=='RandomFrame':\n self.transforms.append(pt.ApplyToClip(transform=torchvision.transforms.RandomCrop(**kwargs)))\n elif crop_type == 'Center':\n self.transforms.append(pt.CenterCropClip(**kwargs))\n elif crop_type == 'CropClip':\n self.transforms.append(pt.CropClip(**kwargs))\n\n #self.transforms.append(pt.RandomFlipClip(direction='h', p=1.0, **kwargs))\n self.transforms.append(pt.ResizeClip(**kwargs))\n self.transforms.append(pt.ToTensorClip())\n\n\n def __call__(self, input_data, params):\n \"\"\"\n Preprocess the clip and the bbox data accordingly\n Args:\n input_data: List of PIL images containing clip frames \n bbox_data: Numpy array containing bbox coordinates per object per frame \n hand_crop: Region (around hand) to crop from input image\n label: Is left hand \n\n Return:\n input_data: Pytorch tensor containing the processed clip data \n bbox_data: Numpy tensor containing the augmented bbox coordinates\n \"\"\"\n\n bbox_data = params['bbox_data']\n hand_crop = params['hand_crop']\n label = params['label']\n\n out_params = {}\n out_params['flip'] = False\n\n for transform in self.transforms:\n if isinstance(transform, pt.CropClip):\n transform._update_bbox(hand_crop[0], hand_crop[2], hand_crop[1], hand_crop[3], True)\n input_data, bbox_data = transform(input_data, bbox_data)\n\n\n return input_data, bbox_data, out_params \n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.AdaptiveMaxPool2d", "torch.nn.ConvTranspose2d", "torch.load", "torch.nn.init.constant_", "numpy.arange", "torch.nn.Conv2d", "numpy.tile", "torch.from_numpy", "numpy.concatenate", "torch.nn.MaxPool2d", "torch.flatten", "torch.nn.init.normal_", "torch.nn.BatchNorm2d", "torch.stack", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishabhbhardwaj15/identify-referred-object-in-image
[ "7641464008005cdbf5191f6265164e204b5cdf5c" ]
[ "code_to_train/anchors.py" ]
[ "\"\"\"\nCreates anchor based on the backbone\nBased on code from https://github.com/fastai/fastai_docs/blob/master/dev_nb/102a_coco.ipynb\nAuthor: Arka Sadhu\n\"\"\"\nimport torch\nimport numpy as np\nfrom torch import nn\n\n\ndef cthw2tlbr(boxes):\n \"Convert center/size format `boxes` to top/left bottom/right corners.\"\n top_left = boxes[..., :2] - boxes[..., 2:]/2\n bot_right = boxes[..., :2] + boxes[..., 2:]/2\n return torch.cat([top_left, bot_right], dim=-1)\n\n\ndef tlbr2cthw(boxes):\n \"Convert top/left bottom/right format `boxes` to center/size corners.\"\n center = (boxes[..., :2] + boxes[..., 2:])/2\n sizes = boxes[..., 2:] - boxes[..., :2]\n return torch.cat([center, sizes], dim=-1)\n\n\ndef tlbr2tlhw(boxes):\n \"Convert tl br format `boxes` to tl hw format\"\n top_left = boxes[:, :2]\n height_width = boxes[:, 2:] - boxes[:, :2]\n return torch.cat([top_left, height_width], 1)\n\n\ndef tlhw2tlbr(boxes):\n \"Convert tl br format `boxes` to tl hw format\"\n top_left = boxes[..., :2]\n bottom_right = boxes[..., 2:] + boxes[..., :2]\n return torch.cat([top_left, bottom_right], -1)\n\n\ndef x1y1x2y2_to_y1x1y2x2(boxes):\n \"Convert xy boxes to yx boxes and vice versa\"\n box_tmp = boxes.clone()\n box_tmp[..., 0], box_tmp[..., 1] = boxes[..., 1], boxes[..., 0]\n box_tmp[..., 2], box_tmp[..., 3] = boxes[..., 3], boxes[..., 2]\n return box_tmp\n\n\ndef create_grid(size, flatten=True):\n \"Create a grid of a given `size`.\"\n if isinstance(size, tuple):\n H, W = size\n else:\n H, W = size, size\n\n grid = torch.FloatTensor(H, W, 2)\n linear_points = torch.linspace(-1+1/W, 1-1/W,\n W) if W > 1 else torch.tensor([0.])\n grid[:, :, 1] = torch.ger(torch.ones(\n H), linear_points).expand_as(grid[:, :, 0])\n linear_points = torch.linspace(-1+1/H, 1-1/H,\n H) if H > 1 else torch.tensor([0.])\n grid[:, :, 0] = torch.ger(\n linear_points, torch.ones(W)).expand_as(grid[:, :, 1])\n return grid.view(-1, 2) if flatten else grid\n\n\ndef create_anchors(sizes, ratios, scales, flatten=True, device=torch.device('cuda')):\n \"Create anchor of `sizes`, `ratios` and `scales`.\"\n # device = torch.device('cuda')\n aspects = [[[s*np.sqrt(r), s*np.sqrt(1/r)]\n for s in scales] for r in ratios]\n aspects = torch.tensor(aspects).to(device).view(-1, 2)\n anchors = []\n for h, w in sizes:\n if type(h) == torch.Tensor:\n h = int(h.item())\n w = int(w.item())\n\n sized_aspects = (\n aspects * torch.tensor([2/h, 2/w]).to(device)).unsqueeze(0)\n base_grid = create_grid((h, w)).to(device).unsqueeze(1)\n n, a = base_grid.size(0), aspects.size(0)\n ancs = torch.cat([base_grid.expand(n, a, 2),\n sized_aspects.expand(n, a, 2)], 2)\n anchors.append(ancs.view(h, w, a, 4))\n anchs = torch.cat([anc.view(-1, 4)\n for anc in anchors], 0) if flatten else anchors\n return cthw2tlbr(anchs) if flatten else anchors\n\n\ndef intersection(anchors, targets):\n \"\"\"\n Compute the sizes of the intersections of `anchors` by `targets`.\n Assume both anchors and targets are in tl br format\n \"\"\"\n ancs, tgts = anchors, targets\n a, t = ancs.size(0), tgts.size(0)\n ancs, tgts = ancs.unsqueeze(1).expand(\n a, t, 4), tgts.unsqueeze(0).expand(a, t, 4)\n top_left_i = torch.max(ancs[..., :2], tgts[..., :2])\n bot_right_i = torch.min(ancs[..., 2:], tgts[..., 2:])\n\n sizes = torch.clamp(bot_right_i - top_left_i, min=0)\n return sizes[..., 0] * sizes[..., 1]\n\n\ndef IoU_values(anchors, targets):\n \"\"\"\n Compute the IoU values of `anchors` by `targets`.\n Expects both in tlbr format\n \"\"\"\n inter = intersection(anchors, targets)\n ancs, 
tgts = tlbr2cthw(anchors), tlbr2cthw(targets)\n anc_sz, tgt_sz = ancs[:, 2] * \\\n ancs[:, 3], tgts[:, 2] * tgts[:, 3]\n union = anc_sz.unsqueeze(1) + tgt_sz.unsqueeze(0) - inter\n return inter/(union+1e-8)\n\n\ndef simple_iou(box1, box2):\n \"\"\"\n Simple iou between box1 and box2\n \"\"\"\n def simple_inter(ancs, tgts):\n top_left_i = torch.max(ancs[..., :2], tgts[..., :2])\n bot_right_i = torch.min(ancs[..., 2:], tgts[..., 2:])\n sizes = torch.clamp(bot_right_i - top_left_i, min=0)\n return sizes[..., 0] * sizes[..., 1]\n\n inter = intersection(box1, box2)\n ancs, tgts = tlbr2tlhw(box1), tlbr2tlhw(box2)\n anc_sz, tgt_sz = ancs[:, 2] * \\\n ancs[:, 3], tgts[:, 2] * tgts[:, 3]\n union = anc_sz + tgt_sz - inter\n return inter / (union + 1e-8)\n\n\ndef match_anchors(anchors, targets, match_thr=0.5, bkg_thr=0.4):\n \"\"\"\n Match `anchors` to targets. -1 is match to background, -2 is ignore.\n \"\"\"\n ious = IoU_values(anchors, targets)\n matches = anchors.new(anchors.size(0)).zero_().long() - 2\n vals, idxs = torch.max(ious, 1)\n matches[vals < bkg_thr] = -1\n matches[vals > match_thr] = idxs[vals > match_thr]\n # Overwrite matches with each target getting the anchor that has the max IoU.\n vals, idxs = torch.max(ious, 0)\n # If idxs contains repetition, this doesn't bug and only the last is considered.\n matches[idxs] = targets.new_tensor(list(range(targets.size(0)))).long()\n return matches\n\n\ndef simple_match_anchors(anchors, targets, match_thr=0.4, bkg_thr=0.1):\n \"\"\"\n Match `anchors` to targets. -1 is match to background, -2 is ignore.\n Note here:\n anchors are fixed\n targets are from a batch\n \"\"\"\n # ious = IoU_values(anchors, targets)\n ious = IoU_values(targets, anchors)\n matches = ious.new(ious.shape).zero_().long() - 2\n matches[ious < bkg_thr] = -1\n matches[ious > match_thr] = 1\n return matches\n\n\ndef bbox_to_reg_params(anchors, boxes):\n \"\"\"\n Converts boxes to corresponding reg params\n Assume both in rchw format\n \"\"\"\n boxes = tlbr2cthw(boxes)\n anchors = tlbr2cthw(anchors)\n anchors = anchors.expand(boxes.size(0), anchors.size(0), 4)\n boxes = boxes.unsqueeze(1)\n trc = (boxes[..., :2] - anchors[..., :2]) / (anchors[..., 2:] + 1e-8)\n thw = torch.log(boxes[..., 2:] / (anchors[..., 2:] + 1e-8))\n return torch.cat((trc, thw), 2)\n\n\ndef reg_params_to_bbox(anchors, boxes, std12=[1, 1]):\n \"\"\"\n Converts reg_params to corresponding boxes\n Assume anchors in r1c1r2c2 format\n Boxes in standard form r*, c*, h*, w*\n \"\"\"\n anc1 = anchors.clone()\n anc1 = tlbr2cthw(anc1)\n b1 = boxes[..., :2] * std12[0]\n a111 = anc1[..., 2:] * b1 + anc1[..., :2]\n\n b2 = boxes[..., 2:] * std12[1]\n a222 = torch.exp(b2) * anc1[..., 2:]\n af = torch.cat([a111, a222], dim=2)\n aft = cthw2tlbr(af)\n return aft\n" ]
[ [ "torch.linspace", "torch.ones", "torch.max", "numpy.sqrt", "torch.cat", "torch.min", "torch.tensor", "torch.exp", "torch.FloatTensor", "torch.log", "torch.device", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
redjerdai/iseeyourcarm8
[ "acdad0ba2a963e47f0305b801a0418cf0b3e1548" ]
[ "compare.py" ]
[ "#\nimport sys\nimport pandas\nfrom pyxdameraulevenshtein import normalized_damerau_levenshtein_distance\n\n\n#\nfrom compare_funcs_im import deepai_im_cmp, tf_mobilenet_im_cmp\n\n\n#\ncmp_im, cmp_code = sys.argv[1], sys.argv[2]\nim0, im1 = sys.argv[3], sys.argv[4]\ncode0, code1 = sys.argv[5], sys.argv[6]\n\nif cmp_im == 'deepai':\n cmp_im_value = deepai_im_cmp(im0, im1)\nelif cmp_im == 'tf_mobilenet':\n cmp_im_value = tf_mobilenet_im_cmp(im0, im1)\nelse:\n cmp_im_value = 'UNKNOWN'\n\nif cmp_code == 'damerau_levenshtein':\n cmp_code_value = normalized_damerau_levenshtein_distance(code0, code1)\nelse:\n cmp_code_value = 'UNKNOWN'\n\nresult_frame = pandas.DataFrame(data={'cmp_im': [cmp_im_value], 'cmp_code': [cmp_code_value]})\nresult_frame.to_csv('./result.csv', index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
huangzh13/ReID.template.pytorch
[ "34ce8318bbd35db5a21e0914b429a71a4ea26998" ]
[ "models/losses/triplet.py" ]
[ "\"\"\"\n-------------------------------------------------\n File Name: triplet.py\n Author: Zhonghao Huang\n Date: 2019/9/10\n Description:\n-------------------------------------------------\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\n\ndef topk_mask(input, dim, K=10, **kwargs):\n index = input.topk(max(1, min(K, input.size(dim))), dim=dim, **kwargs)[1]\n return torch.autograd.Variable(torch.zeros_like(input.data)).scatter(dim, index, 1.0)\n\n\ndef pdist(A, squared=False, eps=1e-4):\n prod = torch.mm(A, A.t())\n norm = prod.diag().unsqueeze(1).expand_as(prod)\n res = (norm + norm.t() - 2 * prod).clamp(min=0)\n return res if squared else res.clamp(min=eps).sqrt()\n\n\ndef normalize(x, axis=-1):\n \"\"\"Normalizing to unit length along the specified dimension.\n Args:\n x: pytorch Variable\n Returns:\n x: pytorch Variable, same shape as input\n \"\"\"\n x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)\n return x\n\n\ndef euclidean_dist(x, y):\n \"\"\"\n Args:\n x: pytorch Variable, with shape [m, d]\n y: pytorch Variable, with shape [n, d]\n Returns:\n dist: pytorch Variable, with shape [m, n]\n \"\"\"\n m, n = x.size(0), y.size(0)\n xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, x, y.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n return dist\n\n\ndef hard_example_mining(dist_mat, labels, margin, return_inds=False):\n \"\"\"For each anchor, find the hardest positive and negative sample.\n Args:\n dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]\n labels: pytorch LongTensor, with shape [N]\n return_inds: whether to return the indices. Save time if `False`(?)\n Returns:\n dist_ap: pytorch Variable, distance(anchor, positive); shape [N]\n dist_an: pytorch Variable, distance(anchor, negative); shape [N]\n p_inds: pytorch LongTensor, with shape [N];\n indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1\n n_inds: pytorch LongTensor, with shape [N];\n indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1\n NOTE: Only consider the case in which all labels have same num of samples,\n thus we can cope with all anchors in parallel.\n \"\"\"\n\n torch.set_printoptions(threshold=5000)\n assert len(dist_mat.size()) == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n # shape [N, N]\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N, 1]\n dist_ap, relative_p_inds = torch.max(\n dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N, 1]\n dist_an, relative_n_inds = torch.min(\n dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n # shape [N]\n dist_ap = dist_ap.squeeze(1)\n dist_an = dist_an.squeeze(1)\n\n if return_inds:\n # shape [N, N]\n ind = (labels.new().resize_as_(labels)\n .copy_(torch.arange(0, N).long())\n .unsqueeze(0).expand(N, N))\n # shape [N, 1]\n p_inds = torch.gather(\n ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)\n n_inds = torch.gather(\n ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)\n # shape [N]\n p_inds = p_inds.squeeze(1)\n n_inds = n_inds.squeeze(1)\n return dist_ap, dist_an, p_inds, n_inds\n\n return dist_ap, dist_an\n\n\nclass 
TripletLoss(object):\n \"\"\"Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).\n Related Triplet Loss theory can be found in paper 'In Defense of the Triplet\n Loss for Person Re-Identification'.\"\"\"\n\n def __init__(self, margin=None):\n self.margin = margin\n if margin is not None:\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n else:\n self.ranking_loss = nn.SoftMarginLoss()\n\n def __call__(self, global_feat, labels, normalize_feature=False):\n if normalize_feature:\n global_feat = normalize(global_feat, axis=-1)\n dist_mat = euclidean_dist(global_feat, global_feat)\n dist_ap, dist_an = hard_example_mining(dist_mat, labels, self.margin)\n y = dist_an.new().resize_as_(dist_an).fill_(1)\n if self.margin is not None:\n loss = self.ranking_loss(dist_an, dist_ap, y)\n else:\n loss = self.ranking_loss(dist_an - dist_ap, y)\n return loss, dist_ap, dist_an\n" ]
[ [ "torch.nn.MarginRankingLoss", "torch.norm", "torch.set_printoptions", "torch.zeros_like", "torch.nn.SoftMarginLoss", "torch.arange", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shen-Lab/GraphCL
[ "d857849d51bb168568267e07007c0b0c8bb6d869" ]
[ "semisupervised_MNIST_CIFAR10/pre-training/train/aug.py" ]
[ "import torch\nimport torch.nn as nn\nimport dgl\nfrom random import randint\nimport random\nimport copy\nimport pdb\nimport numpy as np\nfrom collections import Counter\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndouble\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_double(graph_list, type):\n type1 = type[0]\n type2 = type[1]\n if type1 == 'n':\n aug_list1 = aug_drop_node_list(graph_list, 0.2)\n elif type1 == 'l':\n aug_list1 = aug_drop_add_link_list(graph_list, 0.2)\n elif type1 == 'm':\n aug_list1 = aug_mask_list(graph_list, 0.2)\n elif type1 == 's':\n aug_list1 = aug_subgraph_list(graph_list, 0.4)\n elif type1 == 'o':\n aug_list1 = graph_list\n \n if type2 == 'n':\n aug_list2 = aug_drop_node_list(graph_list, 0.2)\n elif type2 == 'l':\n aug_list2 = aug_drop_add_link_list(graph_list, 0.2)\n elif type2 == 'm':\n aug_list2 = aug_mask_list(graph_list, 0.2)\n elif type2 == 's':\n aug_list2 = aug_subgraph_list(graph_list, 0.4)\n elif type2 == 'o':\n aug_list2 = graph_list\n return aug_list1, aug_list2\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nrandom3\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_random3(graph_list):\n graph_num = len(graph_list)\n aug_list =[]\n random_list = torch.randint(1, 4, (graph_num,)).tolist()\n aug_count = Counter(random_list)\n for i in range(graph_num):\n if random_list[i] == 1:\n aug_graph = aug_drop_node(graph_list[i], drop_percent=0.2)\n elif random_list[i] == 2:\n aug_graph = aug_drop_add_link(graph_list[i], drop_percent=0.2)\n elif random_list[i] == 3:\n aug_graph = aug_subgraph(graph_list[i], drop_percent=0.4)\n aug_list.append(aug_graph)\n return aug_list, aug_count\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nrandom\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_random(graph_list):\n graph_num = len(graph_list)\n aug_list =[]\n random_list = torch.randint(1, 5, (graph_num,)).tolist()\n aug_count = Counter(random_list)\n for i in range(graph_num):\n if random_list[i] == 1:\n aug_graph = aug_drop_node(graph_list[i], drop_percent=0.2)\n elif random_list[i] == 2:\n aug_graph = aug_drop_add_link(graph_list[i], drop_percent=0.2)\n elif random_list[i] == 3:\n aug_graph = aug_mask(graph_list[i], drop_percent=0.2)\n elif random_list[i] == 4:\n aug_graph = aug_subgraph(graph_list[i], drop_percent=0.4)\n aug_list.append(aug_graph)\n return aug_list, aug_count\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndrop nodes\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef aug_drop_node_list(graph_list, drop_percent):\n \n graph_num = len(graph_list) # number of graphs\n aug_list = []\n for i in range(graph_num):\n aug_graph = aug_drop_node(graph_list[i], drop_percent)\n aug_list.append(aug_graph)\n return aug_list\n\n\ndef aug_drop_node(graph, drop_percent=0.2):\n\n num = graph.number_of_nodes() # number of nodes of one graph\n drop_num = int(num * drop_percent) # number of drop nodes\n aug_graph = copy.deepcopy(graph)\n all_node_list = [i for i 
in range(num)]\n drop_node_list = random.sample(all_node_list, drop_num)\n aug_graph.remove_nodes(drop_node_list)\n return aug_graph\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndrop add links\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef aug_drop_add_link_list(graph_list, drop_percent):\n \n graph_num = len(graph_list) # number of graphs\n aug_list = []\n for i in range(graph_num):\n aug_graph = aug_drop_add_link(graph_list[i], drop_percent)\n aug_list.append(aug_graph)\n return aug_list\n\n\ndef aug_drop_add_link(graph, drop_percent=0.2):\n\n pro = drop_percent / 2\n aug_graph = copy.deepcopy(graph)\n edge_num = aug_graph.number_of_edges()\n\n drop_num = int(edge_num * pro / 2) \n add_num = int(edge_num * pro / 2) \n del_edges_id_list = [] \n all_edges_id_list = [i for i in range(edge_num)]\n \n for i in range(drop_num):\n\n random_idx = randint(0, edge_num - 1) \n u_v = aug_graph.find_edges(all_edges_id_list[random_idx]) \n del_edge_id1 = aug_graph.edge_ids(u_v[0], u_v[1])\n del_edge_id2 = aug_graph.edge_ids(u_v[1], u_v[0])\n if del_edge_id1.size(0):\n del_edges_id_list.append(del_edge_id1)\n all_edges_id_list.remove(del_edge_id1.item())\n if del_edge_id2.size(0):\n del_edges_id_list.append(del_edge_id2)\n all_edges_id_list.remove(del_edge_id2.item())\n edge_num -= 2\n aug_graph.remove_edges(del_edges_id_list)\n '''\n above finish drop edges\n '''\n node_num = aug_graph.number_of_nodes() \n l = [[i, j] for i in range(node_num) for j in range(i)]\n d = torch.tensor(random.sample(l, add_num))\n add_edges_src_list = d.t()[0]\n add_edges_dst_list = d.t()[1]\n aug_graph.add_edges(add_edges_src_list, add_edges_dst_list)\n aug_graph.add_edges(add_edges_dst_list, add_edges_src_list)\n\n return aug_graph\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nmask\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef aug_mask_list(graph_list, drop_percent):\n \n graph_num = len(graph_list) # number of graphs\n aug_list = []\n for i in range(graph_num):\n aug_graph = aug_mask(graph_list[i], drop_percent)\n aug_list.append(aug_graph)\n return aug_list\n\n\ndef aug_mask(graph, drop_percent=0.2):\n \n num = graph.number_of_nodes() \n mask_num = int(num * drop_percent) \n node_idx = [i for i in range(num)]\n mask_list = random.sample(node_idx, mask_num)\n aug_graph = copy.deepcopy(graph)\n zeros = torch.zeros_like(aug_graph.ndata['feat'][0])\n for j in mask_list:\n aug_graph.ndata['feat'][j] = zeros\n return aug_graph\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nsubgraph\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_subgraph_list(graph_list, drop_percent):\n \n graph_num = len(graph_list)\n aug_list = []\n for i in range(graph_num):\n s_graph = aug_subgraph(graph_list[i], drop_percent)\n aug_list.append(s_graph)\n return aug_list\n\n\ndef aug_subgraph(graph, drop_percent):\n\n graph = copy.deepcopy(graph)\n num = graph.number_of_nodes()\n all_node_list = [i for i in range(num)]\n s_num = int(num * (1 -drop_percent))\n center_node_id = random.randint(0, num - 1)\n sub_node_id_list = [center_node_id]\n all_neighbor_list = []\n for i in range(s_num - 1):\n 
\n all_neighbor_list += graph.successors(sub_node_id_list[i]).numpy().tolist()\n all_neighbor_list = list(set(all_neighbor_list))\n new_neighbor_list = [n for n in all_neighbor_list if not n in sub_node_id_list]\n if len(new_neighbor_list) != 0:\n new_node = random.sample(new_neighbor_list, 1)[0]\n sub_node_id_list.append(new_node)\n else:\n break\n del_node_list = [i for i in all_node_list if not i in sub_node_id_list]\n graph.remove_nodes(del_node_list)\n return graph\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nnew\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_new_list(graph_list, threshold):\n \n graph_num = len(graph_list)\n aug_list = []\n for i in range(graph_num):\n s_graph = aug_new(graph_list[i], threshold)\n aug_list.append(s_graph)\n return aug_list\n\ndef aug_new(graph, threshold):\n\n node_feature_matrix = graph.ndata['h']\n adjacent = torch.mm(node_feature_matrix, node_feature_matrix.t())\n adjacent_s = torch.sigmoid(adjacent)\n add_edge_list = (adjacent_s > threshold).nonzero()\n src = add_edge_list.t()[0]\n dst = add_edge_list.t()[1]\n aug_graph = copy.deepcopy(graph)\n aug_graph.remove_edges([j for j in range(aug_graph.number_of_edges())])\n aug_graph.add_edges(src, dst)\n aug_graph.add_edges(dst, src)\n return aug_graph\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ntsp dataset drop add links\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_tsp_drop_add_link(graph_list, drop_percent=0.2):\n\n pro = drop_percent / 2 # 0.1 160 * 0.1 = 16 \n graph_num = len(graph_list) \n aug_list = []\n \n for i in range(graph_num):\n \n aug_graph = copy.deepcopy(graph_list[i])\n edge_num = aug_graph.number_of_edges()\n drop_num = int(edge_num * pro) \n add_num = int(edge_num * pro) \n all_edges_id_list = [i for i in range(edge_num)]\n del_edges_id_list = random.sample(all_edges_id_list, drop_num)\n aug_graph.remove_edges(del_edges_id_list)\n '''\n above finish drop edges\n '''\n node_num = aug_graph.number_of_nodes() \n l = [(i, j) for i in range(node_num) for j in range(node_num)]\n d = random.sample(l, add_num)\n add_edges_src_list = []\n add_edges_dst_list = []\n for i in range(add_num):\n add_edges_src_list.append(d[i][0])\n add_edges_dst_list.append(d[i][1])\n aug_graph.add_edges(add_edges_src_list, add_edges_dst_list)\n aug_list.append(aug_graph)\n return aug_list\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndd dataset subgraph\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_dd_subgraph_list(graph_list, drop_percent):\n \n graph_num = len(graph_list)\n aug_list = []\n for i in range(graph_num):\n s_graph = aug_dd_subgraph(graph_list[i], drop_percent)\n aug_list.append(s_graph)\n return aug_list\n\n\ndef aug_dd_subgraph(ori_graph, drop_percent):\n graph = copy.deepcopy(ori_graph)\n num = graph.number_of_nodes()\n if num > 2000:\n return graph\n else:\n all_node_list = [i for i in range(num)]\n s_num = int(num * (1 -drop_percent))\n center_node_id = random.randint(0, num - 1)\n\n sub_node_id_list = [center_node_id]\n all_neighbor_list = []\n for i in range(s_num - 1):\n \n all_neighbor_list += 
graph.successors(sub_node_id_list[i]).numpy().tolist()\n all_neighbor_list = list(set(all_neighbor_list))\n new_neighbor_list = [n for n in all_neighbor_list if not n in sub_node_id_list]\n\n if len(new_neighbor_list) != 0:\n new_node = random.sample(new_neighbor_list, 1)[0]\n sub_node_id_list.append(new_node)\n else:\n break\n \n del_node_list = [i for i in all_node_list if not i in sub_node_id_list]\n graph.remove_nodes(del_node_list)\n return graph\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nadd links\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_add_edges(graph_list, drop_percent=0.2):\n\n graph_num = len(graph_list) \n aug_list = []\n for i in range(graph_num):\n\n aug_graph = copy.deepcopy(graph_list[i])\n edge_num = aug_graph.number_of_edges()\n add_num = int(edge_num * drop_percent / 2)\n\n node_num = aug_graph.number_of_nodes()\n l = []\n for i in range(node_num):\n for j in range(i):\n l.append((i, j))\n d = random.sample(l, add_num)\n\n add_edges_src_list = []\n add_edges_dst_list = []\n\n for i in range(add_num):\n if not aug_graph.has_edge_between(d[i][0], d[i][1]):\n add_edges_src_list.append(d[i][0])\n add_edges_src_list.append(d[i][1])\n add_edges_dst_list.append(d[i][1])\n add_edges_dst_list.append(d[i][0])\n aug_graph.add_edges(add_edges_src_list, add_edges_dst_list)\n aug_list.append(aug_graph)\n return aug_list\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndrop links\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef aug_drop_edges(graph_list, drop_percent=0.2):\n\n graph_num = len(graph_list) \n aug_list = []\n for i in range(graph_num):\n\n aug_graph = copy.deepcopy(graph_list[i])\n edge_num = aug_graph.number_of_edges()\n drop_num = int(edge_num * drop_percent / 2) \n \n del_edges_id_list = [] \n all_edges_id_list = [i for i in range(edge_num)]\n for i in range(drop_num):\n\n random_idx = randint(0, edge_num - 1) \n u_v = aug_graph.find_edges(all_edges_id_list[random_idx])\n del_edge_id1 = aug_graph.edge_ids(u_v[0], u_v[1])\n del_edge_id2 = aug_graph.edge_ids(u_v[1], u_v[0])\n del_edges_id_list.append(del_edge_id1)\n del_edges_id_list.append(del_edge_id2)\n all_edges_id_list.remove(del_edge_id1.item())\n all_edges_id_list.remove(del_edge_id2.item())\n edge_num -= 2\n\n aug_graph.remove_edges(del_edges_id_list)\n aug_list.append(aug_graph)\n return aug_list\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nadd noise\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef add_guassian_noise(graph_list, drop_percent=1):\n\n graph_num = len(graph_list) \n aug_list = []\n for i in range(graph_num):\n aug_graph = copy.deepcopy(graph_list[i])\n noise = torch.randn(aug_graph.ndata['feat'].shape) * drop_percent\n aug_graph.ndata['feat'] += noise\n aug_list.append(aug_graph)\n return aug_list\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nSBM dataset mask\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef add_SBM_mask(graph_list, drop_percent=0.2):\n \n graph_num = 
len(graph_list) \n aug_list = []\n for i in range(graph_num):\n num = graph_list[i].number_of_nodes() \n mask_num = int(num * drop_percent) \n node_idx = [i for i in range(num)]\n mask_list = random.sample(node_idx, mask_num)\n aug_graph = copy.deepcopy(graph_list[i])\n for j in mask_list:\n aug_graph.ndata['feat'][j] = 3\n aug_list.append(aug_graph)\n return aug_list\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nbatched\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef collate_batched_graph(graphs):\n # The input samples is a list of pairs (graph, label).\n tab_sizes_n = [ graphs[i].number_of_nodes() for i in range(len(graphs))]\n tab_snorm_n = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_n ]\n snorm_n = torch.cat(tab_snorm_n).sqrt() \n tab_sizes_e = [ graphs[i].number_of_edges() for i in range(len(graphs))]\n \n while 0 in tab_sizes_e:\n tab_sizes_e[tab_sizes_e.index(0)] = 1\n \n tab_snorm_e = [ torch.FloatTensor(size,1).fill_(1./float(size)) for size in tab_sizes_e ]\n snorm_e = torch.cat(tab_snorm_e).sqrt()\n batched_graph = dgl.batch(graphs)\n return batched_graph, snorm_n, snorm_e\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nsim matrix\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef sim_matrix2(ori_vector, arg_vector, temp=1.0):\n \n for i in range(len(ori_vector)):\n sim = torch.cosine_similarity(ori_vector[i].unsqueeze(0), arg_vector, dim=1) * (1/temp)\n if i == 0:\n sim_tensor = sim.unsqueeze(0)\n else:\n sim_tensor = torch.cat((sim_tensor, sim.unsqueeze(0)), 0)\n return sim_tensor\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ncompute\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ndef compute_diag_sum(tensor):\n num = len(tensor)\n diag_sum = 0\n for i in range(num):\n diag_sum += tensor[i][i]\n return diag_sum\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nvisual a graph\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef vis(g, name):\n import networkx as nx\n import matplotlib.pyplot as plt\n\n nx.draw(g.to_networkx(), with_labels=True)\n plt.savefig('./'+ str(name) +'.png')\n plt.show()\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\ncompute acc\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef aug_acc(inputs, device, topk=(1, 5)):\n \"\"\"Computes acc \"\"\"\n \n maxk = max(topk)\n batch_size = inputs.size(0)\n _, pred = inputs.topk(maxk, 1, True, True)\n pred = pred.t()\n target = torch.arange(batch_size)\n target = target.view(1, -1).expand_as(pred).to(device)\n correct = pred.eq(target)\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return 
res\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nAverageMeter\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\nvis_a_batch_of_graph_data\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\ndef vis_a_batch_of_graph_data(graph_list):\n num = len(graph_list)\n node_num_list = []\n edge_num_list = []\n for i in range(num):\n node_num_list.append(graph_list[i].number_of_nodes())\n edge_num_list.append(graph_list[i].number_of_edges())\n return node_num_list, edge_num_list\n" ]
[ [ "torch.sigmoid", "torch.randint", "torch.cat", "torch.randn", "torch.zeros_like", "torch.FloatTensor", "torch.arange", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjeamin/sleep
[ "c38ee3ef51405ae7ebd49b833c4cec9c6132f320" ]
[ "encoding_test.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\nfrom tqdm import tqdm\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.transforms as transforms\r\nimport argparse\r\nfrom tqdm import tqdm\r\nfrom med import resnet18\r\nfrom med.utils import make_weights_for_balanced_classes, Encoding_Dataset\r\nfrom torch.utils.data import Dataset\r\nfrom pathlib import Path\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nLABELS = [\"N1\", \"N2\", \"N3\", \"REM\", \"Wake\"]\r\n\r\n\r\ndef test(model, test_loader, criterion, device=\"cuda\"):\r\n model.eval()\r\n\r\n total = len(test_loader)\r\n test_correct = 0\r\n test_loss = 0\r\n\r\n total_labels = []\r\n total_predicted = []\r\n\r\n for i, (data, labels) in enumerate(tqdm(test_loader, total=total)):\r\n data = data.float().to(device)\r\n labels = labels.to(device)\r\n\r\n pred = model(data)\r\n _, predicted = torch.max(pred, 1)\r\n\r\n test_correct += (predicted == labels).sum().item()\r\n\r\n total_labels.append(labels.detach().cpu().numpy()[0])\r\n total_predicted.append(predicted.detach().cpu().numpy()[0])\r\n\r\n loss = criterion(pred, labels)\r\n test_loss += loss.item()\r\n\r\n metrics = confusion_matrix(total_labels, total_predicted, labels=[0, 1, 2, 3, 4])\r\n plot_confusion_matrix(metrics, classes=LABELS, normalize=False, title='Confusion matrix')\r\n plot_confusion_matrix(metrics, classes=LABELS, normalize=True, title='Confusion matrix')\r\n\r\n return test_correct, test_loss\r\n\r\n\r\ndef plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.show()\r\n\r\n\r\ndef main(args):\r\n DATA_PATH = Path(args.data_path)\r\n\r\n test_data_path = DATA_PATH / \"Fpz-Cz_test_encoding\"\r\n\r\n test_transforms = transforms.Compose([\r\n transforms.ToTensor(),\r\n ])\r\n\r\n test_dataset = Encoding_Dataset(root_path=test_data_path,\r\n transform=test_transforms)\r\n\r\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)\r\n\r\n test_total = len(test_loader)\r\n\r\n model = resnet18(num_classes=5).to(args.device)\r\n model.load_state_dict(torch.load('./checkpoint/resnet18_encoding.pth'))\r\n\r\n criterion = nn.CrossEntropyLoss().to(args.device)\r\n\r\n test_correct, test_loss = test(model, test_loader, criterion, device=args.device)\r\n test_acc = test_correct / (test_total * args.batch_size)\r\n test_loss = test_loss / (test_total * args.batch_size)\r\n print(f\"[TEST ACC : {test_acc}] | [TEST LOSS : {test_loss}]\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = 
argparse.ArgumentParser()\r\n parser.add_argument(\"--data_path\", default=\"store/public_dataset\")\r\n parser.add_argument(\"--device\", default=\"cuda\")\r\n parser.add_argument(\"--batch_size\", default=1)\r\n args = parser.parse_args()\r\n\r\n main(args)" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "torch.max", "matplotlib.pyplot.title", "torch.load", "torch.nn.CrossEntropyLoss", "torch.utils.data.DataLoader", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wkcn/mobula
[ "4eec938d6477776f5f2d68bcf41de83fb8da5195" ]
[ "tests/test_layers/test_softmax.py" ]
[ "import mobula.layers as L\nfrom mobula.testing import gradcheck\nfrom mobula.layers.utils.Defines import *\nimport numpy as np\n\ndef test_softmax():\n N, C, H, W = 2,3,4,5\n a = np.random.random((N, C, H, W)) - 0.5\n for axis in range(4):\n l = L.Softmax(a, axis = axis)\n label = np.random.randint(0, a.shape[axis], size = a.size // a.shape[axis])\n loss_l = L.SoftmaxWithLoss(a, axis = axis, label = label)\n\n l.reshape()\n loss_l.reshape()\n\n y = l.eval()\n loss = loss_l.eval()\n\n exp = np.exp(a)\n su = np.sum(exp, axis = axis)\n axes = [slice(None)] * 4\n axes[axis] = np.newaxis\n pu = [1] * 4\n pu[axis] = a.shape[axis]\n s = np.tile(su[tuple(axes)], pu)\n\n # softmax forward\n assert np.allclose(y, exp / s)\n assert np.allclose(np.sum(y, axis), np.ones(su.shape))\n # softmax-with-loss forward\n assert np.allclose(loss_l.softmax, l.Y)\n assert np.allclose(loss_l.Y, -np.mean(np.log(get_val_from_arg(y, label, axis))))\n # softmax backward\n l.dY = np.random.random(l.Y.shape)\n l.backward()\n # softmax-with-loss backward\n loss_l.dY = np.random.random(loss_l.Y.shape)\n loss_l.backward()\n z = np.zeros(y.shape)\n z.ravel()[get_idx_from_arg(z, label, axis)] = 1\n tl = y - z \n assert np.allclose(tl * loss_l.dY, loss_l.dX)\n\ndef test_softmax_grad():\n N, C, H, W = 2, 3, 4, 5\n a = np.random.random((N, C, H, W)) - 0.5\n for axis in range(4):\n l = L.Softmax(a, axis = axis)\n gradcheck(l, a)\n" ]
[ [ "numpy.random.random", "numpy.allclose", "numpy.ones", "numpy.exp", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
larrybradley/statmorph
[ "3e6d01a031fbe4518999f966ff08b3670f7a19f9" ]
[ "statmorph/utils/image_diagnostics.py" ]
[ "\"\"\"\nThis file defines the `make_figure` function, which can be useful for\ndebugging and/or examining the morphology of a source in detail.\n\"\"\"\n# Author: Vicente Rodriguez-Gomez <[email protected]>\n# Licensed under a 3-Clause BSD License.\n\nimport numpy as np\nimport warnings\nimport time\nimport sys\nif 'matplotlib' not in sys.modules:\n import matplotlib\n if sys.version_info[0] == 2: # Python 2\n matplotlib.use('agg')\n elif sys.version_info[0] == 3: # Python 3\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport matplotlib.cm\nimport scipy.signal\nimport scipy.ndimage as ndi\nimport skimage.transform\nfrom astropy.io import fits\nfrom astropy.visualization import LogStretch\n\n__all__ = ['make_figure']\n\ndef normalize(image, m=None, M=None):\n if m is None:\n m = np.min(image)\n if M is None:\n M = np.max(image)\n\n retval = (image-m) / (M-m)\n retval[image <= m] = 0.0\n retval[image >= M] = 1.0\n\n return retval\n\ndef get_ax(fig, row, col, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig):\n x_ax = (col+1)*eps + col*wpanel\n y_ax = eps + (nrows-1-row)*(hpanel+htop)\n return fig.add_axes([x_ax/wfig, y_ax/hfig, wpanel/wfig, hpanel/hfig])\n\ndef make_figure(morph):\n \"\"\"\n Creates a figure analogous to Fig. 4 from Rodriguez-Gomez et al. (2019)\n for a given ``SourceMorphology`` object.\n \n Parameters\n ----------\n morph : ``statmorph.SourceMorphology``\n An object containing the morphological measurements of a single\n source.\n\n Returns\n -------\n fig : ``matplotlib.figure.Figure``\n The figure.\n\n \"\"\"\n # I'm tired of dealing with plt.add_subplot, plt.subplots, plg.GridSpec,\n # plt.subplot2grid, etc. and never getting the vertical and horizontal\n # inter-panel spacings to have the same size, so instead let's do\n # everything manually:\n nrows = 2\n ncols = 4\n wpanel = 4.0 # panel width\n hpanel = 4.0 # panel height\n htop = 0.05*nrows*hpanel # top margin and vertical space between panels\n eps = 0.005*nrows*hpanel # all other margins\n wfig = ncols*wpanel + (ncols+1)*eps # total figure width\n hfig = nrows*(hpanel+htop) + eps # total figure height\n fig = plt.figure(figsize=(wfig, hfig))\n\n # For drawing circles/ellipses\n theta_vec = np.linspace(0.0, 2.0*np.pi, 200)\n\n # Add black to pastel colormap\n cmap_orig = matplotlib.cm.Pastel1\n colors = ((0.0, 0.0, 0.0), *cmap_orig.colors)\n cmap = matplotlib.colors.ListedColormap(colors)\n\n log_stretch = LogStretch(a=10000.0)\n\n # Get some general info about the image\n image = np.float64(morph._cutout_stamp_maskzeroed) # skimage wants double\n ny, nx = image.shape\n m = np.min(image)\n M = np.max(image)\n m_stretch, M_stretch = log_stretch([m, M])\n xc, yc = morph._xc_stamp, morph._yc_stamp # centroid\n xca, yca = morph._asymmetry_center # asym. center\n xcs, ycs = morph._sersic_model.x_0.value, morph._sersic_model.y_0.value # Sersic center\n\n # Plot everything w.r.t. 
centers of pixels (instead of lower-left corners):\n xc += 0.5; yc += 0.5; xca += 0.5; yca += 0.5; xcs += 0.5; ycs += 0.5\n\n ##################\n # Original image #\n ##################\n ax = get_ax(fig, 0, 0, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n ax.imshow(log_stretch(normalize(image, m=m, M=M)), cmap='gray', origin='lower',\n vmin=m_stretch, vmax=M_stretch)\n\n ax.plot(xc, yc, 'go', markersize=5, label='Centroid')\n R = float(nx**2 + ny**2)\n theta = morph.orientation_centroid\n x0, x1 = xc - R*np.cos(theta), xc + R*np.cos(theta)\n y0, y1 = yc - R*np.sin(theta), yc + R*np.sin(theta)\n ax.plot([x0, x1], [y0, y1], 'g--', lw=1.5, label='Major Axis (Centroid)')\n ax.plot(xca, yca, 'bo', markersize=5, label='Asym. Center')\n R = float(nx**2 + ny**2)\n theta = morph.orientation_asymmetry\n x0, x1 = xca - R*np.cos(theta), xca + R*np.cos(theta)\n y0, y1 = yca - R*np.sin(theta), yca + R*np.sin(theta)\n ax.plot([x0, x1], [y0, y1], 'b--', lw=1.5, label='Major Axis (Asym.)')\n # Half-radius ellipse\n a = morph.rhalf_ellip\n b = a / morph.elongation_asymmetry\n theta = morph.orientation_asymmetry\n xprime, yprime = a*np.cos(theta_vec), b*np.sin(theta_vec)\n x = xca + (xprime*np.cos(theta) - yprime*np.sin(theta))\n y = yca + (xprime*np.sin(theta) + yprime*np.cos(theta))\n ax.plot(x, y, 'b', label='Half-Light Ellipse')\n # Some text\n text = 'flag = %d\\nEllip. (Centroid) = %.4f\\nEllip. (Asym.) = %.4f' % (\n morph.flag, morph.ellipticity_centroid, morph.ellipticity_asymmetry)\n ax.text(0.034, 0.966, text,\n horizontalalignment='left', verticalalignment='top',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n # Finish plot\n ax.legend(loc=4, fontsize=12, facecolor='w', framealpha=1.0, edgecolor='k')\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.set_title('Original Image (Log Stretch)', fontsize=14)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ##############\n # Sersic fit #\n ##############\n ax = get_ax(fig, 0, 1, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n y, x = np.mgrid[0:ny, 0:nx]\n sersic_model = morph._sersic_model(x, y)\n # Add background noise (for realism)\n if morph.sky_sigma > 0:\n sersic_model += np.random.normal(scale=morph.sky_sigma, size=(ny, nx))\n ax.imshow(log_stretch(normalize(sersic_model, m=m, M=M)), cmap='gray',\n origin='lower', vmin=m_stretch, vmax=M_stretch)\n ax.plot(xcs, ycs, 'ro', markersize=5, label='Sérsic Center')\n R = float(nx**2 + ny**2)\n theta = morph.sersic_theta\n x0, x1 = xcs - R*np.cos(theta), xcs + R*np.cos(theta)\n y0, y1 = ycs - R*np.sin(theta), ycs + R*np.sin(theta)\n ax.plot([x0, x1], [y0, y1], 'r--', lw=1.5, label='Major Axis (Sérsic)')\n # Half-radius ellipse\n a = morph.sersic_rhalf\n b = a * (1.0 - morph.sersic_ellip)\n xprime, yprime = a*np.cos(theta_vec), b*np.sin(theta_vec)\n x = xcs + (xprime*np.cos(theta) - yprime*np.sin(theta))\n y = ycs + (xprime*np.sin(theta) + yprime*np.cos(theta))\n ax.plot(x, y, 'r', label='Half-Light Ellipse (Sérsic)')\n # Some text\n text = ('flag_sersic = %d' % (morph.flag_sersic) + '\\n' +\n 'Ellip. 
(Sérsic) = %.4f' % (morph.sersic_ellip) + '\\n' +\n r'$n = %.4f$' % (morph.sersic_n))\n ax.text(0.034, 0.966, text,\n horizontalalignment='left', verticalalignment='top',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n # Finish plot\n ax.legend(loc=4, fontsize=12, facecolor='w', framealpha=1.0, edgecolor='k')\n ax.set_title('Sérsic Model + Noise', fontsize=14)\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ###################\n # Sersic residual #\n ###################\n ax = get_ax(fig, 0, 2, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n y, x = np.mgrid[0:ny, 0:nx]\n sersic_res = morph._cutout_stamp_maskzeroed - morph._sersic_model(x, y)\n sersic_res[morph._mask_stamp] = 0.0\n ax.imshow(normalize(sersic_res), cmap='gray', origin='lower')\n ax.set_title('Sérsic Residual, ' + r'$I - I_{\\rm model}$', fontsize=14)\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ######################\n # Asymmetry residual #\n ######################\n ax = get_ax(fig, 0, 3, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n # Rotate image around asym. center\n # (note that skimage expects pixel positions at lower-left corners)\n image_180 = skimage.transform.rotate(image, 180.0, center=(xca-0.5, yca-0.5))\n image_res = image - image_180\n # Apply symmetric mask\n mask = morph._mask_stamp.copy()\n mask_180 = skimage.transform.rotate(mask, 180.0, center=(xca-0.5, yca-0.5))\n mask_180 = mask_180 >= 0.5 # convert back to bool\n mask_symmetric = mask | mask_180\n image_res = np.where(~mask_symmetric, image_res, 0.0)\n ax.imshow(normalize(image_res), cmap='gray', origin='lower')\n ax.set_title('Asymmetry Residual, ' + r'$I - I_{180}$', fontsize=14)\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ###################\n # Original segmap #\n ###################\n ax = get_ax(fig, 1, 0, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n ax.imshow(log_stretch(normalize(image, m=m, M=M)), cmap='gray', origin='lower',\n vmin=m_stretch, vmax=M_stretch)\n # Show original segmap\n contour_levels = [0.5]\n contour_colors = [(0,0,0)]\n segmap_stamp = morph._segmap.data[morph._slice_stamp]\n Z = np.float64(segmap_stamp == morph.label)\n C = ax.contour(Z, contour_levels, colors=contour_colors, linewidths=1.5)\n # Show skybox\n xmin = morph._slice_skybox[1].start\n ymin = morph._slice_skybox[0].start\n xmax = morph._slice_skybox[1].stop - 1\n ymax = morph._slice_skybox[0].stop - 1\n ax.plot(np.array([xmin, xmax, xmax, xmin, xmin]) + 0.5,\n np.array([ymin, ymin, ymax, ymax, ymin]) + 0.5,\n 'b', lw=1.5, label='Skybox')\n # Some text\n text = ('Sky Mean = %.4f' % (morph.sky_mean) + '\\n' +\n 'Sky Median = %.4f' % (morph.sky_median) + '\\n' +\n 'Sky Sigma = %.4f' % (morph.sky_sigma))\n ax.text(0.034, 0.966, text,\n horizontalalignment='left', verticalalignment='top',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n # Finish plot\n ax.legend(loc=4, fontsize=12, facecolor='w', framealpha=1.0, edgecolor='k')\n ax.set_title('Original Segmap', fontsize=14)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ###############\n # Gini segmap #\n ###############\n ax = get_ax(fig, 1, 1, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n ax.imshow(log_stretch(normalize(image, m=m, M=M)),\n cmap='gray', 
origin='lower', vmin=m_stretch, vmax=M_stretch)\n # Show Gini segmap\n contour_levels = [0.5]\n contour_colors = [(0,0,0)]\n Z = np.float64(morph._segmap_gini)\n C = ax.contour(Z, contour_levels, colors=contour_colors, linewidths=1.5)\n # Some text\n text = r'$\\left\\langle {\\rm S/N} \\right\\rangle = %.4f$' % (morph.sn_per_pixel)\n ax.text(0.034, 0.966, text, fontsize=12,\n horizontalalignment='left', verticalalignment='top',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n text = (r'$G = %.4f$' % (morph.gini) + '\\n' +\n r'$M_{20} = %.4f$' % (morph.m20) + '\\n' +\n r'$F(G, M_{20}) = %.4f$' % (morph.gini_m20_bulge) + '\\n' +\n r'$S(G, M_{20}) = %.4f$' % (morph.gini_m20_merger))\n ax.text(0.034, 0.034, text, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n text = (r'$C = %.4f$' % (morph.concentration) + '\\n' +\n r'$A = %.4f$' % (morph.asymmetry) + '\\n' +\n r'$S = %.4f$' % (morph.smoothness))\n ax.text(0.966, 0.034, text, fontsize=12,\n horizontalalignment='right', verticalalignment='bottom',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n # Finish plot\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.set_title('Gini Segmap', fontsize=14)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ####################\n # Watershed segmap #\n ####################\n ax = get_ax(fig, 1, 2, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n labeled_array, peak_labels, xpeak, ypeak = morph._watershed_mid\n labeled_array_plot = (labeled_array % (cmap.N-1)) + 1\n labeled_array_plot[labeled_array == 0] = 0.0 # background is black\n ax.imshow(labeled_array_plot, cmap=cmap, origin='lower',\n norm=matplotlib.colors.NoNorm())\n sorted_flux_sums, sorted_xpeak, sorted_ypeak = morph._intensity_sums\n if len(sorted_flux_sums) > 0:\n ax.plot(sorted_xpeak[0] + 0.5, sorted_ypeak[0] + 0.5, 'bo', markersize=2,\n label='First Peak')\n if len(sorted_flux_sums) > 1:\n ax.plot(sorted_xpeak[1] + 0.5, sorted_ypeak[1] + 0.5, 'ro', markersize=2,\n label='Second Peak')\n # Some text\n text = (r'$M = %.4f$' % (morph.multimode) + '\\n' +\n r'$I = %.4f$' % (morph.intensity) + '\\n' +\n r'$D = %.4f$' % (morph.deviation))\n ax.text(0.034, 0.034, text, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n ax.legend(loc=4, fontsize=12, facecolor='w', framealpha=1.0, edgecolor='k')\n ax.set_title('Watershed Segmap (' + r'$I$' + ' statistic)', fontsize=14)\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ##########################\n # Shape asymmetry segmap #\n ##########################\n ax = get_ax(fig, 1, 3, nrows, ncols, wpanel, hpanel, htop, eps, wfig, hfig)\n ax.imshow(morph._segmap_shape_asym, cmap='gray', origin='lower')\n ax.plot(xca, yca, 'bo', markersize=5, label='Asym. 
Center')\n r = morph.rpetro_circ\n ax.plot(xca + r*np.cos(theta_vec), yca + r*np.sin(theta_vec), 'b',\n label=r'$r_{\\rm petro, circ}$')\n r = morph.rpetro_ellip\n ax.plot(xca + r*np.cos(theta_vec), yca + r*np.sin(theta_vec), 'r',\n label=r'$r_{\\rm petro, ellip}$')\n r = morph.rmax_circ\n ax.plot(np.floor(xca) + r*np.cos(theta_vec), np.floor(yca) + r*np.sin(theta_vec),\n 'c', lw=1.5, label=r'$r_{\\rm max}$')\n text = (r'$A_S = %.4f$' % (morph.shape_asymmetry))\n ax.text(0.034, 0.034, text, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom',\n transform=ax.transAxes,\n bbox=dict(facecolor='white', alpha=1.0, boxstyle='round'))\n ax.legend(loc=4, fontsize=12, facecolor='w', framealpha=1.0, edgecolor='k')\n ax.set_xlim(0, nx)\n ax.set_ylim(0, ny)\n ax.set_title('Shape Asymmetry Segmap', fontsize=14)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n fig.subplots_adjust(left=eps/wfig, right=1-eps/wfig, bottom=eps/hfig,\n top=1.0-htop/hfig, wspace=eps/wfig, hspace=htop/hfig)\n\n #fig.savefig('test_segmap.png', dpi=150)\n \n return fig\n" ]
[ [ "matplotlib.colors.NoNorm", "numpy.linspace", "numpy.min", "matplotlib.use", "numpy.cos", "numpy.sin", "numpy.max", "numpy.random.normal", "matplotlib.colors.ListedColormap", "numpy.float64", "numpy.floor", "numpy.array", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ROCmSoftwarePlatform/tensorflow-upstream
[ "8bd1e3fcaee378945b11c96ed1474d8b890232fc" ]
[ "tensorflow/python/kernel_tests/reduction_ops_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for reduction ops.\"\"\"\n\nimport itertools\nimport numbers\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n# The maximum input rank to test.\n_MAX_RANK = 5\n\n\ndef _powerset(iterable):\n \"\"\"Helper for generating all possible reduction_axes arguments.\n\n Example:\n powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)\n\n Args:\n iterable: An iterable of items to generate the powerset of.\n\n Returns:\n The powerset of all items in iterable.\n \"\"\"\n s = list(iterable)\n return itertools.chain.from_iterable(\n itertools.combinations(s, r) for r in range(len(s) + 1))\n\n\nclass ReducedShapeTest(test.TestCase):\n\n def _check(self, shape, axes, result):\n output = math_ops.reduced_shape(shape, axes=axes)\n self.assertAllEqual(output, result)\n\n @test_util.run_deprecated_v1\n def testSimple(self):\n with self.cached_session():\n self._check([3], [], [3])\n self._check([3], [0], [1])\n self._check([5, 3], [], [5, 3])\n self._check([5, 3], [0], [1, 3])\n self._check([5, 3], [1], [5, 1])\n self._check([5, 3], [0, 1], [1, 1])\n\n @test_util.run_deprecated_v1\n def testZeros(self):\n \"\"\"Check that reduced_shape does the right thing with zero dimensions.\"\"\"\n with self.cached_session():\n self._check([0], [], [0])\n self._check([0], [0], [1])\n self._check([0, 3], [], [0, 3])\n self._check([0, 3], [0], [1, 3])\n self._check([0, 3], [1], [0, 1])\n self._check([0, 3], [0, 1], [1, 1])\n self._check([3, 0], [], [3, 0])\n self._check([3, 0], [0], [1, 0])\n self._check([3, 0], [1], [3, 1])\n self._check([3, 0], [0, 1], [1, 1])\n\n @test_util.run_deprecated_v1\n def testNegAxes(self):\n with self.cached_session():\n self._check([10, 10, 10], [-1], [10, 10, 1])\n self._check([10, 10, 10], [-1, 2], [10, 10, 1])\n self._check([10, 10, 10], [-1, -1], [10, 10, 1])\n self._check([10, 10, 10], [-1, 0], [1, 10, 1])\n self._check([10, 10, 10], [-3], [1, 10, 10])\n\n\nclass ReductionUnknownShape(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testBasic(self):\n with self.cached_session():\n for dtype, reductions in [(dtypes.float32,\n (math_ops.reduce_sum, math_ops.reduce_mean,\n math_ops.reduce_prod, math_ops.reduce_max,\n math_ops.reduce_min,\n math_ops.reduce_euclidean_norm)),\n (dtypes.bool, (math_ops.reduce_all,\n math_ops.reduce_any))]:\n for reduction in 
reductions:\n x = array_ops.placeholder(\n dtype=dtype, shape=None) # Some tensor w/ unknown shape.\n y = reduction(x)\n self.assertEqual(y.shape, ())\n\n\nclass ReductionInvalidKeepdims(test.TestCase):\n\n def testBasic(self):\n # Test case for GitHub issue 46700.\n for dtype, reductions in [\n (dtypes.float32, (math_ops.reduce_sum, math_ops.reduce_mean,\n math_ops.reduce_prod, math_ops.reduce_max,\n math_ops.reduce_min, math_ops.reduce_euclidean_norm)),\n (dtypes.bool, (math_ops.reduce_all, math_ops.reduce_any))\n ]:\n for reduction in reductions:\n with self.assertRaisesRegex(ValueError, \"The truth value\"):\n x = True if dtype == dtypes.bool else 1\n y = reduction(\n input_tensor=x, keepdims=np.array([63600, 1], dtype=np.float16))\n self.evaluate(y)\n\n\nclass BaseReductionTest(test.TestCase):\n\n def _tf_reduce(self, x, reduction_axes, keepdims):\n raise NotImplementedError()\n\n def _np_reduce(self, x, reduction_axes, keepdims):\n raise NotImplementedError()\n\n def _makeIncremental(self, shape, dtype):\n data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)\n if dtype.is_complex:\n data -= 2j * data\n return data\n\n def _makeRandom(self, shape, dtype):\n data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)\n if dtype.is_complex:\n data -= 2j * data\n return data\n\n def _compare(self, x, reduction_axes, keepdims, feed_dict=None):\n np_ans = self._np_reduce(x, reduction_axes, keepdims)\n with self.cached_session() as sess:\n tf_ans = self._tf_reduce(x, reduction_axes, keepdims)\n out = sess.run(tf_ans, feed_dict)\n self.assertAllClose(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes, feed_dict=None):\n if reduction_axes is not None and np.shape(reduction_axes) == (1,):\n # Test scalar reduction_axes argument\n self._compareAll(x, reduction_axes[0])\n self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)\n self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)\n\n def _compareAllAxes(self, x, feed_dict=None):\n self._compareAll(x, None)\n for axes in _powerset(range(x.ndim)):\n self._compareAll(x, axes, feed_dict)\n\n def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):\n if reduction_axes is not None and np.shape(reduction_axes) == (1,):\n # Test scalar reduction_axes argument\n self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = self._tf_reduce(t, reduction_axes, False)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)\n\n def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):\n self._compareGradient(x, None, rtol=rtol, atol=atol)\n self._compareGradient(x, [], rtol=rtol, atol=atol)\n self._compareGradient(x, 0, rtol=rtol, atol=atol)\n self._compareGradient(x, [1], rtol=rtol, atol=atol)\n self._compareGradient(x, [2], rtol=rtol, atol=atol)\n self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)\n self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)\n\n\nclass SumReductionTest(BaseReductionTest):\n\n def _tf_reduce(self, x, reduction_axes, keepdims):\n return math_ops.reduce_sum(x, reduction_axes, keepdims)\n\n def _np_reduce(self, x, reduction_axes, keepdims):\n if isinstance(reduction_axes, list) or isinstance(reduction_axes,\n np.ndarray):\n reduction_axes = tuple(reduction_axes)\n return np.sum(x, axis=reduction_axes, 
keepdims=keepdims)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.run_deprecated_v1\n def testInfinity(self):\n for dtype in [np.float32, np.float64]:\n for special_value_x in [-np.inf, np.inf]:\n for special_value_y in [-np.inf, np.inf]:\n np_arr = np.array([special_value_x, special_value_y]).astype(dtype)\n self._compareAll(np_arr, None)\n\n @test_util.run_deprecated_v1\n def testInt32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.int32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat16(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float16)\n self._compareAllAxes(np_arr)\n\n # test that mean doesn't overflow\n # only on GPU, since it has the more accurate implementation\n if not test.is_gpu_available():\n return\n for n in (200, 500, 5000, 68000):\n arr = np.ones([n], dtype=np.float16)\n\n\n with self.session(graph=ops.Graph(), use_gpu=True) as sess:\n tf_arr = variables.Variable(arr)\n self.evaluate(variables.global_variables_initializer())\n tf_mean = math_ops.reduce_mean(tf_arr, 0, False)\n tf_out_mean = self.evaluate(tf_mean)\n self.assertAllClose(tf_out_mean, 1.)\n\n @test_util.run_deprecated_v1\n def testFloat32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float32)\n self._compareAllAxes(np_arr)\n\n for _ in range(10):\n size_x = int(2**np.random.uniform(0, 15))\n size_y = int(2**np.random.uniform(0, 15))\n\n if size_x * size_y > 1e7:\n size_y = int(1e7 / size_x)\n\n arr = np.ones([size_x, size_y], dtype=np.float32)\n col_sum = np.sum(arr, axis=0)\n row_sum = np.sum(arr, axis=1)\n\n with self.session(graph=ops.Graph(), use_gpu=True) as sess:\n tf_row_sum = self._tf_reduce(arr, 1, False)\n tf_col_sum = self._tf_reduce(arr, 0, False)\n tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])\n self.assertAllClose(col_sum, tf_out_col)\n self.assertAllClose(row_sum, tf_out_row)\n\n for size_x in [1, 3, 16, 33]:\n for size_y in [1, 3, 16, 33]:\n for size_z in [1, 3, 16, 33]:\n arr = np.ones([size_x, size_y, size_z], dtype=np.float32)\n sum_y = np.sum(arr, axis=1)\n sum_xz = np.sum(arr, axis=(0, 2))\n\n with self.session(graph=ops.Graph(), use_gpu=True) as sess:\n tf_sum_xz = self._tf_reduce(arr, [0, 2], False)\n tf_sum_y = self._tf_reduce(arr, 1, False)\n tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])\n self.assertAllClose(sum_y, tf_out_sum_y)\n self.assertAllClose(sum_xz, tf_out_sum_xz)\n\n @test_util.run_deprecated_v1\n def testFloat64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex128(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testInvalidIndex(self):\n np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)\n input_tensor = ops.convert_to_tensor(np_arr)\n with self.assertRaisesWithPredicateMatch(\n 
ValueError, lambda e: \"Invalid reduction dimension\" in str(e)):\n math_ops.reduce_sum(input_tensor, [-3])\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Invalid reduction dimension\" in str(e)):\n math_ops.reduce_sum(input_tensor, [2])\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"Invalid reduction dimension\" in str(e)):\n math_ops.reduce_sum(input_tensor, [0, 2])\n\n @test_util.run_deprecated_v1\n def testPartialShapes(self):\n np.random.seed(1618)\n\n # Input shape is unknown.\n reduction_axes = [1, 2]\n c_unknown = array_ops.placeholder(dtypes.float32)\n s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)\n self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())\n\n np_input = np.random.randn(3, 3, 3)\n self._compareAll(np_input, reduction_axes, {c_unknown: np_input})\n\n # Input shape only has known rank.\n c_known_rank = array_ops.placeholder(dtypes.float32)\n c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))\n s_known_rank = math_ops.reduce_sum(\n c_known_rank, reduction_axes, keepdims=True)\n self.assertEqual(3, s_known_rank.get_shape().rank)\n\n np_input = np.random.randn(3, 3, 3)\n self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})\n\n # Reduction indices are unknown.\n unknown_indices = array_ops.placeholder(dtypes.int32)\n c_unknown_indices = constant_op.constant([[10.0], [20.0]])\n s_unknown_indices = math_ops.reduce_sum(\n c_unknown_indices, unknown_indices, keepdims=False)\n self.assertEqual(tensor_shape.unknown_shape(),\n s_unknown_indices.get_shape())\n s_unknown_indices_keep = math_ops.reduce_sum(\n c_unknown_indices, unknown_indices, keepdims=True)\n self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)\n\n @test_util.run_deprecated_v1\n def testWrongShapeForReductionIndices(self):\n reduction_axes = [[1], [2]]\n c_unknown = array_ops.placeholder(dtypes.float32)\n with self.assertRaisesWithPredicateMatch(ValueError,\n \".*must be at most rank 1.*\"):\n math_ops.reduce_sum(c_unknown, reduction_axes)\n\n def testInvalidRepeatedReductionIndices(self):\n reduction_axes = constant_op.constant([0, 0])\n c = constant_op.constant([1.0, 2.0])\n with self.assertRaisesWithPredicateMatch(\n errors.InvalidArgumentError,\n \".*Axes contains duplicate dimension: 0.*\"):\n self.evaluate(math_ops.reduce_sum(c, reduction_axes))\n\n # Int64??\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n for dtype in [\n dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128\n ]:\n x = self._makeIncremental([2, 3, 4, 2], dtype)\n self._compareGradientAxes(x)\n\n @test_util.run_deprecated_v1\n def testHighRank(self):\n # Do a bunch of random high dimensional reductions\n np.random.seed(42)\n for _ in range(20):\n rank = np.random.randint(4, 10 + 1)\n axes, = np.nonzero(np.random.randint(2, size=rank))\n shape = tuple(np.random.randint(1, 3 + 1, size=rank))\n data = np.random.randint(1024, size=shape)\n self._compareAll(data, axes)\n # Check some particular axis patterns\n for rank in 4, 7, 10:\n shape = tuple(np.random.randint(1, 3 + 1, size=rank))\n data = np.random.randint(1024, size=shape)\n for axes in ([], np.arange(rank), np.arange(0, rank, 2),\n np.arange(1, rank, 2)):\n self._compareAll(data, axes)\n\n @test_util.run_deprecated_v1\n def testExpand(self):\n # Reduce an empty tensor to a nonempty tensor\n x = np.zeros((5, 0))\n self._compareAll(x, [1])\n\n @test_util.run_deprecated_v1\n def testEmptyGradients(self):\n with self.session():\n x = array_ops.zeros([0, 3])\n y 
= math_ops.reduce_sum(x, [1])\n error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])\n self.assertEqual(error, 0)\n\n @test_util.run_deprecated_v1\n def testDegenerate(self):\n with self.session():\n for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128):\n # A large number is needed to get Eigen to die\n x = array_ops.zeros((0, 9938), dtype=dtype)\n y = math_ops.reduce_sum(x, [0])\n self.assertAllEqual(y, np.zeros(9938))\n\n\nclass MeanReductionTest(BaseReductionTest):\n\n def _tf_reduce(self, x, reduction_axes, keepdims):\n return math_ops.reduce_mean(x, reduction_axes, keepdims)\n\n def _np_reduce(self, x, reduction_axes, keepdims):\n if isinstance(reduction_axes, list) or isinstance(reduction_axes,\n np.ndarray):\n reduction_axes = tuple(reduction_axes)\n elif isinstance(reduction_axes, numbers.Integral):\n reduction_axes = (reduction_axes,)\n\n if reduction_axes is None:\n count = np.prod(x.shape)\n else:\n count = np.prod([x.shape[ax] for ax in reduction_axes])\n # np.mean automatically converts integer inputs to float, while TensorFlow's\n # reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior\n # using np.sum and truncating division.\n np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)\n if np.issubdtype(x.dtype, np.integer):\n return np_sum // count\n return np_sum / count\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.run_deprecated_v1\n def testInfinity(self):\n for dtype in [np.float32, np.float64]:\n for special_value_x in [-np.inf, np.inf]:\n for special_value_y in [-np.inf, np.inf]:\n np_arr = np.array([special_value_x, special_value_y]).astype(dtype)\n self._compareAll(np_arr, None)\n\n @test_util.run_deprecated_v1\n def testInt32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.int32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testUint8(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeRandom((2,) * rank, dtypes.uint8)\n self._compareAllAxes(np_arr)\n\n # This tests the issue reported in b/145030710.\n @test_util.run_deprecated_v1\n def testSizeOverflowUint8(self):\n np_arr = self._makeRandom((2**8,), dtypes.uint8)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testSizeOverflowInt8(self):\n np_arr = self._makeRandom((2**7,), dtypes.int8)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testSizeOverflowUint16(self):\n np_arr = self._makeRandom((2**16,), dtypes.uint16)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testSizeOverflowInt16(self):\n np_arr = self._makeRandom((2**15,), dtypes.int16)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def 
testComplex128(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n s = [2, 3, 4, 2]\n for dtype in [dtypes.float32, dtypes.float64]:\n x = self._makeIncremental(s, dtype)\n self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)\n\n @test_util.run_deprecated_v1\n def testEmptyGradients(self):\n with self.session():\n x = array_ops.zeros([0, 3])\n y = math_ops.reduce_mean(x, [1])\n error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])\n self.assertEqual(error, 0)\n\n @test_util.run_deprecated_v1\n def testDegenerate(self):\n with self.session():\n for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):\n # A large number is needed to get Eigen to die\n x = array_ops.zeros((0, 9938), dtype=dtype)\n y = math_ops.reduce_mean(x, [0]).eval()\n self.assertEqual(y.shape, (9938,))\n self.assertTrue(np.all(np.isnan(y)))\n\n\nclass EuclideanNormReductionTest(BaseReductionTest):\n\n def _tf_reduce(self, x, reduction_axes, keepdims):\n return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)\n\n def _np_reduce(self, x, reduction_axes, keepdims):\n if isinstance(reduction_axes, list) or isinstance(reduction_axes,\n np.ndarray):\n reduction_axes = tuple(reduction_axes)\n np_fro = np.sqrt(\n np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))\n if np.issubdtype(x.dtype, np.integer):\n np_fro = np.floor(np_fro)\n return np_fro\n\n @test_util.run_deprecated_v1\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.run_deprecated_v1\n def testInfinity(self):\n for dtype in [np.float32, np.float64]:\n for special_value_x in [-np.inf, np.inf]:\n for special_value_y in [-np.inf, np.inf]:\n np_arr = np.array([special_value_x, special_value_y]).astype(dtype)\n self._compareAll(np_arr, None)\n\n @test_util.run_deprecated_v1\n def testSingleton(self):\n for dtype in [np.float32, np.float64]:\n np_arr = np.array([-1.]).astype(dtype)\n self._compareAll(np_arr, None)\n\n @test_util.run_deprecated_v1\n def testInt32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.int32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex128(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)\n self._compareAllAxes(np_arr)\n\n with self.session():\n for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):\n # A large number is needed to get Eigen to die\n x = array_ops.zeros((0, 9938), dtype=dtype)\n y = math_ops.reduce_euclidean_norm(x, [0]).eval()\n self.assertEqual(y.shape, (9938,))\n self.assertAllEqual(y, np.zeros(9938))\n\n 
@test_util.run_deprecated_v1\n def testGradient(self):\n shape = [2, 3, 4, 2]\n for dtype in [dtypes.float32, dtypes.float64]:\n # zero value entry will result NaN gradient if reduction doesn't happen.\n # e.g., `tf.math.reduce_sum([0, 1], axis=[])` so add one to avoid it.\n x = self._makeIncremental(shape, dtype) + 1.0\n self._compareGradientAxes(x, rtol=1e-2, atol=1e-2)\n\n\nclass ProdReductionTest(BaseReductionTest):\n\n def _tf_reduce(self, x, reduction_axes, keepdims):\n return math_ops.reduce_prod(x, reduction_axes, keepdims)\n\n def _np_reduce(self, x, reduction_axes, keepdims):\n if isinstance(reduction_axes, list) or isinstance(reduction_axes,\n np.ndarray):\n reduction_axes = tuple(reduction_axes)\n return np.prod(x, axis=reduction_axes, keepdims=keepdims)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.run_deprecated_v1\n def testInfinity(self):\n for dtype in [np.float32, np.float64]:\n for special_value_x in [-np.inf, np.inf]:\n for special_value_y in [-np.inf, np.inf]:\n np_arr = np.array([special_value_x, special_value_y]).astype(dtype)\n self._compareAll(np_arr, None)\n\n @test_util.run_deprecated_v1\n def testInt32(self):\n # Numpy automatically upgrades the type of np.prod from int32 to int64, so\n # Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid\n # overflow, limit array values.\n for rank in range(1, _MAX_RANK):\n np_arr = self._makeIncremental((2,) * rank, dtypes.int32) % 5 + 1\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testInt64(self):\n for rank in range(1, _MAX_RANK):\n # Avoid overflow by limiting array values.\n np_arr = self._makeIncremental((2,) * rank, dtypes.int64) % 11 + 1\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat32(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float32)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testFloat64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.float64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex64(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testComplex128(self):\n for rank in range(1, _MAX_RANK + 1):\n np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)\n self._compareAllAxes(np_arr)\n\n @test_util.run_deprecated_v1\n def testGradientWithZeros(self):\n s = [2, 3, 4, 2]\n x = self._makeIncremental(s, dtypes.float32) / 20.\n # No zeros in input\n self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)\n # Zero at beginning\n x1 = x.copy()\n x1[:, :, 0, :] = 0\n self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)\n # Zero at end\n x2 = x.copy()\n x2[:, :, -1, :] = 0\n self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)\n # Zero in middle\n x3 = x.copy()\n x3[:, :, 2, :] = 0\n self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)\n # All zeros\n x4 = x.copy()\n x4[:, :, :, :] = 0\n self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)\n\n @test_util.run_deprecated_v1\n def testEmptyGradients(self):\n with self.session():\n x = array_ops.zeros([0, 3])\n y = math_ops.reduce_prod(x, [1])\n error = gradient_checker.compute_gradient_error(x, [0, 3], y, 
[0])\n self.assertEqual(error, 0)\n\n @test_util.run_deprecated_v1\n def testDegenerate(self):\n with self.session():\n for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):\n # A large number is needed to get Eigen to die\n x = array_ops.zeros((0, 9938), dtype=dtype)\n y = math_ops.reduce_prod(x, [0])\n self.assertAllEqual(y, np.ones(9938))\n\n\nclass MinReductionTest(test.TestCase):\n\n def _compare(self, x, reduction_axes, keepdims, use_gpu=False):\n np_ans = x\n if reduction_axes is None:\n np_ans = np.amin(np_ans, keepdims=keepdims)\n else:\n for ra in reduction_axes[::-1]:\n np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)\n with self.cached_session(use_gpu=use_gpu):\n if reduction_axes is not None:\n reduction_axes = np.array(reduction_axes).astype(np.int32)\n tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)\n out = self.evaluate(tf_ans)\n self.assertAllClose(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes):\n self._compare(x, reduction_axes, False, use_gpu=True)\n self._compare(x, reduction_axes, True, use_gpu=True)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.disable_xla(\"b/168718272\") # XLA handling of NaN is inconsistent\n def testSpecialValues(self):\n for dtype in [np.float32, np.float64]:\n for size in range(1, 4):\n for arr in itertools.product([-np.inf, 1., np.nan, np.inf],\n repeat=size):\n self._compareAll(np.array(arr, dtype=dtype), None)\n\n def testFloatReduce3D(self):\n # Create a 3D array of floats and reduce across all possible\n # dimensions\n np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n def testDoubleReduce3D(self):\n # Create a 3D array of doubles and reduce across all possible\n # dimensions\n np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n s = [2, 3, 4, 2]\n x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_min(t, [1, 2])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient2(self):\n s = [2, 3, 4, 2]\n x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_min(t, [1])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 4, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient3(self):\n s = [2, 3, 4, 2]\n x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)\n with 
self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_min(t, [2])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 3, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient4(self):\n s = [2, 3, 4, 2]\n x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_min(t)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [1], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testEmptyGradients(self):\n with self.cached_session():\n x = array_ops.zeros([0, 3])\n y = math_ops.reduce_min(x, [1])\n error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])\n self.assertEqual(error, 0)\n\n\nclass MaxReductionTest(test.TestCase):\n\n def _compare(self, x, reduction_axes, keepdims, use_gpu=False):\n np_ans = x\n if reduction_axes is None:\n np_ans = np.amax(np_ans, keepdims=keepdims)\n else:\n for ra in reduction_axes[::-1]:\n np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)\n with self.cached_session(use_gpu=use_gpu):\n if reduction_axes is not None:\n reduction_axes = np.array(reduction_axes).astype(np.int32)\n tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)\n out = self.evaluate(tf_ans)\n self.assertAllClose(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes):\n self._compare(x, reduction_axes, False, use_gpu=True)\n self._compare(x, reduction_axes, True, use_gpu=True)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.cached_session():\n v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, 0)\n\n @test_util.disable_xla(\"b/168718272\") # XLA handling of NaN is inconsistent\n def testSpecialValues(self):\n for dtype in [np.float32, np.float64]:\n for size in range(1, 4):\n for arr in itertools.product([-np.inf, 1., np.nan, np.inf],\n repeat=size):\n self._compareAll(np.array(arr, dtype=dtype), None)\n\n def testInt64Reduce3D(self):\n # Create a 3D array of int64s and reduce across all possible\n # dimensions\n np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n def testFloatReduce3D(self):\n # Create a 3D array of floats and reduce across all possible\n # dimensions\n np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n def testDoubleReduce3D(self):\n # Create a 3D array of doubles and reduce across all possible\n # dimensions\n np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n 
self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n s = [2, 3, 4, 2]\n x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_max(t, [1, 2])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient2(self):\n s = [2, 3, 4, 2]\n x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_max(t, [1])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 4, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient3(self):\n s = [2, 3, 4, 2]\n x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_max(t, [2])\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [2, 3, 2], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testGradient4(self):\n s = [2, 3, 4, 2]\n x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)\n with self.cached_session():\n t = ops.convert_to_tensor(x)\n su = math_ops.reduce_max(t)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, s, su, [1], x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n @test_util.run_deprecated_v1\n def testEmptyGradients(self):\n with self.cached_session():\n x = array_ops.zeros([0, 3])\n y = math_ops.reduce_max(x, [1])\n error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])\n self.assertEqual(error, 0)\n\n\nclass AllReductionTest(test.TestCase):\n\n def _compare(self, x, reduction_axes, keepdims, use_gpu=False):\n np_ans = x\n if reduction_axes is None:\n np_ans = np.all(np_ans, keepdims=keepdims)\n else:\n for ra in reduction_axes[::-1]:\n np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)\n with self.cached_session(use_gpu=use_gpu):\n if reduction_axes is not None:\n reduction_axes = np.array(reduction_axes).astype(np.int32)\n tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)\n out = self.evaluate(tf_ans)\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes):\n self._compare(x, reduction_axes, False, use_gpu=True)\n self._compare(x, reduction_axes, False, use_gpu=False)\n self._compare(x, reduction_axes, True, use_gpu=True)\n self._compare(x, reduction_axes, True, use_gpu=False)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.session():\n v = math_ops.reduce_all([True, True],\n constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, True)\n\n def testAll3D(self):\n # Create a 3D array of bools and reduce across all possible\n # dimensions\n np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n def testEmpty(self):\n 
self._compareAll([], [0])\n\n\nclass AnyReductionTest(test.TestCase):\n\n def _compare(self, x, reduction_axes, keepdims, use_gpu=False):\n np_ans = x\n if reduction_axes is None:\n np_ans = np.any(np_ans, keepdims=keepdims)\n else:\n for ra in reduction_axes[::-1]:\n np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)\n with self.cached_session(use_gpu=use_gpu):\n if reduction_axes is not None:\n reduction_axes = np.array(reduction_axes).astype(np.int32)\n tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)\n out = self.evaluate(tf_ans)\n self.assertAllEqual(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes):\n self._compare(x, reduction_axes, False, use_gpu=True)\n self._compare(x, reduction_axes, False, use_gpu=False)\n self._compare(x, reduction_axes, True, use_gpu=True)\n self._compare(x, reduction_axes, True, use_gpu=False)\n\n def testAxesType(self):\n for dtype in [dtypes.int64, dtypes.int32]:\n with self.session():\n v = math_ops.reduce_any([True, True],\n constant_op.constant(0, dtype=dtype))\n tf_v = self.evaluate(v)\n self.assertAllEqual(tf_v, True)\n\n def testAll3D(self):\n # Create a 3D array of bools and reduce across all possible\n # dimensions\n np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n\n def testEmpty(self):\n self._compareAll([], [0])\n\n\nclass CountNonzeroReductionTest(test.TestCase):\n\n def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,\n feed_dict=None):\n np_ans = (x != zero).astype(np.int32)\n if reduction_axes is None:\n np_ans = np.sum(np_ans, keepdims=keepdims)\n else:\n reduction_axes = np.array(reduction_axes).astype(np.int32)\n for ra in reduction_axes.ravel()[::-1]:\n np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)\n with self.cached_session(use_gpu=use_gpu) as sess:\n tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)\n out = sess.run(tf_ans, feed_dict)\n self.assertAllClose(np_ans, out)\n self.assertShapeEqual(np_ans, tf_ans)\n\n def _compareAll(self, x, reduction_axes, feed_dict=None):\n if reduction_axes is not None and np.shape(reduction_axes) == (1,):\n # Test scalar reduction_axes argument\n self._compareAll(x, reduction_axes[0])\n self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)\n self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)\n self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)\n self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)\n\n @test_util.run_deprecated_v1\n def testBoolReduce1D(self):\n # Create a 1D array of floats\n np_arr = np.asarray([False, False, True, False, False, True])\n self._compareAll(np_arr, None)\n self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n\n @test_util.run_deprecated_v1\n def testFloatReduce1D(self):\n # Create a 1D array of floats\n np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)\n self._compareAll(np_arr, [0])\n\n @test_util.run_deprecated_v1\n def testFloatReduce4D(self):\n # Create a 4D array of floats and reduce across some\n # dimensions\n np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape([2, 3, 5,\n 7]).astype(\n np.float32)\n self._compareAll(np_arr, None)\n 
self._compareAll(np_arr, [])\n self._compareAll(np_arr, [0])\n self._compareAll(np_arr, [1])\n self._compareAll(np_arr, [2])\n self._compareAll(np_arr, [0, 1])\n self._compareAll(np_arr, [1, 2])\n # Need specialization for reduce(4D, [0, 2])\n # self._compareAll(np_arr, [0, 2])\n self._compareAll(np_arr, [0, 1, 2])\n self._compareAll(np_arr, [1, 2, 3])\n self._compareAll(np_arr, [0, 1, 2, 3])\n\n @test_util.run_deprecated_v1\n def testExpand(self):\n # Reduce an empty tensor to a nonempty tensor\n x = np.zeros((5, 0))\n self._compareAll(x, [1])\n\n @test_util.run_deprecated_v1\n def testDegenerate(self):\n for use_gpu in False, True:\n with self.cached_session(use_gpu=use_gpu):\n for dtype in (dtypes.bool,):\n # A large number is needed to get Eigen to die\n x = array_ops.zeros((0, 9938), dtype=dtype)\n y = math_ops.count_nonzero(x, [0])\n self.assertAllEqual(y, np.zeros(9938))\n\n def testStringReduce(self):\n # Test case for GitHub issue 18712\n with self.cached_session() as sess:\n v = math_ops.count_nonzero(constant_op.constant([\"test\"]))\n self.assertAllClose(self.evaluate(v), 1)\n\n @test_util.run_deprecated_v1\n def testStringReduce1D(self):\n # Create a 1D array of strings\n x = np.asarray([\"\", \"\", \"a\", \"\", \"\", \"b\"])\n self._compare(x, None, keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [0], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, None, keepdims=True, zero=np.str_(\"\"))\n self._compare(x, [], keepdims=True, zero=np.str_(\"\"))\n self._compare(x, [0], keepdims=True, zero=np.str_(\"\"))\n\n @test_util.run_deprecated_v1\n def testStringReduce2D(self):\n # Create a 2D array of strings\n x = np.asarray([[\"\", \"\", \"a\", \"\", \"\", \"b\"],\n [\"\", \"c\", \"\", \"d\", \"\", \"\"],\n [\"e\", \"\", \"f\", \"\", \"\", \"\"]])\n self._compare(x, None, keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [0], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [1], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, [0, 1], keepdims=False, zero=np.str_(\"\"))\n self._compare(x, None, keepdims=True, zero=np.str_(\"\"))\n self._compare(x, [], keepdims=True, zero=np.str_(\"\"))\n self._compare(x, [0], keepdims=True, zero=np.str_(\"\"))\n self._compare(x, [0, 1], keepdims=True, zero=np.str_(\"\"))\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "numpy.amax", "tensorflow.python.ops.gradient_checker.compute_gradient", "tensorflow.python.ops.math_ops.reduce_max", "numpy.asarray", "numpy.issubdtype", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.Variable", "numpy.all", "tensorflow.python.ops.array_ops.zeros", "numpy.random.randn", "numpy.any", "tensorflow.python.ops.math_ops.reduce_any", "numpy.str_", "tensorflow.python.ops.math_ops.reduce_euclidean_norm", "numpy.random.randint", "tensorflow.python.framework.test_util.disable_xla", "tensorflow.python.ops.math_ops.reduce_all", "numpy.arange", "tensorflow.python.ops.math_ops.reduce_prod", "tensorflow.python.ops.math_ops.reduce_min", "tensorflow.python.platform.test.main", "numpy.zeros", "tensorflow.python.ops.math_ops.reduced_shape", "tensorflow.python.ops.gradient_checker.compute_gradient_error", "numpy.amin", "numpy.isnan", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.math_ops.reduce_mean", "numpy.random.rand", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.floor", "tensorflow.python.framework.tensor_shape.unknown_shape", "numpy.array", "numpy.sum", "tensorflow.python.ops.math_ops.count_nonzero", "numpy.conj", "numpy.random.seed", "tensorflow.python.framework.ops.Graph", "numpy.ones", "numpy.shape", "numpy.prod", "numpy.random.uniform", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
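Aside on the record above: the MeanReductionTest._np_reduce helper encodes a subtle dtype rule — tf.reduce_mean keeps integer inputs integer (truncating), while np.mean silently promotes to float. A minimal NumPy-only sketch of that emulation, with the list/scalar axis normalization simplified away (illustrative only, not part of the record):

import numpy as np

def int_mean_like_tf(x, axis=None, keepdims=False):
    # Emulate TensorFlow's reduce_mean on integer dtypes: sum first,
    # then floor-divide by the element count, instead of promoting to
    # float the way np.mean does.
    if axis is None:
        count = x.size
    else:
        count = int(np.prod([x.shape[a] for a in np.atleast_1d(axis)]))
    return np.sum(x, axis=axis, keepdims=keepdims) // count

x = np.arange(6, dtype=np.int32).reshape(2, 3)
print(int_mean_like_tf(x, axis=1))  # [1 4]  -- integer, truncated
print(np.mean(x, axis=1))           # [1. 4.] -- promoted to float

The same concern drives the testInt32/testInt64 cases in ProdReductionTest above: NumPy upcasts int32 products to int64, so the test data is deliberately kept small to avoid overflow divergence between the two libraries.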
meteahishali/CycleGAN-Tensorflow-2
[ "11f49e812aa6144d89229a86df9fa7432aeef9aa" ]
[ "tf2lib/utils/utils.py" ]
[ "import tensorflow as tf\n\n\nclass Checkpoint:\n \"\"\"Enhanced \"tf.train.Checkpoint\".\"\"\"\n\n def __init__(self,\n checkpoint_kwargs, # for \"tf.train.Checkpoint\"\n directory, # for \"tf.train.CheckpointManager\"\n max_to_keep=5,\n keep_checkpoint_every_n_hours=None):\n self.checkpoint = tf.train.Checkpoint(**checkpoint_kwargs)\n self.manager = tf.train.CheckpointManager(self.checkpoint, directory, keep_checkpoint_every_n_hours, step_counter = checkpoint_kwargs['ep_cnt'], checkpoint_interval = 100)\n\n def restore(self, save_path=None):\n save_path = self.manager.latest_checkpoint if save_path is None else save_path\n return self.checkpoint.restore(save_path)\n\n def save(self, file_prefix_or_checkpoint_number=None, session=None):\n if isinstance(file_prefix_or_checkpoint_number, str):\n return self.checkpoint.save(file_prefix_or_checkpoint_number, session=session)\n else:\n return self.manager.save(checkpoint_number=file_prefix_or_checkpoint_number)\n\n def __getattr__(self, attr):\n if hasattr(self.checkpoint, attr):\n return getattr(self.checkpoint, attr)\n elif hasattr(self.manager, attr):\n return getattr(self.manager, attr)\n else:\n self.__getattribute__(attr) # this will raise an exception\n\n\ndef summary(name_data_dict,\n step=None,\n types=['mean', 'std', 'max', 'min', 'sparsity', 'histogram'],\n historgram_buckets=None,\n name='summary'):\n \"\"\"Summary.\n\n Examples\n --------\n >>> summary({'a': data_a, 'b': data_b})\n\n \"\"\"\n def _summary(name, data):\n if data.shape == ():\n tf.summary.scalar(name, data, step=step)\n else:\n if 'mean' in types:\n tf.summary.scalar(name + '-mean', tf.math.reduce_mean(data), step=step)\n if 'std' in types:\n tf.summary.scalar(name + '-std', tf.math.reduce_std(data), step=step)\n if 'max' in types:\n tf.summary.scalar(name + '-max', tf.math.reduce_max(data), step=step)\n if 'min' in types:\n tf.summary.scalar(name + '-min', tf.math.reduce_min(data), step=step)\n if 'sparsity' in types:\n tf.summary.scalar(name + '-sparsity', tf.math.zero_fraction(data), step=step)\n if 'histogram' in types:\n tf.summary.histogram(name, data, step=step, buckets=historgram_buckets)\n\n with tf.name_scope(name):\n for name, data in name_data_dict.items():\n _summary(name, data)\n" ]
[ [ "tensorflow.train.CheckpointManager", "tensorflow.math.reduce_min", "tensorflow.math.zero_fraction", "tensorflow.math.reduce_max", "tensorflow.train.Checkpoint", "tensorflow.math.reduce_mean", "tensorflow.math.reduce_std", "tensorflow.name_scope", "tensorflow.summary.scalar", "tensorflow.summary.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
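The Checkpoint class in this record layers attribute delegation and an epoch-driven checkpoint_interval on top of two stock TensorFlow classes. A minimal sketch of the underlying pattern using only vanilla tf.train APIs (the directory path is illustrative):

import tensorflow as tf

step = tf.Variable(0, dtype=tf.int64)
ckpt = tf.train.Checkpoint(step=step)
manager = tf.train.CheckpointManager(ckpt, '/tmp/tf2lib_demo', max_to_keep=5)

# Standard resume idiom: latest_checkpoint is None on a fresh run,
# in which case restore() is a no-op.
ckpt.restore(manager.latest_checkpoint)
for _ in range(3):
    step.assign_add(1)
    manager.save(checkpoint_number=int(step))
print(manager.latest_checkpoint)

One detail worth double-checking in the record itself: stock tf.train.CheckpointManager takes max_to_keep as its third positional argument, so passing keep_checkpoint_every_n_hours positionally there appears to leave the wrapper's own max_to_keep=5 unused.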
MeRajat/modeldb
[ "3cf07d8292ab73016e72ac8e24d4b69938407c18" ]
[ "client/python/samples/sklearn/OttoGroup-Calibration.py" ]
[ "\"\"\"\nSource: https://www.kaggle.com/cbourguignat/otto-group-product-classification-challenge/why-calibration-works\n\"\"\"\nimport os\nimport unittest\nimport argparse\nimport pandas as pd\nimport sklearn\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier, BaggingClassifier \nfrom sklearn.metrics import log_loss\nfrom sklearn.calibration import CalibratedClassifierCV\n\nfrom modeldb.sklearn_native.ModelDbSyncer import *\nfrom modeldb.sklearn_native import SyncableRandomSplit\nfrom modeldb.sklearn_native import SyncableMetrics\n\n\n# During the Otto Group competition, some Kagglers discussed in the forum about Calibration for Random Forests.\n# It was a brand new functionality of the last scikit-learn version (0.16) : \n# see : http://scikit-learn.org/stable/whats_new.html\n# Calibration makes that the output of the models gives a true probability of a sample to belong to\n# a particular class\n# For instance, a well calibrated (binary) classifier should classify the samples such that among\n# the samples \n# to which it gave a predict_proba value close to 0.8, approximately 80% actually belong to the positive class\n# See http://scikit-learn.org/stable/modules/calibration.html for more details\n# This script is an example of how to implement calibration, and check if it boosts performance.\n\nROOT_DIR = '../../../../server/'\nDATA_PATH = '../../../../data/'\n\ndef run_otto_workflow():\n name = \"test1\"\n author = \"author\"\n description = \"kaggle-otto-script\"\n # Creating a new project\n syncer_obj = Syncer(\n NewOrExistingProject(name, author, description),\n NewOrExistingExperiment(\"expName\", \"expDesc\"),\n NewExperimentRun(\"otto test\"))\n\n # Import Data\n # Note: This dataset is not included in the repo because of Kaggle restrictions. \n # It can be downloaded from https://www.kaggle.com/c/otto-group-product-classification-challenge/data \n X = pd.read_csv_sync(DATA_PATH + 'otto-train.csv')\n syncer_obj.add_tag(X, \"original otto csv data\")\n X = X.drop_sync('id', axis=1)\n\n syncer_obj.add_tag(X, \"dropped id column\")\n # Extract target\n # Encode it to make it manageable by ML algo\n y = X.target.values\n\n y = LabelEncoder().fit_transform_sync(y)\n\n # Remove target from train, else it's too easy ...\n X = X.drop_sync('target', axis=1)\n\n syncer_obj.add_tag(X, \"data with dropped id and target columns\")\n\n # Split Train / Test\n x_train, x_test, y_train, y_test = cross_validation.train_test_split_sync(X, y, test_size=0.20, random_state=36)\n\n syncer_obj.add_tag(x_test, \"testing data\")\n syncer_obj.add_tag(x_train, \"training data\")\n # First, we will train and apply a Random Forest WITHOUT calibration\n # we use a BaggingClassifier to make 5 predictions, and average\n # because that's what CalibratedClassifierCV do behind the scene,\n # and we want to compare things fairly, i.e. 
be sure that averaging several models\n # is not what explains a performance difference between no calibration, and calibration.\n\n clf = RandomForestClassifier(n_estimators=50, n_jobs=-1)\n\n clfbag = BaggingClassifier(clf, n_estimators=5)\n clfbag.fit_sync(x_train, y_train)\n\n y_preds = clfbag.predict_proba_sync(x_test)\n\n SyncableMetrics.compute_metrics(clfbag, log_loss, y_test, y_preds, x_test, \"\", \"\", eps=1e-15, normalize=True)\n #print(\"loss WITHOUT calibration : \", log_loss(ytest, ypreds, eps=1e-15, normalize=True))\n\n\n # Now, we train and apply a Random Forest WITH calibration\n # In our case, 'isotonic' worked better than default 'sigmoid'\n # This is not always the case. Depending of the case, you have to test the two possibilities\n\n clf = RandomForestClassifier(n_estimators=50, n_jobs=-1)\n calibrated_clf = CalibratedClassifierCV(clf, method='isotonic', cv=5)\n calibrated_clf.fit_sync(x_train, y_train)\n y_preds = calibrated_clf.predict_proba_sync(x_test)\n SyncableMetrics.compute_metrics(calibrated_clf, log_loss, y_test, y_preds, x_test, \"\", \"\",\n eps=1e-15, normalize=True)\n\n #print(\"loss WITH calibration : \", log_loss(ytest, ypreds, eps=1e-15, normalize=True))\n\n print(\" \")\n print(\"Conclusion : in our case, calibration improved performance a lot ! (reduced loss)\")\n syncer_obj.sync()\n return syncer_obj, x_train, x_test\n # We can see that we highly improved performance with calibration (loss is reduced) !\n # Using calibration helped our team a lot to climb the leaderboard.\n # In the future competitions, that's for sure, I will not forget to test this trick !\n\n\nclass TestOttoCalibration(unittest.TestCase):\n \"\"\"\n Tests if workflow above is stored in database correctly.\n \"\"\"\n @classmethod\n def setUpClass(self):\n \"\"\"\n This executes at the beginning of unittest.\n Database is cleared before testing.\n \"\"\"\n os.system(\"cat \" + ROOT_DIR + \"codegen/sqlite/clearDb.sql \"\n \"| sqlite3 \" + ROOT_DIR + \"modeldb_test.db\")\n self.syncer_obj, self.x_train, self.x_test = run_otto_workflow()\n\n def test_project(self):\n \"\"\"\n Tests if project is stored correctly.\n \"\"\"\n projectOverview = self.syncer_obj.client.getProjectOverviews()[0]\n project = projectOverview.project\n self.assertEquals(project.description, 'kaggle-otto-script')\n self.assertEquals(project.author, 'author')\n self.assertEquals(project.name, 'test1')\n self.assertGreaterEqual(project.id, 0)\n self.assertGreaterEqual(projectOverview.numExperimentRuns, 0)\n self.assertGreaterEqual(projectOverview.numExperiments, 0)\n\n def test_dataframe_ancestry(self):\n \"\"\"\n Tests if dataframe ancestry is stored correctly for training dataset.\n \"\"\"\n # Check ancestry for the Xtrain dataframe (data the model is fit on)\n dataframe_id = self.syncer_obj.id_for_object[id(self.x_train)]\n ancestry = self.syncer_obj.client.getDataFrameAncestry(dataframe_id).ancestors\n self.assertEqual(len(ancestry), 4)\n\n df_1 = ancestry[0]\n df_2 = ancestry[1]\n df_3 = ancestry[2]\n df_4 = ancestry[3]\n\n self.assertEqual(df_1.tag, 'training data')\n self.assertEqual(df_2.tag, 'data with dropped id and target columns')\n self.assertEqual(df_3.tag, 'dropped id column')\n self.assertEqual(df_4.tag, 'original otto csv data')\n\n def test_models_derived_from_dataframe(self):\n \"\"\"\n Tests if models are properly derived from dataframe, given id\n \"\"\"\n dataframe_id = self.syncer_obj.id_for_object[id(self.x_train)]\n\n # Two models use the x_train dataset.\n model_ids = 
self.syncer_obj.client.modelsDerivedFromDataFrame(dataframe_id)\n self.assertEqual(len(model_ids), 2)\n model1_spec = self.syncer_obj.client.getModel(model_ids[0]).specification\n model2_spec = self.syncer_obj.client.getModel(model_ids[1]).specification\n self.assertEqual(model1_spec.transformerType, 'BaggingClassifier')\n self.assertEqual(model2_spec.transformerType, 'CalibratedClassifierCV')\n\n def test_metrics(self):\n \"\"\"\n Tests if metrics are stored correctly.\n \"\"\"\n projectOverview = self.syncer_obj.client.getProjectOverviews()[0]\n project = projectOverview.project\n runs_and_exps = self.syncer_obj.client.getRunsAndExperimentsInProject(project.id)\n\n # Get the latest experiment run id\n exp_id = runs_and_exps.experimentRuns[-1].id\n model_responses = self.syncer_obj.client.getExperimentRunDetails(exp_id).modelResponses\n\n # There are three models: LabelEncoder, BaggingClassifier, CalibratedClassifierCV\n self.assertEqual(len(model_responses), 3)\n # The classifier models have metrics\n model2 = model_responses[1]\n model3 = model_responses[2]\n\n self.assertEquals(len(model2.metrics), 1)\n self.assertEquals(len(model3.metrics), 1)\n\n dataframe_id = self.syncer_obj.id_for_object[id(self.x_test)]\n # Calibrated Classifier has lower log loss than Bagging Classfier\n self.assertGreater(model2.metrics['log_loss'][dataframe_id], model3.metrics['log_loss'][dataframe_id])\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Pass in -test flag if you wish'\n ' to run unittests on this workflow')\n parser.add_argument('-test', action='store_true')\n args = parser.parse_args()\n if args.test:\n suite = unittest.TestLoader().loadTestsFromTestCase(TestOttoCalibration)\n unittest.TextTestRunner().run(suite)\n else:\n run_otto_workflow()\n" ]
[ [ "sklearn.ensemble.BaggingClassifier", "sklearn.ensemble.RandomForestClassifier", "pandas.read_csv_sync", "sklearn.calibration.CalibratedClassifierCV", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
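Stripped of the ModelDB *_sync instrumentation, the experiment in this record is a compact scikit-learn recipe. A self-contained re-run on synthetic data (modern sklearn.model_selection in place of the deprecated sklearn.cross_validation import the record pins; the Kaggle Otto CSV is replaced by make_classification, so the exact losses will differ):

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss

X, y = make_classification(n_samples=4000, n_features=20,
                           n_informative=10, random_state=36)
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=36)

# Bagging 5 forests keeps the baseline comparable to
# CalibratedClassifierCV, which also averages several fitted clones.
raw = BaggingClassifier(RandomForestClassifier(n_estimators=50),
                        n_estimators=5).fit(x_train, y_train)
cal = CalibratedClassifierCV(RandomForestClassifier(n_estimators=50),
                             method='isotonic', cv=5).fit(x_train, y_train)

print('log loss, no calibration:  ', log_loss(y_test, raw.predict_proba(x_test)))
print('log loss, with calibration:', log_loss(y_test, cal.predict_proba(x_test)))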
Magic-Bubble/espnet
[ "5fb4077226bd62c71ec39ce5e5510552f1a05de0" ]
[ "espnet/nets/chainer_backend/e2e_asr_transformer.py" ]
[ "# encoding: utf-8\nfrom distutils.util import strtobool\nimport logging\nimport math\nimport numpy as np\n\nimport chainer\nfrom chainer import reporter\n\nimport chainer.functions as F\nfrom chainer.training import extension\n\nfrom espnet.asr import asr_utils\nfrom espnet.nets.asr_interface import ASRInterface\nfrom espnet.nets.chainer_backend.attentions_transformer import MultiHeadAttention\nfrom espnet.nets.chainer_backend.decoders_transformer import Decoder\nfrom espnet.nets.chainer_backend.encoders_transformer import Encoder\nfrom espnet.nets.chainer_backend.nets_utils_transformer import plot_multi_head_attention\nfrom espnet.nets.chainer_backend.nets_utils_transformer import savefig\n\nMAX_DECODER_OUTPUT = 5\nMIN_VALUE = float(np.finfo(np.float32).min)\n\n\nclass VaswaniRule(extension.Extension):\n\n \"\"\"Trainer extension to shift an optimizer attribute magically by Vaswani.\n\n Args:\n attr (str): Name of the attribute to shift.\n rate (float): Rate of the exponential shift. This value is multiplied\n to the attribute at each call.\n init (float): Initial value of the attribute. If it is ``None``, the\n extension extracts the attribute at the first call and uses it as\n the initial value.\n target (float): Target value of the attribute. If the attribute reaches\n this value, the shift stops.\n optimizer (~chainer.Optimizer): Target optimizer to adjust the\n attribute. If it is ``None``, the main optimizer of the updater is\n used.\n\n \"\"\"\n\n def __init__(self, attr, d, warmup_steps=4000,\n init=None, target=None, optimizer=None,\n scale=1.):\n self._attr = attr\n self._d_inv05 = d ** (-0.5) * scale\n self._warmup_steps_inv15 = warmup_steps ** (-1.5)\n self._init = init\n self._target = target\n self._optimizer = optimizer\n self._t = 0\n self._last_value = None\n\n def initialize(self, trainer):\n optimizer = self._get_optimizer(trainer)\n # ensure that _init is set\n if self._init is None:\n self._init = self._d_inv05 * (1. * self._warmup_steps_inv15)\n if self._last_value is not None: # resuming from a snapshot\n self._update_value(optimizer, self._last_value)\n else:\n self._update_value(optimizer, self._init)\n\n def __call__(self, trainer):\n self._t += 1\n optimizer = self._get_optimizer(trainer)\n value = self._d_inv05 * \\\n min(self._t ** (-0.5), self._t * self._warmup_steps_inv15)\n self._update_value(optimizer, value)\n\n def serialize(self, serializer):\n self._t = serializer('_t', self._t)\n self._last_value = serializer('_last_value', self._last_value)\n\n def _get_optimizer(self, trainer):\n return self._optimizer or trainer.updater.get_optimizer('main')\n\n def _update_value(self, optimizer, value):\n setattr(optimizer, self._attr, value)\n self._last_value = value\n\n\nclass E2E(ASRInterface, chainer.Chain):\n @staticmethod\n def add_arguments(parser):\n group = parser.add_argument_group(\"transformer model setting\")\n group.add_argument(\"--transformer-init\", type=str, default=\"pytorch\",\n help='how to initialize transformer parameters')\n group.add_argument(\"--transformer-input-layer\", type=str, default=\"conv2d\",\n choices=[\"conv2d\", \"linear\", \"embed\"],\n help='transformer input layer type')\n group.add_argument('--transformer-attn-dropout-rate', default=None, type=float,\n help='dropout in transformer attention. 
use --dropout-rate if None is set')\n group.add_argument('--transformer-lr', default=10.0, type=float,\n help='Initial value of learning rate')\n group.add_argument('--transformer-warmup-steps', default=25000, type=int,\n help='optimizer warmup steps')\n group.add_argument('--transformer-length-normalized-loss', default=True, type=strtobool,\n help='normalize loss by length')\n return parser\n\n def __init__(self, idim, odim, args, ignore_id=-1, flag_return=True):\n chainer.Chain.__init__(self)\n self.mtlalpha = args.mtlalpha\n assert 0 <= self.mtlalpha <= 1, \"mtlalpha must be [0,1]\"\n if args.transformer_attn_dropout_rate is None:\n self.dropout = args.dropout_rate\n else:\n self.dropout = args.transformer_attn_dropout_rate\n self.n_target_vocab = len(args.char_list)\n self.use_label_smoothing = False\n self.char_list = args.char_list\n self.scale_emb = args.adim ** 0.5\n self.sos = odim - 1\n self.eos = odim - 1\n self.subsample = [0]\n self.verbose = 0 if 'verbose' not in args else args.verbose\n self.ignore_id = ignore_id\n self.reset_parameters(args)\n with self.init_scope():\n self.encoder = Encoder(args.transformer_input_layer, idim, args.elayers, args.adim,\n d_units=args.eunits, h=args.aheads, dropout=self.dropout,\n initialW=self.initialW, initial_bias=self.initialB)\n self.decoder = Decoder(odim, args.dlayers, args.adim, d_units=args.dunits,\n h=args.aheads, dropout=self.dropout,\n initialW=self.initialW, initial_bias=self.initialB)\n if args.mtlalpha > 0.0:\n raise NotImplementedError('Joint CTC/Att training. WIP')\n self.normalize_length = args.transformer_length_normalized_loss\n self.dims = args.adim\n self.odim = odim\n if args.lsm_weight > 0:\n logging.info(\"Use label smoothing\")\n self.use_label_smoothing = True\n self.lsm_weight = args.lsm_weight\n self.flag_return = flag_return\n\n def reset_parameters(self, args):\n type_init = args.transformer_init\n if type_init == 'lecun_uniform':\n logging.info('Using LeCunUniform as Parameter initializer')\n self.initialW = chainer.initializers.LeCunUniform\n elif type_init == 'lecun_normal':\n logging.info('Using LeCunNormal as Parameter initializer')\n self.initialW = chainer.initializers.LeCunNormal\n elif type_init == 'gorot_uniform':\n logging.info('Using GlorotUniform as Parameter initializer')\n self.initialW = chainer.initializers.GlorotUniform\n elif type_init == 'gorot_normal':\n logging.info('Using GlorotNormal as Parameter initializer')\n self.initialW = chainer.initializers.GlorotNormal\n elif type_init == 'he_uniform':\n logging.info('Using HeUniform as Parameter initializer')\n self.initialW = chainer.initializers.HeUniform\n elif type_init == 'he_normal':\n logging.info('Using HeNormal as Parameter initializer')\n self.initialW = chainer.initializers.HeNormal\n elif type_init == 'pytorch':\n logging.info('Using Pytorch initializer')\n self.initialW = chainer.initializers.Uniform\n else:\n logging.info('Using Chainer default as Parameter initializer')\n self.initialW = chainer.initializers.Uniform\n self.initialB = chainer.initializers.Uniform\n\n def make_attention_mask(self, source_block, target_block):\n mask = (target_block[:, None, :] >= 0) * \\\n (source_block[:, :, None] >= 0)\n # (batch, source_length, target_length)\n return mask\n\n def make_history_mask(self, block):\n batch, length = block.shape\n arange = self.xp.arange(length)\n history_mask = (arange[None, ] <= arange[:, None])[None, ]\n history_mask = self.xp.broadcast_to(\n history_mask, (batch, length, length))\n return history_mask\n\n def 
output_and_loss(self, concat_logit_block, t_block, batch, length):\n # Output (all together at once for efficiency)\n rebatch, _ = concat_logit_block.shape\n # Make target\n concat_t_block = t_block.reshape((rebatch)).data\n ignore_mask = (concat_t_block >= 0)\n n_token = ignore_mask.sum()\n normalizer = n_token if self.normalize_length else batch\n if not self.use_label_smoothing:\n loss = F.softmax_cross_entropy(concat_logit_block, concat_t_block)\n loss = loss * n_token / normalizer\n else:\n p_lsm = self.lsm_weight\n p_loss = 1. - p_lsm\n log_prob = F.log_softmax(concat_logit_block)\n broad_ignore_mask = self.xp.broadcast_to(\n ignore_mask[:, None],\n concat_logit_block.shape)\n pre_loss = ignore_mask * \\\n log_prob[self.xp.arange(rebatch), concat_t_block]\n loss = - F.sum(pre_loss) / normalizer\n label_smoothing = broad_ignore_mask * \\\n - 1. / self.n_target_vocab * log_prob\n label_smoothing = F.sum(label_smoothing) / normalizer\n loss = p_loss * loss + p_lsm * label_smoothing\n accuracy = F.accuracy(\n concat_logit_block, concat_t_block, ignore_label=-1)\n\n if self.verbose > 0 and self.char_list is not None:\n with chainer.no_backprop_mode():\n rc_block = F.transpose(concat_logit_block.reshape((batch, length, -1)), (0, 2, 1))\n rc_block.to_cpu()\n t_block.to_cpu()\n for (i, y_hat_), y_true_ in zip(enumerate(rc_block.data), t_block.data):\n if i == MAX_DECODER_OUTPUT:\n break\n idx_hat = np.argmax(y_hat_[:, y_true_ != -1], axis=0)\n idx_true = y_true_[y_true_ != -1]\n eos_true = np.where(y_true_ == self.eos)[0][0]\n seq_hat = [self.char_list[int(idx)] for idx in idx_hat]\n seq_true = [self.char_list[int(idx)] for idx in idx_true[: eos_true]]\n seq_hat = \"\".join(seq_hat).replace('<space>', ' ')\n seq_true = \"\".join(seq_true).replace('<space>', ' ')\n logging.info(\"groundtruth[%d]: \" % i + seq_true)\n logging.info(\"prediction [%d]: \" % i + seq_hat)\n return loss, accuracy\n\n def forward(self, xs, ilens, ys, calculate_attentions=False):\n xp = self.xp\n ilens = np.array([int(x) for x in ilens])\n\n with chainer.no_backprop_mode():\n eos = xp.array([self.eos], 'i')\n sos = xp.array([self.sos], 'i')\n ys_out = [F.concat([y, eos], axis=0) for y in ys]\n ys = [F.concat([sos, y], axis=0) for y in ys]\n # Labels int32 is not supported\n ys = F.pad_sequence(ys, padding=self.eos).data.astype(xp.int64)\n xs = F.pad_sequence(xs, padding=-1)\n if len(xs.shape) == 3:\n xs = F.pad(xs, ((0, 0), (0, 1), (0, 0)),\n 'constant', constant_values=-1)\n else:\n xs = F.pad(xs, ((0, 0), (0, 1)),\n 'constant', constant_values=-1)\n ys_out = F.pad_sequence(ys_out, padding=-1)\n logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))\n # Encode Sources\n # xs: utt x frame x dim\n logging.debug('Init size: ' + str(xs.shape))\n logging.debug('Out size: ' + str(ys.shape))\n # Dims along enconder and decoder: batchsize * length x dims\n xs, x_mask, ilens = self.encoder(xs, ilens)\n logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))\n logging.info(self.__class__.__name__ + ' output lengths: ' + str(xp.array([y.shape[0] for y in ys_out])))\n xy_mask = self.make_attention_mask(ys, xp.array(x_mask))\n yy_mask = self.make_attention_mask(ys, ys)\n yy_mask *= self.make_history_mask(ys)\n batch, length = ys.shape\n ys = self.decoder(ys, yy_mask, xs, xy_mask)\n if calculate_attentions:\n return xs\n loss_att, acc = self.output_and_loss(ys, ys_out, batch, length)\n alpha = self.mtlalpha\n loss_ctc = None\n if alpha == 0:\n self.loss = loss_att\n elif alpha == 1:\n self.loss = 
None # WIP\n else:\n self.loss = alpha * loss_ctc + (1 - alpha) * loss_att\n if not math.isnan(self.loss.data):\n reporter.report({'loss_ctc': loss_ctc}, self)\n reporter.report({'loss_att': loss_att}, self)\n reporter.report({'acc': acc}, self)\n\n logging.info('mtl loss:' + str(self.loss.data))\n reporter.report({'loss': self.loss}, self)\n else:\n logging.warning('loss (=%f) is not correct', self.loss.data)\n if self.flag_return:\n loss_ctc = None\n return self.loss, loss_ctc, loss_att, acc\n else:\n return self.loss\n\n def recognize(self, x_block, recog_args, char_list=None, rnnlm=None):\n '''E2E beam search\n\n :param ndarray x: input acouctic feature (B, T, D) or (T, D)\n :param namespace recog_args: argment namespace contraining options\n :param list char_list: list of characters\n :param torch.nn.Module rnnlm: language model module\n :return: N-best decoding results\n :rtype: list\n '''\n\n xp = self.xp\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n ilens = [x_block.shape[0]]\n batch = len(ilens)\n xs, x_mask, ilens = self.encoder(x_block[None, :, :], ilens)\n logging.info('Encoder size: ' + str(xs.shape))\n if recog_args.ctc_weight > 0.0:\n raise NotImplementedError('use joint ctc/tranformer decoding. WIP')\n if recog_args.beam_size == 1:\n logging.info('Use greedy search implementation')\n ys = xp.full((1, 1), self.sos)\n score = xp.zeros(1)\n maxlen = xs.shape[1] + 1\n for step in range(maxlen):\n yy_mask = self.make_attention_mask(ys, ys)\n yy_mask *= self.make_history_mask(ys)\n xy_mask = self.make_attention_mask(ys, xp.array(x_mask))\n out = self.decoder(ys, yy_mask, xs, xy_mask).reshape(batch, -1, self.odim)\n prob = F.log_softmax(out[:, -1], axis=-1)\n max_prob = prob.array.max(axis=1)\n next_id = F.argmax(prob, axis=1).array.astype(np.int64)\n score += max_prob\n if step == maxlen - 1:\n next_id[0] = self.eos\n ys = F.concat((ys, next_id[None, :]), axis=1).data\n if next_id[0] == self.eos:\n break\n nbest_hyps = [{\"score\": score, \"yseq\": ys[0].tolist()}]\n else:\n raise NotImplementedError('use beam search implementation. 
WIP')\n return nbest_hyps\n\n def calculate_all_attentions(self, xs, ilens, ys):\n '''E2E attention calculation\n\n :param list xs_pad: list of padded input sequences [(T1, idim), (T2, idim), ...]\n :param ndarray ilens: batch of lengths of input sequences (B)\n :param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]\n :return: attention weights (B, Lmax, Tmax)\n :rtype: float ndarray\n '''\n\n with chainer.no_backprop_mode():\n results = self(xs, ilens, ys, calculate_attentions=True) # NOQA\n ret = dict()\n for name, m in self.namedlinks():\n if isinstance(m, MultiHeadAttention):\n var = m.attn\n var.to_cpu()\n _name = name[1:].replace('/', '_')\n ret[_name] = var.data\n return ret\n\n @property\n def attention_plot_class(self):\n return PlotAttentionReport\n\n\nclass PlotAttentionReport(asr_utils.PlotAttentionReport):\n def __call__(self, trainer):\n attn_dict = self.get_attention_weights()\n suffix = \"ep.{.updater.epoch}.png\".format(trainer)\n plot_multi_head_attention(\n self.data, attn_dict, self.outdir, suffix, savefig)\n\n def get_attention_weights(self):\n batch = self.converter([self.transform(self.data)], self.device)\n return self.att_vis_fn(*batch)\n\n def log_attentions(self, logger, step):\n def log_fig(plot, filename):\n import matplotlib.pyplot as plt\n from os.path import basename\n logger.add_figure(basename(filename), plot, step)\n plt.clf()\n\n attn_dict = self.get_attention_weights()\n plot_multi_head_attention(\n self.data, attn_dict, self.outdir, \"\", log_fig)\n" ]
[ [ "matplotlib.pyplot.clf", "numpy.where", "numpy.argmax", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
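The VaswaniRule extension in this record is the "Noam" learning-rate schedule from Attention Is All You Need, wrapped in Chainer trainer-extension plumbing. Reduced to the formula the extension evaluates at each step (the d_model value is illustrative; the warmup default matches the record's --transformer-warmup-steps):

def vaswani_lr(step, d_model=256, warmup_steps=25000, scale=1.0):
    # lr = scale * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
    return scale * d_model ** -0.5 * min(step ** -0.5,
                                         step * warmup_steps ** -1.5)

# Linear ramp during warmup, 1/sqrt(step) decay afterwards; the two
# branches meet exactly at step == warmup_steps.
for t in (1, 12500, 25000, 100000):
    print(t, vaswani_lr(t))

Note that the extension's default _init value is just this function at step 1, which is why it is computed as d ** -0.5 * warmup_steps ** -1.5.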
Tom-Davidson/logLikelihood
[ "c13ed39245c24bc74932e292151f76a084618e17" ]
[ "loglikelihood/__init__.py" ]
[ "\"\"\"\nA library for python to implement the 'Log Likelihood'\nand 'Root Log Likelihood' algorithms.\nYou most likely need to just:\n import loglikelihood\n ...\n loglikelihood.llr(numpy.matrix([[k11, k12], [k21, k22]]))\n\"\"\"\n\nimport math\nimport numpy\n\n\ndef entropymatrix(elements):\n \"\"\"Calculates the unnormalized Shannon entropy for a numpy matrix.\"\"\"\n entropysum = 0\n result = 0.0\n for (x, y), value in numpy.ndenumerate(elements):\n element = elements[x, y]\n if element > 0:\n result += xlogx(element)\n entropysum += element\n return xlogx(entropysum) - result\n\n\ndef entropy2(a, b):\n \"\"\"Calculates the unnormalized Shannon entropy for a 2 numbers.\"\"\"\n return xlogx(a + b) - xlogx(a) - xlogx(b)\n\n\ndef entropy4(a, b, c, d):\n \"\"\"Calculates the unnormalized Shannon entropy for a 4 numbers.\"\"\"\n return xlogx(a + b + c + d) - xlogx(a) - xlogx(b) - xlogx(c) - xlogx(d)\n\n\ndef xlogx(x):\n \"\"\"Helper to calculate `log(x) * x`\"\"\"\n if x == 0.0:\n return 0\n else:\n return x * math.log(x)\n\n\ndef loglikelihoodratio(k11, k12, k21, k22):\n \"\"\"\n Credit to\n http://tdunning.blogspot.com/2008/03/surprise-and-coincidence.html\n for the table and the descriptions.\n \"\"\"\n if k11 > 0 and k12 > 0 and k21 > 0 and k22 > 0:\n # note that we have counts here, not probabilities,\n # and that the entropy is not normalized.\n rowEntropy = entropy2(k11 + k12, k21 + k22)\n columnEntropy = entropy2(k11 + k21, k12 + k22)\n matrixEntropy = entropy4(k11, k12, k21, k22)\n if rowEntropy + columnEntropy < matrixEntropy:\n # round off error\n return 0.0\n else:\n return 2.0 * (rowEntropy + columnEntropy - matrixEntropy)\n else:\n return 0\n\n\ndef rootloglikelihoodratio(k11, k12, k21, k22):\n \"\"\"\n Calculation of the root log likelihood from the 4 matrix values.\n \"\"\"\n llr = loglikelihoodratio(k11, k12, k21, k22)\n sqrt = math.sqrt(llr)\n if k11 / (k11 + k12) < k21 / (k21 + k22):\n sqrt = -sqrt\n return sqrt\n\n\ndef llr(k):\n \"\"\"\n Calculation of the root log likelihood from a 2x2 matrix.\n \"\"\"\n return rootloglikelihoodratio(k[0, 0], k[0, 1], k[1, 0], k[1, 1])\n" ]
[ [ "numpy.ndenumerate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
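The docstring in this record points at llr() as the main entry point, with the sign convention coming from rootloglikelihoodratio. A short usage sketch (assumes the package is importable as loglikelihood, per its __init__.py above):

import numpy
from loglikelihood import llr

# A perfectly independent 2x2 table: row and column entropies sum to
# the matrix entropy, so the statistic is exactly 0.
print(llr(numpy.matrix([[1, 1], [1, 1]])))   # 0.0

# k11 over-represented relative to the margins: the root LLR comes out
# positive because k11/(k11+k12) = 0.5 > k21/(k21+k22) ~= 0.09.
print(llr(numpy.matrix([[10, 10], [10, 100]])))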
kepei1106/cotk
[ "29b25b9469468dfd6d2aba433c2b935831351de7" ]
[ "tests/dataloader/test_single_turn_dialog.py" ]
[ "import copy\n\nimport pytest\nfrom pytest_mock import mocker\nimport random\nimport operator\nfrom cotk.dataloader import SingleTurnDialog, OpenSubtitles\nfrom cotk.metric import MetricBase\n\nfrom version_test_base import base_test_version\n\n\ndef setup_module():\n\timport random\n\trandom.seed(0)\n\timport numpy as np\n\tnp.random.seed(0)\n\nclass TestSingleTurnDialog():\n\tdef base_test_init(self, dl):\n\t\tassert isinstance(dl, SingleTurnDialog)\n\t\tassert isinstance(dl.ext_vocab, list)\n\t\tassert dl.ext_vocab[:4] == [\"<pad>\", \"<unk>\", \"<go>\", \"<eos>\"]\n\t\tassert [dl.pad_id, dl.unk_id, dl.go_id, dl.eos_id] == [0, 1, 2, 3]\n\t\tassert isinstance(dl.key_name, list)\n\t\tassert dl.key_name\n\t\tfor word in dl.key_name:\n\t\t\tassert isinstance(word, str)\n\t\tassert isinstance(dl.all_vocab_list, list)\n\t\tassert dl.vocab_list[:len(dl.ext_vocab)] == dl.ext_vocab\n\t\tassert isinstance(dl.word2id, dict)\n\t\tassert len(dl.word2id) == len(dl.all_vocab_list)\n\t\tassert dl.vocab_size == len(dl.vocab_list)\n\t\tfor i, word in enumerate(dl.all_vocab_list):\n\t\t\tassert isinstance(word, str)\n\t\t\tassert dl.word2id[word] == i\n\t\tassert dl.all_vocab_size == len(dl.all_vocab_list)\n\t\tfor key in dl.key_name:\n\t\t\tpost = dl.data[key]['post']\n\t\t\tresp = dl.data[key]['resp']\n\t\t\tassert len(post) == len(resp)\n\t\t\tassert isinstance(post[0], list)\n\t\t\tassert isinstance(resp[0], list)\n\t\t\tassert post[0][0] == dl.go_id\n\t\t\tassert post[0][-1] == dl.eos_id\n\t\t\tassert resp[0][0] == dl.go_id\n\t\t\tassert resp[0][-1] == dl.eos_id\n\n\t\t# assert the data has valid token\n\t\tassert dl.vocab_size > 4\n\t\t# assert the data has invalid token\n\t\tassert dl.all_vocab_size > dl.vocab_size\n\n\tdef base_test_all_unknown(self, dl):\n\t\t# if invalid_vocab_times very big, there is no invalid words.\n\t\tassert dl.vocab_size == dl.all_vocab_size\n\n\tdef base_test_restart(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.restart(\"unknown set\")\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(ValueError):\n\t\t\t\tdl.restart(key)\n\t\t\trecord_index = copy.copy(dl.index[key])\n\t\t\tdl.restart(key, batch_size=3, shuffle=False)\n\t\t\tassert record_index == dl.index[key]\n\t\t\tassert dl.batch_id[key] == 0\n\t\t\tassert dl.batch_size[key] == 3\n\t\t\trng_state_st = random.getstate()\n\t\t\tdl.restart(key, shuffle=True)\n\t\t\trng_state_ed = random.getstate()\n\t\t\tassert operator.eq(rng_state_st, rng_state_ed)\t\t\t\n\t\t\tassert dl.batch_id[key] == 0\n\t\t\trecord_index = copy.copy(dl.index[key])\n\t\t\tdl.restart(key, shuffle=False)\n\t\t\tassert record_index == dl.index[key]\n\t\t\tassert dl.batch_id[key] == 0\n\n\tdef base_test_get_batch(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.get_batch(\"unknown set\", [0, 1])\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(IndexError):\n\t\t\t\tlength = len(dl.data[key]['post'])\n\t\t\t\tdl.get_batch(key, [length-1, length])\n\t\t\tassert len(dl.index[key]) >= 2\n\t\t\tbatch = dl.get_batch(key, [0, 1])\n\t\t\tassert len(batch[\"post_length\"]) == 2\n\t\t\tassert len(batch[\"resp_length\"]) == 2\n\t\t\tassert batch[\"post\"].shape[0] == 2\n\t\t\tassert batch[\"resp\"].shape[0] == 2\n\n\t\t\tfor sent, length in [(\"post\", \"post_length\"), (\"resp\", \"resp_length\")]:\n\t\t\t\tfor idx in [0, 1]:\n\t\t\t\t\tif batch[length][idx] < batch[sent].shape[1]:\n\t\t\t\t\t\tassert batch[sent][idx][batch[length][idx]-1] == dl.eos_id\n\t\t\t\t\tassert batch[sent][idx][0] == dl.go_id\n\n\t\t# this is true, 
only when there is no unknown words in dl\n\t\t# (Only valid & invalid words)\n\t\tflag = False\n\t\tfor key in dl.key_name:\n\t\t\tlength = len(dl.data[key]['post'])\n\t\t\tfor i in range(length):\n\t\t\t\tbatch = dl.get_batch(key, [i])\n\t\t\t\tassert dl.unk_id not in batch[\"post_allvocabs\"]\n\t\t\t\tassert dl.unk_id not in batch[\"resp_allvocabs\"]\n\t\t\t\tbatch = dl.get_batch(key, [i])\n\t\t\t\tif dl.unk_id in batch[\"post\"] or \\\n\t\t\t\t\tdl.unk_id in batch[\"resp\"]:\n\t\t\t\t\tflag = True\n\t\tassert flag\n\n\tdef base_test_get_next_batch(self, dl):\n\t\twith pytest.raises(ValueError):\n\t\t\tdl.get_next_batch(\"unknown set\")\n\n\t\tfor key in dl.key_name:\n\t\t\twith pytest.raises(RuntimeError):\n\t\t\t\tdl.get_next_batch(key)\n\n\t\t\tdl.restart(key, 7)\n\t\t\tsample_num = 0\n\t\t\twhile True:\n\t\t\t\tbatch = dl.get_next_batch(key, ignore_left_samples=True)\n\t\t\t\tif not batch:\n\t\t\t\t\tbreak\n\t\t\t\tassert batch[\"post\"].shape[0] == 7\n\t\t\t\tsample_num += batch[\"post\"].shape[0]\n\t\t\tassert sample_num + 7 >= len(dl.data[key][\"post\"])\n\n\t\t\tdl.restart(key, 7)\n\t\t\tsample_num = 0\n\t\t\twhile True:\n\t\t\t\tbatch = dl.get_next_batch(key)\n\t\t\t\tassert batch is not None # dummy dataset must not be multiple of 7\n\t\t\t\tif batch[\"post\"].shape[0] == 7:\n\t\t\t\t\tsample_num += 7\n\t\t\t\telse:\n\t\t\t\t\tsample_num += batch[\"post\"].shape[0]\n\t\t\t\t\tbatch = dl.get_next_batch(key)\n\t\t\t\t\tassert not batch\n\t\t\t\t\tbreak\n\t\t\tassert sample_num == len(dl.data[key][\"post\"])\n\n\tdef base_test_convert(self, dl):\n\t\tsent_id = [0, 1, 2]\n\t\tsent = [\"<pad>\", \"<unk>\", \"<go>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id)\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent)\n\n\t\tsent = [\"<unk>\", \"<go>\", \"<pad>\", \"<unkownword>\", \"<pad>\", \"<go>\"]\n\t\tsent_id = [1, 2, 0, 1, 0, 2]\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent)\n\t\tassert sent_id == dl.convert_tokens_to_ids(sent, invalid_vocab=True)\n\n\t\tsent = [dl.all_vocab_list[dl.vocab_size]]\n\t\tassert [1] == dl.convert_tokens_to_ids(sent)\n\t\tassert [dl.vocab_size] == dl.convert_tokens_to_ids(sent, invalid_vocab=True)\n\n\n\t\tsent_id = [0, 1, 2, 0, 0, 3, 1, 0, 0]\n\t\tsent = [\"<pad>\", \"<unk>\", \"<go>\", \"<pad>\", \"<pad>\", \"<eos>\", \"<unk>\", \"<pad>\", \"<pad>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tsent = [\"<pad>\", \"<unk>\", \"<go>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [0, 0, 3]\n\t\tsent = [\"<pad>\", \"<pad>\", \"<eos>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [3, 3, 3]\n\t\tsent = [\"<eos>\", \"<eos>\", \"<eos>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\t\tsent_id = [0, 0, 0]\n\t\tsent = [\"<pad>\", \"<pad>\", \"<pad>\"]\n\t\tassert sent == dl.convert_ids_to_tokens(sent_id, trim=False)\n\t\tassert not dl.convert_ids_to_tokens(sent_id)\n\n\tdef base_test_teacher_forcing_metric(self, dl):\n\t\tassert isinstance(dl.get_teacher_forcing_metric(), MetricBase)\n\n\tdef base_test_teacher_inference_metric(self, dl):\n\t\tassert isinstance(dl.get_inference_metric(), MetricBase)\n\n\tdef base_test_multi_runs(self, dl_list):\n\t\tassert all(x.vocab_list == dl_list[0].vocab_list for x in dl_list)\n\[email protected]\ndef load_opensubtitles():\n\tdef _load_opensubtitles(invalid_vocab_times=0):\n\t\treturn 
OpenSubtitles(\"./tests/dataloader/dummy_opensubtitles#OpenSubtitles\", invalid_vocab_times=invalid_vocab_times)\n\treturn _load_opensubtitles\n\nclass TestOpenSubtitles(TestSingleTurnDialog):\n\n\[email protected]()\n\tdef test_init(self, load_opensubtitles):\n\t\tsuper().base_test_init(load_opensubtitles())\n\t\tsuper().base_test_all_unknown(load_opensubtitles(10000))\n\n\tdef test_restart(self, load_opensubtitles):\n\t\tsuper().base_test_restart(load_opensubtitles())\n\n\[email protected](depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_get_batch(self, load_opensubtitles):\n\t\tsuper().base_test_get_batch(load_opensubtitles())\n\n\[email protected](depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_get_next_batch(self, load_opensubtitles):\n\t\tsuper().base_test_get_next_batch(load_opensubtitles())\n\n\[email protected](depends=[\"TestOpenSubtitles::test_init\"])\n\tdef test_convert(self, load_opensubtitles):\n\t\tsuper().base_test_convert(load_opensubtitles())\n\n\tdef test_teacher_forcing_metric(self, load_opensubtitles):\n\t\tsuper().base_test_teacher_forcing_metric(load_opensubtitles())\n\n\tdef test_teacher_inference_metric(self, load_opensubtitles):\n\t\tsuper().base_test_teacher_inference_metric(load_opensubtitles())\n\n\tdef test_init_multi_runs(self, load_opensubtitles):\n\t\tsuper().base_test_multi_runs([load_opensubtitles() for i in range(3)])\n\n\nbase_test_version(OpenSubtitles)\n" ]
[ [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jimgoo/auto-sklearn
[ "a263efb49f7b7f597963bc1e787105ea7615ea75" ]
[ "setup.py" ]
[ "# -*- encoding: utf-8 -*-\nimport setuptools\nfrom setuptools.extension import Extension\nimport numpy as np\nfrom Cython.Build import cythonize\n\nextensions = cythonize(\n [Extension('autosklearn.data.competition_c_functions',\n sources=['autosklearn/data/competition_c_functions.pyx'],\n language='c',\n include_dirs=[np.get_include()])\n ])\n\nrequirements = [\n \"setuptools\",\n \"nose\",\n \"six\",\n \"Cython\",\n \"numpy>=1.9.0\",\n \"scipy>=0.14.1\",\n \"scikit-learn>=0.18.1,<0.19.0\",\n \"lockfile\",\n \"joblib\",\n \"psutil\",\n \"pyyaml\",\n \"liac-arff\",\n \"pandas\",\n \"ConfigSpace>=0.3.3,<0.4\",\n \"pynisher>=0.4,<0.5\",\n \"pyrfr>=0.6.1,<0.7\",\n \"smac>=0.6.0,<0.7\"\n]\n\nwith open(\"autosklearn/__version__.py\") as fh:\n version = fh.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\nsetuptools.setup(\n name='auto-sklearn',\n description='Automated machine learning.',\n version=version,\n ext_modules=extensions,\n packages=setuptools.find_packages(exclude=['test']),\n install_requires=requirements,\n test_suite='nose.collector',\n include_package_data=True,\n author='Matthias Feurer',\n author_email='[email protected]',\n license='BSD',\n platforms=['Linux'],\n classifiers=[],\n url='https://automl.github.io/auto-sklearn')\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michaelcukier/Poker-Hand-Tracker
[ "9adae42fab9f640e6939ba06bd588ab1a2feb90f" ]
[ "db_api/plots/player_range.py" ]
[ "from GLOBAL_VARIABLES import FOLDER_PLOT_DUMP, PLAYER_NAME\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport pandas as pd\nimport seaborn as sns\nfrom utils.run_sql_command import run_sql_command\n\n\nhand_matrix = [\n ['AA', 'AKs', 'AQs', 'AJs', 'ATs', 'A9s', 'A8s', 'A7s', 'A6s', 'A5s', 'A4s', 'A3s', 'A2s'],\n ['KAo', 'KK', 'KQs', 'KJs', 'KTs', 'K9s', 'K8s', 'K7s', 'K6s', 'K5s', 'K4s', 'K3s', 'K2s'],\n ['QAo', 'QKo', 'QQ', 'QJs', 'QTs', 'Q9s', 'Q8s', 'Q7s', 'Q6s', 'Q5s', 'Q4s', 'Q3s', 'Q2s'],\n ['JAo', 'JKo', 'JQo', 'JJ', 'JTs', 'J9s', 'J8s', 'J7s', 'J6s', 'J5s', 'J4s', 'J3s', 'J2s'],\n ['TAo', 'TKo', 'TQo', 'TJo', 'TT', 'T9s', 'T8s', 'T7s', 'T6s', 'T5s', 'T4s', 'T3s', 'T2s'],\n ['9Ao', '9Ko', '9Qo', '9Jo', '9To', '99', '98s', '97s', '96s', '95s', '94s', '93s', '92s'],\n ['8Ao', '8Ko', '8Qo', '8Jo', '8To', '89o', '88', '87s', '86s', '85s', '84s', '83s', '82s'],\n ['7Ao', '7Ko', '7Qo', '7Jo', '7To', '79o', '78o', '77', '76s', '75s', '74s', '73s', '72s'],\n ['6Ao', '6Ko', '6Qo', '6Jo', '6To', '69o', '68o', '67o', '66', '65s', '64s', '63s', '62s'],\n ['5Ao', '5Ko', '5Qo', '5Jo', '5To', '59o', '58o', '57o', '56o', '55', '54s', '53s', '52s'],\n ['4Ao', '4Ko', '4Qo', '4Jo', '4To', '49o', '48o', '47o', '46o', '45o', '44', '43s', '42s'],\n ['3Ao', '3Ko', '3Qo', '3Jo', '3To', '39o', '38o', '37o', '36o', '35o', '34o', '33', '32s'],\n ['2Ao', '2Ko', '2Qo', '2Jo', '2To', '29o', '28o', '27o', '26o', '25o', '24o', '23o', '22']]\n\n\ndef create_config_for_sql_query(position_at_table: str, player_name: str) -> dict:\n\n config = {}\n\n # position_at_table = blinds | early | middle | late\n\n if position_at_table == 'early':\n config['sqlColumsToRetrieve'] = ['UTG', 'UTGp1']\n\n if position_at_table == 'blinds':\n config['sqlColumsToRetrieve'] = ['SB', 'BB']\n\n if position_at_table == 'middle':\n config['sqlColumsToRetrieve'] = ['MP', 'MPp1', 'MPp2']\n\n if position_at_table == 'late':\n config['sqlColumsToRetrieve'] = ['CO', 'BTN']\n\n select_query = []\n for col in config['sqlColumsToRetrieve']:\n select_query.append(col + '_cards')\n config['select_query'] = select_query\n\n where_query = []\n for col in config['sqlColumsToRetrieve']:\n where_query.append(col + \"_player_name='{0}'\".format(player_name))\n config['where_query'] = where_query\n\n config['player_name'] = player_name\n\n return config\n\n\ndef check_if_folded_pre(hand_txt):\n for line in hand_txt.split('\\n'):\n if (PLAYER_NAME in line) and ('folded on the Pre-Flop' in line):\n return True\n return False\n\n\ndef get_sql_data(config: dict, database_file_path: str) -> list:\n\n where_query = config['where_query']\n select_query = config['select_query']\n\n data = []\n for whereQ, selectQ in zip(where_query, select_query):\n query = '''\n SELECT\n {0}, hand_txt\n FROM\n hands\n WHERE\n {1}\n AND\n {0} != 'None'\n '''.format(selectQ, whereQ)\n\n retrieval = run_sql_command(\n query=query,\n unique_items=False,\n database_file_path=database_file_path)\n\n if config['player_name'] == PLAYER_NAME:\n remove_folded_preflop = []\n for cards, hand_txt in retrieval:\n if not check_if_folded_pre(hand_txt):\n remove_folded_preflop.append(cards)\n data.extend(remove_folded_preflop)\n else:\n for cards, hand_txt in retrieval:\n data.append(cards)\n\n return data\n\n\ndef transform_cards(cards: list) -> list:\n # eg ['As Ad', 'Tc 9s', ...] 
--> ['AA', 'T9o', ...]\n cards_without_suit = []\n for c in cards:\n if c[0] == c[3]: # eg As Ad --> AA\n cleaned_h = c[0] + c[3]\n elif c[1] == c[4]: # eg: Td 9d --> T9s\n cleaned_h = c[0] + c[3] + 's'\n elif c[1] != c[4]: # eg: 8c 9s --> 89o\n cleaned_h = c[0] + c[3] + 'o'\n cards_without_suit.append(cleaned_h)\n\n return cards_without_suit\n\n\ndef transform_to_frequencies(observed_hands: list) -> dict:\n # eg ['6Ko', 'K6s', 'K6s', 'AA'] --> {'AA': 0.25, ... , K6s': 0.5, ..., '6Ko': 0.25, '22': 0}\n\n all_possible_hands = {'AA': 0, 'AKs': 0, 'AQs': 0, 'AJs': 0, 'ATs': 0, 'A9s': 0, 'A8s': 0, 'A7s': 0, 'A6s': 0, 'A5s': 0, 'A4s': 0, 'A3s': 0, 'A2s': 0, 'KAo': 0, 'KK': 0, 'KQs': 0, 'KJs': 0, 'KTs': 0, 'K9s': 0, 'K8s': 0, 'K7s': 0, 'K6s': 0, 'K5s': 0, 'K4s': 0, 'K3s': 0, 'K2s': 0, 'QAo': 0, 'QKo': 0, 'QQ': 0, 'QJs': 0, 'QTs': 0, 'Q9s': 0, 'Q8s': 0, 'Q7s': 0, 'Q6s': 0, 'Q5s': 0, 'Q4s': 0, 'Q3s': 0, 'Q2s': 0, 'JAo': 0, 'JKo': 0, 'JQo': 0, 'JJ': 0, 'JTs': 0, 'J9s': 0, 'J8s': 0, 'J7s': 0,\n 'J6s': 0, 'J5s': 0, 'J4s': 0, 'J3s': 0, 'J2s': 0, 'TAo': 0, 'TKo': 0, 'TQo': 0, 'TJo': 0, 'TT': 0, 'T9s': 0, 'T8s': 0, 'T7s': 0, 'T6s': 0, 'T5s': 0, 'T4s': 0, 'T3s': 0, 'T2s': 0, '9Ao': 0, '9Ko': 0, '9Qo': 0, '9Jo': 0, '9To': 0, '99': 0, '98s': 0, '97s': 0, '96s': 0, '95s': 0, '94s': 0, '93s': 0, '92s': 0, '8Ao': 0, '8Ko': 0, '8Qo': 0, '8Jo': 0, '8To': 0, '89o': 0, '88': 0, '87s': 0, '86s': 0, '85s': 0, '84s': 0, '83s': 0, '82s': 0, '7Ao': 0, '7Ko': 0, '7Qo': 0,\n '7Jo': 0, '7To': 0, '79o': 0, '78o': 0, '77': 0, '76s': 0, '75s': 0, '74s': 0, '73s': 0, '72s': 0, '6Ao': 0, '6Ko': 0, '6Qo': 0, '6Jo': 0, '6To': 0, '69o': 0, '68o': 0, '67o': 0, '66': 0, '65s': 0, '64s': 0, '63s': 0, '62s': 0, '5Ao': 0, '5Ko': 0, '5Qo': 0, '5Jo': 0, '5To': 0, '59o': 0, '58o': 0, '57o': 0, '56o': 0, '55': 0, '54s': 0, '53s': 0, '52s': 0, '4Ao': 0, '4Ko': 0, '4Qo': 0, '4Jo': 0, '4To': 0, '49o': 0, '48o': 0, '47o': 0, '46o': 0, '45o': 0, '44': 0,\n '43s': 0, '42s': 0, '3Ao': 0, '3Ko': 0, '3Qo': 0, '3Jo': 0, '3To': 0, '39o': 0, '38o': 0, '37o': 0, '36o': 0, '35o': 0, '34o': 0, '33': 0, '32s': 0, '2Ao': 0, '2Ko': 0, '2Qo': 0, '2Jo': 0, '2To': 0, '29o': 0, '28o': 0, '27o': 0, '26o': 0, '25o': 0, '24o': 0, '23o': 0, '22': 0}\n\n # step 1 --> ['6Ko', 'K6s', '6Ks', 'AA'] = ['6Ko', 'K6s', 'K6s', 'AA']\n hands_1 = []\n for c in observed_hands:\n permutHand = c[1] + c[0] + c[2:] # K6s --> 6Ks\n if permutHand in hands_1:\n hands_1.append(permutHand)\n else:\n hands_1.append(c)\n\n # step 2 --> ['6Ko', '6Ks', '6Ks', 'AA'] = {'6Ko': 0.25, '6Ks': 0.5, 'AA': 0.25}\n hands_2 = {}\n for c in hands_1:\n hands_2[c] = hands_1.count(c) / len(hands_1)\n\n # step 3 -- set the values to all_possible_hands\n for hand, freq in hands_2.items():\n permutHand = hand[1] + hand[0] + hand[2:]\n if permutHand in all_possible_hands:\n all_possible_hands[permutHand] = freq\n elif hand in all_possible_hands:\n all_possible_hands[hand] = freq\n\n return all_possible_hands\n\n\ndef create_freq_matrix(freq: dict) -> list:\n # creates a 13*13 matrix with each value set to the frequencies\n matrix = [[0 for i in range(13)] for j in range(13)]\n\n for i in freq:\n for m in range(13):\n b = 0\n for n in range(13):\n if hand_matrix[m][n] == i:\n matrix[m][n] = freq[i]\n b = 1\n break\n if b:\n break\n\n return matrix\n\n\ndef create_heatmaps(freq_matrix: list, pos: str, player_name: str, nb_of_samples: int) -> str:\n rcParams['figure.figsize'] = 15, 7\n df = pd.DataFrame(freq_matrix)\n fig = plt.figure()\n gs = fig.add_gridspec(ncols=1, nrows=1)\n ax1 = fig.add_subplot(gs[0, 
0])\n\n sns.heatmap(\n df,\n cbar=False,\n cmap=\"YlGnBu\",\n square=True,\n annot=hand_matrix,\n annot_kws={\"size\": 7},\n fmt='',\n xticklabels=False,\n yticklabels=False,\n ax=ax1)\n\n def generatePositionsNames(_pos: str) -> str:\n if _pos == 'early':\n return 'UTG | UTGp1'\n if _pos == 'blinds':\n return 'SB | BB'\n if _pos == 'middle':\n return 'MP | MPp1 | MPp2'\n if _pos == 'late':\n return 'CO | BTN'\n\n ax1.set_title('{2} position range of \"{0}\"\\n\\n{1} samples\\n\\n{3}'.format(\n player_name,\n nb_of_samples,\n pos,\n generatePositionsNames(pos)))\n\n plt.savefig(FOLDER_PLOT_DUMP + player_name + '_' + pos + '.png', bbox_inches='tight', pad_inches=0.2, dpi=300)\n return FOLDER_PLOT_DUMP + player_name + '_' + pos + '.png'" ]
[ [ "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]