Columns:
  _id               stringlengths: 2-7
  title             stringlengths: 1-88
  partition         stringclasses: 3 values
  text              stringlengths: 31-13.1k
  language          stringclasses: 1 value
  meta_information  dict
q500
load_parcellation_coords
train
def load_parcellation_coords(parcellation_name):
    """
    Loads coordinates of included parcellations.

    Parameters
    ----------
    parcellation_name : str
        options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

    Returns
    -------
    parc : array
        parcellation coordinates
    """
python
{ "resource": "" }
q501
create_traj_ranges
train
def create_traj_ranges(start, stop, N):
    """
    Fills in the trajectory range.

    # Adapted from https://stackoverflow.com/a/40624614
    """
    steps = (1.0 / (N - 1)) * (stop - start)
python
{ "resource": "" }
q502
get_dimord
train
def get_dimord(measure, calc=None, community=None):
    """
    Get the dimension order of a network measure.

    Parameters
    ----------
    measure : str
        Name of function in teneto.networkmeasures.
    calc : str, default=None
        Calc parameter for the function.
    community : bool, default=None
        If not None, the measure is assumed to be a community measure.

    Returns
    -------
    dimord : str
        Dimension order. So "node,node,time" would define the dimensions
        of the network measure.
    """
    if not calc:
        calc = ''
    else:
        calc = '_' + calc
    if not community:
        community = ''
    else:
        community = 'community'
    if 'community' in calc and 'community' in community:
        community = ''
    if calc == 'community_avg' or calc == 'community_pairs':
        community = ''
    dimord_dict = {
        'temporal_closeness_centrality': 'node',
        'temporal_degree_centrality': 'node',
        'temporal_degree_centrality_avg': 'node',
        'temporal_degree_centrality_time': 'node,time',
        'temporal_efficiency': 'global',
        'temporal_efficiency_global': 'global',
        'temporal_efficiency_node': 'node',
        'temporal_efficiency_to': 'node',
        'sid_global': 'global,time',
        'community_pairs': 'community,community,time',
        'community_avg': 'community,time',
        'sid': 'community,community,time',
python
{ "resource": "" }
q503
create_supraadjacency_matrix
train
def create_supraadjacency_matrix(tnet, intersliceweight=1):
    """
    Returns a supraadjacency matrix from a temporal network structure.

    Parameters
    ----------
    tnet : TemporalNetwork
        Temporal network (any network type)
    intersliceweight : int
        Weight that links the same node from adjacent time-points

    Returns
    -------
    supranet : dataframe
        Supraadjacency matrix
    """
    newnetwork = tnet.network.copy()
python
{ "resource": "" }
q504
tnet_to_nx
train
def tnet_to_nx(df, t=None):
    """
    Creates undirected networkx object.
    """
    if t is not None:
        df = get_network_when(df, t=t)
    if 'weight' in df.columns:
        nxobj = nx.from_pandas_edgelist(
python
{ "resource": "" }
q505
temporal_louvain
train
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100,
                     negativeedge='ignore', randomseed=None,
                     consensus_threshold=0.5, temporal_consensus=True, njobs=1):
    r"""
    Louvain clustering for a temporal network.

    Parameters
    ----------
    tnet : array, dict, TemporalNetwork
        Input network
    resolution : int
        Resolution of Louvain clustering ($\gamma$)
    intersliceweight : int
        Interslice weight of multilayer clustering ($\omega$). Must be positive.
    n_iter : int
        Number of iterations to run Louvain for
    randomseed : int
        Set for reproducibility
    negativeedge : str
        If there are negative edges, what should be done with them.
        Options: 'ignore' (i.e. set to 0). More options to be added.
    consensus_threshold : float (default: 0.5)
        When creating the consensus matrix that averages over the iterations,
        keep values where the consensus reaches this proportion.

    Returns
    -------
    communities : array (node,time)
        node,time array of community assignment
    """
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    # Divide resolution by the number of timepoints
    resolution = resolution / tnet.T
    supranet = create_supraadjacency_matrix(
        tnet, intersliceweight=intersliceweight)
    if negativeedge == 'ignore':
        supranet = supranet[supranet['weight'] > 0]
    nxsupra = tnet_to_nx(supranet)
    np.random.seed(randomseed)
    while True:
        comtmp = []
        with ProcessPoolExecutor(max_workers=njobs) as executor:
python
{ "resource": "" }
q506
make_temporal_consensus
train
def make_temporal_consensus(com_membership):
    r"""
    Matches community labels across time-points.

    Jaccard matching is done in a greedy fashion, matching the largest
    community at t with the communities at t-1.

    Parameters
    ----------
    com_membership : array
        Shape should be node, time.

    Returns
    -------
    D : array
        temporal consensus matrix using Jaccard distance
    """
    com_membership = np.array(com_membership)
    # relabel the first time-point's community indices to be consecutive from 0
    com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
    # loop over all timepoints, get Jaccard distance in a greedy manner for
    # the largest community relative to the time-point before
    for t in range(1, com_membership.shape[1]):
        ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
        ct = ct[np.argsort(counts_t)[::-1]]
        c1back = np.unique(com_membership[:, t-1])
        new_index = np.zeros(com_membership.shape[0])
        for n in ct:
            if len(c1back) > 0:
                d = np.ones(int(c1back.max())+1)
                for m in c1back:
python
{ "resource": "" }
q507
flexibility
train
def flexibility(communities):
    """
    Amount a node changes community.

    Parameters
    ----------
    communities : array
        Community array of shape (node,time)

    Returns
    -------
    flex : array
        Flexibility of each node.

    Notes
    -----
    Flexibility calculates the number of times a node switches its community
    label during a time series. It is normalized by the number of possible
    changes which could occur. It is important to make sure that the
    different community labels across time points are not arbitrary.

    References
    ----------
    Bassett,
python
{ "resource": "" }
q508
load_tabular_file
train
def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
    """
    Given a file name, loads it as a pandas data frame.

    Parameters
    ----------
    fname : str
        file name and path. Must be tsv.
    return_meta : bool (default False)
        if True, also return the meta information from the accompanying
        json sidecar file.
    header : bool (default True)
        if there is a header in the tsv file, true will use first row
        in file.
    index_col : bool (default True)
        if there is an index column in the tsv file, true will use the
        first column in the file.

    Returns
    -------
    df : pandas
        The loaded file
    info : pandas, if return_meta=True
        Meta information in json file (if specified)
python
{ "resource": "" }
q509
get_sidecar
train
def get_sidecar(fname, allowedfileformats='default'):
    """
    Loads sidecar or creates one.
    """
    if allowedfileformats == 'default':
        allowedfileformats = ['.tsv', '.nii.gz']
    for f in allowedfileformats:
        fname = fname.split(f)[0]
    fname += '.json'
    if os.path.exists(fname):
        with open(fname) as fs:
python
{ "resource": "" }
q510
process_exclusion_criteria
train
def process_exclusion_criteria(exclusion_criteria):
    """
    Parses an exclusion criteria string to get the function and threshold.

    Parameters
    ----------
    exclusion_criteria : list
        list of strings where each string is of the format
        [relation][threshold]. E.g. '<0.5' or '>=1'

    Returns
    -------
    relfun : list
        list of numpy functions for the exclusion criteria
    threshold : list
        list of floats for threshold for each relfun
    """
    relfun = []
    threshold = []
    for ec in exclusion_criteria:
        if ec[0:2] == '>=':
            relfun.append(np.greater_equal)
python
{ "resource": "" }
q511
reachability_latency
train
def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
    """
    Reachability latency. This is the r-th longest temporal path.

    Parameters
    ----------
    tnet : array or dict
        Can either be a network (graphlet or contact), binary undirected
        only. Alternatively, can be a paths dictionary (output of
        teneto.networkmeasure.shortest_temporal_path)
    rratio : float (default: 1)
        reachability ratio that the latency is calculated in relation to.
        Value must be over 0 and up to 1.
        1 (default) - all nodes must be reached.
        Other values (e.g. .5) imply that 50% of nodes are reached.
        This is rounded to the nearest integer. E.g. if there are 6 nodes
        [1,2,3,4,5,6], it will be node 4 (due to rounding upwards).
    calc : str
        What to calculate. Alternatives: 'global' entire network;
        'nodes': for each node.

    Returns
    -------
    reach_lat : array
        Reachability latency

    Notes
    -----
    Reachability latency calculates the time it takes for the paths.
    """
    if tnet is not None and paths is not None:
        raise ValueError('Only network or path input allowed.')
    if tnet is None and paths is None:
        raise ValueError('No input.')
    # if shortest paths are not calculated, calculate them
    if tnet is not None:
        paths = shortest_temporal_path(tnet)
python
{ "resource": "" }
q512
recruitment
train
def recruitment(temporalcommunities, staticcommunities):
    """
    Calculates recruitment coefficient for each node. Recruitment
    coefficient is the average probability of nodes from the same static
    community being in the same temporal community at other time-points or
    during different tasks.

    Parameters
    ----------
    temporalcommunities : array
        temporal communities vector (node,time)
    staticcommunities : array
        Static communities vector for each node

    Returns
    -------
    Rcoeff : array
        recruitment coefficient for each node

    References
    ----------
    Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
    Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015
    May;18(5):744-51.

    Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S.
    Bassett. A Functional Cartography of Cognitive Systems. PLoS Comput
    Biol. 2015 Dec 2;11(12):e1004533.
python
{ "resource": "" }
q513
integration
train
def integration(temporalcommunities, staticcommunities):
    """
    Calculates the integration coefficient for each node. Measures the
    average probability that a node is in the same community as nodes from
    other systems.

    Parameters
    ----------
    temporalcommunities : array
        temporal communities vector (node,time)
    staticcommunities : array
        Static communities vector for each node

    Returns
    -------
    Icoeff : array
        integration coefficient for each node

    References
    ----------
    Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
    Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015
    May;18(5):744-51.

    Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S.
    Bassett. A Functional Cartography of Cognitive Systems. PLoS Comput
    Biol. 2015 Dec 2;11(12):e1004533.
    """
    # make sure the static and
python
{ "resource": "" }
q514
intercontacttimes
train
def intercontacttimes(tnet):
    """
    Calculates the intercontacttimes of each edge in a network.

    Parameters
    ----------
    tnet : array, dict
        Temporal network (graphlet or contact). Nettype: 'bu', 'bd'

    Returns
    -------
    contacts : dict
        Intercontact times as numpy array in dictionary.
        contacts['intercontacttimes']

    Notes
    -----
    The inter-contact times are calculated as the time between consecutive
    "active" edges (where active means that the value is 1 in a binary
    network).

    Examples
    --------
    This example goes through how inter-contact times are calculated.

    >>> import teneto
    >>> import numpy as np

    Make a network with 2 nodes and 10 time-points with 4 edges spaced out.

    >>> G = np.zeros([2,2,10])
    >>> edge_on = [1,3,5,9]
    >>> G[0,1,edge_on] = 1

    The network visualised below makes it clear what the inter-contact
    times are between the two nodes:

    .. plot::

        import teneto
        import numpy as np
        import matplotlib.pyplot as plt
        G = np.zeros([2,2,10])
        edge_on = [1,3,5,9]
        G[0,1,edge_on] = 1
        fig, ax = plt.subplots(1, figsize=(4,2))
        teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
        ax.set_ylim(-0.25, 1.25)
        plt.tight_layout()
        fig.show()

    Calculating the inter-contact times of these edges gives 2, 2, 4
    between nodes 0 and 1.

    >>> ict = teneto.networkmeasures.intercontacttimes(G)

    The function returns a dictionary with the icts in the key:
    intercontacttimes. This is of the size NxN. So the icts between nodes
    0 and 1 are found by:

    >>> ict['intercontacttimes'][0,1]
    array([2, 2, 4])

    """
    # Process input
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    if tnet.nettype[0] == 'w':
        print('WARNING: assuming connections to be binary when
python
{ "resource": "" }
q515
gen_report
train
def gen_report(report, sdir='./', report_name='report.html'):
    """
    Generates report of derivation and postprocess steps in teneto.derive.
    """
    # Create report directory
    if not os.path.exists(sdir):
        os.makedirs(sdir)
    # Add a slash to file directory if not included to avoid
    # DirNameFileName instead of DirName/FileName being created
    if sdir[-1] != '/':
        sdir += '/'
    report_html = '<html><body>'
    if 'method' in report.keys():
        report_html += "<h1>Method: " + report['method'] + "</h1><p>"
        for i in report[report['method']]:
            if i == 'taper_window':
                fig, ax = plt.subplots(1)
                ax.plot(report[report['method']]['taper_window'],
                        report[report['method']]['taper'])
                ax.set_xlabel('Window (time). 0 in middle of window.')
                ax.set_title(
                    'Taper from ' + report[report['method']]['distribution']
                    + ' distribution (PDF).')
                fig.savefig(sdir + 'taper.png')
                report_html += "<img src='./taper.png' width=500>" + "<p>"
            else:
                report_html += "- <b>" + i + "</b>: " + \
                    str(report[report['method']][i]) + "<br>"
    if 'postprocess' in report.keys():
        report_html += "<p><h2>Postprocessing:</h2><p>"
        report_html += "<b>Pipeline: </b>"
        for i in report['postprocess']:
            report_html += " " + i + ","
        for i in report['postprocess']:
            report_html += "<p><h3>" + i + "</h3><p>"
            for j in report[i]:
                if j == 'lambda':
                    report_html += "- <b>" + j + "</b>: " + "<br>"
                    lambda_val = np.array(report['boxcox']['lambda'])
                    fig, ax = plt.subplots(1)
python
{ "resource": "" }
q516
TenetoBIDS.add_history
train
def add_history(self, fname, fargs, init=0):
    """
    Adds a processing step to TenetoBIDS.history.
python
{ "resource": "" }
q517
TenetoBIDS.derive_temporalnetwork
train
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None,
                           njobs=1, confound_corr_report=True):
    """
    Derive time-varying connectivity on the selected files.

    Parameters
    ----------
    params : dict
        See teneto.timeseries.derive_temporalnetwork for the structure of
        the param dictionary. Assumes dimord is time,node (output of other
        TenetoBIDS functions).
    update_pipeline : bool
        If true, the object updates the selected files with those derived
        here.
    njobs : int
        How many parallel jobs to run.
    confound_corr_report : bool
        If true, histograms and summary statistics of TVC and confounds are
        plotted in a report directory.
    tag : str
        any additional tag that will be placed in the saved file name.
        Will be placed as 'desc-[tag]'.

    Returns
    -------
    dfc : files
        saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    files = self.get_selected_files(quiet=1)
    confound_files = self.get_selected_files(quiet=1, pipeline='confound')
    if confound_files:
        confounds_exist = True
    else:
python
{ "resource": "" }
q518
TenetoBIDS.make_functional_connectivity
train
def make_functional_connectivity(self, njobs=None, returngroup=False,
                                 file_hdr=None, file_idx=None):
    """
    Makes connectivity matrix for each of the subjects.

    Parameters
    ----------
    returngroup : bool, default=False
        If true, returns the group average connectivity matrix.
    njobs : int
        How many parallel jobs to run.
    file_idx : bool
        Default False, true if to ignore index column in loaded file.
    file_hdr : bool
        Default False, true if to ignore header row in loaded file.

    Returns
python
{ "resource": "" }
q519
TenetoBIDS._save_namepaths_bids_derivatives
train
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
    """
    Creates output directory and output name.

    Parameters
    ----------
    f : str
        input files, includes the file bids_suffix
    tag : str
        what should be added to f in the output file.
    save_directory : str
        additional directory that the output file should go in
    suffix : str
        add new suffix to data

    Returns
    -------
    save_name : str
        previous filename with new tag
    save_dir : str
        directory where it will be saved
    base_dir : str
        base directory (i.e. derivatives/teneto/func[/anythingelse/])
    """
    file_name = f.split('/')[-1].split('.')[0]
    if tag != '':
        tag = '_' + tag
    if suffix:
        file_name, _ = drop_bids_suffix(file_name)
        save_name = file_name + tag
        save_name += '_' + suffix
    else:
        save_name = file_name + tag
    paths_post_pipeline = f.split(self.pipeline)
    if self.pipeline_subdir:
        paths_post_pipeline = paths_post_pipeline[1].split(
            self.pipeline_subdir)[0]
    else:
        paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
    base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
python
{ "resource": "" }
q520
TenetoBIDS.get_tags
train
def get_tags(self, tag, quiet=1):
    """
    Returns which tag alternatives can be identified in the BIDS
    derivatives structure.
    """
    if not self.pipeline:
        print('Please set pipeline first.')
        self.get_pipeline_alternatives(quiet)
    else:
        if tag == 'sub':
            datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
            tag_alternatives = [
                f.split('sub-')[1] for f in os.listdir(datapath)
                if os.path.isdir(datapath + f) and 'sub-' in f]
        elif tag == 'ses':
            tag_alternatives = []
            for sub in self.bids_tags['sub']:
                tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
                    self.BIDS_dir + '/derivatives/' + self.pipeline
                    + '/' + 'sub-' + sub) if 'ses' in f]
            tag_alternatives = set(tag_alternatives)
        else:
            files = self.get_selected_files(quiet=1)
python
{ "resource": "" }
q521
TenetoBIDS.set_exclusion_file
train
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
    """
    Excludes subjects given a certain exclusion criteria.

    Parameters
    ----------
    confound : str or list
        string or list of confound name(s) from confound files
    exclusion_criteria : str or list
        for each confound, an exclusion_criteria should be expressed as a
        string. It starts with >, <, >= or <=, then the numerical threshold.
        E.g. '>0.2' entails that every subject whose aggregated confound
        value is greater than 0.2 will be rejected.
    confound_stat : str or list
        Can be median, mean, std. How the confound data is aggregated (so
        if there is a measure per time-point, this is averaged over all
        time points. If multiple confounds are specified, this has to be a
        list.)

    Returns
    -------
    Calls TenetoBIDS.set_bad_files with the files meeting the exclusion
    criteria.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if isinstance(confound, str):
        confound = [confound]
    if isinstance(exclusion_criteria, str):
        exclusion_criteria = [exclusion_criteria]
    if isinstance(confound_stat, str):
        confound_stat = [confound_stat]
    if len(exclusion_criteria) != len(confound):
        raise ValueError(
            'Same number of confound names and exclusion criteria must be given')
    if len(confound_stat) != len(confound):
        raise ValueError(
            'Same number of confound names and confound stats must be given')
    relex, crit = process_exclusion_criteria(exclusion_criteria)
    files = sorted(self.get_selected_files(quiet=1))
    confound_files = sorted(
        self.get_selected_files(quiet=1, pipeline='confound'))
    files, confound_files = confound_matching(files, confound_files)
    bad_files = []
    bs = 0
    foundconfound = []
    foundreason = []
    for s, cfile in enumerate(confound_files):
        df = load_tabular_file(cfile, index_col=None)
        found_bad_subject = False
        for i, _ in enumerate(confound):
            if confound_stat[i] == 'median':
                if relex[i](df[confound[i]].median(), crit[i]):
python
{ "resource": "" }
q522
TenetoBIDS.make_parcellation
train
def make_parcellation(self, parcellation, parc_type=None, parc_params=None,
                      network='defaults', update_pipeline=True,
                      removeconfounds=False, tag=None, njobs=None,
                      clean_params=None, yeonetworkn=None):
    """
    Reduces the data from voxel to parcellation space. Files get saved in a
    teneto folder in the derivatives with a roi tag at the end.

    Parameters
    ----------
    parcellation : str
        specify which parcellation that you would like to use. For MNI:
        'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
    parc_type : str
        can be 'sphere' or 'region'. If nothing is specified, the default
        for that parcellation will be used.
    parc_params : dict
        **kwargs for nilearn functions
    network : str
        if "defaults", it selects static parcellation, _if available_
        (other options will be made available soon).
    removeconfounds : bool
        if true, regresses out confounds that are specified in
        self.set_confounds with linear regression.
    update_pipeline : bool
        TenetoBIDS gets updated with the parcellated files being selected.
    tag : str or list
        any additional tag that must be in file name. After the tag there
        must either be an underscore or period (following bids).
    clean_params : dict
        **kwargs for nilearn function nilearn.signal.clean
    yeonetworkn : int (7 or 17)
        Only relevant when parcellation is schaeffer2018. Use 7 or 17
        template networks.
    njobs : int
        number of processes to run. Overrides TenetoBIDS.njobs

    Returns
    -------
    Files are saved in
    ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
    To load these files call TenetoBIDS.load_parcellation.

    NOTE
    ----
    These functions make use of nilearn. Please cite nilearn if used in a
    publication.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    parc_name = parcellation.split('_')[0].lower()
    # Check confounds have been specified
    if not self.confounds and removeconfounds:
        raise ValueError(
            'Specified confounds are not found. Make sure that you have '
            'run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
    # Check the pipeline is updated if confounds are to be removed
    if update_pipeline == False and removeconfounds:
        raise ValueError(
            'Pipeline must be updated in order to remove confounds within '
            'this function.')
python
{ "resource": "" }
q523
TenetoBIDS.communitydetection
train
def communitydetection(self, community_detection_params,
                       community_type='temporal', tag=None, file_hdr=False,
                       file_idx=False, njobs=None):
    """
    Calls temporal_louvain_with_consensus on connectivity data.

    Parameters
    ----------
    community_detection_params : dict
        kwargs for detection. See
        teneto.communitydetection.louvain.temporal_louvain_with_consensus
    community_type : str
        Either 'temporal' or 'static'. If temporal, communities are
        detected per time-point.
    file_idx : bool (default false)
        if true, index column present in data and this will be ignored
    file_hdr : bool (default false)
        if true, header row present in data and this will be ignored
    njobs : int
        number of processes to run. Overrides TenetoBIDS.njobs

    Note
    ----
    All non-positive edges are made to zero.
python
{ "resource": "" }
q524
TenetoBIDS.removeconfounds
train
def removeconfounds(self, confounds=None, clean_params=None, transpose=None,
                    njobs=None, update_pipeline=True, overwrite=True, tag=None):
    """
    Removes specified confounds using nilearn.signal.clean.

    Parameters
    ----------
    confounds : list
        List of confounds. Can be prespecified in set_confounds
    clean_params : dict
        Dictionary of kwargs to pass to nilearn.signal.clean
    transpose : bool (default False)
        Default removeconfounds works on time,node dimensions. Pass
        transpose=True to transpose pre and post confound removal.
    njobs : int
        Number of jobs. Otherwise tenetoBIDS.njobs is run.
    update_pipeline : bool
        update pipeline with '_clean' tag for new files created
    overwrite : bool
    tag : str

    Returns
    -------
    Saves all TenetoBIDS.get_selected_files with confounds removed, with
    _rmconfounds at the end.

    Note
    ----
    There may be some issues regarding loading non-cleaned data through the
    TenetoBIDS functions instead of the cleaned data. This depends on when
    you clean the data.
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if not self.confounds and not confounds:
        raise ValueError(
            'Specified confounds are not found. Make sure that you have run '
            'self.set_confounds([\'Confound1\',\'Confound2\']) first or pass confounds as input to
python
{ "resource": "" }
q525
TenetoBIDS.networkmeasures
train
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None):
    """
    Calculates a network measure.

    For available functions see: teneto.networkmeasures

    Parameters
    ----------
    measure : str or list
        Name of function(s) from teneto.networkmeasures that will be run.
    measure_params : dict or list of dictionaries
        Containing kwargs for the argument in measure. See note regarding
        the communities key.
    tag : str
        Add additional tag to saved filenames.

    Note
    ----
    In measure_params, the communities key can equal 'template', 'static',
    or 'temporal'. These options must be precalculated. If template, Teneto
    tries to load the default for the parcellation. If static, loads static
    communities in
    BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy.
    If temporal, loads temporal communities in
    BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy

    Returns
    -------
    Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/
    Load the measure with tenetoBIDS.load_network_measure
    """
    if not njobs:
        njobs = self.njobs
    self.add_history(inspect.stack()[0][3], locals(), 1)
    # measure can be string or list
    if isinstance(measure, str):
        measure = [measure]
python
{ "resource": "" }
q526
TenetoBIDS.set_bids_suffix
train
def set_bids_suffix(self, bids_suffix):
    """
    The last analysis step is the final tag that is present in files.
    """
python
{ "resource": "" }
q527
TenetoBIDS.set_pipeline
train
def set_pipeline(self, pipeline):
    """
    Specify the pipeline. See get_pipeline_alternatives to see what is
    available. Input should be a string.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
python
{ "resource": "" }
q528
TenetoBIDS.load_frompickle
train
def load_frompickle(cls, fname, reload_object=False):
    """
    Load a saved (pickled) instance of TenetoBIDS.

    Parameters
    ----------
    fname : str
        path to pickle object (output of TenetoBIDS.save_aspickle)
    reload_object : bool (default False)
        reloads object by calling teneto.TenetoBIDS (some information lost,
        for development)

    Returns
    -------
    self : TenetoBIDS instance
    """
    if fname[-4:] != '.pkl':
        fname += '.pkl'
    with open(fname, 'rb') as f:
        tnet = pickle.load(f)
    if
python
{ "resource": "" }
q529
temporal_closeness_centrality
train
def temporal_closeness_centrality(tnet=None, paths=None):
    '''
    Returns temporal closeness centrality per node.

    Input should be *either* tnet or paths.

    Parameters
    ----------
    tnet : array or dict
        Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
    paths : pandas dataframe
        Output of TenetoBIDS.networkmeasure.shortest_temporal_paths

    Returns
    -------
    close : array
        temporal closeness centrality (nodal measure)
    '''
    if tnet is not None and paths is not None:
        raise ValueError('Only network or path input allowed.')
    if tnet is None and paths is None:
        raise ValueError('No input.')
    # if shortest paths are not calculated, calculate them
python
{ "resource": "" }
q530
flatten
train
def flatten(d, reducer='tuple', inverse=False):
    """Flatten dict-like object.

    Parameters
    ----------
    d : dict-like object
        The dict that will be flattened.
    reducer : {'tuple', 'path', function} (default: 'tuple')
        The key joining method. If a function is given, the function will
        be used to reduce.
        'tuple': The resulting key will be a tuple of the original keys.
        'path': Use ``os.path.join`` to join keys.
    inverse : bool (default: False)
        Whether you want to invert the resulting key and value.

    Returns
    -------
    flat_dict : dict
    """
    if isinstance(reducer, str):
        reducer = REDUCER_DICT[reducer]
    flat_dict = {}

    def _flatten(d, parent=None):
python
{ "resource": "" }
q531
nested_set_dict
train
def nested_set_dict(d, keys, value):
    """Set a value to a sequence of nested keys.

    Parameters
    ----------
    d : Mapping
    keys : Sequence[str]
    value : Any
    """
    assert keys
    key = keys[0]
    if len(keys) == 1:
        if key in
python
{ "resource": "" }
q532
unflatten
train
def unflatten(d, splitter='tuple', inverse=False):
    """Unflatten dict-like object.

    Parameters
    ----------
    d : dict-like object
        The dict that will be unflattened.
    splitter : {'tuple', 'path', function} (default: 'tuple')
        The key splitting method. If a function is given, the function will
        be used to split.
        'tuple': Use each element in the tuple key as the key of the
        unflattened dict.
        'path': Use ``pathlib.Path.parts`` to split keys.
    inverse : bool (default: False)
        Whether you want to invert the key and value before unflattening.

    Returns
    -------
    unflattened_dict : dict
    """
python
{ "resource": "" }
q533
plot_track
train
def plot_track(track, filename=None, beat_resolution=None, downbeats=None,
               preset='default', cmap='Blues', xtick='auto', ytick='octave',
               xticklabel=True, yticklabel='auto', tick_loc=None,
               tick_direction='in', label='both', grid='both',
               grid_linestyle=':', grid_linewidth=.5):
    """
    Plot the pianoroll or save a plot of the pianoroll.

    Parameters
    ----------
    filename : str
        The filename to which the plot is saved. If None, save nothing.
    beat_resolution : int
        The number of time steps used to represent a beat. Required and
        only effective when `xtick` is 'beat'.
    downbeats : list
        An array that indicates whether the time step contains a downbeat
        (i.e., the first time step of a bar).
    preset : {'default', 'plain', 'frame'}
        A string that indicates the preset theme to use.
        - In 'default' preset, the ticks, grid and labels are on.
        - In 'frame' preset, the ticks and grid are both off.
        - In 'plain' preset, the x- and y-axis are both off.
    cmap : `matplotlib.colors.Colormap`
        The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults
        to 'Blues'. Only effective when `pianoroll` is 2D.
    xtick : {'auto', 'beat', 'step', 'off'}
        A string that indicates what to use as ticks along the x-axis. If
        'auto' is given, automatically set to 'beat' if `beat_resolution`
        is also given and set to 'step' otherwise. Defaults to 'auto'.
    ytick : {'octave', 'pitch', 'off'}
        A string that indicates what to use as ticks along the y-axis.
        Defaults to 'octave'.
    xticklabel : bool
        Whether to add tick labels along the x-axis. Only effective when
        `xtick` is not 'off'.
    yticklabel : {'auto', 'name', 'number', 'off'}
        If 'name', use octave name and pitch name (key name when `is_drum`
        is True) as tick labels along the y-axis. If 'number', use pitch
        number. If 'auto', set to 'name' when `ytick` is 'octave' and
        'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only
        effective when `ytick` is not 'off'.
    tick_loc : tuple or list
        The locations to put the ticks. Available elements are 'bottom',
        'top', 'left' and 'right'. Defaults to ('bottom', 'left').
    tick_direction : {'in', 'out', 'inout'}
        A string that indicates the direction of the ticks. Defaults to
        'in'. Only effective when one of `xtick` and `ytick` is on.
    label : {'x', 'y', 'both', 'off'}
        A string that indicates whether to add labels to the x-axis and
        y-axis. Defaults
python
{ "resource": "" }
q534
Multitrack.append_track
train
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False,
                 name='unknown'):
    """
    Append a multitrack.Track instance to the track list or create a new
    multitrack.Track object and append it to the track list.

    Parameters
    ----------
    track : pypianoroll.Track
        A :class:`pypianoroll.Track` instance to be appended to the track
        list.
    pianoroll : np.ndarray, shape=(n_time_steps, 128)
        A pianoroll matrix. The first and second dimension represent time
        and pitch, respectively. Available datatypes are bool, int and
        float. Only effective when `track` is None.
    program : int
        A program number according to General MIDI specification [1].
        Available values are 0 to 127. Defaults to 0 (Acoustic Grand
        Piano). Only effective when `track` is None.
    is_drum : bool
        A boolean that indicates whether it is a percussion track. Defaults
        to False. Only effective when `track` is None.
    name : str
python
{ "resource": "" }
q535
Multitrack.check_validity
train
def check_validity(self):
    """
    Raise an error if any invalid attribute found.

    Raises
    ------
    TypeError
        If an attribute has an invalid type.
    ValueError
        If an attribute has an invalid value (of the correct type).
    """
    # tracks
    for track in self.tracks:
        if not isinstance(track, Track):
            raise TypeError("`tracks` must be a list of "
                            "`pypianoroll.Track` instances.")
        track.check_validity()
    # tempo
    if not isinstance(self.tempo, np.ndarray):
        raise TypeError("`tempo` must be a numpy array.")
    elif not np.issubdtype(self.tempo.dtype, np.number):
        raise TypeError("Data type of `tempo` must be a subdtype of "
                        "np.number.")
    elif self.tempo.ndim != 1:
        raise ValueError("`tempo` must be a 1D numpy array.")
    if np.any(self.tempo <= 0.0):
        raise ValueError("`tempo` should contain only positive numbers.")
    # downbeat
    if self.downbeat is not None:
        if not isinstance(self.downbeat, np.ndarray):
python
{ "resource": "" }
q536
Multitrack.clip
train
def clip(self, lower=0, upper=127):
    """
    Clip the pianorolls of all tracks by the given lower and upper bounds.

    Parameters
    ----------
    lower : int or float
        The lower bound to clip the pianorolls. Defaults to 0.
    upper :
python
{ "resource": "" }
q537
Multitrack.get_downbeat_steps
train
def get_downbeat_steps(self):
    """
    Return the indices of time steps that contain downbeats.

    Returns
    -------
    downbeat_steps : list
        The indices of time steps that contain downbeats.
    """
python
{ "resource": "" }
q538
Multitrack.get_empty_tracks
train
def get_empty_tracks(self):
    """
    Return the indices of tracks with empty pianorolls.

    Returns
    -------
    empty_track_indices : list
        The indices of tracks with empty pianorolls.
    """
python
{ "resource": "" }
q539
Multitrack.get_merged_pianoroll
train
def get_merged_pianoroll(self, mode='sum'):
    """
    Return the merged pianoroll.

    Parameters
    ----------
    mode : {'sum', 'max', 'any'}
        A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
        - In 'sum' mode, the merged pianoroll is the sum of all the
          pianorolls. Note that for binarized pianorolls, integer summation
          is performed.
        - In 'max' mode, for each pixel, the maximum value among all the
python
{ "resource": "" }
q540
Multitrack.merge_tracks
train
def merge_tracks(self, track_indices=None, mode='sum', program=0,
                 is_drum=False, name='merged', remove_merged=False):
    """
    Merge pianorolls of the tracks specified by `track_indices`. The merged
    track will have program number as given by `program` and drum indicator
    as given by `is_drum`. The merged track will be appended at the end of
    the track list.

    Parameters
    ----------
    track_indices : list
        The indices of tracks to be merged. Defaults to all the tracks.
    mode : {'sum', 'max', 'any'}
        A string that indicates the merging strategy to apply along the
        track axis. Defaults to 'sum'.
        - In 'sum' mode, the merged pianoroll is the sum of the collected
          pianorolls. Note that for binarized pianorolls, integer summation
          is performed.
        - In 'max' mode, for each pixel, the maximum value among the
          collected pianorolls is assigned to the merged pianoroll.
        - In 'any' mode, the value of a pixel in the merged pianoroll is
          True if any of the collected pianorolls has nonzero value at that
          pixel; False if all the collected pianorolls are inactive
          (zero-valued) at that pixel.
    program : int
        A program number
python
{ "resource": "" }
q541
Multitrack.pad_to_same
train
def pad_to_same(self):
    """
    Pad shorter pianorolls with zeros at the end along the time axis to
    make the resulting pianoroll lengths the same as the maximum pianoroll
    length among all the tracks.
    """
    max_length = self.get_max_length()
    for track in
python
{ "resource": "" }
q542
Multitrack.remove_tracks
train
def remove_tracks(self, track_indices):
    """
    Remove tracks specified by `track_indices`.

    Parameters
    ----------
    track_indices : list
        The indices of the tracks to be removed.
    """
    if isinstance(track_indices, int):
python
{ "resource": "" }
q543
Multitrack.transpose
train
def transpose(self, semitone):
    """
    Transpose the pianorolls of all tracks by a number of semitones, where
    positive values are for higher key, while negative values are for
    lower key. The drum tracks are ignored.

    Parameters
    ----------
python
{ "resource": "" }
q544
Multitrack.trim_trailing_silence
train
def trim_trailing_silence(self):
    """
    Trim the trailing silences of the pianorolls of all tracks. Trailing
    silences are considered globally.
    """
    active_length = self.get_active_length()
python
{ "resource": "" }
q545
Multitrack.write
train
def write(self, filename):
    """
    Write the multitrack pianoroll to a MIDI file.

    Parameters
    ----------
    filename : str
        The name of the MIDI file to which the multitrack pianoroll is
        written.
    """
python
{ "resource": "" }
q546
check_pianoroll
train
def check_pianoroll(arr):
    """
    Return True if the array is a standard piano-roll matrix. Otherwise,
    return False. Raise TypeError if the input object is not a numpy array.
    """
    if not isinstance(arr, np.ndarray):
        raise TypeError("`arr` must be of np.ndarray type")
    if not (np.issubdtype(arr.dtype, np.bool_)
python
{ "resource": "" }
q547
pad
train
def pad(obj, pad_length):
    """
    Return a copy of the object with piano-roll padded with zeros at the
    end along the time axis.

    Parameters
python
{ "resource": "" }
q548
pad_to_multiple
train
def pad_to_multiple(obj, factor):
    """
    Return a copy of the object with its piano-roll padded with zeros at
    the end along the time axis with the minimal length that makes the
    length of the resulting piano-roll a multiple of `factor`.

    Parameters
    ----------
    factor : int
        The value which the
python
{ "resource": "" }
q549
pad_to_same
train
def pad_to_same(obj):
    """
    Return a copy of the object with shorter piano-rolls padded with zeros
    at the end along the time axis to the length of the piano-roll with
python
{ "resource": "" }
q550
save
train
def save(filepath, obj, compressed=True):
    """
    Save the object to a .npz file.

    Parameters
    ----------
    filepath : str
        The path to save the file.
python
{ "resource": "" }
q551
write
train
def write(obj, filepath):
    """
    Write the object to a MIDI file.

    Parameters
    ----------
    filepath : str
python
{ "resource": "" }
q552
_validate_pianoroll
train
def _validate_pianoroll(pianoroll):
    """Raise an error if the input array is not a standard pianoroll."""
    if not isinstance(pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be of np.ndarray type.")
    if not (np.issubdtype(pianoroll.dtype, np.bool_)
            or np.issubdtype(pianoroll.dtype,
python
{ "resource": "" }
q553
_to_chroma
train
def _to_chroma(pianoroll):
    """Return the unnormalized chroma features of a pianoroll."""
    _validate_pianoroll(pianoroll)
    reshaped = pianoroll[:, :120].reshape(-1, 12,
python
{ "resource": "" }
q554
empty_beat_rate
train
def empty_beat_rate(pianoroll, beat_resolution):
    """
    Return the ratio of empty beats to the total number of beats in a
    pianoroll.
    """
    _validate_pianoroll(pianoroll)
python
{ "resource": "" }
q555
n_pitche_classes_used
train
def n_pitche_classes_used(pianoroll):
    """Return the number of unique pitch classes used in a pianoroll."""
    _validate_pianoroll(pianoroll)
python
{ "resource": "" }
q556
polyphonic_rate
train
def polyphonic_rate(pianoroll, threshold=2):
    """
    Return the ratio of the number of time steps where the number of
    pitches being played is larger than `threshold` to the total number of
    time steps in a pianoroll.
    """
python
{ "resource": "" }
q557
in_scale_rate
train
def in_scale_rate(pianoroll, key=3, kind='major'):
    """
    Return the ratio of the number of nonzero entries that lie in a
    specific scale to the total number of nonzero entries in a pianoroll.
    Defaults to C major scale.
    """
    if not isinstance(key, int):
        raise TypeError("`key` must be an integer.")
    if key > 11 or key < 0:
        raise ValueError("`key` must be an integer between 0 and 11.")
    if kind not in ('major', 'minor'):
        raise ValueError("`kind` must be one of 'major' or 'minor'.")
    _validate_pianoroll(pianoroll)

    def _scale_mask(key, kind):
        """Return a scale mask for the given key. Defaults to C major scale."""
        if kind == 'major':
python
{ "resource": "" }
q558
Track.assign_constant
train
def assign_constant(self, value, dtype=None):
    """
    Assign a constant value to all nonzeros in the pianoroll. If the
    pianoroll is not binarized, its data type will be preserved. If the
    pianoroll is binarized, it will be casted to the type of `value`.

    Arguments
    ---------
    value : int or float
        The constant value to be assigned to all the nonzeros in the
        pianoroll.
    """
    if not self.is_binarized():
        self.pianoroll[self.pianoroll.nonzero()] = value
python
{ "resource": "" }
q559
Track.binarize
train
def binarize(self, threshold=0):
    """
    Binarize the pianoroll.

    Parameters
    ----------
    threshold : int
python
{ "resource": "" }
q560
Track.check_validity
train
def check_validity(self):
    """Raise an error if any invalid attribute found."""
    # pianoroll
    if not isinstance(self.pianoroll, np.ndarray):
        raise TypeError("`pianoroll` must be a numpy array.")
    if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
            or np.issubdtype(self.pianoroll.dtype, np.number)):
        raise TypeError("The data type of `pianoroll` must be np.bool_ or "
                        "a subdtype of np.number.")
    if self.pianoroll.ndim != 2:
        raise ValueError("`pianoroll` must have exactly two dimensions.")
    if self.pianoroll.shape[1] != 128:
        raise ValueError("The length of the second axis of `pianoroll` "
python
{ "resource": "" }
q561
Track.clip
train
def clip(self, lower=0, upper=127):
    """
    Clip the pianoroll by the given lower and upper bounds.

    Parameters
    ----------
    lower : int or float
        The lower bound to clip the pianoroll. Defaults to 0.
    upper : int
python
{ "resource": "" }
q562
Track.is_binarized
train
def is_binarized(self):
    """
    Return True if the pianoroll is already binarized. Otherwise, return
    False.

    Returns
    -------
    is_binarized : bool
        True if the pianoroll is already binarized; otherwise,
python
{ "resource": "" }
q563
Track.pad
train
def pad(self, pad_length):
    """
    Pad the pianoroll with zeros at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        The length to pad with zeros along the time axis.
python
{ "resource": "" }
q564
Track.pad_to_multiple
train
def pad_to_multiple(self, factor):
    """
    Pad the pianoroll with zeros at the end along the time axis with the
    minimum length that makes the resulting pianoroll length a multiple of
    `factor`.

    Parameters
    ----------
    factor : int
        The value which the length of the resulting pianoroll will be a
        multiple of.
    """
python
{ "resource": "" }
q565
Track.transpose
train
def transpose(self, semitone):
    """
    Transpose the pianoroll by a number of semitones, where positive
    values are for higher key, while negative values are for lower key.

    Parameters
    ----------
    semitone : int
        The number of semitones to transpose
python
{ "resource": "" }
q566
Track.trim_trailing_silence
train
def trim_trailing_silence(self):
    """Trim the trailing silence of the pianoroll."""
python
{ "resource": "" }
q567
plot_conv_weights
train
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a specific layer.

    Only really makes sense with convolutional layers.

    Parameters
    ----------
    layer : lasagne.layers.Layer
    """
    W = layer.W.get_value()
    shape = W.shape
    nrows = np.ceil(np.sqrt(shape[0])).astype(int)
    ncols = nrows
    for feature_map in range(shape[1]):
        figs, axes = plt.subplots(nrows, ncols, figsize=figsize, squeeze=False)
        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
python
{ "resource": "" }
q568
plot_conv_activity
train
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the activities of a specific layer.

    Only really makes sense with layers that work on 2D data (2D
    convolutional layers, 2D pooling layers ...).

    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
        Only takes one sample at a time, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))
    activity = get_activity(x)
python
{ "resource": "" }
q569
occlusion_heatmap
train
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.

    In this function, a square part of the image is occluded (i.e. set to
    0) and then the net is tested for its propensity to predict the correct
    label. One should expect that this propensity shrinks if critical parts
    of the image are occluded. If not, this indicates overfitting.

    Depending on the depth of the net and the size of the image, this
    function may take a while to finish, since one prediction for each
    pixel of the image is made.

    Currently, all color channels are occluded at the same time. Also, this
    does not really work if images are randomly distorted by the batch
    iterator.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes sense
        with image data.
    target : int
        The true value of the image. If the net makes several predictions,
        say 10 classes, this indicates which one to look at.
    square_length
python
{ "resource": "" }
q570
plot_occlusion
train
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the net
    to classify the image correctly.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes sense
        with image data.
    target : list or numpy.array of ints
        The true values of the image. If the net makes several predictions,
        say 10 classes, this indicates which one to look at. If more than
        one sample is passed to X, each of them needs its own target.
    square_length : int (default=7)
python
{ "resource": "" }
q571
multiclass_logloss
train
def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.

    :param actual: Array containing the actual target classes
    :param predicted: Matrix with class predictions, one probability per class
    """
    # Convert 'actual' to a binary array if it's not already:
    if len(actual.shape) == 1:
        actual2 = np.zeros((actual.shape[0], predicted.shape[1]))
python
{ "resource": "" }
q572
objective
train
def objective(layers, loss_function, target, aggregate=aggregate,
              deterministic=False, l1=0, l2=0, get_output_kw=None):
    """
    Default implementation of the NeuralNet objective.

    :param layers: The underlying layers of the NeuralNetwork
    :param loss_function: The callable loss function to use
    :param target: the expected output
    :param aggregate: the aggregation function to use
    :param deterministic: Whether or not to get a deterministic output
    :param l1: Optional l1 regularization parameter
    :param l2: Optional l2 regularization parameter
    :param get_output_kw: optional kwargs to pass to
        :meth:`NeuralNetwork.get_output`
    :return: The total calculated loss
python
{ "resource": "" }
q573
NeuralNet.initialize
train
def initialize(self):
    """Initializes the network.

    Checks that no extra kwargs were passed to the constructor, and
    compiles the train, predict, and evaluation functions. Subsequent calls
    to this function will return without any action.
    """
    if getattr(self, '_initialized', False):
        return
    out = getattr(self, '_output_layers', None)
    if out is None:
        self.initialize_layers()
    self._check_for_unused_kwargs()
    iter_funcs
python
{ "resource": "" }
q574
NeuralNet.fit
train
def fit(self, X, y, epochs=None):
    """
    Runs the training loop for a given number of epochs.

    :param X: The input data
    :param y: The ground truth
    :param epochs: The number of epochs to run, if `None` runs for the
        network's :attr:`max_epochs`
    :return: This instance
    """
    if self.check_input:
        X, y = self._check_good_input(X, y)
    if self.use_label_encoder:
        self.enc_ = LabelEncoder()
python
{ "resource": "" }
q575
NeuralNet.partial_fit
train
def partial_fit(self, X, y, classes=None):
    """
    Runs a single epoch
python
{ "resource": "" }
q576
ByDateQuerySetMixin.narrow
train
def narrow(self, **kwargs):
    """Filter by date range, up to and including the given dates."""
    from_date = kwargs.pop('from_date', None)
    to_date = kwargs.pop('to_date', None)
    date = kwargs.pop('date', None)
    qs = self
    if from_date:
        qs =
python
{ "resource": "" }
q577
set_environment_variables
train
def set_environment_variables(json_file_path):
    """
    Read and set environment variables from a flat json file.

    Bear in mind that env vars set this way and later on read using the
    `os.getenv` function will be strings since, after all, env vars are
    just that - plain strings.

    Json file example:
    ```
    {
        "FOO": "bar",
        "BAZ": true
    }
    ```

    :param json_file_path: path to flat json file
python
{ "resource": "" }
q578
millis_interval
train
def millis_interval(start, end):
    """start and end are datetime instances"""
    diff = end - start
    millis = diff.days * 24 * 60 * 60 * 1000
python
{ "resource": "" }
q579
Script._import_lua
train
def _import_lua(load_dependencies=True):
    """
    Import lua and dependencies.

    :param load_dependencies: should Lua library dependencies be loaded?
    :raises: RuntimeError if Lua is not available
python
{ "resource": "" }
q580
Script._import_lua_dependencies
train
def _import_lua_dependencies(lua, lua_globals):
    """
    Imports lua dependencies that are supported by redis lua scripts.

    The current implementation is fragile to the target platform and lua
    version and may be disabled if these imports are not needed.

    Included:
    - cjson lib.
python
{ "resource": "" }
q581
MockRedis.lock
train
def lock(self, key, timeout=0, sleep=0):
    """Emulate lock."""
python
{ "resource": "" }
q582
MockRedis.keys
train
def keys(self, pattern='*'):
    """Emulate keys."""
    # making sure the pattern is unicode/str.
    try:
        pattern = pattern.decode('utf-8')
        # This throws an AttributeError in python 3, or an
        # UnicodeEncodeError in python 2
python
{ "resource": "" }
q583
MockRedis.delete
train
def delete(self, *keys):
    """Emulate delete."""
    key_counter = 0
    for key in map(self._encode, keys):
        if key in self.redis:
            del self.redis[key]
python
{ "resource": "" }
q584
MockRedis.do_expire
train
def do_expire(self):
    """
    Expire objects assuming now == time.
    """
    # Deep copy to avoid RuntimeError: dictionary changed size during iteration
    _timeouts = deepcopy(self.timeouts)
    for key, value in _timeouts.items():
        if value - self.clock.now() < timedelta(0):
python
{ "resource": "" }
q585
MockRedis.set
train
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
    """
    Set the ``value`` for the ``key`` in the context of the provided kwargs.

    As per the behavior of the redis-py lib:
    If nx and xx are both set, the function does nothing and None is
    returned.
    If px and ex are both set, the preference is given to px.
    If the key is not set for some reason, the lib function returns None.
    """
    key = self._encode(key)
    value = self._encode(value)

    if nx and xx:
        return None
    mode = "nx" if nx else "xx" if xx else None
    if self._should_set(key, mode):
        expire = None
        if ex is not None:
            expire = ex if isinstance(ex, timedelta) else timedelta(seconds=ex)
        if px is not None:
python
{ "resource": "" }
q586
MockRedis._should_set
train
def _should_set(self, key, mode):
    """
    Determine if it is okay to set a key.

    If the mode is None, returns True; otherwise, returns True or False
    based on the value of ``key`` and the ``mode`` (nx | xx).
    """
    if mode is None or mode not in ["nx", "xx"]:
        return True

    if mode == "nx":
        if key in self.redis:
            # nx means set only if key is absent
python
{ "resource": "" }
q587
MockRedis.setex
train
def setex(self, name, time, value):
    """
    Set the value of ``name`` to ``value`` that expires in ``time``
    seconds. ``time`` can be represented by an integer or a Python
    timedelta object.
    """
    if not self.strict:
python
{ "resource": "" }
q588
MockRedis.psetex
train
def psetex(self, key, time, value):
    """
    Set the value of ``key`` to ``value`` that expires in ``time``
    milliseconds. ``time`` can be represented by an integer or a
python
{ "resource": "" }
q589
MockRedis.setnx
train
def setnx(self, key, value):
    """Set the value of ``key``
python
{ "resource": "" }
q590
MockRedis.setbit
train
def setbit(self, key, offset, value):
    """
    Set the bit at ``offset`` in ``key`` to ``value``.
    """
    key = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(key, offset)

    if index >= len(bits):
        bits.extend(b"\x00" * (index + 1 - len(bits)))
python
{ "resource": "" }
q591
MockRedis.getbit
train
def getbit(self, key, offset):
    """
    Returns the bit value at ``offset`` in ``key``.
    """
    key = self._encode(key)
    index, bits, mask = self._get_bits_and_offset(key, offset)
python
{ "resource": "" }
q592
MockRedis.hexists
train
def hexists(self, hashkey, attribute):
    """Emulate hexists."""
    redis_hash =
python
{ "resource": "" }
q593
MockRedis.hget
train
def hget(self, hashkey, attribute):
    """Emulate hget."""
    redis_hash = self._get_hash(hashkey, 'HGET')
python
{ "resource": "" }
q594
MockRedis.hmset
train
def hmset(self, hashkey, value):
    """Emulate hmset."""
    redis_hash = self._get_hash(hashkey, 'HMSET', create=True)
    for key, value in value.items():
python
{ "resource": "" }
q595
MockRedis.hmget
train
def hmget(self, hashkey, keys, *args):
    """Emulate hmget."""
    redis_hash = self._get_hash(hashkey, 'HMGET')
    attributes = self._list_or_args(keys, args)
python
{ "resource": "" }
q596
MockRedis.hset
train
def hset(self, hashkey, attribute, value):
    """Emulate hset."""
    redis_hash = self._get_hash(hashkey, 'HSET', create=True)
    attribute = self._encode(attribute)
python
{ "resource": "" }
q597
MockRedis.hsetnx
train
def hsetnx(self, hashkey, attribute, value):
    """Emulate hsetnx."""
    redis_hash = self._get_hash(hashkey, 'HSETNX', create=True)
    attribute = self._encode(attribute)
    if attribute in redis_hash:
        return
python
{ "resource": "" }
q598
MockRedis.hincrby
train
def hincrby(self, hashkey, attribute, increment=1):
    """Emulate hincrby."""
python
{ "resource": "" }
q599
MockRedis.hincrbyfloat
train
def hincrbyfloat(self, hashkey, attribute, increment=1.0):
    """Emulate hincrbyfloat."""
python
{ "resource": "" }