Dataset columns (name, type, length range / class count):

repo              stringlengths   7 - 54
path              stringlengths   4 - 192
url               stringlengths   87 - 284
code              stringlengths   78 - 104k
code_tokens       sequence
docstring         stringlengths   1 - 46.9k
docstring_tokens  sequence
language          stringclasses   1 value
partition         stringclasses   3 values
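Each row below follows this schema. As a quick orientation, here is a minimal sketch (plain Python, no external libraries) of how one record could be represented and sanity-checked; the field values are copied from the first record shown below, and the long fields are abbreviated.

EXPECTED_FIELDS = [
    "repo", "path", "url", "code", "code_tokens",
    "docstring", "docstring_tokens", "language", "partition",
]

record = {
    "repo": "MSchnei/pyprf_feature",
    "path": "pyprf_feature/analysis/find_prf_utils_np.py",
    "url": "https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/find_prf_utils_np.py#L8-L21",
    "code": "def np_lst_sq(vecMdl, aryFuncChnk): ...",      # full source text of the function
    "code_tokens": ["def", "np_lst_sq", "(", "..."],          # tokenized source
    "docstring": "Least squares fitting in numpy without cross-validation.",
    "docstring_tokens": ["Least", "squares", "..."],           # tokenized docstring
    "language": "python",                                      # single class
    "partition": "train",                                      # one of train/valid/test
}

def check_record(rec):
    """Return True if the record carries every expected field."""
    return all(field in rec for field in EXPECTED_FIELDS)

assert check_record(record)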
MSchnei/pyprf_feature
pyprf_feature/analysis/find_prf_utils_np.py
https://github.com/MSchnei/pyprf_feature/blob/49004ede7ae1ddee07a30afe9ce3e2776750805c/pyprf_feature/analysis/find_prf_utils_np.py#L8-L21
def np_lst_sq(vecMdl, aryFuncChnk):
    """Least squares fitting in numpy without cross-validation.

    Notes
    -----
    This is just a wrapper function for np.linalg.lstsq to keep piping
    consistent.
    """
    aryTmpBts, vecTmpRes = np.linalg.lstsq(vecMdl, aryFuncChnk, rcond=-1)[:2]

    return aryTmpBts, vecTmpRes
[ "def", "np_lst_sq", "(", "vecMdl", ",", "aryFuncChnk", ")", ":", "aryTmpBts", ",", "vecTmpRes", "=", "np", ".", "linalg", ".", "lstsq", "(", "vecMdl", ",", "aryFuncChnk", ",", "rcond", "=", "-", "1", ")", "[", ":", "2", "]", "return", "aryTmpBts", ",", "vecTmpRes" ]
Least squares fitting in numpy without cross-validation. Notes ----- This is just a wrapper function for np.linalg.lstsq to keep piping consistent.
[ "Least", "squares", "fitting", "in", "numpy", "without", "cross", "-", "validation", "." ]
python
train
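The wrapper above simply forwards to numpy's least-squares solver. A minimal, self-contained usage sketch with hypothetical random data (numpy only):

import numpy as np

# Hypothetical design matrix (time points x predictors) and data
# (time points x channels), illustrating how np_lst_sq would be called.
vecMdl = np.random.randn(100, 3)
aryFuncChnk = np.random.randn(100, 50)

# Same call the wrapper makes: returns beta estimates and residual sums.
aryTmpBts, vecTmpRes = np.linalg.lstsq(vecMdl, aryFuncChnk, rcond=-1)[:2]

print(aryTmpBts.shape)   # (3, 50): one beta per predictor and data column
print(vecTmpRes.shape)   # (50,): residual sum of squares per data column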
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L5715-L5736
def value_interval(ol, value):
    '''
        ol = [0, 4, 6, 8, 10, 14]
        value_interval(ol, -1)
        value_interval(ol, 1)
        value_interval(ol, 2)
        value_interval(ol, 3)
        value_interval(ol, 4)
        value_interval(ol, 9)
        value_interval(ol, 14)
        value_interval(ol, 17)
    '''
    si, ei = where(ol, value)
    if(si == None):
        sv = None
    else:
        sv = ol[si]
    if(ei == None):
        ev = None
    else:
        ev = ol[ei]
    return((sv, ev))
[ "def", "value_interval", "(", "ol", ",", "value", ")", ":", "si", ",", "ei", "=", "where", "(", "ol", ",", "value", ")", "if", "(", "si", "==", "None", ")", ":", "sv", "=", "None", "else", ":", "sv", "=", "ol", "[", "si", "]", "if", "(", "ei", "==", "None", ")", ":", "ev", "=", "None", "else", ":", "ev", "=", "ol", "[", "ei", "]", "return", "(", "(", "sv", ",", "ev", ")", ")" ]
ol = [0, 4, 6, 8, 10, 14] value_interval(ol,-1) value_interval(ol,1) value_interval(ol,2) value_interval(ol,3) value_interval(ol,4) value_interval(ol,9) value_interval(ol,14) value_interval(ol,17)
[ "ol", "=", "[", "0", "4", "6", "8", "10", "14", "]", "value_interval", "(", "ol", "-", "1", ")", "value_interval", "(", "ol", "1", ")", "value_interval", "(", "ol", "2", ")", "value_interval", "(", "ol", "3", ")", "value_interval", "(", "ol", "4", ")", "value_interval", "(", "ol", "9", ")", "value_interval", "(", "ol", "14", ")", "value_interval", "(", "ol", "17", ")" ]
python
valid
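The function above delegates index lookup to elist's own where() helper, so it is not runnable in isolation. As a rough, independent sketch of the idea (find the neighbouring values that bracket a value in a sorted list) using only the standard library; this is not the elist implementation and may differ from it on exact matches:

from bisect import bisect_left, bisect_right

def bracket_values(ol, value):
    # Stand-in for the idea behind value_interval(): the closest element
    # <= value and the closest element >= value, or None when the value
    # falls outside the list.
    lo = bisect_right(ol, value) - 1
    hi = bisect_left(ol, value)
    sv = ol[lo] if lo >= 0 else None
    ev = ol[hi] if hi < len(ol) else None
    return (sv, ev)

ol = [0, 4, 6, 8, 10, 14]
print(bracket_values(ol, -1))   # (None, 0)
print(bracket_values(ol, 3))    # (0, 4)
print(bracket_values(ol, 4))    # (4, 4)
print(bracket_values(ol, 17))   # (14, None)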
napalm-automation/napalm-junos
napalm_junos/junos.py
https://github.com/napalm-automation/napalm-junos/blob/78c0d161daf2abf26af5835b773f6db57c46efff/napalm_junos/junos.py#L1574-L1601
def get_probes_results(self):
    """Return the results of the RPM probes."""
    probes_results = {}
    probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
    probes_results_table.get()
    probes_results_items = probes_results_table.items()
    for probe_result in probes_results_items:
        probe_name = py23_compat.text_type(probe_result[0])
        test_results = {
            p[0]: p[1] for p in probe_result[1]
        }
        test_results['last_test_loss'] = napalm_base.helpers.convert(
            int, test_results.pop('last_test_loss'), 0)
        for test_param_name, test_param_value in test_results.items():
            if isinstance(test_param_value, float):
                test_results[test_param_name] = test_param_value * 1e-3  # convert from useconds to mseconds
        test_name = test_results.pop('test_name', '')
        source = test_results.get('source', u'')
        if source is None:
            test_results['source'] = u''
        if probe_name not in probes_results.keys():
            probes_results[probe_name] = {}
        probes_results[probe_name][test_name] = test_results
    return probes_results
[ "def", "get_probes_results", "(", "self", ")", ":", "probes_results", "=", "{", "}", "probes_results_table", "=", "junos_views", ".", "junos_rpm_probes_results_table", "(", "self", ".", "device", ")", "probes_results_table", ".", "get", "(", ")", "probes_results_items", "=", "probes_results_table", ".", "items", "(", ")", "for", "probe_result", "in", "probes_results_items", ":", "probe_name", "=", "py23_compat", ".", "text_type", "(", "probe_result", "[", "0", "]", ")", "test_results", "=", "{", "p", "[", "0", "]", ":", "p", "[", "1", "]", "for", "p", "in", "probe_result", "[", "1", "]", "}", "test_results", "[", "'last_test_loss'", "]", "=", "napalm_base", ".", "helpers", ".", "convert", "(", "int", ",", "test_results", ".", "pop", "(", "'last_test_loss'", ")", ",", "0", ")", "for", "test_param_name", ",", "test_param_value", "in", "test_results", ".", "items", "(", ")", ":", "if", "isinstance", "(", "test_param_value", ",", "float", ")", ":", "test_results", "[", "test_param_name", "]", "=", "test_param_value", "*", "1e-3", "# convert from useconds to mseconds", "test_name", "=", "test_results", ".", "pop", "(", "'test_name'", ",", "''", ")", "source", "=", "test_results", ".", "get", "(", "'source'", ",", "u''", ")", "if", "source", "is", "None", ":", "test_results", "[", "'source'", "]", "=", "u''", "if", "probe_name", "not", "in", "probes_results", ".", "keys", "(", ")", ":", "probes_results", "[", "probe_name", "]", "=", "{", "}", "probes_results", "[", "probe_name", "]", "[", "test_name", "]", "=", "test_results", "return", "probes_results" ]
Return the results of the RPM probes.
[ "Return", "the", "results", "of", "the", "RPM", "probes", "." ]
python
train
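For context, this getter is normally reached through the NAPALM driver API rather than called directly. A hedged usage sketch; the hostname and credentials are hypothetical, and the import shown is the one used by the current merged napalm distribution (the record itself comes from the older napalm-junos repository):

from napalm import get_network_driver

driver = get_network_driver("junos")
device = driver(hostname="192.0.2.1", username="admin", password="secret")

device.open()
try:
    # Same structure as the function above returns: probe name -> test name -> results.
    probes = device.get_probes_results()
    for probe_name, tests in probes.items():
        for test_name, results in tests.items():
            print(probe_name, test_name, results.get("last_test_loss"))
finally:
    device.close()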
vertexproject/synapse
synapse/lib/jupyter.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/jupyter.py#L241-L259
async def getTempCoreCmdr(mods=None, outp=None):
    '''
    Get a CmdrCore instance which is backed by a temporary Cortex.

    Args:
        mods (list): A list of additional CoreModules to load in the Cortex.
        outp: A output helper. Will be used for the Cmdr instance.

    Notes:
        The CmdrCore returned by this should be fini()'d to tear down the
        temporary Cortex.

    Returns:
        CmdrCore: A CmdrCore instance.
    '''
    acm = genTempCoreProxy(mods)
    prox = await acm.__aenter__()
    cmdrcore = await CmdrCore.anit(prox, outp=outp)
    cmdrcore.acm = acm
    return cmdrcore
[ "async", "def", "getTempCoreCmdr", "(", "mods", "=", "None", ",", "outp", "=", "None", ")", ":", "acm", "=", "genTempCoreProxy", "(", "mods", ")", "prox", "=", "await", "acm", ".", "__aenter__", "(", ")", "cmdrcore", "=", "await", "CmdrCore", ".", "anit", "(", "prox", ",", "outp", "=", "outp", ")", "cmdrcore", ".", "acm", "=", "acm", "return", "cmdrcore" ]
Get a CmdrCore instance which is backed by a temporary Cortex. Args: mods (list): A list of additional CoreModules to load in the Cortex. outp: A output helper. Will be used for the Cmdr instance. Notes: The CmdrCore returned by this should be fini()'d to tear down the temporary Cortex. Returns: CmdrCore: A CmdrCore instance.
[ "Get", "a", "CmdrCore", "instance", "which", "is", "backed", "by", "a", "temporary", "Cortex", "." ]
python
train
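The docstring above stresses that the returned CmdrCore must be fini()'d so the temporary Cortex is torn down. A minimal hedged async sketch, assuming synapse is installed and the helper is imported from the module in the record:

import asyncio

from synapse.lib.jupyter import getTempCoreCmdr

async def main():
    # Spin up a throwaway Cortex-backed CmdrCore, use it, then tear it down.
    cmdrcore = await getTempCoreCmdr()
    try:
        pass  # interact with cmdrcore here
    finally:
        await cmdrcore.fini()

asyncio.run(main())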
KrishnaswamyLab/graphtools
graphtools/graphs.py
https://github.com/KrishnaswamyLab/graphtools/blob/44685352be7df2005d44722903092207967457f2/graphtools/graphs.py#L863-L947
def build_kernel(self): """Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value """ if self.precomputed == "affinity": # already done # TODO: should we check that precomputed matrices look okay? # e.g. check the diagonal K = self.data_nu elif self.precomputed == "adjacency": # need to set diagonal to one to make it an affinity matrix K = self.data_nu if sparse.issparse(K) and \ not (isinstance(K, sparse.dok_matrix) or isinstance(K, sparse.lil_matrix)): K = K.tolil() K = set_diagonal(K, 1) else: tasklogger.log_start("affinities") if sparse.issparse(self.data_nu): self.data_nu = self.data_nu.toarray() if self.precomputed == "distance": pdx = self.data_nu elif self.precomputed is None: pdx = pdist(self.data_nu, metric=self.distance) if np.any(pdx == 0): pdx = squareform(pdx) duplicate_ids = np.array( [i for i in np.argwhere(pdx == 0) if i[1] > i[0]]) duplicate_names = ", ".join(["{} and {}".format(i[0], i[1]) for i in duplicate_ids]) warnings.warn( "Detected zero distance between samples {}. " "Consider removing duplicates to avoid errors in " "downstream processing.".format(duplicate_names), RuntimeWarning) else: pdx = squareform(pdx) else: raise ValueError( "precomputed='{}' not recognized. " "Choose from ['affinity', 'adjacency', 'distance', " "None]".format(self.precomputed)) if self.bandwidth is None: knn_dist = np.partition( pdx, self.knn + 1, axis=1)[:, :self.knn + 1] bandwidth = np.max(knn_dist, axis=1) elif callable(self.bandwidth): bandwidth = self.bandwidth(pdx) else: bandwidth = self.bandwidth bandwidth = bandwidth * self.bandwidth_scale pdx = (pdx.T / bandwidth).T K = np.exp(-1 * np.power(pdx, self.decay)) # handle nan K = np.where(np.isnan(K), 1, K) tasklogger.log_complete("affinities") # truncate if sparse.issparse(K): if not (isinstance(K, sparse.csr_matrix) or isinstance(K, sparse.csc_matrix) or isinstance(K, sparse.bsr_matrix)): K = K.tocsr() K.data[K.data < self.thresh] = 0 K = K.tocoo() K.eliminate_zeros() K = K.tocsr() else: K[K < self.thresh] = 0 return K
[ "def", "build_kernel", "(", "self", ")", ":", "if", "self", ".", "precomputed", "==", "\"affinity\"", ":", "# already done", "# TODO: should we check that precomputed matrices look okay?", "# e.g. check the diagonal", "K", "=", "self", ".", "data_nu", "elif", "self", ".", "precomputed", "==", "\"adjacency\"", ":", "# need to set diagonal to one to make it an affinity matrix", "K", "=", "self", ".", "data_nu", "if", "sparse", ".", "issparse", "(", "K", ")", "and", "not", "(", "isinstance", "(", "K", ",", "sparse", ".", "dok_matrix", ")", "or", "isinstance", "(", "K", ",", "sparse", ".", "lil_matrix", ")", ")", ":", "K", "=", "K", ".", "tolil", "(", ")", "K", "=", "set_diagonal", "(", "K", ",", "1", ")", "else", ":", "tasklogger", ".", "log_start", "(", "\"affinities\"", ")", "if", "sparse", ".", "issparse", "(", "self", ".", "data_nu", ")", ":", "self", ".", "data_nu", "=", "self", ".", "data_nu", ".", "toarray", "(", ")", "if", "self", ".", "precomputed", "==", "\"distance\"", ":", "pdx", "=", "self", ".", "data_nu", "elif", "self", ".", "precomputed", "is", "None", ":", "pdx", "=", "pdist", "(", "self", ".", "data_nu", ",", "metric", "=", "self", ".", "distance", ")", "if", "np", ".", "any", "(", "pdx", "==", "0", ")", ":", "pdx", "=", "squareform", "(", "pdx", ")", "duplicate_ids", "=", "np", ".", "array", "(", "[", "i", "for", "i", "in", "np", ".", "argwhere", "(", "pdx", "==", "0", ")", "if", "i", "[", "1", "]", ">", "i", "[", "0", "]", "]", ")", "duplicate_names", "=", "\", \"", ".", "join", "(", "[", "\"{} and {}\"", ".", "format", "(", "i", "[", "0", "]", ",", "i", "[", "1", "]", ")", "for", "i", "in", "duplicate_ids", "]", ")", "warnings", ".", "warn", "(", "\"Detected zero distance between samples {}. \"", "\"Consider removing duplicates to avoid errors in \"", "\"downstream processing.\"", ".", "format", "(", "duplicate_names", ")", ",", "RuntimeWarning", ")", "else", ":", "pdx", "=", "squareform", "(", "pdx", ")", "else", ":", "raise", "ValueError", "(", "\"precomputed='{}' not recognized. 
\"", "\"Choose from ['affinity', 'adjacency', 'distance', \"", "\"None]\"", ".", "format", "(", "self", ".", "precomputed", ")", ")", "if", "self", ".", "bandwidth", "is", "None", ":", "knn_dist", "=", "np", ".", "partition", "(", "pdx", ",", "self", ".", "knn", "+", "1", ",", "axis", "=", "1", ")", "[", ":", ",", ":", "self", ".", "knn", "+", "1", "]", "bandwidth", "=", "np", ".", "max", "(", "knn_dist", ",", "axis", "=", "1", ")", "elif", "callable", "(", "self", ".", "bandwidth", ")", ":", "bandwidth", "=", "self", ".", "bandwidth", "(", "pdx", ")", "else", ":", "bandwidth", "=", "self", ".", "bandwidth", "bandwidth", "=", "bandwidth", "*", "self", ".", "bandwidth_scale", "pdx", "=", "(", "pdx", ".", "T", "/", "bandwidth", ")", ".", "T", "K", "=", "np", ".", "exp", "(", "-", "1", "*", "np", ".", "power", "(", "pdx", ",", "self", ".", "decay", ")", ")", "# handle nan", "K", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "K", ")", ",", "1", ",", "K", ")", "tasklogger", ".", "log_complete", "(", "\"affinities\"", ")", "# truncate", "if", "sparse", ".", "issparse", "(", "K", ")", ":", "if", "not", "(", "isinstance", "(", "K", ",", "sparse", ".", "csr_matrix", ")", "or", "isinstance", "(", "K", ",", "sparse", ".", "csc_matrix", ")", "or", "isinstance", "(", "K", ",", "sparse", ".", "bsr_matrix", ")", ")", ":", "K", "=", "K", ".", "tocsr", "(", ")", "K", ".", "data", "[", "K", ".", "data", "<", "self", ".", "thresh", "]", "=", "0", "K", "=", "K", ".", "tocoo", "(", ")", "K", ".", "eliminate_zeros", "(", ")", "K", "=", "K", ".", "tocsr", "(", ")", "else", ":", "K", "[", "K", "<", "self", ".", "thresh", "]", "=", "0", "return", "K" ]
Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value
[ "Build", "the", "KNN", "kernel", "." ]
python
train
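To make the math in build_kernel() concrete, here is a simplified numpy re-implementation of its dense branch (pairwise distances, adaptive bandwidth from the knn-th neighbour, alpha-decay exponential, truncation of tiny affinities). It is a sketch of the logic only, not the graphtools API, and it skips the sparse, precomputed, and symmetrization handling:

import numpy as np
from scipy.spatial.distance import pdist, squareform

def simple_alpha_decay_kernel(data, knn=5, decay=40, thresh=1e-4,
                              bandwidth_scale=1.0):
    # Dense-path sketch of the kernel construction shown above.
    pdx = squareform(pdist(data, metric="euclidean"))
    knn_dist = np.partition(pdx, knn + 1, axis=1)[:, :knn + 1]
    bandwidth = np.max(knn_dist, axis=1) * bandwidth_scale
    pdx = (pdx.T / bandwidth).T
    K = np.exp(-1 * np.power(pdx, decay))
    K = np.where(np.isnan(K), 1, K)   # handle nan
    K[K < thresh] = 0                 # truncate tiny affinities
    return K

X = np.random.randn(50, 3)
K = simple_alpha_decay_kernel(X)
print(K.shape)  # (50, 50); note this K is not yet symmetrized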
django-treebeard/django-treebeard
treebeard/admin.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/admin.py#L66-L87
def get_urls(self):
    """
    Adds a url to move nodes to this admin
    """
    urls = super(TreeAdmin, self).get_urls()
    if django.VERSION < (1, 10):
        from django.views.i18n import javascript_catalog
        jsi18n_url = url(r'^jsi18n/$', javascript_catalog,
                         {'packages': ('treebeard',)})
    else:
        from django.views.i18n import JavaScriptCatalog
        jsi18n_url = url(r'^jsi18n/$',
                         JavaScriptCatalog.as_view(packages=['treebeard']),
                         name='javascript-catalog')
    new_urls = [
        url('^move/$', self.admin_site.admin_view(self.move_node), ),
        jsi18n_url,
    ]
    return new_urls + urls
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "super", "(", "TreeAdmin", ",", "self", ")", ".", "get_urls", "(", ")", "if", "django", ".", "VERSION", "<", "(", "1", ",", "10", ")", ":", "from", "django", ".", "views", ".", "i18n", "import", "javascript_catalog", "jsi18n_url", "=", "url", "(", "r'^jsi18n/$'", ",", "javascript_catalog", ",", "{", "'packages'", ":", "(", "'treebeard'", ",", ")", "}", ")", "else", ":", "from", "django", ".", "views", ".", "i18n", "import", "JavaScriptCatalog", "jsi18n_url", "=", "url", "(", "r'^jsi18n/$'", ",", "JavaScriptCatalog", ".", "as_view", "(", "packages", "=", "[", "'treebeard'", "]", ")", ",", "name", "=", "'javascript-catalog'", ")", "new_urls", "=", "[", "url", "(", "'^move/$'", ",", "self", ".", "admin_site", ".", "admin_view", "(", "self", ".", "move_node", ")", ",", ")", ",", "jsi18n_url", ",", "]", "return", "new_urls", "+", "urls" ]
Adds a url to move nodes to this admin
[ "Adds", "a", "url", "to", "move", "nodes", "to", "this", "admin" ]
python
train
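The same pattern (prepend custom routes to those returned by super().get_urls() so they take precedence) applies to any ModelAdmin. A hedged sketch in the same old-style url() API the record uses (removed in Django 4.0, where re_path/path replace it); the view name and route are hypothetical:

from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse

class MyModelAdmin(admin.ModelAdmin):

    def get_urls(self):
        # Build the extra routes first so they win over the stock admin URLs.
        urls = super(MyModelAdmin, self).get_urls()
        new_urls = [
            url(r'^report/$', self.admin_site.admin_view(self.report_view)),
        ]
        return new_urls + urls

    def report_view(self, request):
        # Hypothetical extra admin view reachable under this model's admin URLs.
        return HttpResponse("report placeholder")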
odlgroup/odl
odl/contrib/tensorflow/layer.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tensorflow/layer.py#L22-L382
def as_tensorflow_layer(odl_op, name='ODLOperator', differentiable=True): """Convert `Operator` or `Functional` to a tensorflow layer. Parameters ---------- odl_op : `Operator` or `Functional` The operator that should be wrapped as a tensorflow layer. name : str Default name for tensorflow layers created. differentiable : boolean ``True`` if the layer should be differentiable, in which case ``odl_op`` should implement `Operator.derivative` which in turn implements `Operator.adjoint`. In this case, the adjoint of the derivative is properly wrapped in ``tensorflow_layer``, and gradients propagate as expected. If ``False``, the gradient is defined as everywhere zero. Returns ------- tensorflow_layer : callable Callable that, when called with a `tensorflow.Tensor` of shape ``(n,) + odl_op.domain.shape + (1,)`` where ``n`` is the batch size, returns another `tensorflow.Tensor` which is a lazy evaluation of ``odl_op``. If ``odl_op`` is an `Operator`, the shape of the returned tensor is ``(n,) + odl_op.range.shape + (1,)``. If ``odl_op`` is an `Functional`, the shape of the returned tensor is ``(n,)``. The ``dtype`` of the tensor is ``odl_op.range.dtype``. """ default_name = name def py_func(func, inp, Tout, stateful=True, name=None, grad=None): """Define custom py_func which takes also a grad op as argument. We need to overwrite this function since the default tensorflow `tf.py_func` does not support custom gradients. See tensorflow `issue #1095`_ for more information. Parameters ---------- func : callable Python function that takes and returns numpy arrays. inp : sequence of `tensorflow.Tensor` Input tensors for the function Tout : sequence of `tensorflow.dtype` Datatype of the output(s). stateful : bool, optional If the function has internal state, i.e. if calling the function with a given input repeatedly could give different output. name : string, optional Name of the python function. grad : callbable, optional Gradient of the function. References ---------- .. _issue #1095: https://github.com/tensorflow/tensorflow/issues/1095 """ if grad is None: return tf.py_func(func, inp, Tout, stateful=stateful, name=name) else: if stateful: override_name = 'PyFunc' else: override_name = 'PyFuncStateless' # Need to generate a unique name to avoid duplicates: rnd_name = override_name + 'Grad' + str(uuid.uuid4()) tf.RegisterGradient(rnd_name)(grad) g = tf.get_default_graph() with g.gradient_override_map({override_name: rnd_name}): return tf.py_func(func, inp, Tout, stateful=stateful, name=name) def tensorflow_layer_grad_impl(x, dy, name): """Implementation of the tensorflow gradient. Gradient in tensorflow is equivalent to the adjoint of the derivative in ODL. Returns a `tensorflow.Tensor` that represents a lazy application of :: odl_op.derivative(x).adjoint(dy) Parameters ---------- x : `numpy.ndarray` Point(s) in which the derivative should be taken. If ``odl_op`` is an `Operator` the axes are: 0 : batch id. This is a constant if ``fixed_size`` is ``True``, otherwise it is dynamic. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. If ``odl_op`` is a `Functional` the axes are: 0 : batch id. dy : `tensorflow.Tensor` Point(s) in which the adjoint of the derivative of the operator should be evaluated. The axes are: 0 : batch id. Should be pairwise matched with ``x``. 1, ..., m-2 : spatial dimensions of data. m-1 : (currently) unused data channel. name : string Name of the tensor. Returns ------- result : `tensorflow.Tensor` Lazy result of the computation. 
If ``odl_op`` is an `Operator` the axes are: 0 : batch id. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. If ``odl_op`` is a `Functional` the axes are: 0 : batch id. """ with tf.name_scope(name): # Validate the input/output shape x_shape = x.get_shape() dy_shape = dy.get_shape() try: # Lazy check if the first dimension is dynamic n_x = int(x_shape[0]) fixed_size = True except TypeError: n_x = x_shape[0] fixed_size = False if odl_op.is_functional: in_shape = (n_x,) else: in_shape = (n_x,) + space_shape(odl_op.range) + (1,) out_shape = (n_x,) + space_shape(odl_op.domain) + (1,) assert x_shape[1:] == space_shape(odl_op.domain) + (1,) if odl_op.is_functional: assert dy_shape[1:] == () else: assert dy_shape[1:] == space_shape(odl_op.range) + (1,) def _impl(x, dy): """Implementation of the adjoint of the derivative. Returns :: odl_op.derivative(x).adjoint(dy) Parameters ---------- x : `numpy.ndarray` Point(s) in which the derivative should be taken. If ``odl_op`` is an `Operator` the axes are: 0 : batch id. This is a constant if ``fixed_size`` is true, otherwise it is dynamic. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. If ``odl_op`` is a `Functional` the axes are: 0 : batch id. dy : `numpy.ndarray` Point(s) in which the adjoint of the derivative of the operator should be evaluated. The axes are: 0 : batch id. Should be pairwise matched with ``x``. 1, ..., m-2 : spatial dimensions of data. m-1 : (currently) unused data channel. Returns ------- result : `numpy.ndarray` Result of the computation. If ``odl_op`` is an `Operator` the axes are: 0 : batch id. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. If ``odl_op`` is a `Functional` the axes are: 0 : batch id. """ # Validate the shape of the given input if fixed_size: x_out_shape = out_shape assert x.shape == out_shape assert dy.shape == in_shape else: x_out_shape = (x.shape[0],) + out_shape[1:] assert x.shape[1:] == out_shape[1:] assert dy.shape[1:] == in_shape[1:] # Evaluate the operator on all inputs in the batch. out = np.empty(x_out_shape, odl_op.domain.dtype) out_element = odl_op.domain.element() for i in range(x_out_shape[0]): if odl_op.is_functional: xi = x[i, ..., 0] dyi = dy[i] out[i, ..., 0] = np.asarray(odl_op.gradient(xi)) * dyi else: xi = x[i, ..., 0] dyi = dy[i, ..., 0] odl_op.derivative(xi).adjoint(dyi, out=out_element) out[i, ..., 0] = np.asarray(out_element) # Rescale the domain/range according to the weighting since # tensorflow does not have weighted spaces. try: dom_weight = odl_op.domain.weighting.const except AttributeError: dom_weight = 1.0 try: ran_weight = odl_op.range.weighting.const except AttributeError: ran_weight = 1.0 scale = dom_weight / ran_weight out *= scale return out with ops.name_scope(name + '_pyfunc', values=[x, dy]) as name_call: result = py_func(_impl, [x, dy], [odl_op.domain.dtype], name=name_call, stateful=False) # We must manually set the output shape since tensorflow cannot # figure it out result = result[0] result.set_shape(out_shape) return result def tensorflow_layer(x, name=None): """Implementation of the tensorflow call. Returns a `tensorflow.Tensor` that represents a lazy application of ``odl_op`` to ``x``. Parameters ---------- x : `tensorflow.Tensor` Point(s) to which the layer should be applied. The axes are: 0 : batch id. Can be fixed or dynamic. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. name : string Name of the tensor. Default: Defaultname. 
Returns ------- result : `tensorflow.Tensor` Lazy result of the computation. If ``odl_op`` is an `Operator` the axes are: 0 : batch id. 1, ..., m-2 : spatial dimensions of data. m-1 : (currently) unused data channel. If ``odl_op`` is a `Functional` the axes are: 0 : batch id. """ if name is None: name = default_name with tf.name_scope(name): # Validate input shape x_shape = x.get_shape() try: # Lazy check if the first dimension is dynamic n_x = int(x_shape[0]) fixed_size = True except TypeError: n_x = x_shape[0] fixed_size = False in_shape = (n_x,) + space_shape(odl_op.domain) + (1,) if odl_op.is_functional: out_shape = (n_x,) else: out_shape = (n_x,) + space_shape(odl_op.range) + (1,) assert x_shape[1:] == space_shape(odl_op.domain) + (1,) out_dtype = getattr(odl_op.range, 'dtype', odl_op.domain.dtype) def _impl(x): """Implementation of the tensorflow layer. Parameters ---------- x : `numpy.ndarray` Point(s) in which the operator should be evaluated. The axes are: 0 : batch id. This is a constant if ``fixed_size`` is true, otherwise it is dynamic. 1, ..., n-2 : spatial dimensions of data. n-1 : (currently) unused data channel. Returns ------- result : `numpy.ndarray` Result of the computation. The axes are: 0 : batch id. Data is pairwise matched with ``x``. 1, ..., m-2 : spatial dimensions of data. m-1 : (currently) unused data channel. """ # Validate input shape if fixed_size: x_out_shape = out_shape assert x.shape == in_shape else: x_out_shape = (x.shape[0],) + out_shape[1:] assert x.shape[1:] == in_shape[1:] # Evaluate the operator on all inputs in the batch. out = np.empty(x_out_shape, out_dtype) out_element = odl_op.range.element() for i in range(x_out_shape[0]): if odl_op.is_functional: out[i] = odl_op(x[i, ..., 0]) else: odl_op(x[i, ..., 0], out=out_element) out[i, ..., 0] = np.asarray(out_element) return out if differentiable: def tensorflow_layer_grad(op, grad): """Thin wrapper for the gradient.""" x = op.inputs[0] return tensorflow_layer_grad_impl(x, grad, name=name + '_grad') else: tensorflow_layer_grad = None with ops.name_scope(name + '_pyfunc', values=[x]) as name_call: result = py_func(_impl, [x], [out_dtype], name=name_call, stateful=False, grad=tensorflow_layer_grad) # We must manually set the output shape since tensorflow cannot # figure it out result = result[0] result.set_shape(out_shape) return result return tensorflow_layer
[ "def", "as_tensorflow_layer", "(", "odl_op", ",", "name", "=", "'ODLOperator'", ",", "differentiable", "=", "True", ")", ":", "default_name", "=", "name", "def", "py_func", "(", "func", ",", "inp", ",", "Tout", ",", "stateful", "=", "True", ",", "name", "=", "None", ",", "grad", "=", "None", ")", ":", "\"\"\"Define custom py_func which takes also a grad op as argument.\n\n We need to overwrite this function since the default tensorflow\n `tf.py_func` does not support custom gradients.\n\n See tensorflow `issue #1095`_ for more information.\n\n Parameters\n ----------\n func : callable\n Python function that takes and returns numpy arrays.\n inp : sequence of `tensorflow.Tensor`\n Input tensors for the function\n Tout : sequence of `tensorflow.dtype`\n Datatype of the output(s).\n stateful : bool, optional\n If the function has internal state, i.e. if calling the function\n with a given input repeatedly could give different output.\n name : string, optional\n Name of the python function.\n grad : callbable, optional\n Gradient of the function.\n\n References\n ----------\n .. _issue #1095: https://github.com/tensorflow/tensorflow/issues/1095\n \"\"\"", "if", "grad", "is", "None", ":", "return", "tf", ".", "py_func", "(", "func", ",", "inp", ",", "Tout", ",", "stateful", "=", "stateful", ",", "name", "=", "name", ")", "else", ":", "if", "stateful", ":", "override_name", "=", "'PyFunc'", "else", ":", "override_name", "=", "'PyFuncStateless'", "# Need to generate a unique name to avoid duplicates:", "rnd_name", "=", "override_name", "+", "'Grad'", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "tf", ".", "RegisterGradient", "(", "rnd_name", ")", "(", "grad", ")", "g", "=", "tf", ".", "get_default_graph", "(", ")", "with", "g", ".", "gradient_override_map", "(", "{", "override_name", ":", "rnd_name", "}", ")", ":", "return", "tf", ".", "py_func", "(", "func", ",", "inp", ",", "Tout", ",", "stateful", "=", "stateful", ",", "name", "=", "name", ")", "def", "tensorflow_layer_grad_impl", "(", "x", ",", "dy", ",", "name", ")", ":", "\"\"\"Implementation of the tensorflow gradient.\n\n Gradient in tensorflow is equivalent to the adjoint of the derivative\n in ODL.\n\n Returns a `tensorflow.Tensor` that represents a lazy application of ::\n\n odl_op.derivative(x).adjoint(dy)\n\n Parameters\n ----------\n x : `numpy.ndarray`\n Point(s) in which the derivative should be taken.\n If ``odl_op`` is an `Operator` the axes are:\n 0 : batch id. This is a constant if ``fixed_size`` is\n ``True``, otherwise it is dynamic.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n If ``odl_op`` is a `Functional` the axes are:\n 0 : batch id.\n dy : `tensorflow.Tensor`\n Point(s) in which the adjoint of the derivative of the\n operator should be evaluated.\n The axes are:\n 0 : batch id. 
Should be pairwise matched with ``x``.\n 1, ..., m-2 : spatial dimensions of data.\n m-1 : (currently) unused data channel.\n name : string\n Name of the tensor.\n\n Returns\n -------\n result : `tensorflow.Tensor`\n Lazy result of the computation.\n If ``odl_op`` is an `Operator` the axes are:\n 0 : batch id.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n If ``odl_op`` is a `Functional` the axes are:\n 0 : batch id.\n \"\"\"", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "# Validate the input/output shape", "x_shape", "=", "x", ".", "get_shape", "(", ")", "dy_shape", "=", "dy", ".", "get_shape", "(", ")", "try", ":", "# Lazy check if the first dimension is dynamic", "n_x", "=", "int", "(", "x_shape", "[", "0", "]", ")", "fixed_size", "=", "True", "except", "TypeError", ":", "n_x", "=", "x_shape", "[", "0", "]", "fixed_size", "=", "False", "if", "odl_op", ".", "is_functional", ":", "in_shape", "=", "(", "n_x", ",", ")", "else", ":", "in_shape", "=", "(", "n_x", ",", ")", "+", "space_shape", "(", "odl_op", ".", "range", ")", "+", "(", "1", ",", ")", "out_shape", "=", "(", "n_x", ",", ")", "+", "space_shape", "(", "odl_op", ".", "domain", ")", "+", "(", "1", ",", ")", "assert", "x_shape", "[", "1", ":", "]", "==", "space_shape", "(", "odl_op", ".", "domain", ")", "+", "(", "1", ",", ")", "if", "odl_op", ".", "is_functional", ":", "assert", "dy_shape", "[", "1", ":", "]", "==", "(", ")", "else", ":", "assert", "dy_shape", "[", "1", ":", "]", "==", "space_shape", "(", "odl_op", ".", "range", ")", "+", "(", "1", ",", ")", "def", "_impl", "(", "x", ",", "dy", ")", ":", "\"\"\"Implementation of the adjoint of the derivative.\n\n Returns ::\n\n odl_op.derivative(x).adjoint(dy)\n\n Parameters\n ----------\n x : `numpy.ndarray`\n Point(s) in which the derivative should be taken.\n If ``odl_op`` is an `Operator` the axes are:\n 0 : batch id. This is a constant if ``fixed_size`` is\n true, otherwise it is dynamic.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n If ``odl_op`` is a `Functional` the axes are:\n 0 : batch id.\n dy : `numpy.ndarray`\n Point(s) in which the adjoint of the derivative of the\n operator should be evaluated.\n The axes are:\n 0 : batch id. 
Should be pairwise matched with ``x``.\n 1, ..., m-2 : spatial dimensions of data.\n m-1 : (currently) unused data channel.\n\n Returns\n -------\n result : `numpy.ndarray`\n Result of the computation.\n\n If ``odl_op`` is an `Operator` the axes are:\n 0 : batch id.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n If ``odl_op`` is a `Functional` the axes are:\n 0 : batch id.\n \"\"\"", "# Validate the shape of the given input", "if", "fixed_size", ":", "x_out_shape", "=", "out_shape", "assert", "x", ".", "shape", "==", "out_shape", "assert", "dy", ".", "shape", "==", "in_shape", "else", ":", "x_out_shape", "=", "(", "x", ".", "shape", "[", "0", "]", ",", ")", "+", "out_shape", "[", "1", ":", "]", "assert", "x", ".", "shape", "[", "1", ":", "]", "==", "out_shape", "[", "1", ":", "]", "assert", "dy", ".", "shape", "[", "1", ":", "]", "==", "in_shape", "[", "1", ":", "]", "# Evaluate the operator on all inputs in the batch.", "out", "=", "np", ".", "empty", "(", "x_out_shape", ",", "odl_op", ".", "domain", ".", "dtype", ")", "out_element", "=", "odl_op", ".", "domain", ".", "element", "(", ")", "for", "i", "in", "range", "(", "x_out_shape", "[", "0", "]", ")", ":", "if", "odl_op", ".", "is_functional", ":", "xi", "=", "x", "[", "i", ",", "...", ",", "0", "]", "dyi", "=", "dy", "[", "i", "]", "out", "[", "i", ",", "...", ",", "0", "]", "=", "np", ".", "asarray", "(", "odl_op", ".", "gradient", "(", "xi", ")", ")", "*", "dyi", "else", ":", "xi", "=", "x", "[", "i", ",", "...", ",", "0", "]", "dyi", "=", "dy", "[", "i", ",", "...", ",", "0", "]", "odl_op", ".", "derivative", "(", "xi", ")", ".", "adjoint", "(", "dyi", ",", "out", "=", "out_element", ")", "out", "[", "i", ",", "...", ",", "0", "]", "=", "np", ".", "asarray", "(", "out_element", ")", "# Rescale the domain/range according to the weighting since", "# tensorflow does not have weighted spaces.", "try", ":", "dom_weight", "=", "odl_op", ".", "domain", ".", "weighting", ".", "const", "except", "AttributeError", ":", "dom_weight", "=", "1.0", "try", ":", "ran_weight", "=", "odl_op", ".", "range", ".", "weighting", ".", "const", "except", "AttributeError", ":", "ran_weight", "=", "1.0", "scale", "=", "dom_weight", "/", "ran_weight", "out", "*=", "scale", "return", "out", "with", "ops", ".", "name_scope", "(", "name", "+", "'_pyfunc'", ",", "values", "=", "[", "x", ",", "dy", "]", ")", "as", "name_call", ":", "result", "=", "py_func", "(", "_impl", ",", "[", "x", ",", "dy", "]", ",", "[", "odl_op", ".", "domain", ".", "dtype", "]", ",", "name", "=", "name_call", ",", "stateful", "=", "False", ")", "# We must manually set the output shape since tensorflow cannot", "# figure it out", "result", "=", "result", "[", "0", "]", "result", ".", "set_shape", "(", "out_shape", ")", "return", "result", "def", "tensorflow_layer", "(", "x", ",", "name", "=", "None", ")", ":", "\"\"\"Implementation of the tensorflow call.\n\n Returns a `tensorflow.Tensor` that represents a lazy application of\n ``odl_op`` to ``x``.\n\n Parameters\n ----------\n x : `tensorflow.Tensor`\n Point(s) to which the layer should be applied.\n The axes are:\n 0 : batch id. Can be fixed or dynamic.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n name : string\n Name of the tensor. 
Default: Defaultname.\n\n Returns\n -------\n result : `tensorflow.Tensor`\n Lazy result of the computation.\n If ``odl_op`` is an `Operator` the axes are:\n 0 : batch id.\n 1, ..., m-2 : spatial dimensions of data.\n m-1 : (currently) unused data channel.\n If ``odl_op`` is a `Functional` the axes are:\n 0 : batch id.\n \"\"\"", "if", "name", "is", "None", ":", "name", "=", "default_name", "with", "tf", ".", "name_scope", "(", "name", ")", ":", "# Validate input shape", "x_shape", "=", "x", ".", "get_shape", "(", ")", "try", ":", "# Lazy check if the first dimension is dynamic", "n_x", "=", "int", "(", "x_shape", "[", "0", "]", ")", "fixed_size", "=", "True", "except", "TypeError", ":", "n_x", "=", "x_shape", "[", "0", "]", "fixed_size", "=", "False", "in_shape", "=", "(", "n_x", ",", ")", "+", "space_shape", "(", "odl_op", ".", "domain", ")", "+", "(", "1", ",", ")", "if", "odl_op", ".", "is_functional", ":", "out_shape", "=", "(", "n_x", ",", ")", "else", ":", "out_shape", "=", "(", "n_x", ",", ")", "+", "space_shape", "(", "odl_op", ".", "range", ")", "+", "(", "1", ",", ")", "assert", "x_shape", "[", "1", ":", "]", "==", "space_shape", "(", "odl_op", ".", "domain", ")", "+", "(", "1", ",", ")", "out_dtype", "=", "getattr", "(", "odl_op", ".", "range", ",", "'dtype'", ",", "odl_op", ".", "domain", ".", "dtype", ")", "def", "_impl", "(", "x", ")", ":", "\"\"\"Implementation of the tensorflow layer.\n\n Parameters\n ----------\n x : `numpy.ndarray`\n Point(s) in which the operator should be evaluated.\n The axes are:\n 0 : batch id. This is a constant if ``fixed_size`` is\n true, otherwise it is dynamic.\n 1, ..., n-2 : spatial dimensions of data.\n n-1 : (currently) unused data channel.\n\n Returns\n -------\n result : `numpy.ndarray`\n Result of the computation.\n The axes are:\n 0 : batch id. 
Data is pairwise matched with ``x``.\n 1, ..., m-2 : spatial dimensions of data.\n m-1 : (currently) unused data channel.\n \"\"\"", "# Validate input shape", "if", "fixed_size", ":", "x_out_shape", "=", "out_shape", "assert", "x", ".", "shape", "==", "in_shape", "else", ":", "x_out_shape", "=", "(", "x", ".", "shape", "[", "0", "]", ",", ")", "+", "out_shape", "[", "1", ":", "]", "assert", "x", ".", "shape", "[", "1", ":", "]", "==", "in_shape", "[", "1", ":", "]", "# Evaluate the operator on all inputs in the batch.", "out", "=", "np", ".", "empty", "(", "x_out_shape", ",", "out_dtype", ")", "out_element", "=", "odl_op", ".", "range", ".", "element", "(", ")", "for", "i", "in", "range", "(", "x_out_shape", "[", "0", "]", ")", ":", "if", "odl_op", ".", "is_functional", ":", "out", "[", "i", "]", "=", "odl_op", "(", "x", "[", "i", ",", "...", ",", "0", "]", ")", "else", ":", "odl_op", "(", "x", "[", "i", ",", "...", ",", "0", "]", ",", "out", "=", "out_element", ")", "out", "[", "i", ",", "...", ",", "0", "]", "=", "np", ".", "asarray", "(", "out_element", ")", "return", "out", "if", "differentiable", ":", "def", "tensorflow_layer_grad", "(", "op", ",", "grad", ")", ":", "\"\"\"Thin wrapper for the gradient.\"\"\"", "x", "=", "op", ".", "inputs", "[", "0", "]", "return", "tensorflow_layer_grad_impl", "(", "x", ",", "grad", ",", "name", "=", "name", "+", "'_grad'", ")", "else", ":", "tensorflow_layer_grad", "=", "None", "with", "ops", ".", "name_scope", "(", "name", "+", "'_pyfunc'", ",", "values", "=", "[", "x", "]", ")", "as", "name_call", ":", "result", "=", "py_func", "(", "_impl", ",", "[", "x", "]", ",", "[", "out_dtype", "]", ",", "name", "=", "name_call", ",", "stateful", "=", "False", ",", "grad", "=", "tensorflow_layer_grad", ")", "# We must manually set the output shape since tensorflow cannot", "# figure it out", "result", "=", "result", "[", "0", "]", "result", ".", "set_shape", "(", "out_shape", ")", "return", "result", "return", "tensorflow_layer" ]
Convert `Operator` or `Functional` to a tensorflow layer. Parameters ---------- odl_op : `Operator` or `Functional` The operator that should be wrapped as a tensorflow layer. name : str Default name for tensorflow layers created. differentiable : boolean ``True`` if the layer should be differentiable, in which case ``odl_op`` should implement `Operator.derivative` which in turn implements `Operator.adjoint`. In this case, the adjoint of the derivative is properly wrapped in ``tensorflow_layer``, and gradients propagate as expected. If ``False``, the gradient is defined as everywhere zero. Returns ------- tensorflow_layer : callable Callable that, when called with a `tensorflow.Tensor` of shape ``(n,) + odl_op.domain.shape + (1,)`` where ``n`` is the batch size, returns another `tensorflow.Tensor` which is a lazy evaluation of ``odl_op``. If ``odl_op`` is an `Operator`, the shape of the returned tensor is ``(n,) + odl_op.range.shape + (1,)``. If ``odl_op`` is an `Functional`, the shape of the returned tensor is ``(n,)``. The ``dtype`` of the tensor is ``odl_op.range.dtype``.
[ "Convert", "Operator", "or", "Functional", "to", "a", "tensorflow", "layer", "." ]
python
train
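The core trick in the record above is wrapping a numpy function as a TensorFlow op with a custom gradient, via tf.py_func plus RegisterGradient and gradient_override_map. A stripped-down, standalone illustration of just that pattern, using a toy square function instead of an ODL operator; this is TensorFlow 1.x API only (py_func and Session were removed or changed in TF 2):

import uuid

import numpy as np
import tensorflow as tf  # TensorFlow 1.x

def _square_np(x):
    # Numpy implementation that py_func will wrap.
    return np.square(x).astype(np.float32)

def _square_grad(op, grad):
    # Hand-written gradient: d(x^2)/dx = 2x.
    x = op.inputs[0]
    return 2.0 * x * grad

# Same mechanism as the record's py_func() helper: register the gradient
# under a unique name and remap PyFuncStateless onto it.
rnd_name = 'PyFuncStatelessGrad' + str(uuid.uuid4())
tf.RegisterGradient(rnd_name)(_square_grad)

x = tf.constant([1.0, 2.0, 3.0])
g = tf.get_default_graph()
with g.gradient_override_map({'PyFuncStateless': rnd_name}):
    y = tf.py_func(_square_np, [x], [tf.float32], stateful=False)[0]
y.set_shape(x.get_shape())  # py_func cannot infer the output shape

dy_dx = tf.gradients(y, x)[0]
with tf.Session() as sess:
    print(sess.run([y, dy_dx]))  # [1, 4, 9] and [2, 4, 6]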
cmorisse/ikp3db
ikp3db.py
https://github.com/cmorisse/ikp3db/blob/a0f318d4e8494b2e6f2f07ec0f1202ca023c920f/ikp3db.py#L1426-L1440
def set_breakpoint(self, file_name, line_number, condition=None, enabled=True):
    """ Create a breakpoint, register it in the class's lists and returns
        a tuple of (error_message, break_number)
    """
    c_file_name = self.canonic(file_name)
    import linecache
    line = linecache.getline(c_file_name, line_number)
    if not line:
        return "Line %s:%d does not exist." % (c_file_name, line_number), None
    bp = IKBreakpoint(c_file_name, line_number, condition, enabled)
    if self.pending_stop or IKBreakpoint.any_active_breakpoint:
        self.enable_tracing()
    else:
        self.disable_tracing()
    return None, bp.number
[ "def", "set_breakpoint", "(", "self", ",", "file_name", ",", "line_number", ",", "condition", "=", "None", ",", "enabled", "=", "True", ")", ":", "c_file_name", "=", "self", ".", "canonic", "(", "file_name", ")", "import", "linecache", "line", "=", "linecache", ".", "getline", "(", "c_file_name", ",", "line_number", ")", "if", "not", "line", ":", "return", "\"Line %s:%d does not exist.\"", "%", "(", "c_file_name", ",", "line_number", ")", ",", "None", "bp", "=", "IKBreakpoint", "(", "c_file_name", ",", "line_number", ",", "condition", ",", "enabled", ")", "if", "self", ".", "pending_stop", "or", "IKBreakpoint", ".", "any_active_breakpoint", ":", "self", ".", "enable_tracing", "(", ")", "else", ":", "self", ".", "disable_tracing", "(", ")", "return", "None", ",", "bp", ".", "number" ]
Create a breakpoint, register it in the class's lists and returns a tuple of (error_message, break_number)
[ "Create", "a", "breakpoint", "register", "it", "in", "the", "class", "s", "lists", "and", "returns", "a", "tuple", "of", "(", "error_message", "break_number", ")" ]
python
train
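The validity check at the top of set_breakpoint() relies on linecache returning an empty string for lines past the end of a file. That check in isolation, as a small standard-library sketch:

import linecache

def line_exists(file_name, line_number):
    # Same test set_breakpoint() performs before registering a breakpoint.
    return bool(linecache.getline(file_name, line_number))

print(line_exists(__file__, 1))      # True for any non-empty script
print(line_exists(__file__, 10**6))  # False: line is past the end of the file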
confluentinc/confluent-kafka-python
examples/adminapi.py
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L51-L68
def example_delete_topics(a, topics):
    """ delete topics """

    # Call delete_topics to asynchronously delete topics, a future is returned.
    # By default this operation on the broker returns immediately while
    # topics are deleted in the background. But here we give it some time (30s)
    # to propagate in the cluster before returning.
    #
    # Returns a dict of <topic,future>.
    fs = a.delete_topics(topics, operation_timeout=30)

    # Wait for operation to finish.
    for topic, f in fs.items():
        try:
            f.result()  # The result itself is None
            print("Topic {} deleted".format(topic))
        except Exception as e:
            print("Failed to delete topic {}: {}".format(topic, e))
[ "def", "example_delete_topics", "(", "a", ",", "topics", ")", ":", "# Call delete_topics to asynchronously delete topics, a future is returned.", "# By default this operation on the broker returns immediately while", "# topics are deleted in the background. But here we give it some time (30s)", "# to propagate in the cluster before returning.", "#", "# Returns a dict of <topic,future>.", "fs", "=", "a", ".", "delete_topics", "(", "topics", ",", "operation_timeout", "=", "30", ")", "# Wait for operation to finish.", "for", "topic", ",", "f", "in", "fs", ".", "items", "(", ")", ":", "try", ":", "f", ".", "result", "(", ")", "# The result itself is None", "print", "(", "\"Topic {} deleted\"", ".", "format", "(", "topic", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"Failed to delete topic {}: {}\"", ".", "format", "(", "topic", ",", "e", ")", ")" ]
delete topics
[ "delete", "topics" ]
python
train
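A short usage sketch, assuming the function above is in scope (e.g. this snippet appended to examples/adminapi.py); the broker address and topic names are hypothetical:

from confluent_kafka.admin import AdminClient

# Connect the admin client to a (hypothetical) local broker.
a = AdminClient({'bootstrap.servers': 'localhost:9092'})

# Request deletion of two topics; failures are reported per topic by the
# function's own exception handling.
example_delete_topics(a, ['transactions', 'clickstream'])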
angr/angr
angr/exploration_techniques/common.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/exploration_techniques/common.py#L5-L49
def condition_to_lambda(condition, default=False): """ Translates an integer, set, list or function into a lambda that checks if state's current basic block matches some condition. :param condition: An integer, set, list or lambda to convert to a lambda. :param default: The default return value of the lambda (in case condition is None). Default: false. :returns: A tuple of two items: a lambda that takes a state and returns the set of addresses that it matched from the condition, and a set that contains the normalized set of addresses to stop at, or None if no addresses were provided statically. """ if condition is None: condition_function = lambda state: default static_addrs = set() elif isinstance(condition, int): return condition_to_lambda((condition,)) elif isinstance(condition, (tuple, set, list)): static_addrs = set(condition) def condition_function(state): if state.addr in static_addrs: # returning {state.addr} instead of True to properly handle find/avoid conflicts return {state.addr} if not isinstance(state.project.engines.default_engine, engines.SimEngineVEX): return False try: # If the address is not in the set (which could mean it is # not at the top of a block), check directly in the blocks # (Blocks are repeatedly created for every check, but with # the IRSB cache in angr lifter it should be OK.) return static_addrs.intersection(set(state.block().instruction_addrs)) except (AngrError, SimError): return False elif hasattr(condition, '__call__'): condition_function = condition static_addrs = None else: raise AngrExplorationTechniqueError("ExplorationTechnique is unable to convert given type (%s) to a callable condition function." % condition.__class__) return condition_function, static_addrs
[ "def", "condition_to_lambda", "(", "condition", ",", "default", "=", "False", ")", ":", "if", "condition", "is", "None", ":", "condition_function", "=", "lambda", "state", ":", "default", "static_addrs", "=", "set", "(", ")", "elif", "isinstance", "(", "condition", ",", "int", ")", ":", "return", "condition_to_lambda", "(", "(", "condition", ",", ")", ")", "elif", "isinstance", "(", "condition", ",", "(", "tuple", ",", "set", ",", "list", ")", ")", ":", "static_addrs", "=", "set", "(", "condition", ")", "def", "condition_function", "(", "state", ")", ":", "if", "state", ".", "addr", "in", "static_addrs", ":", "# returning {state.addr} instead of True to properly handle find/avoid conflicts", "return", "{", "state", ".", "addr", "}", "if", "not", "isinstance", "(", "state", ".", "project", ".", "engines", ".", "default_engine", ",", "engines", ".", "SimEngineVEX", ")", ":", "return", "False", "try", ":", "# If the address is not in the set (which could mean it is", "# not at the top of a block), check directly in the blocks", "# (Blocks are repeatedly created for every check, but with", "# the IRSB cache in angr lifter it should be OK.)", "return", "static_addrs", ".", "intersection", "(", "set", "(", "state", ".", "block", "(", ")", ".", "instruction_addrs", ")", ")", "except", "(", "AngrError", ",", "SimError", ")", ":", "return", "False", "elif", "hasattr", "(", "condition", ",", "'__call__'", ")", ":", "condition_function", "=", "condition", "static_addrs", "=", "None", "else", ":", "raise", "AngrExplorationTechniqueError", "(", "\"ExplorationTechnique is unable to convert given type (%s) to a callable condition function.\"", "%", "condition", ".", "__class__", ")", "return", "condition_function", ",", "static_addrs" ]
Translates an integer, set, list or function into a lambda that checks if state's current basic block matches some condition. :param condition: An integer, set, list or lambda to convert to a lambda. :param default: The default return value of the lambda (in case condition is None). Default: false. :returns: A tuple of two items: a lambda that takes a state and returns the set of addresses that it matched from the condition, and a set that contains the normalized set of addresses to stop at, or None if no addresses were provided statically.
[ "Translates", "an", "integer", "set", "list", "or", "function", "into", "a", "lambda", "that", "checks", "if", "state", "s", "current", "basic", "block", "matches", "some", "condition", "." ]
python
train
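A hedged sketch of how the helper above normalizes the different condition types it accepts; it assumes condition_to_lambda is importable from the module in the record, and the addresses are hypothetical:

from angr.exploration_techniques.common import condition_to_lambda

# A collection of addresses becomes a membership-check lambda plus the
# normalized address set (useful for find/avoid arguments).
cond_fn, static_addrs = condition_to_lambda({0x400000, 0x400004})
print(static_addrs)   # the two addresses, as a set

# A callable is passed through unchanged and no static addresses are known.
cond_fn2, static_addrs2 = condition_to_lambda(lambda state: state.addr == 0x400000)
print(static_addrs2)  # None

# None falls back to the default return value.
cond_fn3, _ = condition_to_lambda(None, default=False)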
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L4597-L4608
def _le_circle(self, annot, p1, p2, lr):
    """Make stream commands for circle line end symbol. "lr" denotes left (False)
    or right point.
    """
    m, im, L, R, w, scol, fcol, opacity = self._le_annot_parms(annot, p1, p2)
    shift = 2.5  # 2*shift*width = length of square edge
    d = shift * max(1, w)
    M = R - (d/2., 0) if lr else L + (d/2., 0)
    r = Rect(M, M) + (-d, -d, d, d)  # the square
    ap = "q\n" + opacity + self._oval_string(r.tl * im, r.tr * im, r.br * im, r.bl * im)
    ap += "%g w\n" % w
    ap += scol + fcol + "b\nQ\n"
    return ap
[ "def", "_le_circle", "(", "self", ",", "annot", ",", "p1", ",", "p2", ",", "lr", ")", ":", "m", ",", "im", ",", "L", ",", "R", ",", "w", ",", "scol", ",", "fcol", ",", "opacity", "=", "self", ".", "_le_annot_parms", "(", "annot", ",", "p1", ",", "p2", ")", "shift", "=", "2.5", "# 2*shift*width = length of square edge", "d", "=", "shift", "*", "max", "(", "1", ",", "w", ")", "M", "=", "R", "-", "(", "d", "/", "2.", ",", "0", ")", "if", "lr", "else", "L", "+", "(", "d", "/", "2.", ",", "0", ")", "r", "=", "Rect", "(", "M", ",", "M", ")", "+", "(", "-", "d", ",", "-", "d", ",", "d", ",", "d", ")", "# the square", "ap", "=", "\"q\\n\"", "+", "opacity", "+", "self", ".", "_oval_string", "(", "r", ".", "tl", "*", "im", ",", "r", ".", "tr", "*", "im", ",", "r", ".", "br", "*", "im", ",", "r", ".", "bl", "*", "im", ")", "ap", "+=", "\"%g w\\n\"", "%", "w", "ap", "+=", "scol", "+", "fcol", "+", "\"b\\nQ\\n\"", "return", "ap" ]
Make stream commands for circle line end symbol. "lr" denotes left (False) or right point.
[ "Make", "stream", "commands", "for", "circle", "line", "end", "symbol", ".", "lr", "denotes", "left", "(", "False", ")", "or", "right", "point", "." ]
python
train
HazyResearch/fonduer
src/fonduer/utils/visualizer.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/visualizer.py#L28-L50
def display_boxes(self, pdf_file, boxes, alternate_colors=False):
    """
    Displays each of the bounding boxes passed in 'boxes' on images of the
    pdf pointed to by pdf_file
    boxes is a list of 5-tuples (page, top, left, bottom, right)
    """
    imgs = []
    colors = [Color("blue"), Color("red")]
    boxes_per_page = defaultdict(int)
    boxes_by_page = defaultdict(list)
    for i, (page, top, left, bottom, right) in enumerate(boxes):
        boxes_per_page[page] += 1
        boxes_by_page[page].append((top, left, bottom, right))
    for i, page_num in enumerate(boxes_per_page.keys()):
        img = pdf_to_img(pdf_file, page_num)
        draw = Drawing()
        draw.fill_color = Color("rgba(0, 0, 0, 0.0)")
        for j, (top, left, bottom, right) in enumerate(boxes_by_page[page_num]):
            draw.stroke_color = colors[j % 2] if alternate_colors else colors[0]
            draw.rectangle(left=left, top=top, right=right, bottom=bottom)
        draw(img)
        imgs.append(img)
    return imgs
[ "def", "display_boxes", "(", "self", ",", "pdf_file", ",", "boxes", ",", "alternate_colors", "=", "False", ")", ":", "imgs", "=", "[", "]", "colors", "=", "[", "Color", "(", "\"blue\"", ")", ",", "Color", "(", "\"red\"", ")", "]", "boxes_per_page", "=", "defaultdict", "(", "int", ")", "boxes_by_page", "=", "defaultdict", "(", "list", ")", "for", "i", ",", "(", "page", ",", "top", ",", "left", ",", "bottom", ",", "right", ")", "in", "enumerate", "(", "boxes", ")", ":", "boxes_per_page", "[", "page", "]", "+=", "1", "boxes_by_page", "[", "page", "]", ".", "append", "(", "(", "top", ",", "left", ",", "bottom", ",", "right", ")", ")", "for", "i", ",", "page_num", "in", "enumerate", "(", "boxes_per_page", ".", "keys", "(", ")", ")", ":", "img", "=", "pdf_to_img", "(", "pdf_file", ",", "page_num", ")", "draw", "=", "Drawing", "(", ")", "draw", ".", "fill_color", "=", "Color", "(", "\"rgba(0, 0, 0, 0.0)\"", ")", "for", "j", ",", "(", "top", ",", "left", ",", "bottom", ",", "right", ")", "in", "enumerate", "(", "boxes_by_page", "[", "page_num", "]", ")", ":", "draw", ".", "stroke_color", "=", "colors", "[", "j", "%", "2", "]", "if", "alternate_colors", "else", "colors", "[", "0", "]", "draw", ".", "rectangle", "(", "left", "=", "left", ",", "top", "=", "top", ",", "right", "=", "right", ",", "bottom", "=", "bottom", ")", "draw", "(", "img", ")", "imgs", ".", "append", "(", "img", ")", "return", "imgs" ]
Displays each of the bounding boxes passed in 'boxes' on images of the pdf pointed to by pdf_file boxes is a list of 5-tuples (page, top, left, bottom, right)
[ "Displays", "each", "of", "the", "bounding", "boxes", "passed", "in", "boxes", "on", "images", "of", "the", "pdf", "pointed", "to", "by", "pdf_file", "boxes", "is", "a", "list", "of", "5", "-", "tuples", "(", "page", "top", "left", "bottom", "right", ")" ]
python
train
eerimoq/bitstruct
bitstruct.py
https://github.com/eerimoq/bitstruct/blob/8e887c10241aa51c2a77c10e9923bb3978b15bcb/bitstruct.py#L427-L435
def pack(self, data):
    """See :func:`~bitstruct.pack_dict()`.

    """
    try:
        return self.pack_any(data)
    except KeyError as e:
        raise Error('{} not found in data dictionary'.format(str(e)))
[ "def", "pack", "(", "self", ",", "data", ")", ":", "try", ":", "return", "self", ".", "pack_any", "(", "data", ")", "except", "KeyError", "as", "e", ":", "raise", "Error", "(", "'{} not found in data dictionary'", ".", "format", "(", "str", "(", "e", ")", ")", ")" ]
See :func:`~bitstruct.pack_dict()`.
[ "See", ":", "func", ":", "~bitstruct", ".", "pack_dict", "()", "." ]
python
valid
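This method belongs to bitstruct's dictionary-based packing API, which is usually reached by compiling a format together with field names. A short hedged sketch; the format string and field names are illustrative:

import bitstruct

# Compile a format with named fields; pack()/unpack() then work on dicts.
cf = bitstruct.compile('u4u12u8', names=['version', 'length', 'flags'])

packed = cf.pack({'version': 1, 'length': 300, 'flags': 0xff})
print(packed.hex())       # 3 bytes: 4 + 12 + 8 bits

print(cf.unpack(packed))  # {'version': 1, 'length': 300, 'flags': 255}

# Packing a dict with a missing key raises bitstruct.Error, as in the
# pack() wrapper shown above.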
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L1804-L1846
def _decode_step(self, prev_word: mx.nd.NDArray, step: int, source_length: int, states: List[ModelState], models_output_layer_w: List[mx.nd.NDArray], models_output_layer_b: List[mx.nd.NDArray]) \ -> Tuple[mx.nd.NDArray, mx.nd.NDArray, List[ModelState]]: """ Returns decoder predictions (combined from all models), attention scores, and updated states. :param prev_word: Previous words of hypotheses. Shape: (batch_size * beam_size,). :param step: Beam search iteration. :param source_length: Length of the input sequence. :param states: List of model states. :param models_output_layer_w: Custom model weights for logit computation (empty for none). :param models_output_layer_b: Custom model biases for logit computation (empty for none). :return: (scores, attention scores, list of model states) """ bucket_key = (source_length, step) model_outs, model_attention_probs, model_states = [], [], [] # We use zip_longest here since we'll have empty lists when not using restrict_lexicon for model, out_w, out_b, state in itertools.zip_longest( self.models, models_output_layer_w, models_output_layer_b, states): decoder_out, attention_probs, state = model.run_decoder(prev_word, bucket_key, state) # Compute logits and softmax with restricted vocabulary if self.restrict_lexicon: # Apply output layer outside decoder module. logits = model.output_layer(decoder_out, out_w, out_b) if model.skip_softmax: model_out = logits # raw logits else: model_out = mx.nd.softmax(logits) # normalized probabilities else: # Output layer is applied inside decoder module. # if model.skip_softmax decoder_out represents logits, normalized probabilities else. model_out = decoder_out model_outs.append(model_out) model_attention_probs.append(attention_probs) model_states.append(state) scores, attention_probs = self._combine_predictions(model_outs, model_attention_probs) return scores, attention_probs, model_states
[ "def", "_decode_step", "(", "self", ",", "prev_word", ":", "mx", ".", "nd", ".", "NDArray", ",", "step", ":", "int", ",", "source_length", ":", "int", ",", "states", ":", "List", "[", "ModelState", "]", ",", "models_output_layer_w", ":", "List", "[", "mx", ".", "nd", ".", "NDArray", "]", ",", "models_output_layer_b", ":", "List", "[", "mx", ".", "nd", ".", "NDArray", "]", ")", "->", "Tuple", "[", "mx", ".", "nd", ".", "NDArray", ",", "mx", ".", "nd", ".", "NDArray", ",", "List", "[", "ModelState", "]", "]", ":", "bucket_key", "=", "(", "source_length", ",", "step", ")", "model_outs", ",", "model_attention_probs", ",", "model_states", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# We use zip_longest here since we'll have empty lists when not using restrict_lexicon", "for", "model", ",", "out_w", ",", "out_b", ",", "state", "in", "itertools", ".", "zip_longest", "(", "self", ".", "models", ",", "models_output_layer_w", ",", "models_output_layer_b", ",", "states", ")", ":", "decoder_out", ",", "attention_probs", ",", "state", "=", "model", ".", "run_decoder", "(", "prev_word", ",", "bucket_key", ",", "state", ")", "# Compute logits and softmax with restricted vocabulary", "if", "self", ".", "restrict_lexicon", ":", "# Apply output layer outside decoder module.", "logits", "=", "model", ".", "output_layer", "(", "decoder_out", ",", "out_w", ",", "out_b", ")", "if", "model", ".", "skip_softmax", ":", "model_out", "=", "logits", "# raw logits", "else", ":", "model_out", "=", "mx", ".", "nd", ".", "softmax", "(", "logits", ")", "# normalized probabilities", "else", ":", "# Output layer is applied inside decoder module.", "# if model.skip_softmax decoder_out represents logits, normalized probabilities else.", "model_out", "=", "decoder_out", "model_outs", ".", "append", "(", "model_out", ")", "model_attention_probs", ".", "append", "(", "attention_probs", ")", "model_states", ".", "append", "(", "state", ")", "scores", ",", "attention_probs", "=", "self", ".", "_combine_predictions", "(", "model_outs", ",", "model_attention_probs", ")", "return", "scores", ",", "attention_probs", ",", "model_states" ]
Returns decoder predictions (combined from all models), attention scores, and updated states. :param prev_word: Previous words of hypotheses. Shape: (batch_size * beam_size,). :param step: Beam search iteration. :param source_length: Length of the input sequence. :param states: List of model states. :param models_output_layer_w: Custom model weights for logit computation (empty for none). :param models_output_layer_b: Custom model biases for logit computation (empty for none). :return: (scores, attention scores, list of model states)
[ "Returns", "decoder", "predictions", "(", "combined", "from", "all", "models", ")", "attention", "scores", "and", "updated", "states", "." ]
python
train
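A minimal sketch of how per-model predictions could be combined in an ensemble step like the one above. The averaging scheme, function name, and toy arrays are assumptions for illustration, not Sockeye's actual _combine_predictions implementation.

import numpy as np

def combine_predictions(model_outs):
    # model_outs: list of (batch*beam, vocab) probability arrays, one per model
    # linear ensemble: average the normalized distributions, then take negative log
    avg = np.mean(np.stack(model_outs, axis=0), axis=0)
    return -np.log(avg + 1e-20)  # beam search typically minimizes negative log-probability

# usage with two toy "models" over a 3-word vocabulary
p1 = np.array([[0.7, 0.2, 0.1]])
p2 = np.array([[0.5, 0.3, 0.2]])
scores = combine_predictions([p1, p2])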
MediaFire/mediafire-python-open-sdk
examples/mediafire-cli.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/examples/mediafire-cli.py#L106-L110
def do_folder_create(client, args): """Create directory""" for folder_uri in args.uris: client.create_folder(folder_uri, recursive=True) return True
[ "def", "do_folder_create", "(", "client", ",", "args", ")", ":", "for", "folder_uri", "in", "args", ".", "uris", ":", "client", ".", "create_folder", "(", "folder_uri", ",", "recursive", "=", "True", ")", "return", "True" ]
Create directory
[ "Create", "directory" ]
python
train
spyder-ide/spyder-kernels
spyder_kernels/customize/spydercustomize.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L424-L433
def user_return(self, frame, return_value): """This function is called when a return trap is set here.""" # This is useful when debugging in an active interpreter (otherwise, # the debugger will stop before reaching the target file) if self._wait_for_mainpyfile: if (self.mainpyfile != self.canonic(frame.f_code.co_filename) or frame.f_lineno<= 0): return self._wait_for_mainpyfile = 0 self._old_Pdb_user_return(frame, return_value)
[ "def", "user_return", "(", "self", ",", "frame", ",", "return_value", ")", ":", "# This is useful when debugging in an active interpreter (otherwise,", "# the debugger will stop before reaching the target file)", "if", "self", ".", "_wait_for_mainpyfile", ":", "if", "(", "self", ".", "mainpyfile", "!=", "self", ".", "canonic", "(", "frame", ".", "f_code", ".", "co_filename", ")", "or", "frame", ".", "f_lineno", "<=", "0", ")", ":", "return", "self", ".", "_wait_for_mainpyfile", "=", "0", "self", ".", "_old_Pdb_user_return", "(", "frame", ",", "return_value", ")" ]
This function is called when a return trap is set here.
[ "This", "function", "is", "called", "when", "a", "return", "trap", "is", "set", "here", "." ]
python
train
wonambi-python/wonambi
wonambi/ioeeg/blackrock.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/blackrock.py#L373-L575
def _read_neuralev(filename, read_markers=False, trigger_bits=16, trigger_zero=True): """Read some information from NEV Parameters ---------- filename : str path to NEV file read_markers : bool whether to read markers or not (it can get really large) trigger_bits : int, optional 8 or 16, read the triggers as one or two bytes trigger_zero : bool, optional read the trigger zero or not Returns ------- MetaTags : list of dict which corresponds to MetaTags of openNEV Markers : list of dict markers in NEV file Notes ----- The conversion to DateTime in openNEV.m is not correct. They add a value of 2 to the day. Instead, they should add it to the index of the weekday It returns triggers as strings (format of EDFBrowser), but it does not read the othe types of events (waveforms, videos, etc). The time stamps are stored in UTC in the NSx files. However, time stamps in the NEV files are stored as local time up to Central 6.03 included and stored as UTC after Central 6.05. It's impossible to know the version of Central from the header. """ hdr = {} with open(filename, 'rb') as f: BasicHdr = f.read(336) i1 = 8 hdr['FileTypeID'] = BasicHdr[:i1].decode('utf-8') assert hdr['FileTypeID'] == 'NEURALEV' i0, i1 = i1, i1 + 2 filespec = unpack('bb', BasicHdr[i0:i1]) hdr['FileSpec'] = str(filespec[0]) + '.' + str(filespec[1]) i0, i1 = i1, i1 + 2 hdr['Flags'] = unpack('<H', BasicHdr[i0:i1])[0] i0, i1 = i1, i1 + 4 hdr['HeaderOffset'] = unpack('<I', BasicHdr[i0:i1])[0] i0, i1 = i1, i1 + 4 hdr['PacketBytes'] = unpack('<I', BasicHdr[i0:i1])[0] i0, i1 = i1, i1 + 4 hdr['TimeRes'] = unpack('<I', BasicHdr[i0:i1])[0] i0, i1 = i1, i1 + 4 hdr['SampleRes'] = unpack('<I', BasicHdr[i0:i1])[0] i0, i1 = i1, i1 + 16 time = unpack('<' + 'H' * 8, BasicHdr[i0:i1]) hdr['DateTimeRaw'] = time lg.warning('DateTime is in local time with Central version <= 6.03' ' and in UTC with Central version > 6.05') hdr['DateTime'] = datetime(time[0], time[1], time[3], time[4], time[5], time[6], time[7] * 1000) i0, i1 = i1, i1 + 32 # hdr['Application'] = _str(BasicHdr[i0:i1].decode('utf-8')) i0, i1 = i1, i1 + 256 hdr['Comment'] = _str(BasicHdr[i0:i1].decode('utf-8', errors='replace')) i0, i1 = i1, i1 + 4 countExtHeader = unpack('<I', BasicHdr[i0:i1])[0] # you can read subject name from sif # Check data duration f.seek(-hdr['PacketBytes'], SEEK_END) hdr['DataDuration'] = unpack('<I', f.read(4))[0] hdr['DataDurationSec'] = hdr['DataDuration'] / hdr['SampleRes'] # Read the Extended Header f.seek(336) ElectrodesInfo = [] IOLabels = [] for i in range(countExtHeader): ExtendedHeader = f.read(32) i1 = 8 PacketID = ExtendedHeader[:i1].decode('utf-8') if PacketID == 'NEUEVWAV': elec = {} i0, i1 = i1, i1 + 2 elec['ElectrodeID'] = unpack('<H', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 1 elec['ConnectorBank'] = chr(ExtendedHeader[i0] + 64) i0, i1 = i1, i1 + 1 elec['ConnectorPin'] = ExtendedHeader[i0] i0, i1 = i1, i1 + 2 df = unpack('<h', ExtendedHeader[i0:i1])[0] # This is a workaround for the DigitalFactor overflow if df == 21516: elec['DigitalFactor'] = 152592.547 else: elec['DigitalFactor'] = df i0, i1 = i1, i1 + 2 elec['EnergyThreshold'] = unpack('<H', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 2 elec['HighThreshold'] = unpack('<h', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 2 elec['LowThreshold'] = unpack('<h', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 1 elec['Units'] = ExtendedHeader[i0] i0, i1 = i1, i1 + 1 elec['WaveformBytes'] = ExtendedHeader[i0] ElectrodesInfo.append(elec) elif PacketID == 'NEUEVLBL': i0, i1 = i1, i1 + 2 ElectrodeID = 
unpack('<H', ExtendedHeader[i0:i1])[0] - 1 s = _str(ExtendedHeader[i1:].decode('utf-8')) ElectrodesInfo[ElectrodeID]['ElectrodeLabel'] = s elif PacketID == 'NEUEVFLT': elec = {} i0, i1 = i1, i1 + 2 ElectrodeID = unpack('<H', ExtendedHeader[i0:i1])[0] - 1 i0, i1 = i1, i1 + 4 elec['HighFreqCorner'] = unpack('<I', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 4 elec['HighFreqOrder'] = unpack('<I', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 2 elec['HighFilterType'] = unpack('<H', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 4 elec['LowFreqCorner'] = unpack('<I', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 4 elec['LowFreqOrder'] = unpack('<I', ExtendedHeader[i0:i1])[0] i0, i1 = i1, i1 + 2 elec['LowFilterType'] = unpack('<H', ExtendedHeader[i0:i1])[0] ElectrodesInfo[ElectrodeID].update(elec) elif PacketID == 'DIGLABEL': # TODO: the order is not taken into account and probably wrong! iolabel = {} iolabel['mode'] = ExtendedHeader[24] + 1 s = _str(ExtendedHeader[8:25].decode('utf-8')) iolabel['label'] = s IOLabels.append(iolabel) else: raise NotImplementedError(PacketID + ' not implemented yet') hdr['ChannelID'] = [x['ElectrodeID'] for x in ElectrodesInfo] fExtendedHeader = f.tell() fData = f.seek(0, SEEK_END) countDataPacket = int((fData - fExtendedHeader) / hdr['PacketBytes']) markers = [] if read_markers and countDataPacket: f.seek(fExtendedHeader) x = f.read(countDataPacket * hdr['PacketBytes']) DigiValues = [] for j in range(countDataPacket): i = j * hdr['PacketBytes'] if trigger_bits == 16: tempDigiVals = unpack('<H', x[8 + i:10 + i])[0] else: tempDigiVals = unpack('<H', x[8 + i:9 + i] + b'\x00')[0] val = {'timestamp': unpack('<I', x[0 + i:4 + i])[0], 'packetID': unpack('<H', x[4 + i:6 + i])[0], 'tempClassOrReason': unpack('<B', x[6 + i:7 + i])[0], 'tempDigiVals': tempDigiVals} if tempDigiVals != 0 or False: DigiValues.append(val) digserPacketID = 0 not_serialdigital = [x for x in DigiValues if not x['packetID'] == digserPacketID] if not_serialdigital: lg.debug('Code not implemented to read PacketID ' + str(not_serialdigital[0]['packetID'])) # convert to markers for val in DigiValues: m = {'name': str(val['tempDigiVals']), 'start': val['timestamp'] / hdr['SampleRes'], 'end': val['timestamp'] / hdr['SampleRes'], 'chan': [''], } markers.append(m) if read_markers: return markers else: return hdr
[ "def", "_read_neuralev", "(", "filename", ",", "read_markers", "=", "False", ",", "trigger_bits", "=", "16", ",", "trigger_zero", "=", "True", ")", ":", "hdr", "=", "{", "}", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "BasicHdr", "=", "f", ".", "read", "(", "336", ")", "i1", "=", "8", "hdr", "[", "'FileTypeID'", "]", "=", "BasicHdr", "[", ":", "i1", "]", ".", "decode", "(", "'utf-8'", ")", "assert", "hdr", "[", "'FileTypeID'", "]", "==", "'NEURALEV'", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "filespec", "=", "unpack", "(", "'bb'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "hdr", "[", "'FileSpec'", "]", "=", "str", "(", "filespec", "[", "0", "]", ")", "+", "'.'", "+", "str", "(", "filespec", "[", "1", "]", ")", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "hdr", "[", "'Flags'", "]", "=", "unpack", "(", "'<H'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "hdr", "[", "'HeaderOffset'", "]", "=", "unpack", "(", "'<I'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "hdr", "[", "'PacketBytes'", "]", "=", "unpack", "(", "'<I'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "hdr", "[", "'TimeRes'", "]", "=", "unpack", "(", "'<I'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "hdr", "[", "'SampleRes'", "]", "=", "unpack", "(", "'<I'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "16", "time", "=", "unpack", "(", "'<'", "+", "'H'", "*", "8", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "hdr", "[", "'DateTimeRaw'", "]", "=", "time", "lg", ".", "warning", "(", "'DateTime is in local time with Central version <= 6.03'", "' and in UTC with Central version > 6.05'", ")", "hdr", "[", "'DateTime'", "]", "=", "datetime", "(", "time", "[", "0", "]", ",", "time", "[", "1", "]", ",", "time", "[", "3", "]", ",", "time", "[", "4", "]", ",", "time", "[", "5", "]", ",", "time", "[", "6", "]", ",", "time", "[", "7", "]", "*", "1000", ")", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "32", "# hdr['Application'] = _str(BasicHdr[i0:i1].decode('utf-8'))", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "256", "hdr", "[", "'Comment'", "]", "=", "_str", "(", "BasicHdr", "[", "i0", ":", "i1", "]", ".", "decode", "(", "'utf-8'", ",", "errors", "=", "'replace'", ")", ")", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "countExtHeader", "=", "unpack", "(", "'<I'", ",", "BasicHdr", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "# you can read subject name from sif", "# Check data duration", "f", ".", "seek", "(", "-", "hdr", "[", "'PacketBytes'", "]", ",", "SEEK_END", ")", "hdr", "[", "'DataDuration'", "]", "=", "unpack", "(", "'<I'", ",", "f", ".", "read", "(", "4", ")", ")", "[", "0", "]", "hdr", "[", "'DataDurationSec'", "]", "=", "hdr", "[", "'DataDuration'", "]", "/", "hdr", "[", "'SampleRes'", "]", "# Read the Extended Header", "f", ".", "seek", "(", "336", ")", "ElectrodesInfo", "=", "[", "]", "IOLabels", "=", "[", "]", "for", "i", "in", "range", "(", "countExtHeader", ")", ":", "ExtendedHeader", "=", "f", ".", "read", "(", "32", ")", "i1", "=", "8", "PacketID", "=", "ExtendedHeader", "[", ":", "i1", "]", ".", "decode", "(", "'utf-8'", ")", "if", "PacketID", "==", "'NEUEVWAV'", ":", "elec", "=", "{", "}", "i0", ",", "i1", "=", "i1", ",", "i1", 
"+", "2", "elec", "[", "'ElectrodeID'", "]", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "1", "elec", "[", "'ConnectorBank'", "]", "=", "chr", "(", "ExtendedHeader", "[", "i0", "]", "+", "64", ")", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "1", "elec", "[", "'ConnectorPin'", "]", "=", "ExtendedHeader", "[", "i0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "df", "=", "unpack", "(", "'<h'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "# This is a workaround for the DigitalFactor overflow", "if", "df", "==", "21516", ":", "elec", "[", "'DigitalFactor'", "]", "=", "152592.547", "else", ":", "elec", "[", "'DigitalFactor'", "]", "=", "df", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "elec", "[", "'EnergyThreshold'", "]", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "elec", "[", "'HighThreshold'", "]", "=", "unpack", "(", "'<h'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "elec", "[", "'LowThreshold'", "]", "=", "unpack", "(", "'<h'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "1", "elec", "[", "'Units'", "]", "=", "ExtendedHeader", "[", "i0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "1", "elec", "[", "'WaveformBytes'", "]", "=", "ExtendedHeader", "[", "i0", "]", "ElectrodesInfo", ".", "append", "(", "elec", ")", "elif", "PacketID", "==", "'NEUEVLBL'", ":", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "ElectrodeID", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "-", "1", "s", "=", "_str", "(", "ExtendedHeader", "[", "i1", ":", "]", ".", "decode", "(", "'utf-8'", ")", ")", "ElectrodesInfo", "[", "ElectrodeID", "]", "[", "'ElectrodeLabel'", "]", "=", "s", "elif", "PacketID", "==", "'NEUEVFLT'", ":", "elec", "=", "{", "}", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "ElectrodeID", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "-", "1", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "elec", "[", "'HighFreqCorner'", "]", "=", "unpack", "(", "'<I'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "elec", "[", "'HighFreqOrder'", "]", "=", "unpack", "(", "'<I'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "elec", "[", "'HighFilterType'", "]", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "elec", "[", "'LowFreqCorner'", "]", "=", "unpack", "(", "'<I'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "4", "elec", "[", "'LowFreqOrder'", "]", "=", "unpack", "(", "'<I'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "i0", ",", "i1", "=", "i1", ",", "i1", "+", "2", "elec", "[", "'LowFilterType'", "]", "=", "unpack", "(", "'<H'", ",", "ExtendedHeader", "[", "i0", ":", "i1", "]", ")", "[", "0", "]", "ElectrodesInfo", "[", "ElectrodeID", "]", ".", "update", "(", "elec", ")", "elif", "PacketID", "==", "'DIGLABEL'", ":", "# TODO: the order is not taken into account and probably 
wrong!", "iolabel", "=", "{", "}", "iolabel", "[", "'mode'", "]", "=", "ExtendedHeader", "[", "24", "]", "+", "1", "s", "=", "_str", "(", "ExtendedHeader", "[", "8", ":", "25", "]", ".", "decode", "(", "'utf-8'", ")", ")", "iolabel", "[", "'label'", "]", "=", "s", "IOLabels", ".", "append", "(", "iolabel", ")", "else", ":", "raise", "NotImplementedError", "(", "PacketID", "+", "' not implemented yet'", ")", "hdr", "[", "'ChannelID'", "]", "=", "[", "x", "[", "'ElectrodeID'", "]", "for", "x", "in", "ElectrodesInfo", "]", "fExtendedHeader", "=", "f", ".", "tell", "(", ")", "fData", "=", "f", ".", "seek", "(", "0", ",", "SEEK_END", ")", "countDataPacket", "=", "int", "(", "(", "fData", "-", "fExtendedHeader", ")", "/", "hdr", "[", "'PacketBytes'", "]", ")", "markers", "=", "[", "]", "if", "read_markers", "and", "countDataPacket", ":", "f", ".", "seek", "(", "fExtendedHeader", ")", "x", "=", "f", ".", "read", "(", "countDataPacket", "*", "hdr", "[", "'PacketBytes'", "]", ")", "DigiValues", "=", "[", "]", "for", "j", "in", "range", "(", "countDataPacket", ")", ":", "i", "=", "j", "*", "hdr", "[", "'PacketBytes'", "]", "if", "trigger_bits", "==", "16", ":", "tempDigiVals", "=", "unpack", "(", "'<H'", ",", "x", "[", "8", "+", "i", ":", "10", "+", "i", "]", ")", "[", "0", "]", "else", ":", "tempDigiVals", "=", "unpack", "(", "'<H'", ",", "x", "[", "8", "+", "i", ":", "9", "+", "i", "]", "+", "b'\\x00'", ")", "[", "0", "]", "val", "=", "{", "'timestamp'", ":", "unpack", "(", "'<I'", ",", "x", "[", "0", "+", "i", ":", "4", "+", "i", "]", ")", "[", "0", "]", ",", "'packetID'", ":", "unpack", "(", "'<H'", ",", "x", "[", "4", "+", "i", ":", "6", "+", "i", "]", ")", "[", "0", "]", ",", "'tempClassOrReason'", ":", "unpack", "(", "'<B'", ",", "x", "[", "6", "+", "i", ":", "7", "+", "i", "]", ")", "[", "0", "]", ",", "'tempDigiVals'", ":", "tempDigiVals", "}", "if", "tempDigiVals", "!=", "0", "or", "False", ":", "DigiValues", ".", "append", "(", "val", ")", "digserPacketID", "=", "0", "not_serialdigital", "=", "[", "x", "for", "x", "in", "DigiValues", "if", "not", "x", "[", "'packetID'", "]", "==", "digserPacketID", "]", "if", "not_serialdigital", ":", "lg", ".", "debug", "(", "'Code not implemented to read PacketID '", "+", "str", "(", "not_serialdigital", "[", "0", "]", "[", "'packetID'", "]", ")", ")", "# convert to markers", "for", "val", "in", "DigiValues", ":", "m", "=", "{", "'name'", ":", "str", "(", "val", "[", "'tempDigiVals'", "]", ")", ",", "'start'", ":", "val", "[", "'timestamp'", "]", "/", "hdr", "[", "'SampleRes'", "]", ",", "'end'", ":", "val", "[", "'timestamp'", "]", "/", "hdr", "[", "'SampleRes'", "]", ",", "'chan'", ":", "[", "''", "]", ",", "}", "markers", ".", "append", "(", "m", ")", "if", "read_markers", ":", "return", "markers", "else", ":", "return", "hdr" ]
Read some information from NEV Parameters ---------- filename : str path to NEV file read_markers : bool whether to read markers or not (it can get really large) trigger_bits : int, optional 8 or 16, read the triggers as one or two bytes trigger_zero : bool, optional read the trigger zero or not Returns ------- MetaTags : list of dict which corresponds to MetaTags of openNEV Markers : list of dict markers in NEV file Notes ----- The conversion to DateTime in openNEV.m is not correct. They add a value of 2 to the day. Instead, they should add it to the index of the weekday It returns triggers as strings (format of EDFBrowser), but it does not read the othe types of events (waveforms, videos, etc). The time stamps are stored in UTC in the NSx files. However, time stamps in the NEV files are stored as local time up to Central 6.03 included and stored as UTC after Central 6.05. It's impossible to know the version of Central from the header.
[ "Read", "some", "information", "from", "NEV" ]
python
train
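A hedged usage sketch for the NEV reader above. The file name is hypothetical, and the function is module-private, so in practice it would be reached through wonambi's Blackrock IO rather than called directly.

hdr = _read_neuralev('session001.nev')                         # header/metadata dict
markers = _read_neuralev('session001.nev', read_markers=True)  # list of {'name', 'start', 'end', 'chan'}
print(hdr['SampleRes'], hdr['DataDurationSec'])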
loli/medpy
medpy/metric/histogram.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L389-L448
def relative_bin_deviation(h1, h2): # 79 us @array, 104 us @list \w 100 bins r""" Calculate the bin-wise deviation between two histograms. The relative bin deviation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{rbd}(H, H') = \sum_{m=1}^M \frac{ \sqrt{(H_m - H'_m)^2} }{ \frac{1}{2} \left( \sqrt{H_m^2} + \sqrt{{H'}_m^2} \right) } *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. Returns ------- relative_bin_deviation : float Relative bin deviation between the two histograms. """ h1, h2 = __prepare_histogram(h1, h2) numerator = scipy.sqrt(scipy.square(h1 - h2)) denominator = (scipy.sqrt(scipy.square(h1)) + scipy.sqrt(scipy.square(h2))) / 2. old_err_state = scipy.seterr(invalid='ignore') # divide through zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0 result = numerator / denominator scipy.seterr(**old_err_state) result[scipy.isnan(result)] = 0 # faster than scipy.nan_to_num, which checks for +inf and -inf also return scipy.sum(result)
[ "def", "relative_bin_deviation", "(", "h1", ",", "h2", ")", ":", "# 79 us @array, 104 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "numerator", "=", "scipy", ".", "sqrt", "(", "scipy", ".", "square", "(", "h1", "-", "h2", ")", ")", "denominator", "=", "(", "scipy", ".", "sqrt", "(", "scipy", ".", "square", "(", "h1", ")", ")", "+", "scipy", ".", "sqrt", "(", "scipy", ".", "square", "(", "h2", ")", ")", ")", "/", "2.", "old_err_state", "=", "scipy", ".", "seterr", "(", "invalid", "=", "'ignore'", ")", "# divide through zero only occurs when the bin is zero in both histograms, in which case the division is 0/0 and leads to (and should lead to) 0", "result", "=", "numerator", "/", "denominator", "scipy", ".", "seterr", "(", "*", "*", "old_err_state", ")", "result", "[", "scipy", ".", "isnan", "(", "result", ")", "]", "=", "0", "# faster than scipy.nan_to_num, which checks for +inf and -inf also", "return", "scipy", ".", "sum", "(", "result", ")" ]
r""" Calculate the bin-wise deviation between two histograms. The relative bin deviation between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{rbd}(H, H') = \sum_{m=1}^M \frac{ \sqrt{(H_m - H'_m)^2} }{ \frac{1}{2} \left( \sqrt{H_m^2} + \sqrt{{H'}_m^2} \right) } *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram, same bins as ``h1``. Returns ------- relative_bin_deviation : float Relative bin deviation between the two histograms.
[ "r", "Calculate", "the", "bin", "-", "wise", "deviation", "between", "two", "histograms", ".", "The", "relative", "bin", "deviation", "between", "two", "histograms", ":", "math", ":", "H", "and", ":", "math", ":", "H", "of", "size", ":", "math", ":", "m", "is", "defined", "as", ":", "..", "math", "::", "d_", "{", "rbd", "}", "(", "H", "H", ")", "=", "\\", "sum_", "{", "m", "=", "1", "}", "^M", "\\", "frac", "{", "\\", "sqrt", "{", "(", "H_m", "-", "H", "_m", ")", "^2", "}", "}", "{", "\\", "frac", "{", "1", "}", "{", "2", "}", "\\", "left", "(", "\\", "sqrt", "{", "H_m^2", "}", "+", "\\", "sqrt", "{{", "H", "}", "_m^2", "}", "\\", "right", ")", "}", "*", "Attributes", ":", "*" ]
python
train
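A small usage sketch for the metric above, assuming the public import path mirrors the file path in this record; the histograms are toy values.

from medpy.metric.histogram import relative_bin_deviation

h1 = [0.1, 0.3, 0.4, 0.2]
h2 = [0.2, 0.3, 0.3, 0.2]
print(relative_bin_deviation(h1, h2))  # 0.0 only when the two histograms match bin-wise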
rsteca/sklearn-deap
evolutionary_search/cv.py
https://github.com/rsteca/sklearn-deap/blob/b7ee1722a40cc0c6550d32a2714ab220db2b7430/evolutionary_search/cv.py#L316-L318
def possible_params(self): """ Used when assuming params is a list. """ return self.params if isinstance(self.params, list) else [self.params]
[ "def", "possible_params", "(", "self", ")", ":", "return", "self", ".", "params", "if", "isinstance", "(", "self", ".", "params", ",", "list", ")", "else", "[", "self", ".", "params", "]" ]
Used when assuming params is a list.
[ "Used", "when", "assuming", "params", "is", "a", "list", "." ]
python
train
igvteam/igv-jupyter
igv/browser.py
https://github.com/igvteam/igv-jupyter/blob/f93752ce507eae893c203325764551647e28a3dc/igv/browser.py#L151-L166
def on(self, eventName, cb): """ Subscribe to an igv.js event. :param Name of the event. Currently only "locuschange" is supported. :type str :param cb - callback function taking a single argument. For the locuschange event this argument will contain a dictionary of the form {chr, start, end} :type function """ self.eventHandlers[eventName] = cb return self._send({ "id": self.igv_id, "command": "on", "eventName": eventName })
[ "def", "on", "(", "self", ",", "eventName", ",", "cb", ")", ":", "self", ".", "eventHandlers", "[", "eventName", "]", "=", "cb", "return", "self", ".", "_send", "(", "{", "\"id\"", ":", "self", ".", "igv_id", ",", "\"command\"", ":", "\"on\"", ",", "\"eventName\"", ":", "eventName", "}", ")" ]
Subscribe to an igv.js event. :param eventName: Name of the event. Currently only "locuschange" is supported. :type str :param cb - callback function taking a single argument. For the locuschange event this argument will contain a dictionary of the form {chr, start, end} :type function
[ "Subscribe", "to", "an", "igv", ".", "js", "event", "." ]
python
train
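A short usage sketch for on(); the browser variable b and its setup are assumptions, and per the docstring only the "locuschange" event is supported.

def handle_locus_change(data):
    # data is expected to look like {'chr': ..., 'start': ..., 'end': ...}
    print('now viewing', data)

b.on('locuschange', handle_locus_change)  # b: an igv browser instance created elsewhere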
icgood/pymap
pymap/backend/mailbox.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/backend/mailbox.py#L186-L202
async def find(self, seq_set: SequenceSet, selected: SelectedMailbox, requirement: FetchRequirement = FetchRequirement.METADATA) \ -> AsyncIterable[Tuple[int, MessageT]]: """Find the active message UID and message pairs in the mailbox that are contained in the given sequences set. Message sequence numbers are resolved by the selected mailbox session. Args: seq_set: The sequence set of the desired messages. selected: The selected mailbox session. requirement: The data required from each message. """ for seq, cached_msg in selected.messages.get_all(seq_set): msg = await self.get(cached_msg.uid, cached_msg, requirement) if msg is not None: yield (seq, msg)
[ "async", "def", "find", "(", "self", ",", "seq_set", ":", "SequenceSet", ",", "selected", ":", "SelectedMailbox", ",", "requirement", ":", "FetchRequirement", "=", "FetchRequirement", ".", "METADATA", ")", "->", "AsyncIterable", "[", "Tuple", "[", "int", ",", "MessageT", "]", "]", ":", "for", "seq", ",", "cached_msg", "in", "selected", ".", "messages", ".", "get_all", "(", "seq_set", ")", ":", "msg", "=", "await", "self", ".", "get", "(", "cached_msg", ".", "uid", ",", "cached_msg", ",", "requirement", ")", "if", "msg", "is", "not", "None", ":", "yield", "(", "seq", ",", "msg", ")" ]
Find the active message UID and message pairs in the mailbox that are contained in the given sequences set. Message sequence numbers are resolved by the selected mailbox session. Args: seq_set: The sequence set of the desired messages. selected: The selected mailbox session. requirement: The data required from each message.
[ "Find", "the", "active", "message", "UID", "and", "message", "pairs", "in", "the", "mailbox", "that", "are", "contained", "in", "the", "given", "sequences", "set", ".", "Message", "sequence", "numbers", "are", "resolved", "by", "the", "selected", "mailbox", "session", "." ]
python
train
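Because find() is an async generator, callers iterate it with async for. Here mailbox, seq_set and selected stand in for objects produced elsewhere in pymap.

async def collect_messages(mailbox, seq_set, selected):
    results = []
    async for seq, msg in mailbox.find(seq_set, selected):
        results.append((seq, msg))
    return results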
bpsmith/tia
tia/rlab/table.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L688-L694
def set_border_type(self, clazz, weight=DefaultWeight, color=None, cap=None, dashes=None, join=None, count=None, space=None): """example: set_border_type(BorderTypePartialRows) would set a border above and below each row in the range""" args = locals() args.pop('clazz') args.pop('self') clazz(**args).apply(self)
[ "def", "set_border_type", "(", "self", ",", "clazz", ",", "weight", "=", "DefaultWeight", ",", "color", "=", "None", ",", "cap", "=", "None", ",", "dashes", "=", "None", ",", "join", "=", "None", ",", "count", "=", "None", ",", "space", "=", "None", ")", ":", "args", "=", "locals", "(", ")", "args", ".", "pop", "(", "'clazz'", ")", "args", ".", "pop", "(", "'self'", ")", "clazz", "(", "*", "*", "args", ")", ".", "apply", "(", "self", ")" ]
example: set_border_type(BorderTypePartialRows) would set a border above and below each row in the range
[ "example", ":", "set_border_type", "(", "BorderTypePartialRows", ")", "would", "set", "a", "border", "above", "and", "below", "each", "row", "in", "the", "range" ]
python
train
genesluder/python-agiletixapi
agiletixapi/utils.py
https://github.com/genesluder/python-agiletixapi/blob/a7a3907414cd5623f4542b03cb970862368a894a/agiletixapi/utils.py#L86-L94
def to_underscore(s): """Transform camel or pascal case to underscore separated string """ return re.sub( r'(?!^)([A-Z]+)', lambda m: "_{0}".format(m.group(1).lower()), re.sub(r'(?!^)([A-Z]{1}[a-z]{1})', lambda m: "_{0}".format(m.group(1).lower()), s) ).lower()
[ "def", "to_underscore", "(", "s", ")", ":", "return", "re", ".", "sub", "(", "r'(?!^)([A-Z]+)'", ",", "lambda", "m", ":", "\"_{0}\"", ".", "format", "(", "m", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ")", ",", "re", ".", "sub", "(", "r'(?!^)([A-Z]{1}[a-z]{1})'", ",", "lambda", "m", ":", "\"_{0}\"", ".", "format", "(", "m", ".", "group", "(", "1", ")", ".", "lower", "(", ")", ")", ",", "s", ")", ")", ".", "lower", "(", ")" ]
Transform camel or pascal case to underscore separated string
[ "Transform", "camel", "or", "pascal", "case", "to", "underscore", "separated", "string" ]
python
train
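A couple of illustrative calls for the helper above; simple camel- and pascal-case inputs map as shown.

to_underscore('CamelCase')       # -> 'camel_case'
to_underscore('eventStartDate')  # -> 'event_start_date'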
lingthio/Flask-User
flask_user/db_adapters/sql_db_adapter.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/db_adapters/sql_db_adapter.py#L91-L116
def ifind_first_object(self, ObjectClass, **kwargs): """ Retrieve the first object of type ``ObjectClass``, matching the specified filters in ``**kwargs`` -- case insensitive. | If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object(). | If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find. """ # Call regular find() if USER_IFIND_MODE is nocase_collation if self.user_manager.USER_IFIND_MODE=='nocase_collation': return self.find_first_object(ObjectClass, **kwargs) # Convert each name/value pair in 'kwargs' into a filter query = ObjectClass.query for field_name, field_value in kwargs.items(): # Make sure that ObjectClass has a 'field_name' property field = getattr(ObjectClass, field_name, None) if field is None: raise KeyError("BaseAlchemyAdapter.find_first_object(): Class '%s' has no field '%s'." % (ObjectClass, field_name)) # Add a case sensitive filter to the query query = query.filter(field.ifind(field_value)) # case insensitive!! # Execute query return query.first()
[ "def", "ifind_first_object", "(", "self", ",", "ObjectClass", ",", "*", "*", "kwargs", ")", ":", "# Call regular find() if USER_IFIND_MODE is nocase_collation", "if", "self", ".", "user_manager", ".", "USER_IFIND_MODE", "==", "'nocase_collation'", ":", "return", "self", ".", "find_first_object", "(", "ObjectClass", ",", "*", "*", "kwargs", ")", "# Convert each name/value pair in 'kwargs' into a filter", "query", "=", "ObjectClass", ".", "query", "for", "field_name", ",", "field_value", "in", "kwargs", ".", "items", "(", ")", ":", "# Make sure that ObjectClass has a 'field_name' property", "field", "=", "getattr", "(", "ObjectClass", ",", "field_name", ",", "None", ")", "if", "field", "is", "None", ":", "raise", "KeyError", "(", "\"BaseAlchemyAdapter.find_first_object(): Class '%s' has no field '%s'.\"", "%", "(", "ObjectClass", ",", "field_name", ")", ")", "# Add a case sensitive filter to the query", "query", "=", "query", ".", "filter", "(", "field", ".", "ifind", "(", "field_value", ")", ")", "# case insensitive!!", "# Execute query", "return", "query", ".", "first", "(", ")" ]
Retrieve the first object of type ``ObjectClass``, matching the specified filters in ``**kwargs`` -- case insensitive. | If USER_IFIND_MODE is 'nocase_collation' this method maps to find_first_object(). | If USER_IFIND_MODE is 'ifind' this method performs a case insensitive find.
[ "Retrieve", "the", "first", "object", "of", "type", "ObjectClass", "matching", "the", "specified", "filters", "in", "**", "kwargs", "--", "case", "insensitive", "." ]
python
train
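A hedged usage sketch; db_adapter is assumed to be a configured SQLDbAdapter and User a mapped model with an email column.

user = db_adapter.ifind_first_object(User, email='[email protected]')
if user is None:
    print('no user matched (case-insensitive lookup)')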
KarchinLab/probabilistic2020
prob2020/python/indel.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/indel.py#L146-L181
def keep_indels(mut_df, indel_len_col=True, indel_type_col=True): """Filters out all mutations that are not indels. Requires that one of the alleles have '-' indicating either an insertion or deletion depending if found in reference allele or somatic allele columns, respectively. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format indel_len_col : bool whether or not to add a column indicating the length of the indel Returns ------- mut_df : pd.DataFrame mutations with only frameshift mutations kept """ # keep only frameshifts mut_df = mut_df[is_indel_annotation(mut_df)] if indel_len_col: # calculate length mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df) if indel_type_col: is_ins = mut_df['Reference_Allele']=='-' is_del = mut_df['Tumor_Allele']=='-' mut_df['indel type'] = '' mut_df.loc[is_ins, 'indel type'] = 'INS' mut_df.loc[is_del, 'indel type'] = 'DEL' return mut_df
[ "def", "keep_indels", "(", "mut_df", ",", "indel_len_col", "=", "True", ",", "indel_type_col", "=", "True", ")", ":", "# keep only frameshifts", "mut_df", "=", "mut_df", "[", "is_indel_annotation", "(", "mut_df", ")", "]", "if", "indel_len_col", ":", "# calculate length", "mut_df", ".", "loc", "[", ":", ",", "'indel len'", "]", "=", "compute_indel_length", "(", "mut_df", ")", "if", "indel_type_col", ":", "is_ins", "=", "mut_df", "[", "'Reference_Allele'", "]", "==", "'-'", "is_del", "=", "mut_df", "[", "'Tumor_Allele'", "]", "==", "'-'", "mut_df", "[", "'indel type'", "]", "=", "''", "mut_df", ".", "loc", "[", "is_ins", ",", "'indel type'", "]", "=", "'INS'", "mut_df", ".", "loc", "[", "is_del", ",", "'indel type'", "]", "=", "'DEL'", "return", "mut_df" ]
Filters out all mutations that are not indels. Requires that one of the alleles have '-' indicating either an insertion or deletion depending if found in reference allele or somatic allele columns, respectively. Parameters ---------- mut_df : pd.DataFrame mutation input file as a dataframe in standard format indel_len_col : bool whether or not to add a column indicating the length of the indel Returns ------- mut_df : pd.DataFrame mutations with only frameshift mutations kept
[ "Filters", "out", "all", "mutations", "that", "are", "not", "indels", "." ]
python
train
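A toy example of calling keep_indels. The frame uses the MAF-style Reference_Allele / Tumor_Allele columns the function inspects; any further columns required by the module's is_indel_annotation helper are assumed to be present, and the import path simply mirrors the file path in this record.

import pandas as pd
from prob2020.python.indel import keep_indels

mut_df = pd.DataFrame({
    'Reference_Allele': ['A',  '-',  'ACT'],
    'Tumor_Allele':     ['T',  'AG', '-'],
})
indels = keep_indels(mut_df)
# SNV row is dropped; remaining rows gain 'indel len' and 'indel type' (INS / DEL) columns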
raiden-network/raiden
raiden/network/proxies/token.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/token.py#L49-L112
def approve( self, allowed_address: Address, allowance: TokenAmount, ): """ Aprove `allowed_address` to transfer up to `deposit` amount of token. Note: For channel deposit please use the channel proxy, since it does additional validations. """ # Note that given_block_identifier is not used here as there # are no preconditions to check before sending the transaction log_details = { 'node': pex(self.node_address), 'contract': pex(self.address), 'allowed_address': pex(allowed_address), 'allowance': allowance, } checking_block = self.client.get_checking_block() error_prefix = 'Call to approve will fail' gas_limit = self.proxy.estimate_gas( checking_block, 'approve', to_checksum_address(allowed_address), allowance, ) if gas_limit: error_prefix = 'Call to approve failed' log.debug('approve called', **log_details) transaction_hash = self.proxy.transact( 'approve', safe_gas_limit(gas_limit), to_checksum_address(allowed_address), allowance, ) self.client.poll(transaction_hash) receipt_or_none = check_transaction_threw(self.client, transaction_hash) transaction_executed = gas_limit is not None if not transaction_executed or receipt_or_none: if transaction_executed: block = receipt_or_none['blockNumber'] else: block = checking_block self.proxy.jsonrpc_client.check_for_insufficient_eth( transaction_name='approve', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_APPROVE, block_identifier=block, ) msg = self._check_why_approved_failed(allowance, block) error_msg = f'{error_prefix}. {msg}' log.critical(error_msg, **log_details) raise RaidenUnrecoverableError(error_msg) log.info('approve successful', **log_details)
[ "def", "approve", "(", "self", ",", "allowed_address", ":", "Address", ",", "allowance", ":", "TokenAmount", ",", ")", ":", "# Note that given_block_identifier is not used here as there", "# are no preconditions to check before sending the transaction", "log_details", "=", "{", "'node'", ":", "pex", "(", "self", ".", "node_address", ")", ",", "'contract'", ":", "pex", "(", "self", ".", "address", ")", ",", "'allowed_address'", ":", "pex", "(", "allowed_address", ")", ",", "'allowance'", ":", "allowance", ",", "}", "checking_block", "=", "self", ".", "client", ".", "get_checking_block", "(", ")", "error_prefix", "=", "'Call to approve will fail'", "gas_limit", "=", "self", ".", "proxy", ".", "estimate_gas", "(", "checking_block", ",", "'approve'", ",", "to_checksum_address", "(", "allowed_address", ")", ",", "allowance", ",", ")", "if", "gas_limit", ":", "error_prefix", "=", "'Call to approve failed'", "log", ".", "debug", "(", "'approve called'", ",", "*", "*", "log_details", ")", "transaction_hash", "=", "self", ".", "proxy", ".", "transact", "(", "'approve'", ",", "safe_gas_limit", "(", "gas_limit", ")", ",", "to_checksum_address", "(", "allowed_address", ")", ",", "allowance", ",", ")", "self", ".", "client", ".", "poll", "(", "transaction_hash", ")", "receipt_or_none", "=", "check_transaction_threw", "(", "self", ".", "client", ",", "transaction_hash", ")", "transaction_executed", "=", "gas_limit", "is", "not", "None", "if", "not", "transaction_executed", "or", "receipt_or_none", ":", "if", "transaction_executed", ":", "block", "=", "receipt_or_none", "[", "'blockNumber'", "]", "else", ":", "block", "=", "checking_block", "self", ".", "proxy", ".", "jsonrpc_client", ".", "check_for_insufficient_eth", "(", "transaction_name", "=", "'approve'", ",", "transaction_executed", "=", "transaction_executed", ",", "required_gas", "=", "GAS_REQUIRED_FOR_APPROVE", ",", "block_identifier", "=", "block", ",", ")", "msg", "=", "self", ".", "_check_why_approved_failed", "(", "allowance", ",", "block", ")", "error_msg", "=", "f'{error_prefix}. {msg}'", "log", ".", "critical", "(", "error_msg", ",", "*", "*", "log_details", ")", "raise", "RaidenUnrecoverableError", "(", "error_msg", ")", "log", ".", "info", "(", "'approve successful'", ",", "*", "*", "log_details", ")" ]
Approve `allowed_address` to transfer up to `deposit` amount of token. Note: For channel deposit please use the channel proxy, since it does additional validations.
[ "Aprove", "allowed_address", "to", "transfer", "up", "to", "deposit", "amount", "of", "token", "." ]
python
train
pmacosta/ptrie
ptrie/ptrie.py
https://github.com/pmacosta/ptrie/blob/c176d3ee810b7b5243c7ff2bbf2f1af0b0fff2a8/ptrie/ptrie.py#L426-L508
def add_nodes(self, nodes): # noqa: D302 r""" Add nodes to tree. :param nodes: Node(s) to add with associated data. If there are several list items in the argument with the same node name the resulting node data is a list with items corresponding to the data of each entry in the argument with the same node name, in their order of appearance, in addition to any existing node data if the node is already present in the tree :type nodes: :ref:`NodesWithData` :raises: * RuntimeError (Argument \`nodes\` is not valid) * ValueError (Illegal node name: *[node_name]*) For example: .. =[=cog .. import docs.support.incfile .. docs.support.incfile.incfile('ptrie_example.py', cog.out) .. =]= .. code-block:: python # ptrie_example.py import ptrie def create_tree(): tobj = ptrie.Trie() tobj.add_nodes([ {'name':'root.branch1', 'data':5}, {'name':'root.branch1', 'data':7}, {'name':'root.branch2', 'data':[]}, {'name':'root.branch1.leaf1', 'data':[]}, {'name':'root.branch1.leaf1.subleaf1', 'data':333}, {'name':'root.branch1.leaf2', 'data':'Hello world!'}, {'name':'root.branch1.leaf2.subleaf2', 'data':[]}, ]) return tobj .. =[=end=]= .. code-block:: python >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> tobj.get_data('root.branch1') [5, 7] """ self._validate_nodes_with_data(nodes) nodes = nodes if isinstance(nodes, list) else [nodes] # Create root node (if needed) if not self.root_name: self._set_root_name(nodes[0]["name"].split(self._node_separator)[0].strip()) self._root_hierarchy_length = len( self.root_name.split(self._node_separator) ) self._create_node(name=self.root_name, parent="", children=[], data=[]) # Process new data for node_dict in nodes: name, data = node_dict["name"], node_dict["data"] if name not in self._db: # Validate node name (root of new node same as tree root) if not name.startswith(self.root_name + self._node_separator): raise ValueError("Illegal node name: {0}".format(name)) self._create_intermediate_nodes(name) self._db[name]["data"] += copy.deepcopy( data if isinstance(data, list) and data else ([] if isinstance(data, list) else [data]) )
[ "def", "add_nodes", "(", "self", ",", "nodes", ")", ":", "# noqa: D302", "self", ".", "_validate_nodes_with_data", "(", "nodes", ")", "nodes", "=", "nodes", "if", "isinstance", "(", "nodes", ",", "list", ")", "else", "[", "nodes", "]", "# Create root node (if needed)", "if", "not", "self", ".", "root_name", ":", "self", ".", "_set_root_name", "(", "nodes", "[", "0", "]", "[", "\"name\"", "]", ".", "split", "(", "self", ".", "_node_separator", ")", "[", "0", "]", ".", "strip", "(", ")", ")", "self", ".", "_root_hierarchy_length", "=", "len", "(", "self", ".", "root_name", ".", "split", "(", "self", ".", "_node_separator", ")", ")", "self", ".", "_create_node", "(", "name", "=", "self", ".", "root_name", ",", "parent", "=", "\"\"", ",", "children", "=", "[", "]", ",", "data", "=", "[", "]", ")", "# Process new data", "for", "node_dict", "in", "nodes", ":", "name", ",", "data", "=", "node_dict", "[", "\"name\"", "]", ",", "node_dict", "[", "\"data\"", "]", "if", "name", "not", "in", "self", ".", "_db", ":", "# Validate node name (root of new node same as tree root)", "if", "not", "name", ".", "startswith", "(", "self", ".", "root_name", "+", "self", ".", "_node_separator", ")", ":", "raise", "ValueError", "(", "\"Illegal node name: {0}\"", ".", "format", "(", "name", ")", ")", "self", ".", "_create_intermediate_nodes", "(", "name", ")", "self", ".", "_db", "[", "name", "]", "[", "\"data\"", "]", "+=", "copy", ".", "deepcopy", "(", "data", "if", "isinstance", "(", "data", ",", "list", ")", "and", "data", "else", "(", "[", "]", "if", "isinstance", "(", "data", ",", "list", ")", "else", "[", "data", "]", ")", ")" ]
r""" Add nodes to tree. :param nodes: Node(s) to add with associated data. If there are several list items in the argument with the same node name the resulting node data is a list with items corresponding to the data of each entry in the argument with the same node name, in their order of appearance, in addition to any existing node data if the node is already present in the tree :type nodes: :ref:`NodesWithData` :raises: * RuntimeError (Argument \`nodes\` is not valid) * ValueError (Illegal node name: *[node_name]*) For example: .. =[=cog .. import docs.support.incfile .. docs.support.incfile.incfile('ptrie_example.py', cog.out) .. =]= .. code-block:: python # ptrie_example.py import ptrie def create_tree(): tobj = ptrie.Trie() tobj.add_nodes([ {'name':'root.branch1', 'data':5}, {'name':'root.branch1', 'data':7}, {'name':'root.branch2', 'data':[]}, {'name':'root.branch1.leaf1', 'data':[]}, {'name':'root.branch1.leaf1.subleaf1', 'data':333}, {'name':'root.branch1.leaf2', 'data':'Hello world!'}, {'name':'root.branch1.leaf2.subleaf2', 'data':[]}, ]) return tobj .. =[=end=]= .. code-block:: python >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> tobj.get_data('root.branch1') [5, 7]
[ "r", "Add", "nodes", "to", "tree", "." ]
python
train
ultrabug/py3status
py3status/parse_config.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/parse_config.py#L180-L214
def check_child_friendly(self, name): """ Check if a module is a container and so can have children """ name = name.split()[0] if name in self.container_modules: return root = os.path.dirname(os.path.realpath(__file__)) module_path = os.path.join(root, "modules") try: info = imp.find_module(name, [module_path]) except ImportError: return if not info: return (file, pathname, description) = info try: py_mod = imp.load_module(name, file, pathname, description) except Exception: # We cannot load the module! We could error out here but then the # user gets informed that the problem is with their config. This # is not correct. Better to say that all is well and then the # config can get parsed and py3status loads. The error about the # failing module load is better handled at that point, and will be. return try: container = py_mod.Py3status.Meta.container except AttributeError: container = False # delete the module del py_mod if container: self.container_modules.append(name) else: self.error("Module `{}` cannot contain others".format(name))
[ "def", "check_child_friendly", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "split", "(", ")", "[", "0", "]", "if", "name", "in", "self", ".", "container_modules", ":", "return", "root", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "module_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "\"modules\"", ")", "try", ":", "info", "=", "imp", ".", "find_module", "(", "name", ",", "[", "module_path", "]", ")", "except", "ImportError", ":", "return", "if", "not", "info", ":", "return", "(", "file", ",", "pathname", ",", "description", ")", "=", "info", "try", ":", "py_mod", "=", "imp", ".", "load_module", "(", "name", ",", "file", ",", "pathname", ",", "description", ")", "except", "Exception", ":", "# We cannot load the module! We could error out here but then the", "# user gets informed that the problem is with their config. This", "# is not correct. Better to say that all is well and then the", "# config can get parsed and py3status loads. The error about the", "# failing module load is better handled at that point, and will be.", "return", "try", ":", "container", "=", "py_mod", ".", "Py3status", ".", "Meta", ".", "container", "except", "AttributeError", ":", "container", "=", "False", "# delete the module", "del", "py_mod", "if", "container", ":", "self", ".", "container_modules", ".", "append", "(", "name", ")", "else", ":", "self", ".", "error", "(", "\"Module `{}` cannot contain others\"", ".", "format", "(", "name", ")", ")" ]
Check if a module is a container and so can have children
[ "Check", "if", "a", "module", "is", "a", "container", "and", "so", "can", "have", "children" ]
python
train
singularityhub/singularity-python
singularity/views/trees.py
https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L250-L284
def make_interactive_tree(matrix=None,labels=None): '''make interactive tree will return complete html for an interactive tree :param title: a title for the plot, if not defined, will be left out. ''' from scipy.cluster.hierarchy import ( dendrogram, linkage, to_tree ) d3 = None from scipy.cluster.hierarchy import cophenet from scipy.spatial.distance import pdist if isinstance(matrix,pandas.DataFrame): Z = linkage(matrix, 'ward') # clusters T = to_tree(Z, rd=False) if labels == None: labels = matrix.index.tolist() lookup = dict(zip(range(len(labels)), labels)) # Create a dendrogram object without plotting dend = dendrogram(Z,no_plot=True, orientation="right", leaf_rotation=90., # rotates the x axis labels leaf_font_size=8., # font size for the x axis labels labels=labels) d3 = dict(children=[], name="root") add_node(T, d3) label_tree(d3["children"][0],lookup) else: bot.warning('Please provide data as pandas Data Frame.') return d3
[ "def", "make_interactive_tree", "(", "matrix", "=", "None", ",", "labels", "=", "None", ")", ":", "from", "scipy", ".", "cluster", ".", "hierarchy", "import", "(", "dendrogram", ",", "linkage", ",", "to_tree", ")", "d3", "=", "None", "from", "scipy", ".", "cluster", ".", "hierarchy", "import", "cophenet", "from", "scipy", ".", "spatial", ".", "distance", "import", "pdist", "if", "isinstance", "(", "matrix", ",", "pandas", ".", "DataFrame", ")", ":", "Z", "=", "linkage", "(", "matrix", ",", "'ward'", ")", "# clusters", "T", "=", "to_tree", "(", "Z", ",", "rd", "=", "False", ")", "if", "labels", "==", "None", ":", "labels", "=", "matrix", ".", "index", ".", "tolist", "(", ")", "lookup", "=", "dict", "(", "zip", "(", "range", "(", "len", "(", "labels", ")", ")", ",", "labels", ")", ")", "# Create a dendrogram object without plotting", "dend", "=", "dendrogram", "(", "Z", ",", "no_plot", "=", "True", ",", "orientation", "=", "\"right\"", ",", "leaf_rotation", "=", "90.", ",", "# rotates the x axis labels", "leaf_font_size", "=", "8.", ",", "# font size for the x axis labels", "labels", "=", "labels", ")", "d3", "=", "dict", "(", "children", "=", "[", "]", ",", "name", "=", "\"root\"", ")", "add_node", "(", "T", ",", "d3", ")", "label_tree", "(", "d3", "[", "\"children\"", "]", "[", "0", "]", ",", "lookup", ")", "else", ":", "bot", ".", "warning", "(", "'Please provide data as pandas Data Frame.'", ")", "return", "d3" ]
make interactive tree will return complete html for an interactive tree :param title: a title for the plot, if not defined, will be left out.
[ "make", "interactive", "tree", "will", "return", "complete", "html", "for", "an", "interactive", "tree", ":", "param", "title", ":", "a", "title", "for", "the", "plot", "if", "not", "defined", "will", "be", "left", "out", "." ]
python
train
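A minimal call sketch; the feature matrix is a toy DataFrame, the import path is assumed to mirror the file path in this record, and the nested dict returned is what a downstream d3 template would consume.

import pandas
from singularity.views.trees import make_interactive_tree

matrix = pandas.DataFrame([[0, 1, 2], [1, 0, 2], [2, 2, 0]],
                          index=['imgA', 'imgB', 'imgC'])
tree = make_interactive_tree(matrix=matrix)  # {'name': 'root', 'children': [...]}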
synw/dataswim
dataswim/data/stats.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/stats.py#L8-L21
def lreg(self, xcol, ycol, name="Regression"): """ Add a column to the main dataframe populted with the model's linear regression for a column """ try: x = self.df[xcol].values.reshape(-1, 1) y = self.df[ycol] lm = linear_model.LinearRegression() lm.fit(x, y) predictions = lm.predict(x) self.df[name] = predictions except Exception as e: self.err(e, "Can not calculate linear regression")
[ "def", "lreg", "(", "self", ",", "xcol", ",", "ycol", ",", "name", "=", "\"Regression\"", ")", ":", "try", ":", "x", "=", "self", ".", "df", "[", "xcol", "]", ".", "values", ".", "reshape", "(", "-", "1", ",", "1", ")", "y", "=", "self", ".", "df", "[", "ycol", "]", "lm", "=", "linear_model", ".", "LinearRegression", "(", ")", "lm", ".", "fit", "(", "x", ",", "y", ")", "predictions", "=", "lm", ".", "predict", "(", "x", ")", "self", ".", "df", "[", "name", "]", "=", "predictions", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not calculate linear regression\"", ")" ]
Add a column to the main dataframe populated with the model's linear regression for a column
[ "Add", "a", "column", "to", "the", "main", "dataframe", "populted", "with", "the", "model", "s", "linear", "regression", "for", "a", "column" ]
python
train
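A usage sketch for lreg(); ds stands for a dataswim instance whose df already holds the two columns, which is an assumption about the surrounding setup.

ds.lreg('temperature', 'sales', name='trend')
# ds.df now has a 'trend' column holding the fitted values of the linear model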
google/brotli
research/brotlidump.py
https://github.com/google/brotli/blob/4b2b2d4f83ffeaac7708e44409fe34896a01a278/research/brotlidump.py#L1385-L1422
def formatBitData(self, pos, width1, width2=0): """Show formatted bit data: Bytes are separated by commas whole bytes are displayed in hex >>> Layout(olleke).formatBitData(6, 2, 16) '|00h|2Eh,|00' >>> Layout(olleke).formatBitData(4, 1, 0) '1' """ result = [] #make empty prefix code explicit if width1==0: result = ['()', ','] for width in width1, width2: #skip empty width2 if width==0: continue #build result backwards in a list while width>0: availableBits = 8-(pos&7) if width<availableBits: #read partial byte, beginning nor ending at boundary data = self.stream.data[pos>>3] >> (pos&7) & (1<<width)-1 result.append('{:0{}b}'.format(data, width)) elif availableBits<8: #read rest of byte, ending at boundary data = self.stream.data[pos>>3] >> (pos&7) result.append('|{:0{}b}'.format(data, availableBits)) else: #read whole byte (in hex), beginning and ending at boundary data = self.stream.data[pos>>3] result.append('|{:02X}h'.format(data)) width -= availableBits pos += availableBits #if width overshot from the availableBits subtraction, fix it pos += width #add comma to separate fields result.append(',') #concatenate pieces, reversed, skipping the last space return ''.join(result[-2::-1])
[ "def", "formatBitData", "(", "self", ",", "pos", ",", "width1", ",", "width2", "=", "0", ")", ":", "result", "=", "[", "]", "#make empty prefix code explicit", "if", "width1", "==", "0", ":", "result", "=", "[", "'()'", ",", "','", "]", "for", "width", "in", "width1", ",", "width2", ":", "#skip empty width2", "if", "width", "==", "0", ":", "continue", "#build result backwards in a list", "while", "width", ">", "0", ":", "availableBits", "=", "8", "-", "(", "pos", "&", "7", ")", "if", "width", "<", "availableBits", ":", "#read partial byte, beginning nor ending at boundary", "data", "=", "self", ".", "stream", ".", "data", "[", "pos", ">>", "3", "]", ">>", "(", "pos", "&", "7", ")", "&", "(", "1", "<<", "width", ")", "-", "1", "result", ".", "append", "(", "'{:0{}b}'", ".", "format", "(", "data", ",", "width", ")", ")", "elif", "availableBits", "<", "8", ":", "#read rest of byte, ending at boundary", "data", "=", "self", ".", "stream", ".", "data", "[", "pos", ">>", "3", "]", ">>", "(", "pos", "&", "7", ")", "result", ".", "append", "(", "'|{:0{}b}'", ".", "format", "(", "data", ",", "availableBits", ")", ")", "else", ":", "#read whole byte (in hex), beginning and ending at boundary", "data", "=", "self", ".", "stream", ".", "data", "[", "pos", ">>", "3", "]", "result", ".", "append", "(", "'|{:02X}h'", ".", "format", "(", "data", ")", ")", "width", "-=", "availableBits", "pos", "+=", "availableBits", "#if width overshot from the availableBits subtraction, fix it", "pos", "+=", "width", "#add comma to separate fields", "result", ".", "append", "(", "','", ")", "#concatenate pieces, reversed, skipping the last space", "return", "''", ".", "join", "(", "result", "[", "-", "2", ":", ":", "-", "1", "]", ")" ]
Show formatted bit data: Bytes are separated by commas whole bytes are displayed in hex >>> Layout(olleke).formatBitData(6, 2, 16) '|00h|2Eh,|00' >>> Layout(olleke).formatBitData(4, 1, 0) '1'
[ "Show", "formatted", "bit", "data", ":", "Bytes", "are", "separated", "by", "commas", "whole", "bytes", "are", "displayed", "in", "hex", ">>>", "Layout", "(", "olleke", ")", ".", "formatBitData", "(", "6", "2", "16", ")", "|00h|2Eh", "|00", ">>>", "Layout", "(", "olleke", ")", ".", "formatBitData", "(", "4", "1", "0", ")", "1" ]
python
test
jbloomlab/phydms
phydmslib/treelikelihood.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/treelikelihood.py#L553-L582
def paramsarray(self, value): """Set new `paramsarray` and update via `updateParams`.""" nparams = len(self._index_to_param) assert (isinstance(value, scipy.ndarray) and value.ndim == 1), ( "paramsarray must be 1-dim ndarray") assert len(value) == nparams, ("Assigning paramsarray to ndarray " "of the wrong length.") if (self._paramsarray is not None) and all(value == self._paramsarray): return # do not need to do anything if value has not changed # build `newvalues` to pass to `updateParams` newvalues = {} vectorized_params = {} for (i, param) in self._index_to_param.items(): if isinstance(param, str): newvalues[param] = float(value[i]) elif isinstance(param, tuple): (iparam, iparamindex) = param if iparam in vectorized_params: assert iparamindex not in vectorized_params[iparam] vectorized_params[iparam][iparamindex] = float(value[i]) else: vectorized_params[iparam] = {iparamindex:float(value[i])} else: raise ValueError("Invalid param type") for (param, paramd) in vectorized_params.items(): assert set(paramd.keys()) == set(range(len(paramd))) newvalues[param] = scipy.array([paramd[i] for i in range(len(paramd))], dtype='float') self.updateParams(newvalues) self._paramsarray = self.paramsarray
[ "def", "paramsarray", "(", "self", ",", "value", ")", ":", "nparams", "=", "len", "(", "self", ".", "_index_to_param", ")", "assert", "(", "isinstance", "(", "value", ",", "scipy", ".", "ndarray", ")", "and", "value", ".", "ndim", "==", "1", ")", ",", "(", "\"paramsarray must be 1-dim ndarray\"", ")", "assert", "len", "(", "value", ")", "==", "nparams", ",", "(", "\"Assigning paramsarray to ndarray \"", "\"of the wrong length.\"", ")", "if", "(", "self", ".", "_paramsarray", "is", "not", "None", ")", "and", "all", "(", "value", "==", "self", ".", "_paramsarray", ")", ":", "return", "# do not need to do anything if value has not changed", "# build `newvalues` to pass to `updateParams`", "newvalues", "=", "{", "}", "vectorized_params", "=", "{", "}", "for", "(", "i", ",", "param", ")", "in", "self", ".", "_index_to_param", ".", "items", "(", ")", ":", "if", "isinstance", "(", "param", ",", "str", ")", ":", "newvalues", "[", "param", "]", "=", "float", "(", "value", "[", "i", "]", ")", "elif", "isinstance", "(", "param", ",", "tuple", ")", ":", "(", "iparam", ",", "iparamindex", ")", "=", "param", "if", "iparam", "in", "vectorized_params", ":", "assert", "iparamindex", "not", "in", "vectorized_params", "[", "iparam", "]", "vectorized_params", "[", "iparam", "]", "[", "iparamindex", "]", "=", "float", "(", "value", "[", "i", "]", ")", "else", ":", "vectorized_params", "[", "iparam", "]", "=", "{", "iparamindex", ":", "float", "(", "value", "[", "i", "]", ")", "}", "else", ":", "raise", "ValueError", "(", "\"Invalid param type\"", ")", "for", "(", "param", ",", "paramd", ")", "in", "vectorized_params", ".", "items", "(", ")", ":", "assert", "set", "(", "paramd", ".", "keys", "(", ")", ")", "==", "set", "(", "range", "(", "len", "(", "paramd", ")", ")", ")", "newvalues", "[", "param", "]", "=", "scipy", ".", "array", "(", "[", "paramd", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "paramd", ")", ")", "]", ",", "dtype", "=", "'float'", ")", "self", ".", "updateParams", "(", "newvalues", ")", "self", ".", "_paramsarray", "=", "self", ".", "paramsarray" ]
Set new `paramsarray` and update via `updateParams`.
[ "Set", "new", "paramsarray", "and", "update", "via", "updateParams", "." ]
python
train
learningequality/ricecooker
ricecooker/sushi_bar_client.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L269-L296
def execute_command_in_message(controller, cliargs, clioptions, message):
    """
    Runs the command in message['command'], which is one of: 'start' / 'stop'.
    Updates the chef's initial command line args and options with args and
    options provided in message['args'] and message['options'].
    """
    SUPPORTED_COMMANDS = ['start']  # , 'stop'] # TODO
    print(message)
    # args and options from SushiBar overrride command line args and options
    args = cliargs
    options = clioptions
    if 'args' in message:
        args.update(message['args'])
    if 'options' in message:
        options.update(message['options'])
    if message['command'] == 'start':
        if not controller.thread or not controller.thread.isAlive():
            controller.thread = threading.Thread(
                target=controller.chef.run,
                args=(args, options),
            )
            controller.thread.start()
        else:
            config.LOGGER.info('Not starting because chef is already running.')
    else:
        config.LOGGER.info('Command not supported: %s' % message['command'])
[ "def", "execute_command_in_message", "(", "controller", ",", "cliargs", ",", "clioptions", ",", "message", ")", ":", "SUPPORTED_COMMANDS", "=", "[", "'start'", "]", "# , 'stop'] # TODO", "print", "(", "message", ")", "# args and options from SushiBar overrride command line args and options", "args", "=", "cliargs", "options", "=", "clioptions", "if", "'args'", "in", "message", ":", "args", ".", "update", "(", "message", "[", "'args'", "]", ")", "if", "'options'", "in", "message", ":", "options", ".", "update", "(", "message", "[", "'options'", "]", ")", "if", "message", "[", "'command'", "]", "==", "'start'", ":", "if", "not", "controller", ".", "thread", "or", "not", "controller", ".", "thread", ".", "isAlive", "(", ")", ":", "controller", ".", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "controller", ".", "chef", ".", "run", ",", "args", "=", "(", "args", ",", "options", ")", ",", ")", "controller", ".", "thread", ".", "start", "(", ")", "else", ":", "config", ".", "LOGGER", ".", "info", "(", "'Not starting because chef is already running.'", ")", "else", ":", "config", ".", "LOGGER", ".", "info", "(", "'Command not supported: %s'", "%", "message", "[", "'command'", "]", ")" ]
Runs the command in message['command'], which is one of: 'start' / 'stop'. Updates the chef's initial command line args and options with args and options provided in message['args'] and message['options'].
[ "Runs", "the", "command", "in", "message", "[", "command", "]", "which", "is", "one", "of", ":", "start", "/", "stop", ".", "Updates", "the", "chef", "s", "initial", "command", "line", "args", "and", "options", "with", "args", "and", "options", "provided", "in", "message", "[", "args", "]", "and", "message", "[", "options", "]", "." ]
python
train
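A minimal usage sketch for the record above, assuming the function is importable from ricecooker.sushi_bar_client as the record's path suggests; the controller and chef objects are throwaway stand-ins invented for the example, and the message shape follows the docstring.

# Illustrative only: DummyChef/DummyController are hypothetical stand-ins, not ricecooker classes.
from ricecooker.sushi_bar_client import execute_command_in_message  # path inferred from the record

class DummyChef:
    def run(self, args, options):
        print('chef running with', args, options)

class DummyController:
    thread = None
    chef = DummyChef()

message = {'command': 'start', 'args': {'reset': True}, 'options': {'token': 'XYZ'}}
execute_command_in_message(DummyController(), cliargs={}, clioptions={}, message=message)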
videntity/django-djmongo
djmongo/console/utils.py
https://github.com/videntity/django-djmongo/blob/7534e0981a2bc12634cf3f1ed03353623dc57565/djmongo/console/utils.py#L131-L148
def mongodb_ensure_index(database_name, collection_name, key):
    """Ensure Index"""
    try:
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT', 'mongodb://localhost:27017/')
        mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
        dbs = mc[database_name]
        dbc = dbs[collection_name]
        dbc.ensure_index(key)
        # print "success"
        return key
    except:
        # error connecting to mongodb
        # print str(sys.exc_info())
        return str(sys.exc_info())
[ "def", "mongodb_ensure_index", "(", "database_name", ",", "collection_name", ",", "key", ")", ":", "try", ":", "mongodb_client_url", "=", "getattr", "(", "settings", ",", "'MONGODB_CLIENT'", ",", "'mongodb://localhost:27017/'", ")", "mc", "=", "MongoClient", "(", "mongodb_client_url", ",", "document_class", "=", "OrderedDict", ")", "dbs", "=", "mc", "[", "database_name", "]", "dbc", "=", "dbs", "[", "collection_name", "]", "dbc", ".", "ensure_index", "(", "key", ")", "# print \"success\"", "return", "key", "except", ":", "# error connecting to mongodb", "# print str(sys.exc_info())", "return", "str", "(", "sys", ".", "exc_info", "(", ")", ")" ]
Ensure Index
[ "Ensure", "Index" ]
python
train
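A short usage sketch, assuming the function is importable from djmongo.console.utils (inferred from the record's path) and that a MongoDB instance is reachable at the configured MONGODB_CLIENT URL; the database, collection and key names are made up for illustration.

# Hypothetical names; returns the key on success, or the stringified exception info on failure.
from djmongo.console.utils import mongodb_ensure_index

result = mongodb_ensure_index('reportingdb', 'observations', 'subject_id')
if result == 'subject_id':
    print('index ensured on subject_id')
else:
    print('failed:', result)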
gbiggs/rtctree
rtctree/node.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L169-L218
def iterate(self, func, args=None, filter=[]): '''Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1'] ''' with self._mutex: result = [] if filter: filters_passed = True for f in filter: if type(f) == str: if not eval('self.' + f): filters_passed = False break else: if not f(self): filters_passed = False break if filters_passed: result = [func(self, args)] else: result = [func(self, args)] for child in self._children: result += self._children[child].iterate(func, args, filter) return result
[ "def", "iterate", "(", "self", ",", "func", ",", "args", "=", "None", ",", "filter", "=", "[", "]", ")", ":", "with", "self", ".", "_mutex", ":", "result", "=", "[", "]", "if", "filter", ":", "filters_passed", "=", "True", "for", "f", "in", "filter", ":", "if", "type", "(", "f", ")", "==", "str", ":", "if", "not", "eval", "(", "'self.'", "+", "f", ")", ":", "filters_passed", "=", "False", "break", "else", ":", "if", "not", "f", "(", "self", ")", ":", "filters_passed", "=", "False", "break", "if", "filters_passed", ":", "result", "=", "[", "func", "(", "self", ",", "args", ")", "]", "else", ":", "result", "=", "[", "func", "(", "self", ",", "args", ")", "]", "for", "child", "in", "self", ".", "_children", ":", "result", "+=", "self", ".", "_children", "[", "child", "]", ".", "iterate", "(", "func", ",", "args", ",", "filter", ")", "return", "result" ]
Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1']
[ "Call", "a", "function", "on", "this", "node", "and", "recursively", "all", "its", "children", "." ]
python
train
martinrusev/solid-python
solidpy/utils/wsgi.py
https://github.com/martinrusev/solid-python/blob/c5c39ad43c19e6746ea0297e0d440a2fccfb25ed/solidpy/utils/wsgi.py#L25-L45
def get_host(environ):
    """Return the real host for the given WSGI environment. This takes care
    of the `X-Forwarded-Host` header.

    :param environ: the WSGI environment to get the host of.
    """
    scheme = environ.get('wsgi.url_scheme')
    if 'HTTP_X_FORWARDED_HOST' in environ:
        result = environ['HTTP_X_FORWARDED_HOST']
    elif 'HTTP_HOST' in environ:
        result = environ['HTTP_HOST']
    else:
        result = environ['SERVER_NAME']
        if (scheme, str(environ['SERVER_PORT'])) not \
           in (('https', '443'), ('http', '80')):
            result += ':' + environ['SERVER_PORT']
    if result.endswith(':80') and scheme == 'http':
        result = result[:-3]
    elif result.endswith(':443') and scheme == 'https':
        result = result[:-4]
    return result
[ "def", "get_host", "(", "environ", ")", ":", "scheme", "=", "environ", ".", "get", "(", "'wsgi.url_scheme'", ")", "if", "'HTTP_X_FORWARDED_HOST'", "in", "environ", ":", "result", "=", "environ", "[", "'HTTP_X_FORWARDED_HOST'", "]", "elif", "'HTTP_HOST'", "in", "environ", ":", "result", "=", "environ", "[", "'HTTP_HOST'", "]", "else", ":", "result", "=", "environ", "[", "'SERVER_NAME'", "]", "if", "(", "scheme", ",", "str", "(", "environ", "[", "'SERVER_PORT'", "]", ")", ")", "not", "in", "(", "(", "'https'", ",", "'443'", ")", ",", "(", "'http'", ",", "'80'", ")", ")", ":", "result", "+=", "':'", "+", "environ", "[", "'SERVER_PORT'", "]", "if", "result", ".", "endswith", "(", "':80'", ")", "and", "scheme", "==", "'http'", ":", "result", "=", "result", "[", ":", "-", "3", "]", "elif", "result", ".", "endswith", "(", "':443'", ")", "and", "scheme", "==", "'https'", ":", "result", "=", "result", "[", ":", "-", "4", "]", "return", "result" ]
Return the real host for the given WSGI environment. This takes care of the `X-Forwarded-Host` header. :param environ: the WSGI environment to get the host of.
[ "Return", "the", "real", "host", "for", "the", "given", "WSGI", "environment", ".", "This", "takes", "care", "of", "the", "X", "-", "Forwarded", "-", "Host", "header", "." ]
python
train
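A small illustration of the host resolution order described in the docstring; the WSGI environ dicts below are hand-built for the example.

# Hand-built environ dicts; values are illustrative.
environ_proxied = {'wsgi.url_scheme': 'https',
                   'HTTP_X_FORWARDED_HOST': 'www.example.org',
                   'HTTP_HOST': 'internal:8000',
                   'SERVER_NAME': 'internal', 'SERVER_PORT': '8000'}
print(get_host(environ_proxied))   # 'www.example.org' (forwarded header wins)

environ_plain = {'wsgi.url_scheme': 'http',
                 'SERVER_NAME': 'localhost', 'SERVER_PORT': '8080'}
print(get_host(environ_plain))     # 'localhost:8080' (non-default port is kept)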
yyuu/botornado
boto/mturk/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L148-L224
def create_hit(self, hit_type=None, question=None, lifetime=datetime.timedelta(days=7), max_assignments=1, title=None, description=None, keywords=None, reward=None, duration=datetime.timedelta(days=7), approval_delay=None, annotation=None, questions=None, qualifications=None, response_groups=None): """ Creates a new HIT. Returns a ResultSet See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html """ # handle single or multiple questions neither = question is None and questions is None both = question is not None and questions is not None if neither or both: raise ValueError("Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both") if question: questions = [question] question_param = QuestionForm(questions) if isinstance(question, QuestionForm): question_param = question elif isinstance(question, ExternalQuestion): question_param = question # Handle basic required arguments and set up params dict params = {'Question': question_param.get_as_xml(), 'LifetimeInSeconds' : self.duration_as_seconds(lifetime), 'MaxAssignments' : max_assignments, } # if hit type specified then add it # else add the additional required parameters if hit_type: params['HITTypeId'] = hit_type else: # Handle keywords final_keywords = MTurkConnection.get_keywords_as_string(keywords) # Handle price argument final_price = MTurkConnection.get_price_as_price(reward) final_duration = self.duration_as_seconds(duration) additional_params = dict( Title=title, Description=description, Keywords=final_keywords, AssignmentDurationInSeconds=final_duration, ) additional_params.update(final_price.get_as_params('Reward')) if approval_delay is not None: d = self.duration_as_seconds(approval_delay) additional_params['AutoApprovalDelayInSeconds'] = d # add these params to the others params.update(additional_params) # add the annotation if specified if annotation is not None: params['RequesterAnnotation'] = annotation # Add the Qualifications if specified if qualifications is not None: params.update(qualifications.get_as_params()) # Handle optional response groups argument if response_groups: self.build_list_params(params, response_groups, 'ResponseGroup') # Submit return self._process_request('CreateHIT', params, [('HIT', HIT),])
[ "def", "create_hit", "(", "self", ",", "hit_type", "=", "None", ",", "question", "=", "None", ",", "lifetime", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", ",", "max_assignments", "=", "1", ",", "title", "=", "None", ",", "description", "=", "None", ",", "keywords", "=", "None", ",", "reward", "=", "None", ",", "duration", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", ",", "approval_delay", "=", "None", ",", "annotation", "=", "None", ",", "questions", "=", "None", ",", "qualifications", "=", "None", ",", "response_groups", "=", "None", ")", ":", "# handle single or multiple questions", "neither", "=", "question", "is", "None", "and", "questions", "is", "None", "both", "=", "question", "is", "not", "None", "and", "questions", "is", "not", "None", "if", "neither", "or", "both", ":", "raise", "ValueError", "(", "\"Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both\"", ")", "if", "question", ":", "questions", "=", "[", "question", "]", "question_param", "=", "QuestionForm", "(", "questions", ")", "if", "isinstance", "(", "question", ",", "QuestionForm", ")", ":", "question_param", "=", "question", "elif", "isinstance", "(", "question", ",", "ExternalQuestion", ")", ":", "question_param", "=", "question", "# Handle basic required arguments and set up params dict", "params", "=", "{", "'Question'", ":", "question_param", ".", "get_as_xml", "(", ")", ",", "'LifetimeInSeconds'", ":", "self", ".", "duration_as_seconds", "(", "lifetime", ")", ",", "'MaxAssignments'", ":", "max_assignments", ",", "}", "# if hit type specified then add it", "# else add the additional required parameters", "if", "hit_type", ":", "params", "[", "'HITTypeId'", "]", "=", "hit_type", "else", ":", "# Handle keywords", "final_keywords", "=", "MTurkConnection", ".", "get_keywords_as_string", "(", "keywords", ")", "# Handle price argument", "final_price", "=", "MTurkConnection", ".", "get_price_as_price", "(", "reward", ")", "final_duration", "=", "self", ".", "duration_as_seconds", "(", "duration", ")", "additional_params", "=", "dict", "(", "Title", "=", "title", ",", "Description", "=", "description", ",", "Keywords", "=", "final_keywords", ",", "AssignmentDurationInSeconds", "=", "final_duration", ",", ")", "additional_params", ".", "update", "(", "final_price", ".", "get_as_params", "(", "'Reward'", ")", ")", "if", "approval_delay", "is", "not", "None", ":", "d", "=", "self", ".", "duration_as_seconds", "(", "approval_delay", ")", "additional_params", "[", "'AutoApprovalDelayInSeconds'", "]", "=", "d", "# add these params to the others", "params", ".", "update", "(", "additional_params", ")", "# add the annotation if specified", "if", "annotation", "is", "not", "None", ":", "params", "[", "'RequesterAnnotation'", "]", "=", "annotation", "# Add the Qualifications if specified", "if", "qualifications", "is", "not", "None", ":", "params", ".", "update", "(", "qualifications", ".", "get_as_params", "(", ")", ")", "# Handle optional response groups argument", "if", "response_groups", ":", "self", ".", "build_list_params", "(", "params", ",", "response_groups", ",", "'ResponseGroup'", ")", "# Submit", "return", "self", ".", "_process_request", "(", "'CreateHIT'", ",", "params", ",", "[", "(", "'HIT'", ",", "HIT", ")", ",", "]", ")" ]
Creates a new HIT. Returns a ResultSet See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html
[ "Creates", "a", "new", "HIT", ".", "Returns", "a", "ResultSet", "See", ":", "http", ":", "//", "docs", ".", "amazonwebservices", ".", "com", "/", "AWSMechanicalTurkRequester", "/", "2006", "-", "10", "-", "31", "/", "ApiReference_CreateHITOperation", ".", "html" ]
python
train
tanghaibao/goatools
goatools/cli/gosubdag_plot.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/gosubdag_plot.py#L205-L227
def _plt_gogrouped(self, goids, go2color_usr, **kws): """Plot grouped GO IDs.""" fout_img = self.get_outfile(kws['outfile'], goids) sections = read_sections(kws['sections'], exclude_ungrouped=True) # print ("KWWSSSSSSSS", kws) # kws_plt = {k:v for k, v in kws.items if k in self.kws_plt} grprobj_cur = self._get_grprobj(goids, sections) # GO: purple=hdr-only, green=hdr&usr, yellow=usr-only # BORDER: Black=hdr Blu=hdr&usr grpcolor = GrouperColors(grprobj_cur) # get_bordercolor get_go2color_users grp_go2color = grpcolor.get_go2color_users() grp_go2bordercolor = grpcolor.get_bordercolor() for goid, color in go2color_usr.items(): grp_go2color[goid] = color objcolor = Go2Color(self.gosubdag, objgoea=None, go2color=grp_go2color, go2bordercolor=grp_go2bordercolor) go2txt = GrouperPlot.get_go2txt(grprobj_cur, grp_go2color, grp_go2bordercolor) objplt = GoSubDagPlot(self.gosubdag, Go2Color=objcolor, go2txt=go2txt, **kws) objplt.prt_goids(sys.stdout) objplt.plt_dag(fout_img) sys.stdout.write("{N:>6} sections read\n".format( N="NO" if sections is None else len(sections))) return fout_img
[ "def", "_plt_gogrouped", "(", "self", ",", "goids", ",", "go2color_usr", ",", "*", "*", "kws", ")", ":", "fout_img", "=", "self", ".", "get_outfile", "(", "kws", "[", "'outfile'", "]", ",", "goids", ")", "sections", "=", "read_sections", "(", "kws", "[", "'sections'", "]", ",", "exclude_ungrouped", "=", "True", ")", "# print (\"KWWSSSSSSSS\", kws)", "# kws_plt = {k:v for k, v in kws.items if k in self.kws_plt}", "grprobj_cur", "=", "self", ".", "_get_grprobj", "(", "goids", ",", "sections", ")", "# GO: purple=hdr-only, green=hdr&usr, yellow=usr-only", "# BORDER: Black=hdr Blu=hdr&usr", "grpcolor", "=", "GrouperColors", "(", "grprobj_cur", ")", "# get_bordercolor get_go2color_users", "grp_go2color", "=", "grpcolor", ".", "get_go2color_users", "(", ")", "grp_go2bordercolor", "=", "grpcolor", ".", "get_bordercolor", "(", ")", "for", "goid", ",", "color", "in", "go2color_usr", ".", "items", "(", ")", ":", "grp_go2color", "[", "goid", "]", "=", "color", "objcolor", "=", "Go2Color", "(", "self", ".", "gosubdag", ",", "objgoea", "=", "None", ",", "go2color", "=", "grp_go2color", ",", "go2bordercolor", "=", "grp_go2bordercolor", ")", "go2txt", "=", "GrouperPlot", ".", "get_go2txt", "(", "grprobj_cur", ",", "grp_go2color", ",", "grp_go2bordercolor", ")", "objplt", "=", "GoSubDagPlot", "(", "self", ".", "gosubdag", ",", "Go2Color", "=", "objcolor", ",", "go2txt", "=", "go2txt", ",", "*", "*", "kws", ")", "objplt", ".", "prt_goids", "(", "sys", ".", "stdout", ")", "objplt", ".", "plt_dag", "(", "fout_img", ")", "sys", ".", "stdout", ".", "write", "(", "\"{N:>6} sections read\\n\"", ".", "format", "(", "N", "=", "\"NO\"", "if", "sections", "is", "None", "else", "len", "(", "sections", ")", ")", ")", "return", "fout_img" ]
Plot grouped GO IDs.
[ "Plot", "grouped", "GO", "IDs", "." ]
python
train
briancappello/flask-unchained
flask_unchained/string_utils.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/string_utils.py#L8-L23
def camel_case(string):
    """
    Converts a string to camel case. For example::

        camel_case('one_two_three') -> 'oneTwoThree'
    """
    if not string:
        return string
    parts = snake_case(string).split('_')
    rv = ''
    while parts:
        part = parts.pop(0)
        rv += part or '_'
        if part:
            break
    return rv + ''.join(x.title() for x in parts)
[ "def", "camel_case", "(", "string", ")", ":", "if", "not", "string", ":", "return", "string", "parts", "=", "snake_case", "(", "string", ")", ".", "split", "(", "'_'", ")", "rv", "=", "''", "while", "parts", ":", "part", "=", "parts", ".", "pop", "(", "0", ")", "rv", "+=", "part", "or", "'_'", "if", "part", ":", "break", "return", "rv", "+", "''", ".", "join", "(", "x", ".", "title", "(", ")", "for", "x", "in", "parts", ")" ]
Converts a string to camel case. For example:: camel_case('one_two_three') -> 'oneTwoThree'
[ "Converts", "a", "string", "to", "camel", "case", ".", "For", "example", "::" ]
python
train
thanethomson/statik
statik/utils.py
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/statik/utils.py#L141-L163
def copy_file_if_modified(src_path, dest_path):
    """Only copies the file from the source path to the destination path if it doesn't exist yet or it
    has been modified. Intended to provide something of an optimisation when a project has large trees
    of assets."""
    # if the destination path is a directory, delete it completely - we assume here we are
    # writing a file to the filesystem
    if os.path.isdir(dest_path):
        shutil.rmtree(dest_path)

    must_copy = False
    if not os.path.exists(dest_path):
        must_copy = True
    else:
        src_stat = os.stat(src_path)
        dest_stat = os.stat(dest_path)

        # if the size or last modified timestamp are different
        if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
                (src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
            must_copy = True

    if must_copy:
        shutil.copy2(src_path, dest_path)
[ "def", "copy_file_if_modified", "(", "src_path", ",", "dest_path", ")", ":", "# if the destination path is a directory, delete it completely - we assume here we are", "# writing a file to the filesystem", "if", "os", ".", "path", ".", "isdir", "(", "dest_path", ")", ":", "shutil", ".", "rmtree", "(", "dest_path", ")", "must_copy", "=", "False", "if", "not", "os", ".", "path", ".", "exists", "(", "dest_path", ")", ":", "must_copy", "=", "True", "else", ":", "src_stat", "=", "os", ".", "stat", "(", "src_path", ")", "dest_stat", "=", "os", ".", "stat", "(", "dest_path", ")", "# if the size or last modified timestamp are different", "if", "(", "(", "src_stat", "[", "stat", ".", "ST_SIZE", "]", "!=", "dest_stat", "[", "stat", ".", "ST_SIZE", "]", ")", "or", "(", "src_stat", "[", "stat", ".", "ST_MTIME", "]", "!=", "dest_stat", "[", "stat", ".", "ST_MTIME", "]", ")", ")", ":", "must_copy", "=", "True", "if", "must_copy", ":", "shutil", ".", "copy2", "(", "src_path", ",", "dest_path", ")" ]
Only copies the file from the source path to the destination path if it doesn't exist yet or it has been modified. Intended to provide something of an optimisation when a project has large trees of assets.
[ "Only", "copies", "the", "file", "from", "the", "source", "path", "to", "the", "destination", "path", "if", "it", "doesn", "t", "exist", "yet", "or", "it", "has", "been", "modified", ".", "Intended", "to", "provide", "something", "of", "an", "optimisation", "when", "a", "project", "has", "large", "trees", "of", "assets", "." ]
python
train
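A usage sketch under the assumption that the function is imported from statik.utils, as the record's path suggests; the source and destination directories are illustrative.

# Illustrative asset-copy loop; paths are made up.
import os
from statik.utils import copy_file_if_modified  # module path inferred from the record

src_dir, dest_dir = 'assets', 'public/assets'
for name in os.listdir(src_dir):
    # only copies when the destination is missing or its size/mtime differ from the source
    copy_file_if_modified(os.path.join(src_dir, name), os.path.join(dest_dir, name))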
eykd/paved
paved/docs.py
https://github.com/eykd/paved/blob/f04f8a4248c571f3d5ce882b325884a3e5d80203/paved/docs.py#L24-L29
def sphinx_make(*targets):
    """Call the Sphinx Makefile with the specified targets.

    `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).
    """
    sh('make %s' % ' '.join(targets), cwd=options.paved.docs.path)
[ "def", "sphinx_make", "(", "*", "targets", ")", ":", "sh", "(", "'make %s'", "%", "' '", ".", "join", "(", "targets", ")", ",", "cwd", "=", "options", ".", "paved", ".", "docs", ".", "path", ")" ]
Call the Sphinx Makefile with the specified targets. `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).
[ "Call", "the", "Sphinx", "Makefile", "with", "the", "specified", "targets", "." ]
python
valid
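A sketch of how the paver task above might be invoked from a pavement file, assuming the project has already set `options.paved.docs.path` (for example to 'docs'); the targets are ordinary Sphinx Makefile targets.

# Assumes options.paved.docs.path is configured elsewhere in the pavement file.
# This would execute `make html linkcheck` inside that folder.
sphinx_make('html', 'linkcheck')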
ARMmbed/icetea
icetea_lib/CliResponse.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/CliResponse.py#L144-L154
def verify_response_time(self, expected_below):
    """
    Verify that response time (time span between request-response)
    is reasonable.

    :param expected_below: integer
    :return: Nothing
    :raises: ValueError if timedelta > expected time
    """
    if self.timedelta > expected_below:
        raise ValueError("Response time is more (%f) than expected (%f)!" % (self.timedelta,
                                                                             expected_below))
[ "def", "verify_response_time", "(", "self", ",", "expected_below", ")", ":", "if", "self", ".", "timedelta", ">", "expected_below", ":", "raise", "ValueError", "(", "\"Response time is more (%f) than expected (%f)!\"", "%", "(", "self", ".", "timedelta", ",", "expected_below", ")", ")" ]
Verify that response time (time span between request-response) is reasonable. :param expected_below: integer :return: Nothing :raises: ValueError if timedelta > expected time
[ "Verify", "that", "response", "time", "(", "time", "span", "between", "request", "-", "response", ")", "is", "reasonable", "." ]
python
train
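A brief sketch of the intended use, assuming `resp` is a CliResponse-like object whose `timedelta` records the request/response gap in the same unit as the threshold; the threshold value is arbitrary.

# `resp` stands for a response object obtained from a command execution.
try:
    resp.verify_response_time(expected_below=2.0)
except ValueError as exc:
    print('command was too slow:', exc)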
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L884-L887
def clear_lock(self, abspath=True):
    """Clean any conda lock in the system."""
    cmd_list = ['clean', '--lock', '--json']
    return self._call_and_parse(cmd_list, abspath=abspath)
[ "def", "clear_lock", "(", "self", ",", "abspath", "=", "True", ")", ":", "cmd_list", "=", "[", "'clean'", ",", "'--lock'", ",", "'--json'", "]", "return", "self", ".", "_call_and_parse", "(", "cmd_list", ",", "abspath", "=", "abspath", ")" ]
Clean any conda lock in the system.
[ "Clean", "any", "conda", "lock", "in", "the", "system", "." ]
python
train
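Usage is a single call on an initialized API wrapper instance; the variable name below is illustrative, and the return value is whatever `_call_and_parse` yields for `conda clean --lock --json`.

# `conda_api` stands for an already-constructed instance of the wrapper class above.
result = conda_api.clear_lock(abspath=True)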
projectatomic/atomic-reactor
atomic_reactor/rpm_util.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/rpm_util.py#L36-L95
def parse_rpm_output(output, tags=None, separator=';'): """ Parse output of the rpm query. :param output: list, decoded output (str) from the rpm subprocess :param tags: list, str fields used for query output :return: list, dicts describing each rpm package """ if tags is None: tags = image_component_rpm_tags def field(tag): """ Get a field value by name """ try: value = fields[tags.index(tag)] except ValueError: return None if value == '(none)': return None return value components = [] sigmarker = 'Key ID ' for rpm in output: fields = rpm.rstrip('\n').split(separator) if len(fields) < len(tags): continue signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig') if signature: parts = signature.split(sigmarker, 1) if len(parts) > 1: signature = parts[1] component_rpm = { 'type': 'rpm', 'name': field('NAME'), 'version': field('VERSION'), 'release': field('RELEASE'), 'arch': field('ARCH'), 'sigmd5': field('SIGMD5'), 'signature': signature, } # Special handling for epoch as it must be an integer or None epoch = field('EPOCH') if epoch is not None: epoch = int(epoch) component_rpm['epoch'] = epoch if component_rpm['name'] != 'gpg-pubkey': components.append(component_rpm) return components
[ "def", "parse_rpm_output", "(", "output", ",", "tags", "=", "None", ",", "separator", "=", "';'", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "image_component_rpm_tags", "def", "field", "(", "tag", ")", ":", "\"\"\"\n Get a field value by name\n \"\"\"", "try", ":", "value", "=", "fields", "[", "tags", ".", "index", "(", "tag", ")", "]", "except", "ValueError", ":", "return", "None", "if", "value", "==", "'(none)'", ":", "return", "None", "return", "value", "components", "=", "[", "]", "sigmarker", "=", "'Key ID '", "for", "rpm", "in", "output", ":", "fields", "=", "rpm", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "separator", ")", "if", "len", "(", "fields", ")", "<", "len", "(", "tags", ")", ":", "continue", "signature", "=", "field", "(", "'SIGPGP:pgpsig'", ")", "or", "field", "(", "'SIGGPG:pgpsig'", ")", "if", "signature", ":", "parts", "=", "signature", ".", "split", "(", "sigmarker", ",", "1", ")", "if", "len", "(", "parts", ")", ">", "1", ":", "signature", "=", "parts", "[", "1", "]", "component_rpm", "=", "{", "'type'", ":", "'rpm'", ",", "'name'", ":", "field", "(", "'NAME'", ")", ",", "'version'", ":", "field", "(", "'VERSION'", ")", ",", "'release'", ":", "field", "(", "'RELEASE'", ")", ",", "'arch'", ":", "field", "(", "'ARCH'", ")", ",", "'sigmd5'", ":", "field", "(", "'SIGMD5'", ")", ",", "'signature'", ":", "signature", ",", "}", "# Special handling for epoch as it must be an integer or None", "epoch", "=", "field", "(", "'EPOCH'", ")", "if", "epoch", "is", "not", "None", ":", "epoch", "=", "int", "(", "epoch", ")", "component_rpm", "[", "'epoch'", "]", "=", "epoch", "if", "component_rpm", "[", "'name'", "]", "!=", "'gpg-pubkey'", ":", "components", ".", "append", "(", "component_rpm", ")", "return", "components" ]
Parse output of the rpm query. :param output: list, decoded output (str) from the rpm subprocess :param tags: list, str fields used for query output :return: list, dicts describing each rpm package
[ "Parse", "output", "of", "the", "rpm", "query", "." ]
python
train
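A small, self-contained illustration that passes an explicit `tags` list so the example does not depend on the module-level default; the sample rpm query line is fabricated.

# Fabricated rpm query output; fields are ';'-separated in the same order as `tags`.
tags = ['NAME', 'VERSION', 'RELEASE', 'ARCH', 'EPOCH', 'SIGMD5',
        'SIGPGP:pgpsig', 'SIGGPG:pgpsig']
line = ('bash;4.4.19;7.el8;x86_64;(none);0123456789abcdef;'
        'RSA/SHA256, Tue 01 Jan 2019, Key ID 199e2f91fd431d51;(none)')
components = parse_rpm_output([line], tags=tags)
# -> one dict with name='bash', version='4.4.19', signature='199e2f91fd431d51',
#    and epoch None because the EPOCH field was '(none)'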
aiortc/aioice
aioice/ice.py
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L554-L598
def check_incoming(self, message, addr, protocol): """ Handle a succesful incoming check. """ component = protocol.local_candidate.component # find remote candidate remote_candidate = None for c in self._remote_candidates: if c.host == addr[0] and c.port == addr[1]: remote_candidate = c assert remote_candidate.component == component break if remote_candidate is None: # 7.2.1.3. Learning Peer Reflexive Candidates remote_candidate = Candidate( foundation=random_string(10), component=component, transport='udp', priority=message.attributes['PRIORITY'], host=addr[0], port=addr[1], type='prflx') self._remote_candidates.append(remote_candidate) self.__log_info('Discovered peer reflexive candidate %s', remote_candidate) # find pair pair = self._find_pair(protocol, remote_candidate) if pair is None: pair = CandidatePair(protocol, remote_candidate) pair.state = CandidatePair.State.WAITING self._check_list.append(pair) self.sort_check_list() # triggered check if pair.state in [CandidatePair.State.WAITING, CandidatePair.State.FAILED]: pair.handle = asyncio.ensure_future(self.check_start(pair)) # 7.2.1.5. Updating the Nominated Flag if 'USE-CANDIDATE' in message.attributes and not self.ice_controlling: pair.remote_nominated = True if pair.state == CandidatePair.State.SUCCEEDED: pair.nominated = True self.check_complete(pair)
[ "def", "check_incoming", "(", "self", ",", "message", ",", "addr", ",", "protocol", ")", ":", "component", "=", "protocol", ".", "local_candidate", ".", "component", "# find remote candidate", "remote_candidate", "=", "None", "for", "c", "in", "self", ".", "_remote_candidates", ":", "if", "c", ".", "host", "==", "addr", "[", "0", "]", "and", "c", ".", "port", "==", "addr", "[", "1", "]", ":", "remote_candidate", "=", "c", "assert", "remote_candidate", ".", "component", "==", "component", "break", "if", "remote_candidate", "is", "None", ":", "# 7.2.1.3. Learning Peer Reflexive Candidates", "remote_candidate", "=", "Candidate", "(", "foundation", "=", "random_string", "(", "10", ")", ",", "component", "=", "component", ",", "transport", "=", "'udp'", ",", "priority", "=", "message", ".", "attributes", "[", "'PRIORITY'", "]", ",", "host", "=", "addr", "[", "0", "]", ",", "port", "=", "addr", "[", "1", "]", ",", "type", "=", "'prflx'", ")", "self", ".", "_remote_candidates", ".", "append", "(", "remote_candidate", ")", "self", ".", "__log_info", "(", "'Discovered peer reflexive candidate %s'", ",", "remote_candidate", ")", "# find pair", "pair", "=", "self", ".", "_find_pair", "(", "protocol", ",", "remote_candidate", ")", "if", "pair", "is", "None", ":", "pair", "=", "CandidatePair", "(", "protocol", ",", "remote_candidate", ")", "pair", ".", "state", "=", "CandidatePair", ".", "State", ".", "WAITING", "self", ".", "_check_list", ".", "append", "(", "pair", ")", "self", ".", "sort_check_list", "(", ")", "# triggered check", "if", "pair", ".", "state", "in", "[", "CandidatePair", ".", "State", ".", "WAITING", ",", "CandidatePair", ".", "State", ".", "FAILED", "]", ":", "pair", ".", "handle", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "check_start", "(", "pair", ")", ")", "# 7.2.1.5. Updating the Nominated Flag", "if", "'USE-CANDIDATE'", "in", "message", ".", "attributes", "and", "not", "self", ".", "ice_controlling", ":", "pair", ".", "remote_nominated", "=", "True", "if", "pair", ".", "state", "==", "CandidatePair", ".", "State", ".", "SUCCEEDED", ":", "pair", ".", "nominated", "=", "True", "self", ".", "check_complete", "(", "pair", ")" ]
Handle a successful incoming check.
[ "Handle", "a", "successful", "incoming", "check", "." ]
python
train
marshmallow-code/webargs
src/webargs/falconparser.py
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/falconparser.py#L125-L128
def parse_headers(self, req, name, field):
    """Pull a header value from the request."""
    # Use req.get_headers rather than req.headers for performance
    return req.get_header(name, required=False) or core.missing
[ "def", "parse_headers", "(", "self", ",", "req", ",", "name", ",", "field", ")", ":", "# Use req.get_headers rather than req.headers for performance", "return", "req", ".", "get_header", "(", "name", ",", "required", "=", "False", ")", "or", "core", ".", "missing" ]
Pull a header value from the request.
[ "Pull", "a", "header", "value", "from", "the", "request", "." ]
python
train
HPENetworking/PYHPEIMC
build/lib/pyhpeimc/plat/icc.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpeimc/plat/icc.py#L120-L148
def get_template_id(template_name, auth, url): """ Helper function takes str input of folder name and returns str numerical id of the folder. :param folder_name: str name of the folder :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: str numerical id of the folder :rtype: str >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.icc import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> file_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url) >>> assert type(file_id) is str """ object_list = get_cfg_template(auth=auth, url=url) for object in object_list: if object['confFileName'] == template_name: return object['confFileId'] return "template not found"
[ "def", "get_template_id", "(", "template_name", ",", "auth", ",", "url", ")", ":", "object_list", "=", "get_cfg_template", "(", "auth", "=", "auth", ",", "url", "=", "url", ")", "for", "object", "in", "object_list", ":", "if", "object", "[", "'confFileName'", "]", "==", "template_name", ":", "return", "object", "[", "'confFileId'", "]", "return", "\"template not found\"" ]
Helper function takes str input of folder name and returns str numerical id of the folder. :param folder_name: str name of the folder :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: str numerical id of the folder :rtype: str >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.icc import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> file_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url) >>> assert type(file_id) is str
[ "Helper", "function", "takes", "str", "input", "of", "folder", "name", "and", "returns", "str", "numerical", "id", "of", "the", "folder", ".", ":", "param", "folder_name", ":", "str", "name", "of", "the", "folder" ]
python
train
DarkEnergySurvey/ugali
ugali/utils/projector.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/projector.py#L422-L440
def hms2dec(hms):
    """
    Convert longitude from hours,minutes,seconds in string or 3-array
    format to decimal degrees.

    ADW: This really should be replaced by astropy
    """
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    SECOND = 3600.

    if isstring(hms):
        hour,minute,second = np.array(re.split('[hms]',hms))[:3].astype(float)
    else:
        hour,minute,second = hms.T

    decimal = (hour + minute * 1./MINUTE + second * 1./SECOND)*(DEGREE/HOUR)
    return decimal
[ "def", "hms2dec", "(", "hms", ")", ":", "DEGREE", "=", "360.", "HOUR", "=", "24.", "MINUTE", "=", "60.", "SECOND", "=", "3600.", "if", "isstring", "(", "hms", ")", ":", "hour", ",", "minute", ",", "second", "=", "np", ".", "array", "(", "re", ".", "split", "(", "'[hms]'", ",", "hms", ")", ")", "[", ":", "3", "]", ".", "astype", "(", "float", ")", "else", ":", "hour", ",", "minute", ",", "second", "=", "hms", ".", "T", "decimal", "=", "(", "hour", "+", "minute", "*", "1.", "/", "MINUTE", "+", "second", "*", "1.", "/", "SECOND", ")", "*", "(", "DEGREE", "/", "HOUR", ")", "return", "decimal" ]
Convert longitude from hours,minutes,seconds in string or 3-array format to decimal degrees. ADW: This really should be replaced by astropy
[ "Convert", "longitude", "from", "hours", "minutes", "seconds", "in", "string", "or", "3", "-", "array", "format", "to", "decimal", "degrees", "." ]
python
train
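A quick numeric check of the conversion, using the string form accepted by the function; the value is arbitrary.

# '10h30m00s' -> (10 + 30/60 + 0/3600) hours * 15 deg/hour = 157.5 degrees
print(hms2dec('10h30m00s'))   # 157.5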
wbond/asn1crypto
dev/deps.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/dev/deps.py#L56-L83
def _download(url, dest): """ Downloads a URL to a directory :param url: The URL to download :param dest: The path to the directory to save the file in :return: The filesystem path to the saved file """ print('Downloading %s' % url) filename = os.path.basename(url) dest_path = os.path.join(dest, filename) if sys.platform == 'win32': powershell_exe = os.path.join('system32\\WindowsPowerShell\\v1.0\\powershell.exe') code = "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;" code += "(New-Object Net.WebClient).DownloadFile('%s', '%s');" % (url, dest_path) _execute([powershell_exe, '-Command', code], dest) else: _execute(['curl', '-L', '--silent', '--show-error', '-O', url], dest) return dest_path
[ "def", "_download", "(", "url", ",", "dest", ")", ":", "print", "(", "'Downloading %s'", "%", "url", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "dest_path", "=", "os", ".", "path", ".", "join", "(", "dest", ",", "filename", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "powershell_exe", "=", "os", ".", "path", ".", "join", "(", "'system32\\\\WindowsPowerShell\\\\v1.0\\\\powershell.exe'", ")", "code", "=", "\"[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;\"", "code", "+=", "\"(New-Object Net.WebClient).DownloadFile('%s', '%s');\"", "%", "(", "url", ",", "dest_path", ")", "_execute", "(", "[", "powershell_exe", ",", "'-Command'", ",", "code", "]", ",", "dest", ")", "else", ":", "_execute", "(", "[", "'curl'", ",", "'-L'", ",", "'--silent'", ",", "'--show-error'", ",", "'-O'", ",", "url", "]", ",", "dest", ")", "return", "dest_path" ]
Downloads a URL to a directory :param url: The URL to download :param dest: The path to the directory to save the file in :return: The filesystem path to the saved file
[ "Downloads", "a", "URL", "to", "a", "directory" ]
python
train
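A usage sketch; the URL and destination directory are placeholders, and the helper relies on the module's `_execute` wrapper (curl on POSIX, PowerShell on Windows) to do the actual transfer.

# Placeholder URL and directory; returns the path of the saved file, e.g. '/tmp/deps/package.tar.bz2'.
saved = _download('https://example.com/downloads/package.tar.bz2', '/tmp/deps')
print(saved)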
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/tools/common.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/common.py#L285-L320
def get_invocation_command_nodefault( toolset, tool, user_provided_command=[], additional_paths=[], path_last=False): """ A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'. """ assert isinstance(toolset, basestring) assert isinstance(tool, basestring) assert is_iterable_typed(user_provided_command, basestring) assert is_iterable_typed(additional_paths, basestring) or additional_paths is None assert isinstance(path_last, (int, bool)) if not user_provided_command: command = find_tool(tool, additional_paths, path_last) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization: can't find tool, tool" #FIXME #print "warning: initialized from" [ errors.nearest-user-location ] ; else: command = check_tool(user_provided_command) if not command and __debug_configuration: print "warning: toolset", toolset, "initialization:" print "warning: can't find user-provided command", user_provided_command #FIXME #ECHO "warning: initialized from" [ errors.nearest-user-location ] command = [] if command: command = ' '.join(command) return command
[ "def", "get_invocation_command_nodefault", "(", "toolset", ",", "tool", ",", "user_provided_command", "=", "[", "]", ",", "additional_paths", "=", "[", "]", ",", "path_last", "=", "False", ")", ":", "assert", "isinstance", "(", "toolset", ",", "basestring", ")", "assert", "isinstance", "(", "tool", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "user_provided_command", ",", "basestring", ")", "assert", "is_iterable_typed", "(", "additional_paths", ",", "basestring", ")", "or", "additional_paths", "is", "None", "assert", "isinstance", "(", "path_last", ",", "(", "int", ",", "bool", ")", ")", "if", "not", "user_provided_command", ":", "command", "=", "find_tool", "(", "tool", ",", "additional_paths", ",", "path_last", ")", "if", "not", "command", "and", "__debug_configuration", ":", "print", "\"warning: toolset\"", ",", "toolset", ",", "\"initialization: can't find tool, tool\"", "#FIXME", "#print \"warning: initialized from\" [ errors.nearest-user-location ] ;", "else", ":", "command", "=", "check_tool", "(", "user_provided_command", ")", "if", "not", "command", "and", "__debug_configuration", ":", "print", "\"warning: toolset\"", ",", "toolset", ",", "\"initialization:\"", "print", "\"warning: can't find user-provided command\"", ",", "user_provided_command", "#FIXME", "#ECHO \"warning: initialized from\" [ errors.nearest-user-location ]", "command", "=", "[", "]", "if", "command", ":", "command", "=", "' '", ".", "join", "(", "command", ")", "return", "command" ]
A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'.
[ "A", "helper", "rule", "to", "get", "the", "command", "to", "invoke", "some", "tool", ".", "If", "user", "-", "provided", "-", "command", "is", "not", "given", "tries", "to", "find", "binary", "named", "tool", "in", "PATH", "and", "in", "the", "passed", "additional", "-", "path", ".", "Otherwise", "verifies", "that", "the", "first", "element", "of", "user", "-", "provided", "-", "command", "is", "an", "existing", "program", "." ]
python
train
Kaggle/kaggle-api
kaggle/api/kaggle_api_extended.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L1592-L1632
def kernels_list_cli(self, mine=False, page=1, page_size=20, search=None, csv_display=False, parent=None, competition=None, dataset=None, user=None, language=None, kernel_type=None, output_type=None, sort_by=None): """ client wrapper for kernels_list, see this function for arguments. Additional arguments are provided here. Parameters ========== csv_display: if True, print comma separated values instead of table """ kernels = self.kernels_list( page=page, page_size=page_size, search=search, mine=mine, dataset=dataset, competition=competition, parent_kernel=parent, user=user, language=language, kernel_type=kernel_type, output_type=output_type, sort_by=sort_by) fields = ['ref', 'title', 'author', 'lastRunTime', 'totalVotes'] if kernels: if csv_display: self.print_csv(kernels, fields) else: self.print_table(kernels, fields) else: print('No kernels found')
[ "def", "kernels_list_cli", "(", "self", ",", "mine", "=", "False", ",", "page", "=", "1", ",", "page_size", "=", "20", ",", "search", "=", "None", ",", "csv_display", "=", "False", ",", "parent", "=", "None", ",", "competition", "=", "None", ",", "dataset", "=", "None", ",", "user", "=", "None", ",", "language", "=", "None", ",", "kernel_type", "=", "None", ",", "output_type", "=", "None", ",", "sort_by", "=", "None", ")", ":", "kernels", "=", "self", ".", "kernels_list", "(", "page", "=", "page", ",", "page_size", "=", "page_size", ",", "search", "=", "search", ",", "mine", "=", "mine", ",", "dataset", "=", "dataset", ",", "competition", "=", "competition", ",", "parent_kernel", "=", "parent", ",", "user", "=", "user", ",", "language", "=", "language", ",", "kernel_type", "=", "kernel_type", ",", "output_type", "=", "output_type", ",", "sort_by", "=", "sort_by", ")", "fields", "=", "[", "'ref'", ",", "'title'", ",", "'author'", ",", "'lastRunTime'", ",", "'totalVotes'", "]", "if", "kernels", ":", "if", "csv_display", ":", "self", ".", "print_csv", "(", "kernels", ",", "fields", ")", "else", ":", "self", ".", "print_table", "(", "kernels", ",", "fields", ")", "else", ":", "print", "(", "'No kernels found'", ")" ]
client wrapper for kernels_list, see this function for arguments. Additional arguments are provided here. Parameters ========== csv_display: if True, print comma separated values instead of table
[ "client", "wrapper", "for", "kernels_list", "see", "this", "function", "for", "arguments", ".", "Additional", "arguments", "are", "provided", "here", ".", "Parameters", "==========", "csv_display", ":", "if", "True", "print", "comma", "separated", "values", "instead", "of", "table" ]
python
train
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/netconf.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/netconf.py#L597-L683
def _node_add_with_peer_list(self, child_self, child_other): '''_node_add_with_peer_list Low-level api: Apply delta child_other to child_self when child_self is the peer of child_other. Element child_self and child_other are list nodes. Element child_self will be modified during the process. RFC6020 section 7.8.6 is a reference of this method. Parameters ---------- child_self : `Element` A child of a config node in a config tree. child_other : `Element` A child of a config node in another config tree. child_self is the peer of child_other. Returns ------- None There is no return of this method. ''' parent_self = child_self.getparent() s_node = self.device.get_schema_node(child_self) if child_other.get(operation_tag) != 'delete' and \ child_other.get(operation_tag) != 'remove' and \ s_node.get('ordered-by') == 'user' and \ child_other.get(insert_tag) is not None: if child_other.get(insert_tag) == 'first': scope = parent_self.getchildren() siblings = self._get_sequence(scope, child_other.tag, parent_self) if siblings[0] != child_self: siblings[0].addprevious(child_self) elif child_other.get(insert_tag) == 'last': scope = parent_self.getchildren() siblings = self._get_sequence(scope, child_other.tag, parent_self) if siblings[-1] != child_self: siblings[-1].addnext(child_self) elif child_other.get(insert_tag) == 'before': if child_other.get(key_tag) is None: _inserterror('before', self.device.get_xpath(child_other), 'key') sibling = parent_self.find(child_other.tag + child_other.get(key_tag), namespaces=child_other.nsmap) if sibling is None: path = self.device.get_xpath(child_other) key = child_other.get(key_tag) _inserterror('before', path, 'key', key) if sibling != child_self: sibling.addprevious(child_self) elif child_other.get(insert_tag) == 'after': if child_other.get(key_tag) is None: _inserterror('after', self.device.get_xpath(child_other), 'key') sibling = parent_self.find(child_other.tag + child_other.get(key_tag), namespaces=child_other.nsmap) if sibling is None: path = self.device.get_xpath(child_other) key = child_other.get(key_tag) _inserterror('after', path, 'key', key) if sibling != child_self: sibling.addnext(child_self) if child_other.get(operation_tag) is None or \ child_other.get(operation_tag) == 'merge': self.node_add(child_self, child_other) elif child_other.get(operation_tag) == 'replace': e = deepcopy(child_other) parent_self.replace(child_self, self._del_attrib(e)) elif child_other.get(operation_tag) == 'create': raise ConfigDeltaError('data-exists: try to create node {} but ' \ 'it already exists' \ .format(self.device.get_xpath(child_other))) elif child_other.get(operation_tag) == 'delete' or \ child_other.get(operation_tag) == 'remove': parent_self.remove(child_self) else: raise ConfigDeltaError("unknown operation: node {} contains " \ "operation '{}'" \ .format(self.device.get_xpath(child_other), child_other.get(operation_tag)))
[ "def", "_node_add_with_peer_list", "(", "self", ",", "child_self", ",", "child_other", ")", ":", "parent_self", "=", "child_self", ".", "getparent", "(", ")", "s_node", "=", "self", ".", "device", ".", "get_schema_node", "(", "child_self", ")", "if", "child_other", ".", "get", "(", "operation_tag", ")", "!=", "'delete'", "and", "child_other", ".", "get", "(", "operation_tag", ")", "!=", "'remove'", "and", "s_node", ".", "get", "(", "'ordered-by'", ")", "==", "'user'", "and", "child_other", ".", "get", "(", "insert_tag", ")", "is", "not", "None", ":", "if", "child_other", ".", "get", "(", "insert_tag", ")", "==", "'first'", ":", "scope", "=", "parent_self", ".", "getchildren", "(", ")", "siblings", "=", "self", ".", "_get_sequence", "(", "scope", ",", "child_other", ".", "tag", ",", "parent_self", ")", "if", "siblings", "[", "0", "]", "!=", "child_self", ":", "siblings", "[", "0", "]", ".", "addprevious", "(", "child_self", ")", "elif", "child_other", ".", "get", "(", "insert_tag", ")", "==", "'last'", ":", "scope", "=", "parent_self", ".", "getchildren", "(", ")", "siblings", "=", "self", ".", "_get_sequence", "(", "scope", ",", "child_other", ".", "tag", ",", "parent_self", ")", "if", "siblings", "[", "-", "1", "]", "!=", "child_self", ":", "siblings", "[", "-", "1", "]", ".", "addnext", "(", "child_self", ")", "elif", "child_other", ".", "get", "(", "insert_tag", ")", "==", "'before'", ":", "if", "child_other", ".", "get", "(", "key_tag", ")", "is", "None", ":", "_inserterror", "(", "'before'", ",", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", ",", "'key'", ")", "sibling", "=", "parent_self", ".", "find", "(", "child_other", ".", "tag", "+", "child_other", ".", "get", "(", "key_tag", ")", ",", "namespaces", "=", "child_other", ".", "nsmap", ")", "if", "sibling", "is", "None", ":", "path", "=", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", "key", "=", "child_other", ".", "get", "(", "key_tag", ")", "_inserterror", "(", "'before'", ",", "path", ",", "'key'", ",", "key", ")", "if", "sibling", "!=", "child_self", ":", "sibling", ".", "addprevious", "(", "child_self", ")", "elif", "child_other", ".", "get", "(", "insert_tag", ")", "==", "'after'", ":", "if", "child_other", ".", "get", "(", "key_tag", ")", "is", "None", ":", "_inserterror", "(", "'after'", ",", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", ",", "'key'", ")", "sibling", "=", "parent_self", ".", "find", "(", "child_other", ".", "tag", "+", "child_other", ".", "get", "(", "key_tag", ")", ",", "namespaces", "=", "child_other", ".", "nsmap", ")", "if", "sibling", "is", "None", ":", "path", "=", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", "key", "=", "child_other", ".", "get", "(", "key_tag", ")", "_inserterror", "(", "'after'", ",", "path", ",", "'key'", ",", "key", ")", "if", "sibling", "!=", "child_self", ":", "sibling", ".", "addnext", "(", "child_self", ")", "if", "child_other", ".", "get", "(", "operation_tag", ")", "is", "None", "or", "child_other", ".", "get", "(", "operation_tag", ")", "==", "'merge'", ":", "self", ".", "node_add", "(", "child_self", ",", "child_other", ")", "elif", "child_other", ".", "get", "(", "operation_tag", ")", "==", "'replace'", ":", "e", "=", "deepcopy", "(", "child_other", ")", "parent_self", ".", "replace", "(", "child_self", ",", "self", ".", "_del_attrib", "(", "e", ")", ")", "elif", "child_other", ".", "get", "(", "operation_tag", ")", "==", "'create'", ":", "raise", "ConfigDeltaError", "(", "'data-exists: 
try to create node {} but '", "'it already exists'", ".", "format", "(", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", ")", ")", "elif", "child_other", ".", "get", "(", "operation_tag", ")", "==", "'delete'", "or", "child_other", ".", "get", "(", "operation_tag", ")", "==", "'remove'", ":", "parent_self", ".", "remove", "(", "child_self", ")", "else", ":", "raise", "ConfigDeltaError", "(", "\"unknown operation: node {} contains \"", "\"operation '{}'\"", ".", "format", "(", "self", ".", "device", ".", "get_xpath", "(", "child_other", ")", ",", "child_other", ".", "get", "(", "operation_tag", ")", ")", ")" ]
_node_add_with_peer_list Low-level api: Apply delta child_other to child_self when child_self is the peer of child_other. Element child_self and child_other are list nodes. Element child_self will be modified during the process. RFC6020 section 7.8.6 is a reference of this method. Parameters ---------- child_self : `Element` A child of a config node in a config tree. child_other : `Element` A child of a config node in another config tree. child_self is the peer of child_other. Returns ------- None There is no return of this method.
[ "_node_add_with_peer_list" ]
python
train
cokelaer/spectrum
doc/sphinxext/sphinx_gallery/docs_resolv.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/doc/sphinxext/sphinx_gallery/docs_resolv.py#L408-L436
def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" if exception is not None: return # No need to waste time embedding hyperlinks when not running the examples # XXX: also at the time of writing this fixes make html-noplot # for some reason I don't fully understand if not app.builder.config.plot_gallery: return # XXX: Whitelist of builders for which it makes sense to embed # hyperlinks inside the example html. Note that the link embedding # require searchindex.js to exist for the links to the local doc # and there does not seem to be a good way of knowing which # builders creates a searchindex.js. if app.builder.name not in ['html', 'readthedocs']: return print('Embedding documentation hyperlinks in examples..') gallery_conf = app.config.sphinx_gallery_conf gallery_dirs = gallery_conf['gallery_dirs'] if not isinstance(gallery_dirs, list): gallery_dirs = [gallery_dirs] for gallery_dir in gallery_dirs: _embed_code_links(app, gallery_conf, gallery_dir)
[ "def", "embed_code_links", "(", "app", ",", "exception", ")", ":", "if", "exception", "is", "not", "None", ":", "return", "# No need to waste time embedding hyperlinks when not running the examples", "# XXX: also at the time of writing this fixes make html-noplot", "# for some reason I don't fully understand", "if", "not", "app", ".", "builder", ".", "config", ".", "plot_gallery", ":", "return", "# XXX: Whitelist of builders for which it makes sense to embed", "# hyperlinks inside the example html. Note that the link embedding", "# require searchindex.js to exist for the links to the local doc", "# and there does not seem to be a good way of knowing which", "# builders creates a searchindex.js.", "if", "app", ".", "builder", ".", "name", "not", "in", "[", "'html'", ",", "'readthedocs'", "]", ":", "return", "print", "(", "'Embedding documentation hyperlinks in examples..'", ")", "gallery_conf", "=", "app", ".", "config", ".", "sphinx_gallery_conf", "gallery_dirs", "=", "gallery_conf", "[", "'gallery_dirs'", "]", "if", "not", "isinstance", "(", "gallery_dirs", ",", "list", ")", ":", "gallery_dirs", "=", "[", "gallery_dirs", "]", "for", "gallery_dir", "in", "gallery_dirs", ":", "_embed_code_links", "(", "app", ",", "gallery_conf", ",", "gallery_dir", ")" ]
Embed hyperlinks to documentation into example code
[ "Embed", "hyperlinks", "to", "documentation", "into", "example", "code" ]
python
valid
UCSBarchlab/PyRTL
pyrtl/transform.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/transform.py#L43-L48
def all_nets(transform_func): """Decorator that wraps a net transform function""" @functools.wraps(transform_func) def t_res(**kwargs): net_transform(transform_func, **kwargs) return t_res
[ "def", "all_nets", "(", "transform_func", ")", ":", "@", "functools", ".", "wraps", "(", "transform_func", ")", "def", "t_res", "(", "*", "*", "kwargs", ")", ":", "net_transform", "(", "transform_func", ",", "*", "*", "kwargs", ")", "return", "t_res" ]
Decorator that wraps a net transform function
[ "Decorator", "that", "wraps", "a", "net", "transform", "function" ]
python
train
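As an aside on the PyRTL record above: the all_nets decorator is essentially functools.wraps plus keyword forwarding. A minimal, self-contained sketch of that same pattern follows; the wrapper and function names here are invented for illustration and are not part of PyRTL's API.

    import functools

    def run_with_logging(func):
        # Preserve func's name and docstring on the wrapper, as functools.wraps does.
        @functools.wraps(func)
        def wrapper(**kwargs):
            print("calling", func.__name__, "with", kwargs)
            return func(**kwargs)
        return wrapper

    @run_with_logging
    def scale(value=1, factor=2):
        """Multiply value by factor."""
        return value * factor

    print(scale(value=3, factor=4))        # logs the call, then prints 12
    print(scale.__name__, scale.__doc__)   # name and docstring survive the wrapping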
SCIP-Interfaces/PySCIPOpt
examples/finished/ssa.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/ssa.py#L14-L53
def ssa(n,h,K,f,T): """ssa -- multi-stage (serial) safety stock allocation model Parameters: - n: number of stages - h[i]: inventory cost on stage i - K: number of linear segments - f: (non-linear) cost function - T[i]: production lead time on stage i Returns the model with the piecewise linear relation on added variables x, f, and z. """ model = Model("safety stock allocation") # calculate endpoints for linear segments a,b = {},{} for i in range(1,n+1): a[i] = [k for k in range(K)] b[i] = [f(i,k) for k in range(K)] # x: net replenishment time for stage i # y: corresponding cost # s: piecewise linear segment of variable x x,y,s = {},{},{} L = {} # service time of stage i for i in range(1,n+1): x[i],y[i],s[i] = convex_comb_sos(model,a[i],b[i]) if i == 1: L[i] = model.addVar(ub=0, vtype="C", name="L[%s]"%i) else: L[i] = model.addVar(vtype="C", name="L[%s]"%i) L[n+1] = model.addVar(ub=0, vtype="C", name="L[%s]"%(n+1)) for i in range(1,n+1): # net replenishment time for each stage i model.addCons(x[i] + L[i] == T[i] + L[i+1]) model.setObjective(quicksum(h[i]*y[i] for i in range(1,n+1)), "minimize") model.data = x,s,L return model
[ "def", "ssa", "(", "n", ",", "h", ",", "K", ",", "f", ",", "T", ")", ":", "model", "=", "Model", "(", "\"safety stock allocation\"", ")", "# calculate endpoints for linear segments", "a", ",", "b", "=", "{", "}", ",", "{", "}", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "a", "[", "i", "]", "=", "[", "k", "for", "k", "in", "range", "(", "K", ")", "]", "b", "[", "i", "]", "=", "[", "f", "(", "i", ",", "k", ")", "for", "k", "in", "range", "(", "K", ")", "]", "# x: net replenishment time for stage i", "# y: corresponding cost", "# s: piecewise linear segment of variable x", "x", ",", "y", ",", "s", "=", "{", "}", ",", "{", "}", ",", "{", "}", "L", "=", "{", "}", "# service time of stage i", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "x", "[", "i", "]", ",", "y", "[", "i", "]", ",", "s", "[", "i", "]", "=", "convex_comb_sos", "(", "model", ",", "a", "[", "i", "]", ",", "b", "[", "i", "]", ")", "if", "i", "==", "1", ":", "L", "[", "i", "]", "=", "model", ".", "addVar", "(", "ub", "=", "0", ",", "vtype", "=", "\"C\"", ",", "name", "=", "\"L[%s]\"", "%", "i", ")", "else", ":", "L", "[", "i", "]", "=", "model", ".", "addVar", "(", "vtype", "=", "\"C\"", ",", "name", "=", "\"L[%s]\"", "%", "i", ")", "L", "[", "n", "+", "1", "]", "=", "model", ".", "addVar", "(", "ub", "=", "0", ",", "vtype", "=", "\"C\"", ",", "name", "=", "\"L[%s]\"", "%", "(", "n", "+", "1", ")", ")", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "# net replenishment time for each stage i", "model", ".", "addCons", "(", "x", "[", "i", "]", "+", "L", "[", "i", "]", "==", "T", "[", "i", "]", "+", "L", "[", "i", "+", "1", "]", ")", "model", ".", "setObjective", "(", "quicksum", "(", "h", "[", "i", "]", "*", "y", "[", "i", "]", "for", "i", "in", "range", "(", "1", ",", "n", "+", "1", ")", ")", ",", "\"minimize\"", ")", "model", ".", "data", "=", "x", ",", "s", ",", "L", "return", "model" ]
ssa -- multi-stage (serial) safety stock allocation model Parameters: - n: number of stages - h[i]: inventory cost on stage i - K: number of linear segments - f: (non-linear) cost function - T[i]: production lead time on stage i Returns the model with the piecewise linear relation on added variables x, f, and z.
[ "ssa", "--", "multi", "-", "stage", "(", "serial", ")", "safety", "stock", "allocation", "model", "Parameters", ":", "-", "n", ":", "number", "of", "stages", "-", "h", "[", "i", "]", ":", "inventory", "cost", "on", "stage", "i", "-", "K", ":", "number", "of", "linear", "segments", "-", "f", ":", "(", "non", "-", "linear", ")", "cost", "function", "-", "T", "[", "i", "]", ":", "production", "lead", "time", "on", "stage", "i", "Returns", "the", "model", "with", "the", "piecewise", "linear", "relation", "on", "added", "variables", "x", "f", "and", "z", "." ]
python
train
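The ssa record above precomputes breakpoints a[i], b[i] for each stage before handing them to convex_comb_sos. A tiny standalone sketch of just that breakpoint step, using a made-up concave cost function in place of the model's f and requiring no SCIP installation:

    # Hypothetical cost: grows with the square root of the net replenishment time k,
    # scaled by the stage index i (stand-in for the record's non-linear f).
    def f(i, k):
        return i * (k ** 0.5)

    n, K = 3, 5
    a, b = {}, {}
    for i in range(1, n + 1):
        a[i] = [k for k in range(K)]           # x-coordinates of the linear segments
        b[i] = [f(i, k) for k in range(K)]     # cost value at each breakpoint

    print(a[1])                                # [0, 1, 2, 3, 4]
    print([round(v, 3) for v in b[2]])         # breakpoint costs for stage 2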
anchore/anchore
anchore/cli/toolbox.py
https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/toolbox.py#L144-L246
def setup_module_dev(destdir): """ Sets up a development environment suitable for working on anchore modules (queries, etc) in the specified directory. Creates a copied environment in the destination containing the module scripts, unpacked image(s) and helper scripts such that a module script that works in the environment can be copied into the correct installation environment and run with anchore explore <modulename> invocation and should work. """ if not nav: sys.exit(1) ecode = 0 try: anchore_print("Anchore Module Development Environment\n") helpstr = "This tool has set up an environment that represents what anchore will normally set up before running an analyzer, gate and/or query module. Each section below includes some information along with a string that you can use to help develop your own anchore modules.\n" anchore_print(fill(helpstr, 80)) anchore_print("") anchore_print("Setting up environment...") anchore_print("") result = nav.unpack(destdir=destdir) if not result: raise Exception("unable to unpack input image") for imageId in result: unpackdir = result[imageId] # copy anchore imageDB dir into unpacked environment imgdir = '/'.join([config.data['image_data_store'], imageId]) tmpdatastore = '/'.join([unpackdir, 'data']) dstimgdir = '/'.join([tmpdatastore, imageId]) if not os.path.exists(imgdir): anchore_print_err("Image must exist and have been analyzed before being used for module development.") break if not os.path.exists(tmpdatastore): os.makedirs(tmpdatastore) shutil.copytree(imgdir, dstimgdir, symlinks=True) # copy examples into the unpacked environment examples = {} basedir = '/'.join([unpackdir, "anchore-modules"]) if not os.path.exists(basedir): os.makedirs(basedir) # copy the shell-utils os.makedirs('/'.join([basedir, 'shell-utils'])) for s in os.listdir('/'.join([config.data['scripts_dir'], 'shell-utils'])): shutil.copy('/'.join([config.data['scripts_dir'], 'shell-utils', s]), '/'.join([basedir, 'shell-utils', s])) # copy any examples that exist in the anchore egg into the unpack dir for d in os.listdir(config.data['scripts_dir']): scriptdir = '/'.join([basedir, d]) if os.path.exists(config.data['scripts_dir'] + "/examples/" + d): if not os.path.exists(scriptdir): os.makedirs(scriptdir) for s in os.listdir(config.data['scripts_dir'] + "/examples/" + d): thefile = '/'.join([config.data['scripts_dir'], "examples", d, s]) thefiledst = '/'.join([scriptdir, s]) if re.match(".*(\.sh)$", thefile): examples[d] = thefiledst shutil.copy(thefile, thefiledst) # all set, show how to use them anchore_print("\tImage: " + imageId[0:12]) anchore_print("\tUnpack Directory: " +result[imageId]) anchore_print("") analyzer_string = ' '.join([examples['analyzers'], imageId, tmpdatastore, dstimgdir, result[imageId]]) anchore_print("\tAnalyzer Command:\n\n\t" +analyzer_string) anchore_print("") anchore_utils.write_plainfile_fromstr(result[imageId] + "/queryimages", imageId+"\n") queryoutput = '/'.join([result[imageId], "querytmp/"]) if not os.path.exists(queryoutput): os.makedirs(queryoutput) query_string = ' '.join([examples['queries'], result[imageId] + "/queryimages", tmpdatastore, queryoutput, "passwd"]) anchore_print("Query Command:\n\n\t" + query_string) anchore_print("") anchore_print("Next Steps: ") anchore_print("\tFirst: run the above analyzer command and note the RESULT output") anchore_print("\tSecond: run the above query command and note the RESULT output, checking that the query was able to use the analyzer data to perform its search") anchore_print("\tThird: modify the 
analyzer/query modules as you wish, including renaming them and continue running/inspecting output until you are satisfied") anchore_print("\tFinally: when you're happy with the analyzer/query, copy them to next to existing anchore analyzer/query modules and anchore will start calling them as part of container analysis/query:\n") anchore_print("\tcp " + examples['analyzers'] + " " + config.data['scripts_dir'] + "/analyzers/99_analyzer-example.sh") anchore_print("\tcp " + examples['queries'] + " " + config.data['scripts_dir'] + "/queries/") anchore_print("\tanchore analyze --force --image " + imageId + " --imagetype none") anchore_print("\tanchore query --image " + imageId + " query-example") anchore_print("\tanchore query --image " + imageId + " query-example passwd") anchore_print("\tanchore query --image " + imageId + " query-example pdoesntexist") except: anchore_print_err("operation failed") ecode = 1 contexts['anchore_allimages'].clear() sys.exit(ecode)
[ "def", "setup_module_dev", "(", "destdir", ")", ":", "if", "not", "nav", ":", "sys", ".", "exit", "(", "1", ")", "ecode", "=", "0", "try", ":", "anchore_print", "(", "\"Anchore Module Development Environment\\n\"", ")", "helpstr", "=", "\"This tool has set up an environment that represents what anchore will normally set up before running an analyzer, gate and/or query module. Each section below includes some information along with a string that you can use to help develop your own anchore modules.\\n\"", "anchore_print", "(", "fill", "(", "helpstr", ",", "80", ")", ")", "anchore_print", "(", "\"\"", ")", "anchore_print", "(", "\"Setting up environment...\"", ")", "anchore_print", "(", "\"\"", ")", "result", "=", "nav", ".", "unpack", "(", "destdir", "=", "destdir", ")", "if", "not", "result", ":", "raise", "Exception", "(", "\"unable to unpack input image\"", ")", "for", "imageId", "in", "result", ":", "unpackdir", "=", "result", "[", "imageId", "]", "# copy anchore imageDB dir into unpacked environment", "imgdir", "=", "'/'", ".", "join", "(", "[", "config", ".", "data", "[", "'image_data_store'", "]", ",", "imageId", "]", ")", "tmpdatastore", "=", "'/'", ".", "join", "(", "[", "unpackdir", ",", "'data'", "]", ")", "dstimgdir", "=", "'/'", ".", "join", "(", "[", "tmpdatastore", ",", "imageId", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "imgdir", ")", ":", "anchore_print_err", "(", "\"Image must exist and have been analyzed before being used for module development.\"", ")", "break", "if", "not", "os", ".", "path", ".", "exists", "(", "tmpdatastore", ")", ":", "os", ".", "makedirs", "(", "tmpdatastore", ")", "shutil", ".", "copytree", "(", "imgdir", ",", "dstimgdir", ",", "symlinks", "=", "True", ")", "# copy examples into the unpacked environment ", "examples", "=", "{", "}", "basedir", "=", "'/'", ".", "join", "(", "[", "unpackdir", ",", "\"anchore-modules\"", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "basedir", ")", ":", "os", ".", "makedirs", "(", "basedir", ")", "# copy the shell-utils", "os", ".", "makedirs", "(", "'/'", ".", "join", "(", "[", "basedir", ",", "'shell-utils'", "]", ")", ")", "for", "s", "in", "os", ".", "listdir", "(", "'/'", ".", "join", "(", "[", "config", ".", "data", "[", "'scripts_dir'", "]", ",", "'shell-utils'", "]", ")", ")", ":", "shutil", ".", "copy", "(", "'/'", ".", "join", "(", "[", "config", ".", "data", "[", "'scripts_dir'", "]", ",", "'shell-utils'", ",", "s", "]", ")", ",", "'/'", ".", "join", "(", "[", "basedir", ",", "'shell-utils'", ",", "s", "]", ")", ")", "# copy any examples that exist in the anchore egg into the unpack dir", "for", "d", "in", "os", ".", "listdir", "(", "config", ".", "data", "[", "'scripts_dir'", "]", ")", ":", "scriptdir", "=", "'/'", ".", "join", "(", "[", "basedir", ",", "d", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "config", ".", "data", "[", "'scripts_dir'", "]", "+", "\"/examples/\"", "+", "d", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "scriptdir", ")", ":", "os", ".", "makedirs", "(", "scriptdir", ")", "for", "s", "in", "os", ".", "listdir", "(", "config", ".", "data", "[", "'scripts_dir'", "]", "+", "\"/examples/\"", "+", "d", ")", ":", "thefile", "=", "'/'", ".", "join", "(", "[", "config", ".", "data", "[", "'scripts_dir'", "]", ",", "\"examples\"", ",", "d", ",", "s", "]", ")", "thefiledst", "=", "'/'", ".", "join", "(", "[", "scriptdir", ",", "s", "]", ")", "if", "re", ".", "match", "(", "\".*(\\.sh)$\"", ",", "thefile", ")", ":", "examples", 
"[", "d", "]", "=", "thefiledst", "shutil", ".", "copy", "(", "thefile", ",", "thefiledst", ")", "# all set, show how to use them", "anchore_print", "(", "\"\\tImage: \"", "+", "imageId", "[", "0", ":", "12", "]", ")", "anchore_print", "(", "\"\\tUnpack Directory: \"", "+", "result", "[", "imageId", "]", ")", "anchore_print", "(", "\"\"", ")", "analyzer_string", "=", "' '", ".", "join", "(", "[", "examples", "[", "'analyzers'", "]", ",", "imageId", ",", "tmpdatastore", ",", "dstimgdir", ",", "result", "[", "imageId", "]", "]", ")", "anchore_print", "(", "\"\\tAnalyzer Command:\\n\\n\\t\"", "+", "analyzer_string", ")", "anchore_print", "(", "\"\"", ")", "anchore_utils", ".", "write_plainfile_fromstr", "(", "result", "[", "imageId", "]", "+", "\"/queryimages\"", ",", "imageId", "+", "\"\\n\"", ")", "queryoutput", "=", "'/'", ".", "join", "(", "[", "result", "[", "imageId", "]", ",", "\"querytmp/\"", "]", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "queryoutput", ")", ":", "os", ".", "makedirs", "(", "queryoutput", ")", "query_string", "=", "' '", ".", "join", "(", "[", "examples", "[", "'queries'", "]", ",", "result", "[", "imageId", "]", "+", "\"/queryimages\"", ",", "tmpdatastore", ",", "queryoutput", ",", "\"passwd\"", "]", ")", "anchore_print", "(", "\"Query Command:\\n\\n\\t\"", "+", "query_string", ")", "anchore_print", "(", "\"\"", ")", "anchore_print", "(", "\"Next Steps: \"", ")", "anchore_print", "(", "\"\\tFirst: run the above analyzer command and note the RESULT output\"", ")", "anchore_print", "(", "\"\\tSecond: run the above query command and note the RESULT output, checking that the query was able to use the analyzer data to perform its search\"", ")", "anchore_print", "(", "\"\\tThird: modify the analyzer/query modules as you wish, including renaming them and continue running/inspecting output until you are satisfied\"", ")", "anchore_print", "(", "\"\\tFinally: when you're happy with the analyzer/query, copy them to next to existing anchore analyzer/query modules and anchore will start calling them as part of container analysis/query:\\n\"", ")", "anchore_print", "(", "\"\\tcp \"", "+", "examples", "[", "'analyzers'", "]", "+", "\" \"", "+", "config", ".", "data", "[", "'scripts_dir'", "]", "+", "\"/analyzers/99_analyzer-example.sh\"", ")", "anchore_print", "(", "\"\\tcp \"", "+", "examples", "[", "'queries'", "]", "+", "\" \"", "+", "config", ".", "data", "[", "'scripts_dir'", "]", "+", "\"/queries/\"", ")", "anchore_print", "(", "\"\\tanchore analyze --force --image \"", "+", "imageId", "+", "\" --imagetype none\"", ")", "anchore_print", "(", "\"\\tanchore query --image \"", "+", "imageId", "+", "\" query-example\"", ")", "anchore_print", "(", "\"\\tanchore query --image \"", "+", "imageId", "+", "\" query-example passwd\"", ")", "anchore_print", "(", "\"\\tanchore query --image \"", "+", "imageId", "+", "\" query-example pdoesntexist\"", ")", "except", ":", "anchore_print_err", "(", "\"operation failed\"", ")", "ecode", "=", "1", "contexts", "[", "'anchore_allimages'", "]", ".", "clear", "(", ")", "sys", ".", "exit", "(", "ecode", ")" ]
Sets up a development environment suitable for working on anchore modules (queries, etc) in the specified directory. Creates a copied environment in the destination containing the module scripts, unpacked image(s) and helper scripts such that a module script that works in the environment can be copied into the correct installation environment and run with anchore explore <modulename> invocation and should work.
[ "Sets", "up", "a", "development", "environment", "suitable", "for", "working", "on", "anchore", "modules", "(", "queries", "etc", ")", "in", "the", "specified", "directory", ".", "Creates", "a", "copied", "environment", "in", "the", "destination", "containing", "the", "module", "scripts", "unpacked", "image", "(", "s", ")", "and", "helper", "scripts", "such", "that", "a", "module", "script", "that", "works", "in", "the", "environment", "can", "be", "copied", "into", "the", "correct", "installation", "environment", "and", "run", "with", "anchore", "explore", "<modulename", ">", "invocation", "and", "should", "work", "." ]
python
train
saltstack/salt
salt/modules/gcp_addon.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gcp_addon.py#L66-L137
def route_create(credential_file=None, project_id=None, name=None, dest_range=None, next_hop_instance=None, instance_zone=None, tags=None, network=None, priority=None ): ''' Create a route to send traffic destined to the Internet through your gateway instance credential_file : string File location of application default credential. For more information, refer: https://developers.google.com/identity/protocols/application-default-credentials project_id : string Project ID where instance and network resides. name : string name of the route to create next_hop_instance : string the name of an instance that should handle traffic matching this route. instance_zone : string zone where instance("next_hop_instance") resides network : string Specifies the network to which the route will be applied. dest_range : string The destination range of outgoing packets that the route will apply to. tags : list (optional) Identifies the set of instances that this route will apply to. priority : int (optional) Specifies the priority of this route relative to other routes. default=1000 CLI Example: salt 'salt-master.novalocal' gcp.route_create credential_file=/root/secret_key.json project_id=cp100-170315 name=derby-db-route1 next_hop_instance=instance-1 instance_zone=us-central1-a network=default dest_range=0.0.0.0/0 tags=['no-ip'] priority=700 In above example, the instances which are having tag "no-ip" will route the packet to instance "instance-1"(if packet is intended to other network) ''' credentials = oauth2client.service_account.ServiceAccountCredentials.\ from_json_keyfile_name(credential_file) service = googleapiclient.discovery.build('compute', 'v1', credentials=credentials) routes = service.routes() routes_config = { 'name': six.text_type(name), 'network': _get_network(project_id, six.text_type(network), service=service)['selfLink'], 'destRange': six.text_type(dest_range), 'nextHopInstance': _get_instance(project_id, instance_zone, next_hop_instance, service=service)['selfLink'], 'tags': tags, 'priority': priority } route_create_request = routes.insert(project=project_id, body=routes_config) return route_create_request.execute()
[ "def", "route_create", "(", "credential_file", "=", "None", ",", "project_id", "=", "None", ",", "name", "=", "None", ",", "dest_range", "=", "None", ",", "next_hop_instance", "=", "None", ",", "instance_zone", "=", "None", ",", "tags", "=", "None", ",", "network", "=", "None", ",", "priority", "=", "None", ")", ":", "credentials", "=", "oauth2client", ".", "service_account", ".", "ServiceAccountCredentials", ".", "from_json_keyfile_name", "(", "credential_file", ")", "service", "=", "googleapiclient", ".", "discovery", ".", "build", "(", "'compute'", ",", "'v1'", ",", "credentials", "=", "credentials", ")", "routes", "=", "service", ".", "routes", "(", ")", "routes_config", "=", "{", "'name'", ":", "six", ".", "text_type", "(", "name", ")", ",", "'network'", ":", "_get_network", "(", "project_id", ",", "six", ".", "text_type", "(", "network", ")", ",", "service", "=", "service", ")", "[", "'selfLink'", "]", ",", "'destRange'", ":", "six", ".", "text_type", "(", "dest_range", ")", ",", "'nextHopInstance'", ":", "_get_instance", "(", "project_id", ",", "instance_zone", ",", "next_hop_instance", ",", "service", "=", "service", ")", "[", "'selfLink'", "]", ",", "'tags'", ":", "tags", ",", "'priority'", ":", "priority", "}", "route_create_request", "=", "routes", ".", "insert", "(", "project", "=", "project_id", ",", "body", "=", "routes_config", ")", "return", "route_create_request", ".", "execute", "(", ")" ]
Create a route to send traffic destined to the Internet through your gateway instance credential_file : string File location of application default credential. For more information, refer: https://developers.google.com/identity/protocols/application-default-credentials project_id : string Project ID where the instance and network reside. name : string name of the route to create next_hop_instance : string the name of an instance that should handle traffic matching this route. instance_zone : string zone where the instance ("next_hop_instance") resides network : string Specifies the network to which the route will be applied. dest_range : string The destination range of outgoing packets that the route will apply to. tags : list (optional) Identifies the set of instances that this route will apply to. priority : int (optional) Specifies the priority of this route relative to other routes. default=1000 CLI Example: salt 'salt-master.novalocal' gcp.route_create credential_file=/root/secret_key.json project_id=cp100-170315 name=derby-db-route1 next_hop_instance=instance-1 instance_zone=us-central1-a network=default dest_range=0.0.0.0/0 tags=['no-ip'] priority=700 In the above example, instances that carry the tag "no-ip" will route packets to instance "instance-1" (if the packet is destined for another network)
[ "Create", "a", "route", "to", "send", "traffic", "destined", "to", "the", "Internet", "through", "your", "gateway", "instance" ]
python
train
lvh/txeasymail
txeasymail/html.py
https://github.com/lvh/txeasymail/blob/7b845a5238b1371824854468646d54653a426f09/txeasymail/html.py#L27-L33
def textFromHTML(html): """ Cleans and parses text from the given HTML. """ cleaner = lxml.html.clean.Cleaner(scripts=True) cleaned = cleaner.clean_html(html) return lxml.html.fromstring(cleaned).text_content()
[ "def", "textFromHTML", "(", "html", ")", ":", "cleaner", "=", "lxml", ".", "html", ".", "clean", ".", "Cleaner", "(", "scripts", "=", "True", ")", "cleaned", "=", "cleaner", ".", "clean_html", "(", "html", ")", "return", "lxml", ".", "html", ".", "fromstring", "(", "cleaned", ")", ".", "text_content", "(", ")" ]
Cleans and parses text from the given HTML.
[ "Cleans", "and", "parses", "text", "from", "the", "given", "HTML", "." ]
python
train
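A short usage sketch for the textFromHTML record above. It assumes an lxml installation that still ships lxml.html.clean (recent lxml releases move the cleaner into the separate lxml_html_clean package), and simply repeats the record's two calls on a literal snippet:

    import lxml.html
    import lxml.html.clean

    html = "<p>Hello <script>alert(1)</script><b>world</b></p>"
    cleaner = lxml.html.clean.Cleaner(scripts=True)        # strip <script> elements
    cleaned = cleaner.clean_html(html)
    print(lxml.html.fromstring(cleaned).text_content())    # -> Hello world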
daethnir/authprogs
authprogs/authprogs.py
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L344-L368
def find_match_command(self, rule): """Return a matching (possibly munged) command, if found in rule.""" command_string = rule['command'] command_list = command_string.split() self.logdebug('comparing "%s" to "%s"\n' % (command_list, self.original_command_list)) if rule.get('allow_trailing_args'): self.logdebug('allow_trailing_args is true - comparing initial ' 'list.\n') # Verify the initial arguments are all the same if (self.original_command_list[:len(command_list)] == command_list): self.logdebug('initial list is same\n') return {'command': self.original_command_list} else: self.logdebug('initial list is not same\n') elif rule.get('pcre_match'): if re.search(command_string, self.original_command_string): return {'command': self.original_command_list} elif command_list == self.original_command_list: return {'command': command_list}
[ "def", "find_match_command", "(", "self", ",", "rule", ")", ":", "command_string", "=", "rule", "[", "'command'", "]", "command_list", "=", "command_string", ".", "split", "(", ")", "self", ".", "logdebug", "(", "'comparing \"%s\" to \"%s\"\\n'", "%", "(", "command_list", ",", "self", ".", "original_command_list", ")", ")", "if", "rule", ".", "get", "(", "'allow_trailing_args'", ")", ":", "self", ".", "logdebug", "(", "'allow_trailing_args is true - comparing initial '", "'list.\\n'", ")", "# Verify the initial arguments are all the same", "if", "(", "self", ".", "original_command_list", "[", ":", "len", "(", "command_list", ")", "]", "==", "command_list", ")", ":", "self", ".", "logdebug", "(", "'initial list is same\\n'", ")", "return", "{", "'command'", ":", "self", ".", "original_command_list", "}", "else", ":", "self", ".", "logdebug", "(", "'initial list is not same\\n'", ")", "elif", "rule", ".", "get", "(", "'pcre_match'", ")", ":", "if", "re", ".", "search", "(", "command_string", ",", "self", ".", "original_command_string", ")", ":", "return", "{", "'command'", ":", "self", ".", "original_command_list", "}", "elif", "command_list", "==", "self", ".", "original_command_list", ":", "return", "{", "'command'", ":", "command_list", "}" ]
Return a matching (possibly munged) command, if found in rule.
[ "Return", "a", "matching", "(", "possibly", "munged", ")", "command", "if", "found", "in", "rule", "." ]
python
train
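A self-contained sketch of the three matching modes the find_match_command record walks through (prefix match with trailing arguments, regex match, exact match). The rule dictionaries and commands below are invented for illustration; they are not authprogs configuration.

    import re

    def match_command(rule, requested):
        """Return the command to run if `requested` satisfies `rule`, else None."""
        allowed = rule["command"].split()
        if rule.get("allow_trailing_args"):
            # Only the leading arguments have to agree with the rule.
            if requested[:len(allowed)] == allowed:
                return requested
        elif rule.get("pcre_match"):
            if re.search(rule["command"], " ".join(requested)):
                return requested
        elif requested == allowed:
            return allowed
        return None

    print(match_command({"command": "rsync --server", "allow_trailing_args": True},
                        ["rsync", "--server", "-vlogDtpre.iLs", "."]))
    print(match_command({"command": r"^scp -t /backups/", "pcre_match": True},
                        ["scp", "-t", "/backups/host1"]))
    print(match_command({"command": "uptime"}, ["uptime"]))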
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L937-L951
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_wwn(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info output = ET.SubElement(show_fabric_trunk_info, "output") show_trunk_list = ET.SubElement(output, "show-trunk-list") trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups") trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member") trunk_list_nbr_wwn = ET.SubElement(trunk_list_member, "trunk-list-nbr-wwn") trunk_list_nbr_wwn.text = kwargs.pop('trunk_list_nbr_wwn') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_wwn", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_fabric_trunk_info", "=", "ET", ".", "Element", "(", "\"show_fabric_trunk_info\"", ")", "config", "=", "show_fabric_trunk_info", "output", "=", "ET", ".", "SubElement", "(", "show_fabric_trunk_info", ",", "\"output\"", ")", "show_trunk_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-trunk-list\"", ")", "trunk_list_groups", "=", "ET", ".", "SubElement", "(", "show_trunk_list", ",", "\"trunk-list-groups\"", ")", "trunk_list_member", "=", "ET", ".", "SubElement", "(", "trunk_list_groups", ",", "\"trunk-list-member\"", ")", "trunk_list_nbr_wwn", "=", "ET", ".", "SubElement", "(", "trunk_list_member", ",", "\"trunk-list-nbr-wwn\"", ")", "trunk_list_nbr_wwn", ".", "text", "=", "kwargs", ".", "pop", "(", "'trunk_list_nbr_wwn'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
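The brocade/pynos record builds its request document by chaining ElementTree SubElement calls. A standalone sketch of that nesting pattern, with the callback omitted and an example WWN value that is not taken from the record:

    import xml.etree.ElementTree as ET

    config = ET.Element("config")
    show = ET.SubElement(config, "show-trunk-list")
    groups = ET.SubElement(show, "trunk-list-groups")
    member = ET.SubElement(groups, "trunk-list-member")
    nbr_wwn = ET.SubElement(member, "trunk-list-nbr-wwn")
    nbr_wwn.text = "10:00:00:05:33:ab:cd:ef"    # illustrative neighbour WWN

    print(ET.tostring(config, encoding="unicode"))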
lk-geimfari/mimesis
mimesis/providers/units.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/units.py#L35-L51
def prefix(self, sign: Optional[PrefixSign] = None, symbol: bool = False) -> str: """Get a random prefix for the International System of Units. :param sign: Sing of number. :param symbol: Return symbol of prefix. :return: Prefix for SI. :raises NonEnumerableError: if sign is not supported. :Example: mega """ prefixes = SI_PREFIXES_SYM if \ symbol else SI_PREFIXES key = self._validate_enum(item=sign, enum=PrefixSign) return self.random.choice(prefixes[key])
[ "def", "prefix", "(", "self", ",", "sign", ":", "Optional", "[", "PrefixSign", "]", "=", "None", ",", "symbol", ":", "bool", "=", "False", ")", "->", "str", ":", "prefixes", "=", "SI_PREFIXES_SYM", "if", "symbol", "else", "SI_PREFIXES", "key", "=", "self", ".", "_validate_enum", "(", "item", "=", "sign", ",", "enum", "=", "PrefixSign", ")", "return", "self", ".", "random", ".", "choice", "(", "prefixes", "[", "key", "]", ")" ]
Get a random prefix for the International System of Units. :param sign: Sign of the number. :param symbol: Return the symbol of the prefix. :return: Prefix for SI. :raises NonEnumerableError: if sign is not supported. :Example: mega
[ "Get", "a", "random", "prefix", "for", "the", "International", "System", "of", "Units", "." ]
python
train
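The mimesis prefix record validates an enum member and then draws randomly from one of two lookup tables. A stripped-down sketch of that validate-then-choose pattern with invented tables (not mimesis' real SI data):

    import enum
    import random

    class PrefixSign(enum.Enum):
        POSITIVE = "positive"
        NEGATIVE = "negative"

    PREFIXES = {
        PrefixSign.POSITIVE: ["kilo", "mega", "giga"],
        PrefixSign.NEGATIVE: ["milli", "micro", "nano"],
    }

    def prefix(sign=None):
        if sign is None:
            sign = random.choice(list(PrefixSign))       # pick a sign when omitted
        if not isinstance(sign, PrefixSign):
            raise ValueError("sign must be a PrefixSign member")
        return random.choice(PREFIXES[sign])

    print(prefix(PrefixSign.NEGATIVE))   # e.g. "micro"
    print(prefix())                      # random sign, random prefix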
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1749-L1767
def _check_data_port_id(self, data_port): """Checks the validity of a data port id Checks whether the id of the given data port is already used by anther data port (input, output, scoped vars) within the state. :param rafcon.core.data_port.DataPort data_port: The data port to be checked :return bool validity, str message: validity is True, when the data port is valid, False else. message gives more information especially if the data port is not valid """ # First check inputs and outputs valid, message = super(ContainerState, self)._check_data_port_id(data_port) if not valid: return False, message # Container state also has scoped variables for scoped_variable_id, scoped_variable in self.scoped_variables.items(): if data_port.data_port_id == scoped_variable_id and data_port is not scoped_variable: return False, "data port id already existing in state" return True, message
[ "def", "_check_data_port_id", "(", "self", ",", "data_port", ")", ":", "# First check inputs and outputs", "valid", ",", "message", "=", "super", "(", "ContainerState", ",", "self", ")", ".", "_check_data_port_id", "(", "data_port", ")", "if", "not", "valid", ":", "return", "False", ",", "message", "# Container state also has scoped variables", "for", "scoped_variable_id", ",", "scoped_variable", "in", "self", ".", "scoped_variables", ".", "items", "(", ")", ":", "if", "data_port", ".", "data_port_id", "==", "scoped_variable_id", "and", "data_port", "is", "not", "scoped_variable", ":", "return", "False", ",", "\"data port id already existing in state\"", "return", "True", ",", "message" ]
Checks the validity of a data port id Checks whether the id of the given data port is already used by another data port (input, output, scoped vars) within the state. :param rafcon.core.data_port.DataPort data_port: The data port to be checked :return bool validity, str message: validity is True when the data port is valid, False otherwise. message gives more information, especially if the data port is not valid
[ "Checks", "the", "validity", "of", "a", "data", "port", "id" ]
python
train
autokey/autokey
lib/autokey/qtapp.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/qtapp.py#L271-L279
def toggle_service(self): """ Convenience method for toggling the expansion service on or off. This is called by the global hotkey. """ self.monitoring_disabled.emit(not self.service.is_running()) if self.service.is_running(): self.pause_service() else: self.unpause_service()
[ "def", "toggle_service", "(", "self", ")", ":", "self", ".", "monitoring_disabled", ".", "emit", "(", "not", "self", ".", "service", ".", "is_running", "(", ")", ")", "if", "self", ".", "service", ".", "is_running", "(", ")", ":", "self", ".", "pause_service", "(", ")", "else", ":", "self", ".", "unpause_service", "(", ")" ]
Convenience method for toggling the expansion service on or off. This is called by the global hotkey.
[ "Convenience", "method", "for", "toggling", "the", "expansion", "service", "on", "or", "off", ".", "This", "is", "called", "by", "the", "global", "hotkey", "." ]
python
train
meejah/txtorcon
txtorcon/onion.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/onion.py#L1396-L1413
def _validate_ports_low_level(ports): """ Internal helper. Validates the 'ports' argument to EphemeralOnionService or EphemeralAuthenticatedOnionService returning None on success or raising ValueError otherwise. This only accepts the "list of strings" variants; some higher-level APIs also allow lists of ints or lists of 2-tuples, but those must be converted to strings before they get here. """ if not isinstance(ports, (list, tuple)): raise ValueError("'ports' must be a list of strings") if any([not isinstance(x, (six.text_type, str)) for x in ports]): raise ValueError("'ports' must be a list of strings") for port in ports: _validate_single_port_string(port)
[ "def", "_validate_ports_low_level", "(", "ports", ")", ":", "if", "not", "isinstance", "(", "ports", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "\"'ports' must be a list of strings\"", ")", "if", "any", "(", "[", "not", "isinstance", "(", "x", ",", "(", "six", ".", "text_type", ",", "str", ")", ")", "for", "x", "in", "ports", "]", ")", ":", "raise", "ValueError", "(", "\"'ports' must be a list of strings\"", ")", "for", "port", "in", "ports", ":", "_validate_single_port_string", "(", "port", ")" ]
Internal helper. Validates the 'ports' argument to EphemeralOnionService or EphemeralAuthenticatedOnionService returning None on success or raising ValueError otherwise. This only accepts the "list of strings" variants; some higher-level APIs also allow lists of ints or lists of 2-tuples, but those must be converted to strings before they get here.
[ "Internal", "helper", "." ]
python
train
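A runnable sketch of the port-list validation in the txtorcon record above. The single-port check here is a stand-in, since the record does not show what _validate_single_port_string actually verifies:

    def validate_single_port_string(port):
        # Stand-in check: the virtual port before the first space must be 1-65535.
        virtual = port.split(" ", 1)[0]
        if not virtual.isdigit() or not 1 <= int(virtual) <= 65535:
            raise ValueError("invalid virtual port in {!r}".format(port))

    def validate_ports(ports):
        if not isinstance(ports, (list, tuple)):
            raise ValueError("'ports' must be a list of strings")
        if any(not isinstance(p, str) for p in ports):
            raise ValueError("'ports' must be a list of strings")
        for port in ports:
            validate_single_port_string(port)

    validate_ports(["80 127.0.0.1:8080", "443 unix:/var/run/web.sock"])   # passes
    try:
        validate_ports([80])
    except ValueError as exc:
        print(exc)   # 'ports' must be a list of strings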
angr/angr
angr/storage/paged_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/paged_memory.py#L207-L220
def load_mo(self, state, page_idx): """ Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object """ try: key = next(self._storage.irange(maximum=page_idx, reverse=True)) except StopIteration: return None else: return self._storage[key]
[ "def", "load_mo", "(", "self", ",", "state", ",", "page_idx", ")", ":", "try", ":", "key", "=", "next", "(", "self", ".", "_storage", ".", "irange", "(", "maximum", "=", "page_idx", ",", "reverse", "=", "True", ")", ")", "except", "StopIteration", ":", "return", "None", "else", ":", "return", "self", ".", "_storage", "[", "key", "]" ]
Loads a memory object from memory. :param page_idx: the index into the page :returns: a tuple of the object
[ "Loads", "a", "memory", "object", "from", "memory", "." ]
python
train
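The load_mo record relies on SortedDict.irange to find the last object starting at or before an index. A small floor-lookup sketch of the same call, assuming the sortedcontainers package; the keys and values are invented:

    from sortedcontainers import SortedDict

    storage = SortedDict({0: "obj@0", 16: "obj@16", 64: "obj@64"})

    def load_at(page_idx):
        # Largest key <= page_idx, or None when nothing starts at or before it.
        try:
            key = next(storage.irange(maximum=page_idx, reverse=True))
        except StopIteration:
            return None
        return storage[key]

    print(load_at(20))   # obj@16
    print(load_at(-1))   # None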
saltstack/salt
salt/cloud/clouds/aliyun.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/aliyun.py#L124-L144
def avail_locations(call=None): ''' Return a dict of all available VM locations on the cloud provider with relevant data ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) params = {'Action': 'DescribeRegions'} items = query(params=params) ret = {} for region in items['Regions']['Region']: ret[region['RegionId']] = {} for item in region: ret[region['RegionId']][item] = six.text_type(region[item]) return ret
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "params", "=", "{", "'Action'", ":", "'DescribeRegions'", "}", "items", "=", "query", "(", "params", "=", "params", ")", "ret", "=", "{", "}", "for", "region", "in", "items", "[", "'Regions'", "]", "[", "'Region'", "]", ":", "ret", "[", "region", "[", "'RegionId'", "]", "]", "=", "{", "}", "for", "item", "in", "region", ":", "ret", "[", "region", "[", "'RegionId'", "]", "]", "[", "item", "]", "=", "six", ".", "text_type", "(", "region", "[", "item", "]", ")", "return", "ret" ]
Return a dict of all available VM locations on the cloud provider with relevant data
[ "Return", "a", "dict", "of", "all", "available", "VM", "locations", "on", "the", "cloud", "provider", "with", "relevant", "data" ]
python
train
Qiskit/qiskit-terra
qiskit/converters/ast_to_dag.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/converters/ast_to_dag.py#L200-L207
def _process_if(self, node): """Process an if node.""" creg_name = node.children[0].name creg = self.dag.cregs[creg_name] cval = node.children[1].value self.condition = (creg, cval) self._process_node(node.children[2]) self.condition = None
[ "def", "_process_if", "(", "self", ",", "node", ")", ":", "creg_name", "=", "node", ".", "children", "[", "0", "]", ".", "name", "creg", "=", "self", ".", "dag", ".", "cregs", "[", "creg_name", "]", "cval", "=", "node", ".", "children", "[", "1", "]", ".", "value", "self", ".", "condition", "=", "(", "creg", ",", "cval", ")", "self", ".", "_process_node", "(", "node", ".", "children", "[", "2", "]", ")", "self", ".", "condition", "=", "None" ]
Process an if node.
[ "Process", "an", "if", "node", "." ]
python
test
pyviz/holoviews
holoviews/core/options.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L1581-L1617
def create_custom_trees(cls, obj, options=None): """ Returns the appropriate set of customized subtree clones for an object, suitable for merging with Store.custom_options (i.e with the ids appropriately offset). Note if an object has no integer ids a new OptionTree is built. The id_mapping return value is a list mapping the ids that need to be matched as set to their new values. """ clones, id_mapping = {}, [] obj_ids = cls.get_object_ids(obj) offset = cls.id_offset() obj_ids = [None] if len(obj_ids)==0 else obj_ids for tree_id in obj_ids: if tree_id is not None and tree_id in Store.custom_options(): original = Store.custom_options()[tree_id] clone = OptionTree(items = original.items(), groups = original.groups) clones[tree_id + offset + 1] = clone id_mapping.append((tree_id, tree_id + offset + 1)) else: clone = OptionTree(groups=Store.options().groups) clones[offset] = clone id_mapping.append((tree_id, offset)) # Nodes needed to ensure allowed_keywords is respected for k in Store.options(): if k in [(opt.split('.')[0],) for opt in options]: group = {grp:Options( allowed_keywords=opt.allowed_keywords) for (grp, opt) in Store.options()[k].groups.items()} clone[k] = group return {k:cls.apply_customizations(options, t) if options else t for k,t in clones.items()}, id_mapping
[ "def", "create_custom_trees", "(", "cls", ",", "obj", ",", "options", "=", "None", ")", ":", "clones", ",", "id_mapping", "=", "{", "}", ",", "[", "]", "obj_ids", "=", "cls", ".", "get_object_ids", "(", "obj", ")", "offset", "=", "cls", ".", "id_offset", "(", ")", "obj_ids", "=", "[", "None", "]", "if", "len", "(", "obj_ids", ")", "==", "0", "else", "obj_ids", "for", "tree_id", "in", "obj_ids", ":", "if", "tree_id", "is", "not", "None", "and", "tree_id", "in", "Store", ".", "custom_options", "(", ")", ":", "original", "=", "Store", ".", "custom_options", "(", ")", "[", "tree_id", "]", "clone", "=", "OptionTree", "(", "items", "=", "original", ".", "items", "(", ")", ",", "groups", "=", "original", ".", "groups", ")", "clones", "[", "tree_id", "+", "offset", "+", "1", "]", "=", "clone", "id_mapping", ".", "append", "(", "(", "tree_id", ",", "tree_id", "+", "offset", "+", "1", ")", ")", "else", ":", "clone", "=", "OptionTree", "(", "groups", "=", "Store", ".", "options", "(", ")", ".", "groups", ")", "clones", "[", "offset", "]", "=", "clone", "id_mapping", ".", "append", "(", "(", "tree_id", ",", "offset", ")", ")", "# Nodes needed to ensure allowed_keywords is respected", "for", "k", "in", "Store", ".", "options", "(", ")", ":", "if", "k", "in", "[", "(", "opt", ".", "split", "(", "'.'", ")", "[", "0", "]", ",", ")", "for", "opt", "in", "options", "]", ":", "group", "=", "{", "grp", ":", "Options", "(", "allowed_keywords", "=", "opt", ".", "allowed_keywords", ")", "for", "(", "grp", ",", "opt", ")", "in", "Store", ".", "options", "(", ")", "[", "k", "]", ".", "groups", ".", "items", "(", ")", "}", "clone", "[", "k", "]", "=", "group", "return", "{", "k", ":", "cls", ".", "apply_customizations", "(", "options", ",", "t", ")", "if", "options", "else", "t", "for", "k", ",", "t", "in", "clones", ".", "items", "(", ")", "}", ",", "id_mapping" ]
Returns the appropriate set of customized subtree clones for an object, suitable for merging with Store.custom_options (i.e with the ids appropriately offset). Note if an object has no integer ids a new OptionTree is built. The id_mapping return value is a list mapping the ids that need to be matched as set to their new values.
[ "Returns", "the", "appropriate", "set", "of", "customized", "subtree", "clones", "for", "an", "object", "suitable", "for", "merging", "with", "Store", ".", "custom_options", "(", "i", ".", "e", "with", "the", "ids", "appropriately", "offset", ")", ".", "Note", "if", "an", "object", "has", "no", "integer", "ids", "a", "new", "OptionTree", "is", "built", "." ]
python
train
gem/oq-engine
openquake/hazardlib/sourceconverter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/sourceconverter.py#L918-L943
def mfds2multimfd(mfds): """ Convert a list of MFD nodes into a single MultiMFD node """ _, kind = mfds[0].tag.split('}') node = Node('multiMFD', dict(kind=kind, size=len(mfds))) lengths = None for field in mfd.multi_mfd.ASSOC[kind][1:]: alias = mfd.multi_mfd.ALIAS.get(field, field) if field in ('magnitudes', 'occurRates'): data = [~getattr(m, field) for m in mfds] lengths = [len(d) for d in data] data = sum(data, []) # list of lists else: try: data = [m[alias] for m in mfds] except KeyError: if alias == 'binWidth': # missing bindWidth in GR MDFs is ok continue else: raise node.append(Node(field, text=collapse(data))) if lengths: # this is the last field if present node.append(Node('lengths', text=collapse(lengths))) return node
[ "def", "mfds2multimfd", "(", "mfds", ")", ":", "_", ",", "kind", "=", "mfds", "[", "0", "]", ".", "tag", ".", "split", "(", "'}'", ")", "node", "=", "Node", "(", "'multiMFD'", ",", "dict", "(", "kind", "=", "kind", ",", "size", "=", "len", "(", "mfds", ")", ")", ")", "lengths", "=", "None", "for", "field", "in", "mfd", ".", "multi_mfd", ".", "ASSOC", "[", "kind", "]", "[", "1", ":", "]", ":", "alias", "=", "mfd", ".", "multi_mfd", ".", "ALIAS", ".", "get", "(", "field", ",", "field", ")", "if", "field", "in", "(", "'magnitudes'", ",", "'occurRates'", ")", ":", "data", "=", "[", "~", "getattr", "(", "m", ",", "field", ")", "for", "m", "in", "mfds", "]", "lengths", "=", "[", "len", "(", "d", ")", "for", "d", "in", "data", "]", "data", "=", "sum", "(", "data", ",", "[", "]", ")", "# list of lists", "else", ":", "try", ":", "data", "=", "[", "m", "[", "alias", "]", "for", "m", "in", "mfds", "]", "except", "KeyError", ":", "if", "alias", "==", "'binWidth'", ":", "# missing bindWidth in GR MDFs is ok", "continue", "else", ":", "raise", "node", ".", "append", "(", "Node", "(", "field", ",", "text", "=", "collapse", "(", "data", ")", ")", ")", "if", "lengths", ":", "# this is the last field if present", "node", ".", "append", "(", "Node", "(", "'lengths'", ",", "text", "=", "collapse", "(", "lengths", ")", ")", ")", "return", "node" ]
Convert a list of MFD nodes into a single MultiMFD node
[ "Convert", "a", "list", "of", "MFD", "nodes", "into", "a", "single", "MultiMFD", "node" ]
python
train
pypa/pipenv
pipenv/project.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/project.py#L1028-L1033
def ensure_proper_casing(self): """Ensures proper casing of Pipfile packages""" pfile = self.parsed_pipfile casing_changed = self.proper_case_section(pfile.get("packages", {})) casing_changed |= self.proper_case_section(pfile.get("dev-packages", {})) return casing_changed
[ "def", "ensure_proper_casing", "(", "self", ")", ":", "pfile", "=", "self", ".", "parsed_pipfile", "casing_changed", "=", "self", ".", "proper_case_section", "(", "pfile", ".", "get", "(", "\"packages\"", ",", "{", "}", ")", ")", "casing_changed", "|=", "self", ".", "proper_case_section", "(", "pfile", ".", "get", "(", "\"dev-packages\"", ",", "{", "}", ")", ")", "return", "casing_changed" ]
Ensures proper casing of Pipfile packages
[ "Ensures", "proper", "casing", "of", "Pipfile", "packages" ]
python
train
gpoulter/fablib
fablib.py
https://github.com/gpoulter/fablib/blob/5d14c4d998f79dd1aa3207063c3d06e30e3e2bf9/fablib.py#L157-L165
def watch(filenames, callback, use_sudo=False): """Call callback if any of filenames change during the context""" filenames = [filenames] if isinstance(filenames, basestring) else filenames old_md5 = {fn: md5sum(fn, use_sudo) for fn in filenames} yield for filename in filenames: if md5sum(filename, use_sudo) != old_md5[filename]: callback() return
[ "def", "watch", "(", "filenames", ",", "callback", ",", "use_sudo", "=", "False", ")", ":", "filenames", "=", "[", "filenames", "]", "if", "isinstance", "(", "filenames", ",", "basestring", ")", "else", "filenames", "old_md5", "=", "{", "fn", ":", "md5sum", "(", "fn", ",", "use_sudo", ")", "for", "fn", "in", "filenames", "}", "yield", "for", "filename", "in", "filenames", ":", "if", "md5sum", "(", "filename", ",", "use_sudo", ")", "!=", "old_md5", "[", "filename", "]", ":", "callback", "(", ")", "return" ]
Call callback if any of filenames change during the context
[ "Call", "callback", "if", "any", "of", "filenames", "change", "during", "the", "context" ]
python
train
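A local-filesystem sketch of the watch pattern in the fablib record above: hash the files before the block, run the block, fire the callback if any hash changed. It uses hashlib on local paths instead of Fabric's remote md5sum, so it is an analogy rather than fablib's implementation:

    import hashlib
    from contextlib import contextmanager

    def md5sum(path):
        with open(path, "rb") as handle:
            return hashlib.md5(handle.read()).hexdigest()

    @contextmanager
    def watch(filenames, callback):
        before = {name: md5sum(name) for name in filenames}
        yield
        if any(md5sum(name) != before[name] for name in filenames):
            callback()

    # Usage: rewrite a file inside the block and react once it has changed.
    with open("app.conf", "w") as fh:
        fh.write("workers = 2\n")
    with watch(["app.conf"], callback=lambda: print("config changed, reloading")):
        with open("app.conf", "w") as fh:
            fh.write("workers = 4\n")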
readbeyond/aeneas
aeneas/analyzecontainer.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/analyzecontainer.py#L98-L249
def _analyze_txt_config(self, config_string=None): """ Analyze the given container and return the corresponding job. If ``config_string`` is ``None``, try reading it from the TXT config file inside the container. :param string config_string: the configuration string :rtype: :class:`~aeneas.job.Job` """ self.log(u"Analyzing container with TXT config string") if config_string is None: self.log(u"Analyzing container with TXT config file") config_entry = self.container.entry_config_txt self.log([u"Found TXT config entry '%s'", config_entry]) config_dir = os.path.dirname(config_entry) self.log([u"Directory of TXT config entry: '%s'", config_dir]) self.log([u"Reading TXT config entry: '%s'", config_entry]) config_contents = self.container.read_entry(config_entry) self.log(u"Converting config contents to config string") config_contents = gf.safe_unicode(config_contents) config_string = gf.config_txt_to_string(config_contents) else: self.log([u"Analyzing container with TXT config string '%s'", config_string]) config_dir = "" self.log(u"Creating the Job object") job = Job(config_string) self.log(u"Getting entries") entries = self.container.entries self.log(u"Converting config string into config dict") parameters = gf.config_string_to_dict(config_string) self.log(u"Calculating the path of the tasks root directory") tasks_root_directory = gf.norm_join( config_dir, parameters[gc.PPN_JOB_IS_HIERARCHY_PREFIX] ) self.log([u"Path of the tasks root directory: '%s'", tasks_root_directory]) self.log(u"Calculating the path of the sync map root directory") sync_map_root_directory = gf.norm_join( config_dir, parameters[gc.PPN_JOB_OS_HIERARCHY_PREFIX] ) job_os_hierarchy_type = parameters[gc.PPN_JOB_OS_HIERARCHY_TYPE] self.log([u"Path of the sync map root directory: '%s'", sync_map_root_directory]) text_file_relative_path = parameters[gc.PPN_JOB_IS_TEXT_FILE_RELATIVE_PATH] self.log([u"Relative path for text file: '%s'", text_file_relative_path]) text_file_name_regex = re.compile(r"" + parameters[gc.PPN_JOB_IS_TEXT_FILE_NAME_REGEX]) self.log([u"Regex for text file: '%s'", parameters[gc.PPN_JOB_IS_TEXT_FILE_NAME_REGEX]]) audio_file_relative_path = parameters[gc.PPN_JOB_IS_AUDIO_FILE_RELATIVE_PATH] self.log([u"Relative path for audio file: '%s'", audio_file_relative_path]) audio_file_name_regex = re.compile(r"" + parameters[gc.PPN_JOB_IS_AUDIO_FILE_NAME_REGEX]) self.log([u"Regex for audio file: '%s'", parameters[gc.PPN_JOB_IS_AUDIO_FILE_NAME_REGEX]]) if parameters[gc.PPN_JOB_IS_HIERARCHY_TYPE] == HierarchyType.FLAT: self.log(u"Looking for text/audio pairs in flat hierarchy") text_files = self._find_files( entries, tasks_root_directory, text_file_relative_path, text_file_name_regex ) self.log([u"Found text files: '%s'", text_files]) audio_files = self._find_files( entries, tasks_root_directory, audio_file_relative_path, audio_file_name_regex ) self.log([u"Found audio files: '%s'", audio_files]) self.log(u"Matching files in flat hierarchy...") matched_tasks = self._match_files_flat_hierarchy( text_files, audio_files ) self.log(u"Matching files in flat hierarchy... 
done") for task_info in matched_tasks: self.log([u"Creating task: '%s'", str(task_info)]) task = self._create_task( task_info, config_string, sync_map_root_directory, job_os_hierarchy_type ) job.add_task(task) if parameters[gc.PPN_JOB_IS_HIERARCHY_TYPE] == HierarchyType.PAGED: self.log(u"Looking for text/audio pairs in paged hierarchy") # find all subdirectories of tasks_root_directory # that match gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX matched_directories = self._match_directories( entries, tasks_root_directory, parameters[gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX] ) for matched_directory in matched_directories: # rebuild the full path matched_directory_full_path = gf.norm_join( tasks_root_directory, matched_directory ) self.log([u"Looking for text/audio pairs in directory '%s'", matched_directory_full_path]) # look for text and audio files there text_files = self._find_files( entries, matched_directory_full_path, text_file_relative_path, text_file_name_regex ) self.log([u"Found text files: '%s'", text_files]) audio_files = self._find_files( entries, matched_directory_full_path, audio_file_relative_path, audio_file_name_regex ) self.log([u"Found audio files: '%s'", audio_files]) # if we have found exactly one text and one audio file, # create a Task if (len(text_files) == 1) and (len(audio_files) == 1): self.log([u"Exactly one text file and one audio file in '%s'", matched_directory]) task_info = [ matched_directory, text_files[0], audio_files[0] ] self.log([u"Creating task: '%s'", str(task_info)]) task = self._create_task( task_info, config_string, sync_map_root_directory, job_os_hierarchy_type ) job.add_task(task) elif len(text_files) > 1: self.log([u"More than one text file in '%s'", matched_directory]) elif len(audio_files) > 1: self.log([u"More than one audio file in '%s'", matched_directory]) else: self.log([u"No text nor audio file in '%s'", matched_directory]) return job
[ "def", "_analyze_txt_config", "(", "self", ",", "config_string", "=", "None", ")", ":", "self", ".", "log", "(", "u\"Analyzing container with TXT config string\"", ")", "if", "config_string", "is", "None", ":", "self", ".", "log", "(", "u\"Analyzing container with TXT config file\"", ")", "config_entry", "=", "self", ".", "container", ".", "entry_config_txt", "self", ".", "log", "(", "[", "u\"Found TXT config entry '%s'\"", ",", "config_entry", "]", ")", "config_dir", "=", "os", ".", "path", ".", "dirname", "(", "config_entry", ")", "self", ".", "log", "(", "[", "u\"Directory of TXT config entry: '%s'\"", ",", "config_dir", "]", ")", "self", ".", "log", "(", "[", "u\"Reading TXT config entry: '%s'\"", ",", "config_entry", "]", ")", "config_contents", "=", "self", ".", "container", ".", "read_entry", "(", "config_entry", ")", "self", ".", "log", "(", "u\"Converting config contents to config string\"", ")", "config_contents", "=", "gf", ".", "safe_unicode", "(", "config_contents", ")", "config_string", "=", "gf", ".", "config_txt_to_string", "(", "config_contents", ")", "else", ":", "self", ".", "log", "(", "[", "u\"Analyzing container with TXT config string '%s'\"", ",", "config_string", "]", ")", "config_dir", "=", "\"\"", "self", ".", "log", "(", "u\"Creating the Job object\"", ")", "job", "=", "Job", "(", "config_string", ")", "self", ".", "log", "(", "u\"Getting entries\"", ")", "entries", "=", "self", ".", "container", ".", "entries", "self", ".", "log", "(", "u\"Converting config string into config dict\"", ")", "parameters", "=", "gf", ".", "config_string_to_dict", "(", "config_string", ")", "self", ".", "log", "(", "u\"Calculating the path of the tasks root directory\"", ")", "tasks_root_directory", "=", "gf", ".", "norm_join", "(", "config_dir", ",", "parameters", "[", "gc", ".", "PPN_JOB_IS_HIERARCHY_PREFIX", "]", ")", "self", ".", "log", "(", "[", "u\"Path of the tasks root directory: '%s'\"", ",", "tasks_root_directory", "]", ")", "self", ".", "log", "(", "u\"Calculating the path of the sync map root directory\"", ")", "sync_map_root_directory", "=", "gf", ".", "norm_join", "(", "config_dir", ",", "parameters", "[", "gc", ".", "PPN_JOB_OS_HIERARCHY_PREFIX", "]", ")", "job_os_hierarchy_type", "=", "parameters", "[", "gc", ".", "PPN_JOB_OS_HIERARCHY_TYPE", "]", "self", ".", "log", "(", "[", "u\"Path of the sync map root directory: '%s'\"", ",", "sync_map_root_directory", "]", ")", "text_file_relative_path", "=", "parameters", "[", "gc", ".", "PPN_JOB_IS_TEXT_FILE_RELATIVE_PATH", "]", "self", ".", "log", "(", "[", "u\"Relative path for text file: '%s'\"", ",", "text_file_relative_path", "]", ")", "text_file_name_regex", "=", "re", ".", "compile", "(", "r\"\"", "+", "parameters", "[", "gc", ".", "PPN_JOB_IS_TEXT_FILE_NAME_REGEX", "]", ")", "self", ".", "log", "(", "[", "u\"Regex for text file: '%s'\"", ",", "parameters", "[", "gc", ".", "PPN_JOB_IS_TEXT_FILE_NAME_REGEX", "]", "]", ")", "audio_file_relative_path", "=", "parameters", "[", "gc", ".", "PPN_JOB_IS_AUDIO_FILE_RELATIVE_PATH", "]", "self", ".", "log", "(", "[", "u\"Relative path for audio file: '%s'\"", ",", "audio_file_relative_path", "]", ")", "audio_file_name_regex", "=", "re", ".", "compile", "(", "r\"\"", "+", "parameters", "[", "gc", ".", "PPN_JOB_IS_AUDIO_FILE_NAME_REGEX", "]", ")", "self", ".", "log", "(", "[", "u\"Regex for audio file: '%s'\"", ",", "parameters", "[", "gc", ".", "PPN_JOB_IS_AUDIO_FILE_NAME_REGEX", "]", "]", ")", "if", "parameters", "[", "gc", ".", "PPN_JOB_IS_HIERARCHY_TYPE", "]", "==", 
"HierarchyType", ".", "FLAT", ":", "self", ".", "log", "(", "u\"Looking for text/audio pairs in flat hierarchy\"", ")", "text_files", "=", "self", ".", "_find_files", "(", "entries", ",", "tasks_root_directory", ",", "text_file_relative_path", ",", "text_file_name_regex", ")", "self", ".", "log", "(", "[", "u\"Found text files: '%s'\"", ",", "text_files", "]", ")", "audio_files", "=", "self", ".", "_find_files", "(", "entries", ",", "tasks_root_directory", ",", "audio_file_relative_path", ",", "audio_file_name_regex", ")", "self", ".", "log", "(", "[", "u\"Found audio files: '%s'\"", ",", "audio_files", "]", ")", "self", ".", "log", "(", "u\"Matching files in flat hierarchy...\"", ")", "matched_tasks", "=", "self", ".", "_match_files_flat_hierarchy", "(", "text_files", ",", "audio_files", ")", "self", ".", "log", "(", "u\"Matching files in flat hierarchy... done\"", ")", "for", "task_info", "in", "matched_tasks", ":", "self", ".", "log", "(", "[", "u\"Creating task: '%s'\"", ",", "str", "(", "task_info", ")", "]", ")", "task", "=", "self", ".", "_create_task", "(", "task_info", ",", "config_string", ",", "sync_map_root_directory", ",", "job_os_hierarchy_type", ")", "job", ".", "add_task", "(", "task", ")", "if", "parameters", "[", "gc", ".", "PPN_JOB_IS_HIERARCHY_TYPE", "]", "==", "HierarchyType", ".", "PAGED", ":", "self", ".", "log", "(", "u\"Looking for text/audio pairs in paged hierarchy\"", ")", "# find all subdirectories of tasks_root_directory", "# that match gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX", "matched_directories", "=", "self", ".", "_match_directories", "(", "entries", ",", "tasks_root_directory", ",", "parameters", "[", "gc", ".", "PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX", "]", ")", "for", "matched_directory", "in", "matched_directories", ":", "# rebuild the full path", "matched_directory_full_path", "=", "gf", ".", "norm_join", "(", "tasks_root_directory", ",", "matched_directory", ")", "self", ".", "log", "(", "[", "u\"Looking for text/audio pairs in directory '%s'\"", ",", "matched_directory_full_path", "]", ")", "# look for text and audio files there", "text_files", "=", "self", ".", "_find_files", "(", "entries", ",", "matched_directory_full_path", ",", "text_file_relative_path", ",", "text_file_name_regex", ")", "self", ".", "log", "(", "[", "u\"Found text files: '%s'\"", ",", "text_files", "]", ")", "audio_files", "=", "self", ".", "_find_files", "(", "entries", ",", "matched_directory_full_path", ",", "audio_file_relative_path", ",", "audio_file_name_regex", ")", "self", ".", "log", "(", "[", "u\"Found audio files: '%s'\"", ",", "audio_files", "]", ")", "# if we have found exactly one text and one audio file,", "# create a Task", "if", "(", "len", "(", "text_files", ")", "==", "1", ")", "and", "(", "len", "(", "audio_files", ")", "==", "1", ")", ":", "self", ".", "log", "(", "[", "u\"Exactly one text file and one audio file in '%s'\"", ",", "matched_directory", "]", ")", "task_info", "=", "[", "matched_directory", ",", "text_files", "[", "0", "]", ",", "audio_files", "[", "0", "]", "]", "self", ".", "log", "(", "[", "u\"Creating task: '%s'\"", ",", "str", "(", "task_info", ")", "]", ")", "task", "=", "self", ".", "_create_task", "(", "task_info", ",", "config_string", ",", "sync_map_root_directory", ",", "job_os_hierarchy_type", ")", "job", ".", "add_task", "(", "task", ")", "elif", "len", "(", "text_files", ")", ">", "1", ":", "self", ".", "log", "(", "[", "u\"More than one text file in '%s'\"", ",", "matched_directory", "]", ")", "elif", "len", "(", "audio_files", 
")", ">", "1", ":", "self", ".", "log", "(", "[", "u\"More than one audio file in '%s'\"", ",", "matched_directory", "]", ")", "else", ":", "self", ".", "log", "(", "[", "u\"No text nor audio file in '%s'\"", ",", "matched_directory", "]", ")", "return", "job" ]
Analyze the given container and return the corresponding job. If ``config_string`` is ``None``, try reading it from the TXT config file inside the container. :param string config_string: the configuration string :rtype: :class:`~aeneas.job.Job`
[ "Analyze", "the", "given", "container", "and", "return", "the", "corresponding", "job", "." ]
python
train
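A self-contained sketch of the flat-hierarchy pairing idea used in the record above (a hypothetical helper written for illustration, not aeneas's actual _match_files_flat_hierarchy): text and audio entries are paired when they share a basename, mirroring the "exactly one text file and one audio file" rule.

import os
import re

def match_flat_pairs(entries, text_regex, audio_regex):
    """Pair each text entry with the audio entry sharing its basename (illustrative only)."""
    texts = {os.path.splitext(os.path.basename(e))[0]: e
             for e in entries if re.search(text_regex, os.path.basename(e))}
    audios = {os.path.splitext(os.path.basename(e))[0]: e
              for e in entries if re.search(audio_regex, os.path.basename(e))}
    # keep only basenames present in both sets
    return [(name, texts[name], audios[name]) for name in sorted(texts) if name in audios]

print(match_flat_pairs(
    ["job/t1.txt", "job/t1.mp3", "job/t2.txt"], r"\.txt$", r"\.mp3$"))
# [('t1', 'job/t1.txt', 'job/t1.mp3')]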
petl-developers/petl
petl/util/materialise.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/util/materialise.py#L45-L71
def columns(table, missing=None): """ Construct a :class:`dict` mapping field names to lists of values. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] >>> cols = etl.columns(table) >>> cols['foo'] ['a', 'b', 'b'] >>> cols['bar'] [1, 2, 3] See also :func:`petl.util.materialise.facetcolumns`. """ cols = OrderedDict() it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) for f in flds: cols[f] = list() for row in it: for f, v in izip_longest(flds, row, fillvalue=missing): if f in cols: cols[f].append(v) return cols
[ "def", "columns", "(", "table", ",", "missing", "=", "None", ")", ":", "cols", "=", "OrderedDict", "(", ")", "it", "=", "iter", "(", "table", ")", "hdr", "=", "next", "(", "it", ")", "flds", "=", "list", "(", "map", "(", "text_type", ",", "hdr", ")", ")", "for", "f", "in", "flds", ":", "cols", "[", "f", "]", "=", "list", "(", ")", "for", "row", "in", "it", ":", "for", "f", ",", "v", "in", "izip_longest", "(", "flds", ",", "row", ",", "fillvalue", "=", "missing", ")", ":", "if", "f", "in", "cols", ":", "cols", "[", "f", "]", ".", "append", "(", "v", ")", "return", "cols" ]
Construct a :class:`dict` mapping field names to lists of values. E.g.:: >>> import petl as etl >>> table = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] >>> cols = etl.columns(table) >>> cols['foo'] ['a', 'b', 'b'] >>> cols['bar'] [1, 2, 3] See also :func:`petl.util.materialise.facetcolumns`.
[ "Construct", "a", ":", "class", ":", "dict", "mapping", "field", "names", "to", "lists", "of", "values", ".", "E", ".", "g", ".", "::" ]
python
train
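A complementary sketch of the missing argument (assuming petl is installed): because rows are zipped with izip_longest, short rows are padded with the fill value in the resulting column lists.

import petl as etl

table = [['foo', 'bar'], ['a', 1], ['b']]     # the second data row has no 'bar' value
cols = etl.columns(table, missing=-1)
print(cols['bar'])                            # [1, -1]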
tensorflow/tensor2tensor
tensor2tensor/models/video/basic_stochastic.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/video/basic_stochastic.py#L215-L231
def next_frame_basic_stochastic():
    """Basic 2-frame conv model with stochastic tower."""
    hparams = basic_deterministic_params.next_frame_basic_deterministic()
    hparams.stochastic_model = True
    hparams.add_hparam("latent_channels", 1)
    hparams.add_hparam("latent_std_min", -5.0)
    hparams.add_hparam("num_iterations_1st_stage", 15000)
    hparams.add_hparam("num_iterations_2nd_stage", 15000)
    hparams.add_hparam("latent_loss_multiplier", 1e-3)
    hparams.add_hparam("latent_loss_multiplier_dynamic", False)
    hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5)
    hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0)
    hparams.add_hparam("latent_loss_multiplier_schedule", "constant")
    hparams.add_hparam("latent_num_frames", 0)  # 0 means use all frames.
    hparams.add_hparam("anneal_end", 50000)
    hparams.add_hparam("information_capacity", 0.0)
    return hparams
[ "def", "next_frame_basic_stochastic", "(", ")", ":", "hparams", "=", "basic_deterministic_params", ".", "next_frame_basic_deterministic", "(", ")", "hparams", ".", "stochastic_model", "=", "True", "hparams", ".", "add_hparam", "(", "\"latent_channels\"", ",", "1", ")", "hparams", ".", "add_hparam", "(", "\"latent_std_min\"", ",", "-", "5.0", ")", "hparams", ".", "add_hparam", "(", "\"num_iterations_1st_stage\"", ",", "15000", ")", "hparams", ".", "add_hparam", "(", "\"num_iterations_2nd_stage\"", ",", "15000", ")", "hparams", ".", "add_hparam", "(", "\"latent_loss_multiplier\"", ",", "1e-3", ")", "hparams", ".", "add_hparam", "(", "\"latent_loss_multiplier_dynamic\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"latent_loss_multiplier_alpha\"", ",", "1e-5", ")", "hparams", ".", "add_hparam", "(", "\"latent_loss_multiplier_epsilon\"", ",", "1.0", ")", "hparams", ".", "add_hparam", "(", "\"latent_loss_multiplier_schedule\"", ",", "\"constant\"", ")", "hparams", ".", "add_hparam", "(", "\"latent_num_frames\"", ",", "0", ")", "# 0 means use all frames.", "hparams", ".", "add_hparam", "(", "\"anneal_end\"", ",", "50000", ")", "hparams", ".", "add_hparam", "(", "\"information_capacity\"", ",", "0.0", ")", "return", "hparams" ]
Basic 2-frame conv model with stochastic tower.
[ "Basic", "2", "-", "frame", "conv", "model", "with", "stochastic", "tower", "." ]
python
train
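A short usage sketch, assuming the function can be imported from tensor2tensor.models.video.basic_stochastic; the attribute names come straight from the record above.

from tensor2tensor.models.video.basic_stochastic import next_frame_basic_stochastic

hparams = next_frame_basic_stochastic()
hparams.latent_loss_multiplier = 1e-4     # override one of the stochastic-tower knobs
hparams.latent_num_frames = 4             # condition the latent on the first 4 frames only
print(hparams.stochastic_model, hparams.latent_channels)   # True 1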
Neurosim-lab/netpyne
netpyne/network/conn.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/network/conn.py#L310-L327
def fullConn (self, preCellsTags, postCellsTags, connParam): from .. import sim ''' Generates connections between all pre and post-syn cells ''' if sim.cfg.verbose: print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label'])) # get list of params that have a lambda function paramsStrFunc = [param for param in [p+'Func' for p in self.connStringFuncParams] if param in connParam] for paramStrFunc in paramsStrFunc: # replace lambda function (with args as dict of lambda funcs) with list of values connParam[paramStrFunc[:-4]+'List'] = {(preGid,postGid): connParam[paramStrFunc](**{k:v if isinstance(v, Number) else v(preCellTags,postCellTags) for k,v in connParam[paramStrFunc+'Vars'].items()}) for preGid,preCellTags in preCellsTags.items() for postGid,postCellTags in postCellsTags.items()} for postCellGid in postCellsTags: # for each postsyn cell if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids for preCellGid, preCellTags in preCellsTags.items(): # for each presyn cell self._addCellConn(connParam, preCellGid, postCellGid)
[ "def", "fullConn", "(", "self", ",", "preCellsTags", ",", "postCellsTags", ",", "connParam", ")", ":", "from", ".", ".", "import", "sim", "if", "sim", ".", "cfg", ".", "verbose", ":", "print", "(", "'Generating set of all-to-all connections (rule: %s) ...'", "%", "(", "connParam", "[", "'label'", "]", ")", ")", "# get list of params that have a lambda function", "paramsStrFunc", "=", "[", "param", "for", "param", "in", "[", "p", "+", "'Func'", "for", "p", "in", "self", ".", "connStringFuncParams", "]", "if", "param", "in", "connParam", "]", "for", "paramStrFunc", "in", "paramsStrFunc", ":", "# replace lambda function (with args as dict of lambda funcs) with list of values", "connParam", "[", "paramStrFunc", "[", ":", "-", "4", "]", "+", "'List'", "]", "=", "{", "(", "preGid", ",", "postGid", ")", ":", "connParam", "[", "paramStrFunc", "]", "(", "*", "*", "{", "k", ":", "v", "if", "isinstance", "(", "v", ",", "Number", ")", "else", "v", "(", "preCellTags", ",", "postCellTags", ")", "for", "k", ",", "v", "in", "connParam", "[", "paramStrFunc", "+", "'Vars'", "]", ".", "items", "(", ")", "}", ")", "for", "preGid", ",", "preCellTags", "in", "preCellsTags", ".", "items", "(", ")", "for", "postGid", ",", "postCellTags", "in", "postCellsTags", ".", "items", "(", ")", "}", "for", "postCellGid", "in", "postCellsTags", ":", "# for each postsyn cell", "if", "postCellGid", "in", "self", ".", "gid2lid", ":", "# check if postsyn is in this node's list of gids", "for", "preCellGid", ",", "preCellTags", "in", "preCellsTags", ".", "items", "(", ")", ":", "# for each presyn cell", "self", ".", "_addCellConn", "(", "connParam", ",", "preCellGid", ",", "postCellGid", ")" ]
Generates connections between all pre and post-syn cells
[ "Generates", "connections", "between", "all", "pre", "and", "post", "-", "syn", "cells" ]
python
train
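An illustrative, dependency-free sketch of the all-to-all rule (not the netpyne implementation): every presynaptic gid connects to every postsynaptic gid owned by the local node, which is the pairing the loops above produce.

pre_gids = [0, 1, 2]
post_gids = [10, 11]
local_gids = {10}        # gids simulated on this node (hypothetical)

conns = [(pre, post)
         for post in post_gids if post in local_gids
         for pre in pre_gids]
print(conns)             # [(0, 10), (1, 10), (2, 10)]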
woolfson-group/isambard
isambard/optimisation/optimizer.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/optimisation/optimizer.py#L385-L404
def funnel_rebuild(psg_trm_spec): """Rebuilds a model and compares it to a reference model. Parameters ---------- psg_trm: (([float], float, int), AMPAL, specification) A tuple containing the parameters, score and generation for a model as well as a model of the best scoring parameters. Returns ------- energy_rmsd_gen: (float, float, int) A triple containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation. """ param_score_gen, top_result_model, specification = psg_trm_spec params, score, gen = param_score_gen model = specification(*params) rmsd = top_result_model.rmsd(model) return rmsd, score, gen
[ "def", "funnel_rebuild", "(", "psg_trm_spec", ")", ":", "param_score_gen", ",", "top_result_model", ",", "specification", "=", "psg_trm_spec", "params", ",", "score", ",", "gen", "=", "param_score_gen", "model", "=", "specification", "(", "*", "params", ")", "rmsd", "=", "top_result_model", ".", "rmsd", "(", "model", ")", "return", "rmsd", ",", "score", ",", "gen" ]
Rebuilds a model and compares it to a reference model. Parameters ---------- psg_trm: (([float], float, int), AMPAL, specification) A tuple containing the parameters, score and generation for a model as well as a model of the best scoring parameters. Returns ------- energy_rmsd_gen: (float, float, int) A triple containing the BUFF score, RMSD to the top model and generation of a model generated during the minimisation.
[ "Rebuilds", "a", "model", "and", "compares", "it", "to", "a", "reference", "model", "." ]
python
train
agoragames/leaderboard-python
leaderboard/leaderboard.py
https://github.com/agoragames/leaderboard-python/blob/ec309859b197a751ac0322374b36d134d8c5522f/leaderboard/leaderboard.py#L196-L224
def rank_member_if_in( self, leaderboard_name, rank_conditional, member, score, member_data=None): ''' Rank a member in the named leaderboard based on execution of the +rank_conditional+. The +rank_conditional+ is passed the following parameters: member: Member name. current_score: Current score for the member in the leaderboard. score: Member score. member_data: Optional member data. leaderboard_options: Leaderboard options, e.g. 'reverse': Value of reverse option @param leaderboard_name [String] Name of the leaderboard. @param rank_conditional [function] Function which must return +True+ or +False+ that controls whether or not the member is ranked in the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member_data. ''' current_score = self.redis_connection.zscore(leaderboard_name, member) if current_score is not None: current_score = float(current_score) if rank_conditional(self, member, current_score, score, member_data, {'reverse': self.order}): self.rank_member_in(leaderboard_name, member, score, member_data)
[ "def", "rank_member_if_in", "(", "self", ",", "leaderboard_name", ",", "rank_conditional", ",", "member", ",", "score", ",", "member_data", "=", "None", ")", ":", "current_score", "=", "self", ".", "redis_connection", ".", "zscore", "(", "leaderboard_name", ",", "member", ")", "if", "current_score", "is", "not", "None", ":", "current_score", "=", "float", "(", "current_score", ")", "if", "rank_conditional", "(", "self", ",", "member", ",", "current_score", ",", "score", ",", "member_data", ",", "{", "'reverse'", ":", "self", ".", "order", "}", ")", ":", "self", ".", "rank_member_in", "(", "leaderboard_name", ",", "member", ",", "score", ",", "member_data", ")" ]
Rank a member in the named leaderboard based on execution of the +rank_conditional+. The +rank_conditional+ is passed the following parameters: member: Member name. current_score: Current score for the member in the leaderboard. score: Member score. member_data: Optional member data. leaderboard_options: Leaderboard options, e.g. 'reverse': Value of reverse option @param leaderboard_name [String] Name of the leaderboard. @param rank_conditional [function] Function which must return +True+ or +False+ that controls whether or not the member is ranked in the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member_data.
[ "Rank", "a", "member", "in", "the", "named", "leaderboard", "based", "on", "execution", "of", "the", "+", "rank_conditional", "+", "." ]
python
train
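A usage sketch (it assumes a reachable Redis server; the conditional's signature follows the docstring above): only overwrite a member's score when the new one is higher.

from leaderboard import Leaderboard

def improve_only(lb, member, current_score, score, member_data, options):
    # current_score is None when the member is not ranked yet
    return current_score is None or score > current_score

highscores = Leaderboard('highscores')
highscores.rank_member_if_in('highscores', improve_only, 'alice', 250)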
XuShaohua/bcloud
bcloud/pcs.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/pcs.py#L364-L372
def get_share_url_with_dirname(uk, shareid, dirname):
    '''Get the share link of a shared directory'''
    return ''.join([
        const.PAN_URL, 'wap/link',
        '?shareid=', shareid,
        '&uk=', uk,
        '&dir=', encoder.encode_uri_component(dirname),
        '&third=0',
    ])
[ "def", "get_share_url_with_dirname", "(", "uk", ",", "shareid", ",", "dirname", ")", ":", "return", "''", ".", "join", "(", "[", "const", ".", "PAN_URL", ",", "'wap/link'", ",", "'?shareid='", ",", "shareid", ",", "'&uk='", ",", "uk", ",", "'&dir='", ",", "encoder", ".", "encode_uri_component", "(", "dirname", ")", ",", "'&third=0'", ",", "]", ")" ]
Get the share link of a shared directory
[ "Get", "the", "share", "link", "of", "a", "shared", "directory" ]
python
train
google/grr
grr/server/grr_response_server/gui/api_plugins/hunt.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/hunt.py#L1320-L1343
def _Resample(self, stats, target_size):
    """Resamples the stats to have a specific number of data points."""
    t_first = stats[0][0]
    t_last = stats[-1][0]
    interval = (t_last - t_first) / target_size

    result = []
    current_t = t_first
    current_v = 0

    i = 0
    while i < len(stats):
        stat_t = stats[i][0]
        stat_v = stats[i][1]
        if stat_t <= (current_t + interval):
            # Always add the last value in an interval to the result.
            current_v = stat_v
            i += 1
        else:
            result.append([current_t + interval, current_v])
            current_t += interval

    result.append([current_t + interval, current_v])
    return result
[ "def", "_Resample", "(", "self", ",", "stats", ",", "target_size", ")", ":", "t_first", "=", "stats", "[", "0", "]", "[", "0", "]", "t_last", "=", "stats", "[", "-", "1", "]", "[", "0", "]", "interval", "=", "(", "t_last", "-", "t_first", ")", "/", "target_size", "result", "=", "[", "]", "current_t", "=", "t_first", "current_v", "=", "0", "i", "=", "0", "while", "i", "<", "len", "(", "stats", ")", ":", "stat_t", "=", "stats", "[", "i", "]", "[", "0", "]", "stat_v", "=", "stats", "[", "i", "]", "[", "1", "]", "if", "stat_t", "<=", "(", "current_t", "+", "interval", ")", ":", "# Always add the last value in an interval to the result.", "current_v", "=", "stat_v", "i", "+=", "1", "else", ":", "result", ".", "append", "(", "[", "current_t", "+", "interval", ",", "current_v", "]", ")", "current_t", "+=", "interval", "result", ".", "append", "(", "[", "current_t", "+", "interval", ",", "current_v", "]", ")", "return", "result" ]
Resamples the stats to have a specific number of data points.
[ "Resamples", "the", "stats", "to", "have", "a", "specific", "number", "of", "data", "points", "." ]
python
train
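A hand-checked example of the bucketing above:

stats = [(0, 1), (5, 2), (10, 3)]
# With target_size=2 the interval is 5, so the method above returns
# [[5.0, 2], [10.0, 3]]: the last value seen inside each 5-unit bucket.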
wrongwaycn/ssdb-py
ssdb/connection.py
https://github.com/wrongwaycn/ssdb-py/blob/ce7b1542f0faa06fe71a60c667fe15992af0f621/ssdb/connection.py#L171-L178
def on_connect(self, connection):
    """
    Called when the socket connects
    """
    self._sock = connection._sock
    self._buffer = SocketBuffer(self._sock, self.socket_read_size)
    if connection.decode_responses:
        self.encoding = connection.encoding
[ "def", "on_connect", "(", "self", ",", "connection", ")", ":", "self", ".", "_sock", "=", "connection", ".", "_sock", "self", ".", "_buffer", "=", "SocketBuffer", "(", "self", ".", "_sock", ",", "self", ".", "socket_read_size", ")", "if", "connection", ".", "decode_responses", ":", "self", ".", "encoding", "=", "connection", ".", "encoding" ]
Called when the socket connects
[ "Called", "when", "the", "socket", "connects" ]
python
train
Samreay/ChainConsumer
chainconsumer/chainconsumer.py
https://github.com/Samreay/ChainConsumer/blob/902288e4d85c2677a9051a2172e03128a6169ad7/chainconsumer/chainconsumer.py#L236-L266
def remove_chain(self, chain=-1): """ Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone! Parameters ---------- chain : int|str, list[str|int] The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it. By default removes the last chain added. Returns ------- ChainConsumer Itself, to allow chaining calls. """ if isinstance(chain, str) or isinstance(chain, int): chain = [chain] chain = sorted([i for c in chain for i in self._get_chain(c)])[::-1] assert len(chain) == len(list(set(chain))), "Error, you are trying to remove a chain more than once." for index in chain: del self.chains[index] seen = set() self._all_parameters = [p for c in self.chains for p in c.parameters if not (p in seen or seen.add(p))] # Need to reconfigure self._init_params() return self
[ "def", "remove_chain", "(", "self", ",", "chain", "=", "-", "1", ")", ":", "if", "isinstance", "(", "chain", ",", "str", ")", "or", "isinstance", "(", "chain", ",", "int", ")", ":", "chain", "=", "[", "chain", "]", "chain", "=", "sorted", "(", "[", "i", "for", "c", "in", "chain", "for", "i", "in", "self", ".", "_get_chain", "(", "c", ")", "]", ")", "[", ":", ":", "-", "1", "]", "assert", "len", "(", "chain", ")", "==", "len", "(", "list", "(", "set", "(", "chain", ")", ")", ")", ",", "\"Error, you are trying to remove a chain more than once.\"", "for", "index", "in", "chain", ":", "del", "self", ".", "chains", "[", "index", "]", "seen", "=", "set", "(", ")", "self", ".", "_all_parameters", "=", "[", "p", "for", "c", "in", "self", ".", "chains", "for", "p", "in", "c", ".", "parameters", "if", "not", "(", "p", "in", "seen", "or", "seen", ".", "add", "(", "p", ")", ")", "]", "# Need to reconfigure", "self", ".", "_init_params", "(", ")", "return", "self" ]
Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone! Parameters ---------- chain : int|str, list[str|int] The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it. By default removes the last chain added. Returns ------- ChainConsumer Itself, to allow chaining calls.
[ "Removes", "a", "chain", "from", "ChainConsumer", ".", "Calling", "this", "will", "require", "any", "configurations", "set", "to", "be", "redone!" ]
python
train
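A usage sketch, assuming the chainconsumer package version matching this source (add_chain with parameters and name is taken from that API): chains can be removed by name, index, or a list of either.

import numpy as np
from chainconsumer import ChainConsumer

c = ChainConsumer()
c.add_chain(np.random.normal(size=(5000, 2)), parameters=["$x$", "$y$"], name="run A")
c.add_chain(np.random.normal(1, 1, size=(5000, 2)), parameters=["$x$", "$y$"], name="run B")
c.remove_chain("run A")     # by name; an index or a list of names/indices also works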
ifduyue/urlfetch
urlfetch.py
https://github.com/ifduyue/urlfetch/blob/e0ea4673367c157eb832ba4ba2635306c81a61be/urlfetch.py#L848-L857
def get_proxies_from_environ():
    """Get proxies from os.environ."""
    proxies = {}
    http_proxy = os.getenv('http_proxy') or os.getenv('HTTP_PROXY')
    https_proxy = os.getenv('https_proxy') or os.getenv('HTTPS_PROXY')
    if http_proxy:
        proxies['http'] = http_proxy
    if https_proxy:
        proxies['https'] = https_proxy
    return proxies
[ "def", "get_proxies_from_environ", "(", ")", ":", "proxies", "=", "{", "}", "http_proxy", "=", "os", ".", "getenv", "(", "'http_proxy'", ")", "or", "os", ".", "getenv", "(", "'HTTP_PROXY'", ")", "https_proxy", "=", "os", ".", "getenv", "(", "'https_proxy'", ")", "or", "os", ".", "getenv", "(", "'HTTPS_PROXY'", ")", "if", "http_proxy", ":", "proxies", "[", "'http'", "]", "=", "http_proxy", "if", "https_proxy", ":", "proxies", "[", "'https'", "]", "=", "https_proxy", "return", "proxies" ]
Get proxies from os.environ.
[ "Get", "proxies", "from", "os", ".", "environ", "." ]
python
train
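A quick call sketch (the proxy address is hypothetical; urlfetch.py exposes the function at module level):

import os
import urlfetch

os.environ['HTTP_PROXY'] = 'http://127.0.0.1:3128'    # hypothetical local proxy
print(urlfetch.get_proxies_from_environ())
# {'http': 'http://127.0.0.1:3128'} (plus an 'https' entry if https_proxy is also set)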
saltstack/salt
salt/modules/acme.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/acme.py#L107-L258
def cert(name, aliases=None, email=None, webroot=None, test_cert=False, renew=None, keysize=None, server=None, owner='root', group='root', mode='0640', certname=None, preferred_challenges=None, tls_sni_01_port=None, tls_sni_01_address=None, http_01_port=None, http_01_address=None, dns_plugin=None, dns_plugin_credentials=None, dns_plugin_propagate_seconds=10): ''' Obtain/renew a certificate from an ACME CA, probably Let's Encrypt. :param name: Common Name of the certificate (DNS name of certificate) :param aliases: subjectAltNames (Additional DNS names on certificate) :param email: e-mail address for interaction with ACME provider :param webroot: True or a full path to use to use webroot. Otherwise use standalone mode :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server') :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days :param keysize: RSA key bits :param server: API endpoint to talk to :param owner: owner of the private key file :param group: group of the private key file :param mode: mode of the private key file :param certname: Name of the certificate to save :param preferred_challenges: A sorted, comma delimited list of the preferred challenge to use during authorization with the most preferred challenge listed first. :param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 443. :param tls_sni_01_address: The address the server listens to during tls-sni-01 challenge. :param http_01_port: Port used in the http-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 80. :param https_01_address: The address the server listens to during http-01 challenge. :param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare' or 'digitalocean') :param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin :param dns_plugin_propagate_seconds: Number of seconds to wait for DNS propogations before asking ACME servers to verify the DNS record. (default 10) :return: dict with 'result' True/False/None, 'comment' and certificate's expiry date ('not_after') CLI example: .. 
code-block:: bash salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public ''' cmd = [LEA, 'certonly', '--non-interactive', '--agree-tos'] supported_dns_plugins = ['cloudflare', 'digitalocean'] cert_file = _cert_file(name, 'cert') if not __salt__['file.file_exists'](cert_file): log.debug('Certificate %s does not exist (yet)', cert_file) renew = False elif needs_renewal(name, renew): log.debug('Certificate %s will be renewed', cert_file) cmd.append('--renew-by-default') renew = True if server: cmd.append('--server {0}'.format(server)) if certname: cmd.append('--cert-name {0}'.format(certname)) if test_cert: if server: return {'result': False, 'comment': 'Use either server or test_cert, not both'} cmd.append('--test-cert') if webroot: cmd.append('--authenticator webroot') if webroot is not True: cmd.append('--webroot-path {0}'.format(webroot)) elif dns_plugin in supported_dns_plugins: if dns_plugin == 'cloudflare': cmd.append('--dns-cloudflare') cmd.append('--dns-cloudflare-credentials {0}'.format(dns_plugin_credentials)) cmd.append('--dns-cloudflare-propagation-seconds {0}'.format(dns_plugin_propagate_seconds)) elif dns_plugin == 'digitalocean': cmd.append('--dns-digitalocean') cmd.append('--dns-digitalocean-credentials {0}'.format(dns_plugin_credentials)) cmd.append('--dns-digitalocean-propagation-seconds {0}'.format(dns_plugin_propagate_seconds)) else: return {'result': False, 'comment': 'DNS plugin \'{0}\' is not supported'.format(dns_plugin)} else: cmd.append('--authenticator standalone') if email: cmd.append('--email {0}'.format(email)) if keysize: cmd.append('--rsa-key-size {0}'.format(keysize)) cmd.append('--domains {0}'.format(name)) if aliases is not None: for dns in aliases: cmd.append('--domains {0}'.format(dns)) if preferred_challenges: cmd.append('--preferred-challenges {}'.format(preferred_challenges)) if tls_sni_01_port: cmd.append('--tls-sni-01-port {}'.format(tls_sni_01_port)) if tls_sni_01_address: cmd.append('--tls-sni-01-address {}'.format(tls_sni_01_address)) if http_01_port: cmd.append('--http-01-port {}'.format(http_01_port)) if http_01_address: cmd.append('--http-01-address {}'.format(http_01_address)) res = __salt__['cmd.run_all'](' '.join(cmd)) if res['retcode'] != 0: if 'expand' in res['stderr']: cmd.append('--expand') res = __salt__['cmd.run_all'](' '.join(cmd)) if res['retcode'] != 0: return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])} else: return {'result': False, 'comment': 'Certificate {0} renewal failed with:\n{1}'.format(name, res['stderr'])} if 'no action taken' in res['stdout']: comment = 'Certificate {0} unchanged'.format(cert_file) result = None elif renew: comment = 'Certificate {0} renewed'.format(name) result = True else: comment = 'Certificate {0} obtained'.format(name) result = True ret = {'comment': comment, 'not_after': expires(name), 'changes': {}, 'result': result} ret, _ = __salt__['file.check_perms'](_cert_file(name, 'privkey'), ret, owner, group, mode, follow_symlinks=True) return ret
[ "def", "cert", "(", "name", ",", "aliases", "=", "None", ",", "email", "=", "None", ",", "webroot", "=", "None", ",", "test_cert", "=", "False", ",", "renew", "=", "None", ",", "keysize", "=", "None", ",", "server", "=", "None", ",", "owner", "=", "'root'", ",", "group", "=", "'root'", ",", "mode", "=", "'0640'", ",", "certname", "=", "None", ",", "preferred_challenges", "=", "None", ",", "tls_sni_01_port", "=", "None", ",", "tls_sni_01_address", "=", "None", ",", "http_01_port", "=", "None", ",", "http_01_address", "=", "None", ",", "dns_plugin", "=", "None", ",", "dns_plugin_credentials", "=", "None", ",", "dns_plugin_propagate_seconds", "=", "10", ")", ":", "cmd", "=", "[", "LEA", ",", "'certonly'", ",", "'--non-interactive'", ",", "'--agree-tos'", "]", "supported_dns_plugins", "=", "[", "'cloudflare'", ",", "'digitalocean'", "]", "cert_file", "=", "_cert_file", "(", "name", ",", "'cert'", ")", "if", "not", "__salt__", "[", "'file.file_exists'", "]", "(", "cert_file", ")", ":", "log", ".", "debug", "(", "'Certificate %s does not exist (yet)'", ",", "cert_file", ")", "renew", "=", "False", "elif", "needs_renewal", "(", "name", ",", "renew", ")", ":", "log", ".", "debug", "(", "'Certificate %s will be renewed'", ",", "cert_file", ")", "cmd", ".", "append", "(", "'--renew-by-default'", ")", "renew", "=", "True", "if", "server", ":", "cmd", ".", "append", "(", "'--server {0}'", ".", "format", "(", "server", ")", ")", "if", "certname", ":", "cmd", ".", "append", "(", "'--cert-name {0}'", ".", "format", "(", "certname", ")", ")", "if", "test_cert", ":", "if", "server", ":", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'Use either server or test_cert, not both'", "}", "cmd", ".", "append", "(", "'--test-cert'", ")", "if", "webroot", ":", "cmd", ".", "append", "(", "'--authenticator webroot'", ")", "if", "webroot", "is", "not", "True", ":", "cmd", ".", "append", "(", "'--webroot-path {0}'", ".", "format", "(", "webroot", ")", ")", "elif", "dns_plugin", "in", "supported_dns_plugins", ":", "if", "dns_plugin", "==", "'cloudflare'", ":", "cmd", ".", "append", "(", "'--dns-cloudflare'", ")", "cmd", ".", "append", "(", "'--dns-cloudflare-credentials {0}'", ".", "format", "(", "dns_plugin_credentials", ")", ")", "cmd", ".", "append", "(", "'--dns-cloudflare-propagation-seconds {0}'", ".", "format", "(", "dns_plugin_propagate_seconds", ")", ")", "elif", "dns_plugin", "==", "'digitalocean'", ":", "cmd", ".", "append", "(", "'--dns-digitalocean'", ")", "cmd", ".", "append", "(", "'--dns-digitalocean-credentials {0}'", ".", "format", "(", "dns_plugin_credentials", ")", ")", "cmd", ".", "append", "(", "'--dns-digitalocean-propagation-seconds {0}'", ".", "format", "(", "dns_plugin_propagate_seconds", ")", ")", "else", ":", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'DNS plugin \\'{0}\\' is not supported'", ".", "format", "(", "dns_plugin", ")", "}", "else", ":", "cmd", ".", "append", "(", "'--authenticator standalone'", ")", "if", "email", ":", "cmd", ".", "append", "(", "'--email {0}'", ".", "format", "(", "email", ")", ")", "if", "keysize", ":", "cmd", ".", "append", "(", "'--rsa-key-size {0}'", ".", "format", "(", "keysize", ")", ")", "cmd", ".", "append", "(", "'--domains {0}'", ".", "format", "(", "name", ")", ")", "if", "aliases", "is", "not", "None", ":", "for", "dns", "in", "aliases", ":", "cmd", ".", "append", "(", "'--domains {0}'", ".", "format", "(", "dns", ")", ")", "if", "preferred_challenges", ":", "cmd", ".", "append", "(", "'--preferred-challenges 
{}'", ".", "format", "(", "preferred_challenges", ")", ")", "if", "tls_sni_01_port", ":", "cmd", ".", "append", "(", "'--tls-sni-01-port {}'", ".", "format", "(", "tls_sni_01_port", ")", ")", "if", "tls_sni_01_address", ":", "cmd", ".", "append", "(", "'--tls-sni-01-address {}'", ".", "format", "(", "tls_sni_01_address", ")", ")", "if", "http_01_port", ":", "cmd", ".", "append", "(", "'--http-01-port {}'", ".", "format", "(", "http_01_port", ")", ")", "if", "http_01_address", ":", "cmd", ".", "append", "(", "'--http-01-address {}'", ".", "format", "(", "http_01_address", ")", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "' '", ".", "join", "(", "cmd", ")", ")", "if", "res", "[", "'retcode'", "]", "!=", "0", ":", "if", "'expand'", "in", "res", "[", "'stderr'", "]", ":", "cmd", ".", "append", "(", "'--expand'", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "' '", ".", "join", "(", "cmd", ")", ")", "if", "res", "[", "'retcode'", "]", "!=", "0", ":", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'Certificate {0} renewal failed with:\\n{1}'", ".", "format", "(", "name", ",", "res", "[", "'stderr'", "]", ")", "}", "else", ":", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'Certificate {0} renewal failed with:\\n{1}'", ".", "format", "(", "name", ",", "res", "[", "'stderr'", "]", ")", "}", "if", "'no action taken'", "in", "res", "[", "'stdout'", "]", ":", "comment", "=", "'Certificate {0} unchanged'", ".", "format", "(", "cert_file", ")", "result", "=", "None", "elif", "renew", ":", "comment", "=", "'Certificate {0} renewed'", ".", "format", "(", "name", ")", "result", "=", "True", "else", ":", "comment", "=", "'Certificate {0} obtained'", ".", "format", "(", "name", ")", "result", "=", "True", "ret", "=", "{", "'comment'", ":", "comment", ",", "'not_after'", ":", "expires", "(", "name", ")", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "result", "}", "ret", ",", "_", "=", "__salt__", "[", "'file.check_perms'", "]", "(", "_cert_file", "(", "name", ",", "'privkey'", ")", ",", "ret", ",", "owner", ",", "group", ",", "mode", ",", "follow_symlinks", "=", "True", ")", "return", "ret" ]
Obtain/renew a certificate from an ACME CA, probably Let's Encrypt. :param name: Common Name of the certificate (DNS name of certificate) :param aliases: subjectAltNames (Additional DNS names on certificate) :param email: e-mail address for interaction with ACME provider :param webroot: True or a full path to use to use webroot. Otherwise use standalone mode :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server') :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days :param keysize: RSA key bits :param server: API endpoint to talk to :param owner: owner of the private key file :param group: group of the private key file :param mode: mode of the private key file :param certname: Name of the certificate to save :param preferred_challenges: A sorted, comma delimited list of the preferred challenge to use during authorization with the most preferred challenge listed first. :param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 443. :param tls_sni_01_address: The address the server listens to during tls-sni-01 challenge. :param http_01_port: Port used in the http-01 challenge. This only affects the port Certbot listens on. A conforming ACME server will still attempt to connect on port 80. :param https_01_address: The address the server listens to during http-01 challenge. :param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare' or 'digitalocean') :param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin :param dns_plugin_propagate_seconds: Number of seconds to wait for DNS propogations before asking ACME servers to verify the DNS record. (default 10) :return: dict with 'result' True/False/None, 'comment' and certificate's expiry date ('not_after') CLI example: .. code-block:: bash salt 'gitlab.example.com' acme.cert dev.example.com "[gitlab.example.com]" test_cert=True renew=14 webroot=/opt/gitlab/embedded/service/gitlab-rails/public
[ "Obtain", "/", "renew", "a", "certificate", "from", "an", "ACME", "CA", "probably", "Let", "s", "Encrypt", "." ]
python
train
tensorflow/probability
tensorflow_probability/__init__.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/__init__.py#L32-L65
def _ensure_tf_install(): # pylint: disable=g-statement-before-imports """Attempt to import tensorflow, and ensure its version is sufficient. Raises: ImportError: if either tensorflow is not importable or its version is inadequate. """ try: import tensorflow as tf except ImportError: # Print more informative error message, then reraise. print("\n\nFailed to import TensorFlow. Please note that TensorFlow is not " "installed by default when you install TensorFlow Probability. This " "is so that users can decide whether to install the GPU-enabled " "TensorFlow package. To use TensorFlow Probability, please install " "the most recent version of TensorFlow, by following instructions at " "https://tensorflow.org/install.\n\n") raise import distutils.version # # Update this whenever we need to depend on a newer TensorFlow release. # required_tensorflow_version = "1.13" if (distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(required_tensorflow_version)): raise ImportError( "This version of TensorFlow Probability requires TensorFlow " "version >= {required}; Detected an installation of version {present}. " "Please upgrade TensorFlow to proceed.".format( required=required_tensorflow_version, present=tf.__version__))
[ "def", "_ensure_tf_install", "(", ")", ":", "# pylint: disable=g-statement-before-imports", "try", ":", "import", "tensorflow", "as", "tf", "except", "ImportError", ":", "# Print more informative error message, then reraise.", "print", "(", "\"\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not \"", "\"installed by default when you install TensorFlow Probability. This \"", "\"is so that users can decide whether to install the GPU-enabled \"", "\"TensorFlow package. To use TensorFlow Probability, please install \"", "\"the most recent version of TensorFlow, by following instructions at \"", "\"https://tensorflow.org/install.\\n\\n\"", ")", "raise", "import", "distutils", ".", "version", "#", "# Update this whenever we need to depend on a newer TensorFlow release.", "#", "required_tensorflow_version", "=", "\"1.13\"", "if", "(", "distutils", ".", "version", ".", "LooseVersion", "(", "tf", ".", "__version__", ")", "<", "distutils", ".", "version", ".", "LooseVersion", "(", "required_tensorflow_version", ")", ")", ":", "raise", "ImportError", "(", "\"This version of TensorFlow Probability requires TensorFlow \"", "\"version >= {required}; Detected an installation of version {present}. \"", "\"Please upgrade TensorFlow to proceed.\"", ".", "format", "(", "required", "=", "required_tensorflow_version", ",", "present", "=", "tf", ".", "__version__", ")", ")" ]
Attempt to import tensorflow, and ensure its version is sufficient. Raises: ImportError: if either tensorflow is not importable or its version is inadequate.
[ "Attempt", "to", "import", "tensorflow", "and", "ensure", "its", "version", "is", "sufficient", "." ]
python
test
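The same guard pattern written generically (a hypothetical helper; it uses distutils.version exactly as the record above does), followed by a call against a stdlib module that carries a __version__ attribute:

import distutils.version

def ensure_at_least(module, required):
    if (distutils.version.LooseVersion(module.__version__) <
            distutils.version.LooseVersion(required)):
        raise ImportError("need {} >= {}, found {}".format(
            module.__name__, required, module.__version__))

import json
ensure_at_least(json, "1.0")    # json.__version__ is "2.0.9" on CPython, so this passes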
omederos/pyspinner
spinning/spinning.py
https://github.com/omederos/pyspinner/blob/4615d92e669942c48d5542a23ddf6d40b206d9d5/spinning/spinning.py#L27-L49
def unique(text): """ Return an unique text @type text: str @param text: Text written used spin syntax. @return: An unique text # Generate an unique sentence >>> unique('The {quick|fast} {brown|gray|red} fox jumped over the lazy dog.') 'The quick red fox jumped over the lazy dog' """ # check if the text is correct correct, error = _is_correct(text) if not correct: raise Exception(error) s = [] _all_unique_texts(text, s) return s[0]
[ "def", "unique", "(", "text", ")", ":", "# check if the text is correct", "correct", ",", "error", "=", "_is_correct", "(", "text", ")", "if", "not", "correct", ":", "raise", "Exception", "(", "error", ")", "s", "=", "[", "]", "_all_unique_texts", "(", "text", ",", "s", ")", "return", "s", "[", "0", "]" ]
Return an unique text @type text: str @param text: Text written used spin syntax. @return: An unique text # Generate an unique sentence >>> unique('The {quick|fast} {brown|gray|red} fox jumped over the lazy dog.') 'The quick red fox jumped over the lazy dog'
[ "Return", "an", "unique", "text" ]
python
train
LasLabs/python-helpscout
helpscout/apis/conversations.py
https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L93-L117
def create_attachment(cls, session, attachment): """Create an attachment. An attachment must be sent to the API before it can be used in a thread. Use this method to create the attachment, then use the resulting hash when creating a thread. Note that HelpScout only supports attachments of 10MB or lower. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be created. Returns: helpscout.models.Attachment: The newly created attachment (hash property only). Use this hash when associating the attachment with a new thread. """ return super(Conversations, cls).create( session, attachment, endpoint_override='/attachments.json', out_type=Attachment, )
[ "def", "create_attachment", "(", "cls", ",", "session", ",", "attachment", ")", ":", "return", "super", "(", "Conversations", ",", "cls", ")", ".", "create", "(", "session", ",", "attachment", ",", "endpoint_override", "=", "'/attachments.json'", ",", "out_type", "=", "Attachment", ",", ")" ]
Create an attachment. An attachment must be sent to the API before it can be used in a thread. Use this method to create the attachment, then use the resulting hash when creating a thread. Note that HelpScout only supports attachments of 10MB or lower. Args: session (requests.sessions.Session): Authenticated session. attachment (helpscout.models.Attachment): The attachment to be created. Returns: helpscout.models.Attachment: The newly created attachment (hash property only). Use this hash when associating the attachment with a new thread.
[ "Create", "an", "attachment", "." ]
python
train
streamlink/streamlink
src/streamlink/plugin/api/http_session.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/http_session.py#L106-L108
def xml(cls, res, *args, **kwargs):
    """Parses XML from a response."""
    return parse_xml(res.text, *args, **kwargs)
[ "def", "xml", "(", "cls", ",", "res", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "parse_xml", "(", "res", ".", "text", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Parses XML from a response.
[ "Parses", "XML", "from", "a", "response", "." ]
python
test
radhermit/vimball
vimball/base.py
https://github.com/radhermit/vimball/blob/3998bdb8d8c4852a388a259778f971f562f9ef37/vimball/base.py#L82-L102
def files(self):
    """Yields archive file information."""
    # try new file header format first, then fallback on old
    for header in (r"(.*)\t\[\[\[1\n", r"^(\d+)\n$"):
        header = re.compile(header)
        filename = None
        self.fd.seek(0)
        line = self.readline()
        while line:
            m = header.match(line)
            if m is not None:
                filename = m.group(1)
                try:
                    filelines = int(self.readline().rstrip())
                except ValueError:
                    raise ArchiveError('invalid archive format')
                filestart = self.fd.tell()
                yield (filename, filelines, filestart)
            line = self.readline()
        if filename is not None:
            break
[ "def", "files", "(", "self", ")", ":", "# try new file header format first, then fallback on old", "for", "header", "in", "(", "r\"(.*)\\t\\[\\[\\[1\\n\"", ",", "r\"^(\\d+)\\n$\"", ")", ":", "header", "=", "re", ".", "compile", "(", "header", ")", "filename", "=", "None", "self", ".", "fd", ".", "seek", "(", "0", ")", "line", "=", "self", ".", "readline", "(", ")", "while", "line", ":", "m", "=", "header", ".", "match", "(", "line", ")", "if", "m", "is", "not", "None", ":", "filename", "=", "m", ".", "group", "(", "1", ")", "try", ":", "filelines", "=", "int", "(", "self", ".", "readline", "(", ")", ".", "rstrip", "(", ")", ")", "except", "ValueError", ":", "raise", "ArchiveError", "(", "'invalid archive format'", ")", "filestart", "=", "self", ".", "fd", ".", "tell", "(", ")", "yield", "(", "filename", ",", "filelines", ",", "filestart", ")", "line", "=", "self", ".", "readline", "(", ")", "if", "filename", "is", "not", "None", ":", "break" ]
Yields archive file information.
[ "Yields", "archive", "file", "information", "." ]
python
train
jendrikseipp/vulture
vulture/utils.py
https://github.com/jendrikseipp/vulture/blob/fed11fb7e7ed065058a9fb1acd10052ece37f984/vulture/utils.py#L70-L86
def get_modules(paths, toplevel=True):
    """Take files from the command line even if they don't end with .py."""
    modules = []
    for path in paths:
        path = os.path.abspath(path)
        if toplevel and path.endswith('.pyc'):
            sys.exit('.pyc files are not supported: {0}'.format(path))
        if os.path.isfile(path) and (path.endswith('.py') or toplevel):
            modules.append(path)
        elif os.path.isdir(path):
            subpaths = [
                os.path.join(path, filename)
                for filename in sorted(os.listdir(path))]
            modules.extend(get_modules(subpaths, toplevel=False))
        elif toplevel:
            sys.exit('Error: {0} could not be found.'.format(path))
    return modules
[ "def", "get_modules", "(", "paths", ",", "toplevel", "=", "True", ")", ":", "modules", "=", "[", "]", "for", "path", "in", "paths", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "toplevel", "and", "path", ".", "endswith", "(", "'.pyc'", ")", ":", "sys", ".", "exit", "(", "'.pyc files are not supported: {0}'", ".", "format", "(", "path", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", "and", "(", "path", ".", "endswith", "(", "'.py'", ")", "or", "toplevel", ")", ":", "modules", ".", "append", "(", "path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "subpaths", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "for", "filename", "in", "sorted", "(", "os", ".", "listdir", "(", "path", ")", ")", "]", "modules", ".", "extend", "(", "get_modules", "(", "subpaths", ",", "toplevel", "=", "False", ")", ")", "elif", "toplevel", ":", "sys", ".", "exit", "(", "'Error: {0} could not be found.'", ".", "format", "(", "path", ")", ")", "return", "modules" ]
Take files from the command line even if they don't end with .py.
[ "Take", "files", "from", "the", "command", "line", "even", "if", "they", "don", "t", "end", "with", ".", "py", "." ]
python
train
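A quick call sketch (assuming vulture is installed):

from vulture.utils import get_modules

modules = get_modules(['.'])    # every *.py file below the current directory
print(len(modules))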
saltstack/salt
salt/modules/boto_apigateway.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_apigateway.py#L753-L769
def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None):
    '''
    Deletes API deployment for a given restApiId and deploymentID

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.delete_api_deployment restApiId deploymentId

    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_deployment(restApiId=restApiId, deploymentId=deploymentId)
        return {'deleted': True}
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "delete_api_deployment", "(", "restApiId", ",", "deploymentId", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_deployment", "(", "restApiId", "=", "restApiId", ",", "deploymentId", "=", "deploymentId", ")", "return", "{", "'deleted'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Deletes API deployment for a given restApiId and deploymentID

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.delete_api_deployment restApiId deploymentId
[ "Deletes", "API", "deployment", "for", "a", "given", "restApiId", "and", "deploymentID" ]
python
train
mbi/django-simple-captcha
captcha/fields.py
https://github.com/mbi/django-simple-captcha/blob/e96cd8f63e41e658d103d12d6486b34195aee555/captcha/fields.py#L152-L164
def _direct_render(self, name, attrs): """Render the widget the old way - using field_template or output_format.""" context = { 'image': self.image_url(), 'name': name, 'key': self._key, 'id': u'%s_%s' % (self.id_prefix, attrs.get('id')) if self.id_prefix else attrs.get('id'), 'audio': self.audio_url(), } self.image_and_audio = render_to_string(settings.CAPTCHA_IMAGE_TEMPLATE, context) self.hidden_field = render_to_string(settings.CAPTCHA_HIDDEN_FIELD_TEMPLATE, context) self.text_field = render_to_string(settings.CAPTCHA_TEXT_FIELD_TEMPLATE, context) return self.format_output(None)
[ "def", "_direct_render", "(", "self", ",", "name", ",", "attrs", ")", ":", "context", "=", "{", "'image'", ":", "self", ".", "image_url", "(", ")", ",", "'name'", ":", "name", ",", "'key'", ":", "self", ".", "_key", ",", "'id'", ":", "u'%s_%s'", "%", "(", "self", ".", "id_prefix", ",", "attrs", ".", "get", "(", "'id'", ")", ")", "if", "self", ".", "id_prefix", "else", "attrs", ".", "get", "(", "'id'", ")", ",", "'audio'", ":", "self", ".", "audio_url", "(", ")", ",", "}", "self", ".", "image_and_audio", "=", "render_to_string", "(", "settings", ".", "CAPTCHA_IMAGE_TEMPLATE", ",", "context", ")", "self", ".", "hidden_field", "=", "render_to_string", "(", "settings", ".", "CAPTCHA_HIDDEN_FIELD_TEMPLATE", ",", "context", ")", "self", ".", "text_field", "=", "render_to_string", "(", "settings", ".", "CAPTCHA_TEXT_FIELD_TEMPLATE", ",", "context", ")", "return", "self", ".", "format_output", "(", "None", ")" ]
Render the widget the old way - using field_template or output_format.
[ "Render", "the", "widget", "the", "old", "way", "-", "using", "field_template", "or", "output_format", "." ]
python
train
ethereum/web3.py
web3/iban.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/iban.py#L105-L118
def fromAddress(address):
    """
    This method should be used to create
    an iban object from ethereum address

    @method fromAddress
    @param {String} address
    @return {Iban} the IBAN object
    """
    validate_address(address)

    address_as_integer = int(address, 16)
    address_as_base36 = baseN(address_as_integer, 36)
    padded = pad_left_hex(address_as_base36, 15)
    return Iban.fromBban(padded.upper())
[ "def", "fromAddress", "(", "address", ")", ":", "validate_address", "(", "address", ")", "address_as_integer", "=", "int", "(", "address", ",", "16", ")", "address_as_base36", "=", "baseN", "(", "address_as_integer", ",", "36", ")", "padded", "=", "pad_left_hex", "(", "address_as_base36", ",", "15", ")", "return", "Iban", ".", "fromBban", "(", "padded", ".", "upper", "(", ")", ")" ]
This method should be used to create an iban object from ethereum address @method fromAddress @param {String} address @return {Iban} the IBAN object
[ "This", "method", "should", "be", "used", "to", "create", "an", "iban", "object", "from", "ethereum", "address" ]
python
train
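A usage sketch; the address is an EIP-55 checksum test vector so it passes validate_address, and the toString() call assumes this module keeps the web3.js-style method names.

from web3.iban import Iban

iban = Iban.fromAddress('0x52908400098527886E0F7030069857D2E4169EE7')
print(iban.toString())    # an ICAP identifier derived from the address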
samjabrahams/anchorhub
anchorhub/util/stripprefix.py
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/util/stripprefix.py#L23-L32
def strip_prefix_from_list(list, strip):
    """
    Goes through a list of strings and removes the specified prefix from the
    beginning of each string in place.

    :param list: a list of strings to be modified in place
    :param strip: a string specifying the prefix to remove from the list
    """
    for i in range(len(list)):
        list[i] = strip_prefix(list[i], strip)
[ "def", "strip_prefix_from_list", "(", "list", ",", "strip", ")", ":", "for", "i", "in", "range", "(", "len", "(", "list", ")", ")", ":", "list", "[", "i", "]", "=", "strip_prefix", "(", "list", "[", "i", "]", ",", "strip", ")" ]
Goes through a list of strings and removes the specified prefix from the beginning of each string in place. :param list: a list of strings to be modified in place :param strip: a string specifying the prefix to remove from the list
[ "Goes", "through", "a", "list", "of", "strings", "and", "removes", "the", "specified", "prefix", "from", "the", "beginning", "of", "each", "string", "in", "place", "." ]
python
train
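An in-place usage sketch (it assumes strip_prefix leaves strings that do not start with the prefix untouched):

from anchorhub.util.stripprefix import strip_prefix_from_list

paths = ['docs/intro.md', 'docs/usage.md', 'README.md']
strip_prefix_from_list(paths, 'docs/')
print(paths)    # ['intro.md', 'usage.md', 'README.md']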
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L947-L953
def removeBiosample(self, biosample):
    """
    Removes the specified biosample from this repository.
    """
    q = models.Biosample.delete().where(
        models.Biosample.id == biosample.getId())
    q.execute()
[ "def", "removeBiosample", "(", "self", ",", "biosample", ")", ":", "q", "=", "models", ".", "Biosample", ".", "delete", "(", ")", ".", "where", "(", "models", ".", "Biosample", ".", "id", "==", "biosample", ".", "getId", "(", ")", ")", "q", ".", "execute", "(", ")" ]
Removes the specified biosample from this repository.
[ "Removes", "the", "specified", "biosample", "from", "this", "repository", "." ]
python
train
patrickfuller/jgraph
python/force_directed_layout.py
https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/force_directed_layout.py#L10-L59
def run(edges, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True): """Runs a force-directed-layout algorithm on the input graph. iterations - Number of FDL iterations to run in coordinate generation force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes) dampening - Multiplier to reduce force applied to nodes max_velocity - Maximum distance a node can move in one step max_distance - The maximum distance considered for interactions """ # Get a list of node ids from the edge data nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges) # Convert to a data-storing object and initialize some values d = 3 if is_3d else 2 nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes} # Repeat n times (is there a more Pythonic way to do this?) for _ in repeat(None, iterations): # Add in Coulomb-esque node-node repulsive forces for node1, node2 in combinations(nodes.values(), 2): _coulomb(node1, node2, force_strength, max_distance) # And Hooke-esque edge spring forces for edge in edges: _hooke(nodes[edge['source']], nodes[edge['target']], force_strength * edge.get('size', 1), max_distance) # Move by resultant force for node in nodes.values(): # Constrain the force to the bounds specified by input parameter force = [_constrain(dampening * f, -max_velocity, max_velocity) for f in node['force']] # Update velocities and reset force node['velocity'] = [v + dv for v, dv in zip(node['velocity'], force)] node['force'] = [0] * d # Clean and return for node in nodes.values(): del node['force'] node['location'] = node['velocity'] del node['velocity'] # Even if it's 2D, let's specify three dimensions if not is_3d: node['location'] += [0.0] return nodes
[ "def", "run", "(", "edges", ",", "iterations", "=", "1000", ",", "force_strength", "=", "5.0", ",", "dampening", "=", "0.01", ",", "max_velocity", "=", "2.0", ",", "max_distance", "=", "50", ",", "is_3d", "=", "True", ")", ":", "# Get a list of node ids from the edge data", "nodes", "=", "set", "(", "e", "[", "'source'", "]", "for", "e", "in", "edges", ")", "|", "set", "(", "e", "[", "'target'", "]", "for", "e", "in", "edges", ")", "# Convert to a data-storing object and initialize some values", "d", "=", "3", "if", "is_3d", "else", "2", "nodes", "=", "{", "n", ":", "{", "'velocity'", ":", "[", "0.0", "]", "*", "d", ",", "'force'", ":", "[", "0.0", "]", "*", "d", "}", "for", "n", "in", "nodes", "}", "# Repeat n times (is there a more Pythonic way to do this?)", "for", "_", "in", "repeat", "(", "None", ",", "iterations", ")", ":", "# Add in Coulomb-esque node-node repulsive forces", "for", "node1", ",", "node2", "in", "combinations", "(", "nodes", ".", "values", "(", ")", ",", "2", ")", ":", "_coulomb", "(", "node1", ",", "node2", ",", "force_strength", ",", "max_distance", ")", "# And Hooke-esque edge spring forces", "for", "edge", "in", "edges", ":", "_hooke", "(", "nodes", "[", "edge", "[", "'source'", "]", "]", ",", "nodes", "[", "edge", "[", "'target'", "]", "]", ",", "force_strength", "*", "edge", ".", "get", "(", "'size'", ",", "1", ")", ",", "max_distance", ")", "# Move by resultant force", "for", "node", "in", "nodes", ".", "values", "(", ")", ":", "# Constrain the force to the bounds specified by input parameter", "force", "=", "[", "_constrain", "(", "dampening", "*", "f", ",", "-", "max_velocity", ",", "max_velocity", ")", "for", "f", "in", "node", "[", "'force'", "]", "]", "# Update velocities and reset force", "node", "[", "'velocity'", "]", "=", "[", "v", "+", "dv", "for", "v", ",", "dv", "in", "zip", "(", "node", "[", "'velocity'", "]", ",", "force", ")", "]", "node", "[", "'force'", "]", "=", "[", "0", "]", "*", "d", "# Clean and return", "for", "node", "in", "nodes", ".", "values", "(", ")", ":", "del", "node", "[", "'force'", "]", "node", "[", "'location'", "]", "=", "node", "[", "'velocity'", "]", "del", "node", "[", "'velocity'", "]", "# Even if it's 2D, let's specify three dimensions", "if", "not", "is_3d", ":", "node", "[", "'location'", "]", "+=", "[", "0.0", "]", "return", "nodes" ]
Runs a force-directed-layout algorithm on the input graph. iterations - Number of FDL iterations to run in coordinate generation force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes) dampening - Multiplier to reduce force applied to nodes max_velocity - Maximum distance a node can move in one step max_distance - The maximum distance considered for interactions
[ "Runs", "a", "force", "-", "directed", "-", "layout", "algorithm", "on", "the", "input", "graph", "." ]
python
train
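A small driver sketch, assuming the script above is importable as force_directed_layout; edge dicts only need 'source', 'target' and an optional 'size', as the docstring describes.

import force_directed_layout as fdl

edges = [{'source': 'a', 'target': 'b'},
         {'source': 'b', 'target': 'c', 'size': 2}]
layout = fdl.run(edges, iterations=200, is_3d=False)
print(layout['a']['location'])    # [x, y, 0.0]; the third coordinate is padded for 2-D runs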
markovmodel/msmtools
msmtools/analysis/dense/correlations.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/dense/correlations.py#L55-L124
def time_correlation_direct_by_mtx_vec_prod(P, mu, obs1, obs2=None, time=1, start_values=None, return_P_k_obs=False): r"""Compute time-correlation of obs1, or time-cross-correlation with obs2. The time-correlation at time=k is computed by the matrix-vector expression: cor(k) = obs1' diag(pi) P^k obs2 Parameters ---------- P : ndarray, shape=(n, n) or scipy.sparse matrix Transition matrix obs1 : ndarray, shape=(n) Vector representing observable 1 on discrete states obs2 : ndarray, shape=(n) Vector representing observable 2 on discrete states. If not given, the autocorrelation of obs1 will be computed mu : ndarray, shape=(n) stationary distribution vector. time : int time point at which the (auto)correlation will be evaluated. start_values : (time, ndarray <P, <P, obs2>>_t) start iteration of calculation of matrix power product, with this values. only useful when calling this function out of a loop over times. return_P_k_obs : bool if True, the dot product <P^time, obs2> will be returned for further calculations. Returns ------- cor(k) : float correlation between observations """ # input checks if not (type(time) == int): if not (type(time) == np.int64): raise TypeError("given time (%s) is not an integer, but has type: %s" % (str(time), type(time))) if obs1.shape[0] != P.shape[0]: raise ValueError("observable shape not compatible with given matrix") if obs2 is None: obs2 = obs1 # multiply element-wise obs1 and pi. this is obs1' diag(pi) l = np.multiply(obs1, mu) # raise transition matrix to power of time by substituting dot product # <Pk, obs2> with something like <P, <P, obs2>>. # This saves a lot of matrix matrix multiplications. if start_values: # begin with a previous calculated val P_i_obs = start_values[1] # calculate difference properly! time_prev = start_values[0] t_diff = time - time_prev r = range(t_diff) else: if time >= 2: P_i_obs = np.dot(P, np.dot(P, obs2)) # vector <P, <P, obs2> := P^2 * obs r = range(time - 2) elif time == 1: P_i_obs = np.dot(P, obs2) # P^1 = P*obs r = range(0) elif time == 0: # P^0 = I => I*obs2 = obs2 P_i_obs = obs2 r = range(0) for k in r: # since we already substituted started with 0 P_i_obs = np.dot(P, P_i_obs) corr = np.dot(l, P_i_obs) if return_P_k_obs: return corr, (time, P_i_obs) else: return corr
[ "def", "time_correlation_direct_by_mtx_vec_prod", "(", "P", ",", "mu", ",", "obs1", ",", "obs2", "=", "None", ",", "time", "=", "1", ",", "start_values", "=", "None", ",", "return_P_k_obs", "=", "False", ")", ":", "# input checks", "if", "not", "(", "type", "(", "time", ")", "==", "int", ")", ":", "if", "not", "(", "type", "(", "time", ")", "==", "np", ".", "int64", ")", ":", "raise", "TypeError", "(", "\"given time (%s) is not an integer, but has type: %s\"", "%", "(", "str", "(", "time", ")", ",", "type", "(", "time", ")", ")", ")", "if", "obs1", ".", "shape", "[", "0", "]", "!=", "P", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "\"observable shape not compatible with given matrix\"", ")", "if", "obs2", "is", "None", ":", "obs2", "=", "obs1", "# multiply element-wise obs1 and pi. this is obs1' diag(pi)", "l", "=", "np", ".", "multiply", "(", "obs1", ",", "mu", ")", "# raise transition matrix to power of time by substituting dot product", "# <Pk, obs2> with something like <P, <P, obs2>>.", "# This saves a lot of matrix matrix multiplications.", "if", "start_values", ":", "# begin with a previous calculated val", "P_i_obs", "=", "start_values", "[", "1", "]", "# calculate difference properly!", "time_prev", "=", "start_values", "[", "0", "]", "t_diff", "=", "time", "-", "time_prev", "r", "=", "range", "(", "t_diff", ")", "else", ":", "if", "time", ">=", "2", ":", "P_i_obs", "=", "np", ".", "dot", "(", "P", ",", "np", ".", "dot", "(", "P", ",", "obs2", ")", ")", "# vector <P, <P, obs2> := P^2 * obs", "r", "=", "range", "(", "time", "-", "2", ")", "elif", "time", "==", "1", ":", "P_i_obs", "=", "np", ".", "dot", "(", "P", ",", "obs2", ")", "# P^1 = P*obs", "r", "=", "range", "(", "0", ")", "elif", "time", "==", "0", ":", "# P^0 = I => I*obs2 = obs2", "P_i_obs", "=", "obs2", "r", "=", "range", "(", "0", ")", "for", "k", "in", "r", ":", "# since we already substituted started with 0", "P_i_obs", "=", "np", ".", "dot", "(", "P", ",", "P_i_obs", ")", "corr", "=", "np", ".", "dot", "(", "l", ",", "P_i_obs", ")", "if", "return_P_k_obs", ":", "return", "corr", ",", "(", "time", ",", "P_i_obs", ")", "else", ":", "return", "corr" ]
r"""Compute time-correlation of obs1, or time-cross-correlation with obs2. The time-correlation at time=k is computed by the matrix-vector expression: cor(k) = obs1' diag(pi) P^k obs2 Parameters ---------- P : ndarray, shape=(n, n) or scipy.sparse matrix Transition matrix obs1 : ndarray, shape=(n) Vector representing observable 1 on discrete states obs2 : ndarray, shape=(n) Vector representing observable 2 on discrete states. If not given, the autocorrelation of obs1 will be computed mu : ndarray, shape=(n) stationary distribution vector. time : int time point at which the (auto)correlation will be evaluated. start_values : (time, ndarray <P, <P, obs2>>_t) start iteration of calculation of matrix power product, with this values. only useful when calling this function out of a loop over times. return_P_k_obs : bool if True, the dot product <P^time, obs2> will be returned for further calculations. Returns ------- cor(k) : float correlation between observations
[ "r", "Compute", "time", "-", "correlation", "of", "obs1", "or", "time", "-", "cross", "-", "correlation", "with", "obs2", "." ]
python
train
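A usage sketch for `time_correlation_direct_by_mtx_vec_prod`, reusing `start_values` across a loop over lag times as the docstring suggests; the transition matrix, stationary vector, and observable below are invented for illustration, and the function is assumed to be in scope.

import numpy as np

# Hypothetical 2-state transition matrix with stationary distribution mu
# (mu @ P == mu) and a simple observable on the two states.
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
mu = np.array([2.0 / 3.0, 1.0 / 3.0])
obs = np.array([1.0, -1.0])

correlations = []
start = None
for t in [1, 2, 5, 10]:
    # Passing the previous (time, P^k @ obs) tuple back in avoids recomputing
    # the matrix power from scratch at each lag time.
    corr, start = time_correlation_direct_by_mtx_vec_prod(
        P, mu, obs, time=t, start_values=start, return_P_k_obs=True)
    correlations.append(corr)

print(correlations)  # autocorrelation of obs at increasing lag times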
awslabs/aws-sam-cli
samcli/local/lambdafn/env_vars.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/lambdafn/env_vars.py#L77-L104
def resolve(self): """ Resolves the values from different sources and returns a dict of environment variables to use when running the function locally. :return dict: Dict where key is the variable name and value is the value of the variable. Both key and values are strings """ # AWS_* variables must always be passed to the function, but user has the choice to override them result = self._get_aws_variables() # Default value for the variable gets lowest priority for name, value in self.variables.items(): # Shell environment values, second priority if name in self.shell_env_values: value = self.shell_env_values[name] # Overridden values, highest priority if name in self.override_values: value = self.override_values[name] # Any value must be a string when passed to Lambda runtime. # Runtime expects a Map<String, String> for environment variables result[name] = self._stringify_value(value) return result
[ "def", "resolve", "(", "self", ")", ":", "# AWS_* variables must always be passed to the function, but user has the choice to override them", "result", "=", "self", ".", "_get_aws_variables", "(", ")", "# Default value for the variable gets lowest priority", "for", "name", ",", "value", "in", "self", ".", "variables", ".", "items", "(", ")", ":", "# Shell environment values, second priority", "if", "name", "in", "self", ".", "shell_env_values", ":", "value", "=", "self", ".", "shell_env_values", "[", "name", "]", "# Overridden values, highest priority", "if", "name", "in", "self", ".", "override_values", ":", "value", "=", "self", ".", "override_values", "[", "name", "]", "# Any value must be a string when passed to Lambda runtime.", "# Runtime expects a Map<String, String> for environment variables", "result", "[", "name", "]", "=", "self", ".", "_stringify_value", "(", "value", ")", "return", "result" ]
Resolves the values from different sources and returns a dict of environment variables to use when running the function locally. :return dict: Dict where key is the variable name and value is the value of the variable. Both keys and values are strings
[ "Resolves", "the", "values", "from", "different", "sources", "and", "returns", "a", "dict", "of", "environment", "variables", "to", "use", "when", "running", "the", "function", "locally", "." ]
python
train
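The precedence applied by the `resolve` method above can be illustrated with a standalone sketch; the variable names and values are assumptions for demonstration, and the snippet deliberately does not construct the surrounding class, whose constructor is not shown in this record.

# Defaults from the template get lowest priority, shell environment values
# come second, and explicit overrides win.
defaults = {'TABLE_NAME': 'dev-table', 'STAGE': 'dev', 'TIMEOUT': 30}
shell_env = {'STAGE': 'test'}
overrides = {'TABLE_NAME': 'local-table'}

resolved = {}
for name, value in defaults.items():
    if name in shell_env:
        value = shell_env[name]
    if name in overrides:
        value = overrides[name]
    resolved[name] = str(value)  # the Lambda runtime expects string values

print(resolved)  # {'TABLE_NAME': 'local-table', 'STAGE': 'test', 'TIMEOUT': '30'}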
tanghaibao/jcvi
jcvi/compara/synfind.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synfind.py#L46-L66
def get_flanker(group, query): """ >>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385) ((373, 15176), (400, 15193), True) >>> get_flanker([(124, 13639), (137, 13625)], 138) ((137, 13625), (137, 13625), False) """ group.sort() pos = bisect_left(group, (query, 0)) left_flanker = group[0] if pos == 0 else group[pos-1] right_flanker = group[-1] if pos == len(group) else group[pos] # pick the closest flanker if abs(query - left_flanker[0]) < abs(query - right_flanker[0]): flanker, other = left_flanker, right_flanker else: flanker, other = right_flanker, left_flanker flanked = not (pos == 0 or pos == len(group) or flanker == query) return flanker, other, flanked
[ "def", "get_flanker", "(", "group", ",", "query", ")", ":", "group", ".", "sort", "(", ")", "pos", "=", "bisect_left", "(", "group", ",", "(", "query", ",", "0", ")", ")", "left_flanker", "=", "group", "[", "0", "]", "if", "pos", "==", "0", "else", "group", "[", "pos", "-", "1", "]", "right_flanker", "=", "group", "[", "-", "1", "]", "if", "pos", "==", "len", "(", "group", ")", "else", "group", "[", "pos", "]", "# pick the closest flanker", "if", "abs", "(", "query", "-", "left_flanker", "[", "0", "]", ")", "<", "abs", "(", "query", "-", "right_flanker", "[", "0", "]", ")", ":", "flanker", ",", "other", "=", "left_flanker", ",", "right_flanker", "else", ":", "flanker", ",", "other", "=", "right_flanker", ",", "left_flanker", "flanked", "=", "not", "(", "pos", "==", "0", "or", "pos", "==", "len", "(", "group", ")", "or", "flanker", "==", "query", ")", "return", "flanker", ",", "other", ",", "flanked" ]
>>> get_flanker([(370, 15184), (372, 15178), (373, 15176), (400, 15193)], 385) ((373, 15176), (400, 15193), True) >>> get_flanker([(124, 13639), (137, 13625)], 138) ((137, 13625), (137, 13625), False)
[ ">>>", "get_flanker", "(", "[", "(", "370", "15184", ")", "(", "372", "15178", ")", "(", "373", "15176", ")", "(", "400", "15193", ")", "]", "385", ")", "((", "373", "15176", ")", "(", "400", "15193", ")", "True", ")" ]
python
train
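A usage sketch for `get_flanker`, mirroring the first doctest in the record's docstring; the tuples are treated simply as sortable (position, id) pairs, and the function is assumed to be in scope.

group = [(370, 15184), (372, 15178), (373, 15176), (400, 15193)]
flanker, other, flanked = get_flanker(group, 385)

print(flanker)  # (373, 15176) -- the closer neighbour of position 385
print(other)    # (400, 15193) -- the other side of the bracket
print(flanked)  # True, since 385 falls strictly inside the group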
skylander86/uriutils
uriutils/storages.py
https://github.com/skylander86/uriutils/blob/e756d9483ee884973bf3a0c9ad27ae362fbe7fc6/uriutils/storages.py#L421-L433
def list_dir(self): """ Non-recursive file listing. :returns: A generator over files in this "directory" for efficiency. """ bucket = self.blob.bucket prefix = self.blob.name if not prefix.endswith('/'): prefix += '/' for blob in bucket.list_blobs(prefix=prefix, delimiter='/'): yield 'gs://{}/{}'.format(blob.bucket.name, blob.name)
[ "def", "list_dir", "(", "self", ")", ":", "bucket", "=", "self", ".", "blob", ".", "bucket", "prefix", "=", "self", ".", "blob", ".", "name", "if", "not", "prefix", ".", "endswith", "(", "'/'", ")", ":", "prefix", "+=", "'/'", "for", "blob", "in", "bucket", ".", "list_blobs", "(", "prefix", "=", "prefix", ",", "delimiter", "=", "'/'", ")", ":", "yield", "'gs://{}/{}'", ".", "format", "(", "blob", ".", "bucket", ".", "name", ",", "blob", ".", "name", ")" ]
Non-recursive file listing. :returns: A generator over files in this "directory" for efficiency.
[ "Non", "-", "recursive", "file", "listing", "." ]
python
train
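The non-recursive, delimiter-based listing pattern that `list_dir` wraps can be sketched directly against google-cloud-storage; the bucket name and prefix below are invented, and valid credentials are assumed.

from google.cloud import storage

client = storage.Client()
bucket = client.bucket('example-bucket')

prefix = 'reports/2020/'  # list_dir ensures the prefix ends with '/'
for blob in bucket.list_blobs(prefix=prefix, delimiter='/'):
    # delimiter='/' restricts the listing to a single "directory" level
    print('gs://{}/{}'.format(blob.bucket.name, blob.name))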
Bystroushaak/pyDHTMLParser
src/dhtmlparser/htmlelement/html_parser.py
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/html_parser.py#L354-L367
def isOpeningTag(self): """ Detect whether this tag is opening or not. Returns: bool: True if it is opening. """ if self.isTag() and \ not self.isComment() and \ not self.isEndTag() and \ not self.isNonPairTag(): return True return False
[ "def", "isOpeningTag", "(", "self", ")", ":", "if", "self", ".", "isTag", "(", ")", "and", "not", "self", ".", "isComment", "(", ")", "and", "not", "self", ".", "isEndTag", "(", ")", "and", "not", "self", ".", "isNonPairTag", "(", ")", ":", "return", "True", "return", "False" ]
Detect whether this tag is opening or not. Returns: bool: True if it is opening.
[ "Detect", "whether", "this", "tag", "is", "opening", "or", "not", "." ]
python
train
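A usage sketch for `isOpeningTag`; it assumes the `parseString` entry point and `find` helper from the same package, which are not shown in this record.

import dhtmlparser

dom = dhtmlparser.parseString('<div><br /><!-- note --></div>')

div = dom.find('div')[0]
print(div.isOpeningTag())  # True: the opening element of a pair tag

br = dom.find('br')[0]
print(br.isOpeningTag())   # False: <br /> is a non-pair (self-closing) tag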