Dataset columns:
repo: string (length 7 to 54)
path: string (length 4 to 192)
url: string (length 87 to 284)
code: string (length 78 to 104k)
code_tokens: list
docstring: string (length 1 to 46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
LogicalDash/LiSE
allegedb/allegedb/__init__.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/__init__.py#L409-L502
def get_turn_delta(self, branch=None, turn=None, tick_from=0, tick_to=None):
    """Get a dictionary describing changes made on a given turn.

    If ``tick_to`` is not supplied, report all changes after ``tick_from``
    (default 0).

    The keys are graph names. Their values are dictionaries of the graphs'
    attributes' new values, with ``None`` for deleted keys. Also in those
    graph dictionaries are special keys 'node_val' and 'edge_val'
    describing changes to node and edge attributes, and 'nodes' and
    'edges' full of booleans indicating whether a node or edge exists.

    :arg branch: A branch of history; defaults to the current branch
    :arg turn: The turn in the branch; defaults to the current turn
    :arg tick_from: Starting tick; defaults to 0

    """
    branch = branch or self.branch
    turn = turn or self.turn
    tick_to = tick_to or self.tick
    delta = {}
    if tick_from < tick_to:
        gvbranches = self._graph_val_cache.settings
        nbranches = self._nodes_cache.settings
        nvbranches = self._node_val_cache.settings
        ebranches = self._edges_cache.settings
        evbranches = self._edge_val_cache.settings
    else:
        gvbranches = self._graph_val_cache.presettings
        nbranches = self._nodes_cache.presettings
        nvbranches = self._node_val_cache.presettings
        ebranches = self._edges_cache.presettings
        evbranches = self._edge_val_cache.presettings

    if branch in gvbranches and turn in gvbranches[branch]:
        for graph, key, value in gvbranches[branch][turn][tick_from:tick_to]:
            if graph in delta:
                delta[graph][key] = value
            else:
                delta[graph] = {key: value}

    if branch in nbranches and turn in nbranches[branch]:
        for graph, node, exists in nbranches[branch][turn][tick_from:tick_to]:
            delta.setdefault(graph, {}).setdefault('nodes', {})[node] = bool(exists)

    if branch in nvbranches and turn in nvbranches[branch]:
        for graph, node, key, value in nvbranches[branch][turn][tick_from:tick_to]:
            if (
                graph in delta and 'nodes' in delta[graph] and
                node in delta[graph]['nodes'] and
                not delta[graph]['nodes'][node]
            ):
                continue
            nodevd = delta.setdefault(graph, {}).setdefault('node_val', {})
            if node in nodevd:
                nodevd[node][key] = value
            else:
                nodevd[node] = {key: value}

    graph_objs = self._graph_objs
    if branch in ebranches and turn in ebranches[branch]:
        for graph, orig, dest, idx, exists in ebranches[branch][turn][tick_from:tick_to]:
            if graph_objs[graph].is_multigraph():
                if (
                    graph in delta and 'edges' in delta[graph] and
                    orig in delta[graph]['edges'] and
                    dest in delta[graph]['edges'][orig] and
                    idx in delta[graph]['edges'][orig][dest] and
                    not delta[graph]['edges'][orig][dest][idx]
                ):
                    continue
                delta.setdefault(graph, {}).setdefault('edges', {})\
                    .setdefault(orig, {}).setdefault(dest, {})[idx] = bool(exists)
            else:
                if (
                    graph in delta and 'edges' in delta[graph] and
                    orig in delta[graph]['edges'] and
                    dest in delta[graph]['edges'][orig] and
                    not delta[graph]['edges'][orig][dest]
                ):
                    continue
                delta.setdefault(graph, {}).setdefault('edges', {})\
                    .setdefault(orig, {})[dest] = bool(exists)

    if branch in evbranches and turn in evbranches[branch]:
        for graph, orig, dest, idx, key, value in evbranches[branch][turn][tick_from:tick_to]:
            edgevd = delta.setdefault(graph, {}).setdefault('edge_val', {})\
                .setdefault(orig, {}).setdefault(dest, {})
            if graph_objs[graph].is_multigraph():
                if idx in edgevd:
                    edgevd[idx][key] = value
                else:
                    edgevd[idx] = {key: value}
            else:
                edgevd[key] = value
    return delta
[ "def", "get_turn_delta", "(", "self", ",", "branch", "=", "None", ",", "turn", "=", "None", ",", "tick_from", "=", "0", ",", "tick_to", "=", "None", ")", ":", "branch", "=", "branch", "or", "self", ".", "branch", "turn", "=", "turn", "or", "self", ".", "turn", "tick_to", "=", "tick_to", "or", "self", ".", "tick", "delta", "=", "{", "}", "if", "tick_from", "<", "tick_to", ":", "gvbranches", "=", "self", ".", "_graph_val_cache", ".", "settings", "nbranches", "=", "self", ".", "_nodes_cache", ".", "settings", "nvbranches", "=", "self", ".", "_node_val_cache", ".", "settings", "ebranches", "=", "self", ".", "_edges_cache", ".", "settings", "evbranches", "=", "self", ".", "_edge_val_cache", ".", "settings", "else", ":", "gvbranches", "=", "self", ".", "_graph_val_cache", ".", "presettings", "nbranches", "=", "self", ".", "_nodes_cache", ".", "presettings", "nvbranches", "=", "self", ".", "_node_val_cache", ".", "presettings", "ebranches", "=", "self", ".", "_edges_cache", ".", "presettings", "evbranches", "=", "self", ".", "_edge_val_cache", ".", "presettings", "if", "branch", "in", "gvbranches", "and", "turn", "in", "gvbranches", "[", "branch", "]", ":", "for", "graph", ",", "key", ",", "value", "in", "gvbranches", "[", "branch", "]", "[", "turn", "]", "[", "tick_from", ":", "tick_to", "]", ":", "if", "graph", "in", "delta", ":", "delta", "[", "graph", "]", "[", "key", "]", "=", "value", "else", ":", "delta", "[", "graph", "]", "=", "{", "key", ":", "value", "}", "if", "branch", "in", "nbranches", "and", "turn", "in", "nbranches", "[", "branch", "]", ":", "for", "graph", ",", "node", ",", "exists", "in", "nbranches", "[", "branch", "]", "[", "turn", "]", "[", "tick_from", ":", "tick_to", "]", ":", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'nodes'", ",", "{", "}", ")", "[", "node", "]", "=", "bool", "(", "exists", ")", "if", "branch", "in", "nvbranches", "and", "turn", "in", "nvbranches", "[", "branch", "]", ":", "for", "graph", ",", "node", ",", "key", ",", "value", "in", "nvbranches", "[", "branch", "]", "[", "turn", "]", "[", "tick_from", ":", "tick_to", "]", ":", "if", "(", "graph", "in", "delta", "and", "'nodes'", "in", "delta", "[", "graph", "]", "and", "node", "in", "delta", "[", "graph", "]", "[", "'nodes'", "]", "and", "not", "delta", "[", "graph", "]", "[", "'nodes'", "]", "[", "node", "]", ")", ":", "continue", "nodevd", "=", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'node_val'", ",", "{", "}", ")", "if", "node", "in", "nodevd", ":", "nodevd", "[", "node", "]", "[", "key", "]", "=", "value", "else", ":", "nodevd", "[", "node", "]", "=", "{", "key", ":", "value", "}", "graph_objs", "=", "self", ".", "_graph_objs", "if", "branch", "in", "ebranches", "and", "turn", "in", "ebranches", "[", "branch", "]", ":", "for", "graph", ",", "orig", ",", "dest", ",", "idx", ",", "exists", "in", "ebranches", "[", "branch", "]", "[", "turn", "]", "[", "tick_from", ":", "tick_to", "]", ":", "if", "graph_objs", "[", "graph", "]", ".", "is_multigraph", "(", ")", ":", "if", "(", "graph", "in", "delta", "and", "'edges'", "in", "delta", "[", "graph", "]", "and", "orig", "in", "delta", "[", "graph", "]", "[", "'edges'", "]", "and", "dest", "in", "delta", "[", "graph", "]", "[", "'edges'", "]", "[", "orig", "]", "and", "idx", "in", "delta", "[", "graph", "]", "[", "'edges'", "]", "[", "orig", "]", "[", "dest", "]", "and", "not", "delta", "[", "graph", "]", "[", "'edges'", "]", "[", "orig", "]", "[", "dest", "]", "[", 
"idx", "]", ")", ":", "continue", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'edges'", ",", "{", "}", ")", ".", "setdefault", "(", "orig", ",", "{", "}", ")", ".", "setdefault", "(", "dest", ",", "{", "}", ")", "[", "idx", "]", "=", "bool", "(", "exists", ")", "else", ":", "if", "(", "graph", "in", "delta", "and", "'edges'", "in", "delta", "[", "graph", "]", "and", "orig", "in", "delta", "[", "graph", "]", "[", "'edges'", "]", "and", "dest", "in", "delta", "[", "graph", "]", "[", "'edges'", "]", "[", "orig", "]", "and", "not", "delta", "[", "graph", "]", "[", "'edges'", "]", "[", "orig", "]", "[", "dest", "]", ")", ":", "continue", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'edges'", ",", "{", "}", ")", ".", "setdefault", "(", "orig", ",", "{", "}", ")", "[", "dest", "]", "=", "bool", "(", "exists", ")", "if", "branch", "in", "evbranches", "and", "turn", "in", "evbranches", "[", "branch", "]", ":", "for", "graph", ",", "orig", ",", "dest", ",", "idx", ",", "key", ",", "value", "in", "evbranches", "[", "branch", "]", "[", "turn", "]", "[", "tick_from", ":", "tick_to", "]", ":", "edgevd", "=", "delta", ".", "setdefault", "(", "graph", ",", "{", "}", ")", ".", "setdefault", "(", "'edge_val'", ",", "{", "}", ")", ".", "setdefault", "(", "orig", ",", "{", "}", ")", ".", "setdefault", "(", "dest", ",", "{", "}", ")", "if", "graph_objs", "[", "graph", "]", ".", "is_multigraph", "(", ")", ":", "if", "idx", "in", "edgevd", ":", "edgevd", "[", "idx", "]", "[", "key", "]", "=", "value", "else", ":", "edgevd", "[", "idx", "]", "=", "{", "key", ":", "value", "}", "else", ":", "edgevd", "[", "key", "]", "=", "value", "return", "delta" ]
Get a dictionary describing changes made on a given turn. If ``tick_to`` is not supplied, report all changes after ``tick_from`` (default 0). The keys are graph names. Their values are dictionaries of the graphs' attributes' new values, with ``None`` for deleted keys. Also in those graph dictionaries are special keys 'node_val' and 'edge_val' describing changes to node and edge attributes, and 'nodes' and 'edges' full of booleans indicating whether a node or edge exists. :arg branch: A branch of history; defaults to the current branch :arg turn: The turn in the branch; defaults to the current turn :arg tick_from: Starting tick; defaults to 0
[ "Get", "a", "dictionary", "describing", "changes", "made", "on", "a", "given", "turn", "." ]
python
train
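The nesting of the delta returned by get_turn_delta is easier to see as a literal than in the code above. The sketch below is purely illustrative: the graph name, node and edge labels, and attribute values are invented, and only the structure follows the docstring (graph attributes with None for deletions, plus the special 'nodes', 'node_val', 'edges' and 'edge_val' keys).

# Hypothetical one-turn delta for a single non-multigraph named 'world'.
# Only the nesting mirrors get_turn_delta; every key and value is made up.
delta = {
    'world': {
        'title': 'overworld',                    # graph attribute set this turn
        'weather': None,                         # graph attribute deleted this turn
        'nodes': {'cave': True, 'hut': False},   # node created / node deleted
        'node_val': {'cave': {'depth': 3}},      # node attribute changes
        'edges': {'cave': {'hut': True}},        # edge existence, orig -> dest
        'edge_val': {'cave': {'hut': {'length': 2.5}}},  # edge attribute changes
    },
}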
CalebBell/fluids
fluids/core.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/core.py#L879-L946
def Grashof(L, beta, T1, T2=0, rho=None, mu=None, nu=None, g=g):
    r'''Calculates Grashof number or `Gr` for a fluid with the given
    properties, temperature difference, and characteristic length.

    .. math::
        Gr = \frac{g\beta (T_s-T_\infty)L^3}{\nu^2}
        = \frac{g\beta (T_s-T_\infty)L^3\rho^2}{\mu^2}

    Inputs any of the following sets:

    * L, beta, T1 and T2, and density `rho` and dynamic viscosity `mu`
    * L, beta, T1 and T2, and kinematic viscosity `nu`

    Parameters
    ----------
    L : float
        Characteristic length [m]
    beta : float
        Volumetric thermal expansion coefficient [1/K]
    T1 : float
        Temperature 1, usually a film temperature [K]
    T2 : float, optional
        Temperature 2, usually a bulk temperature (or 0 if only a difference
        is provided to the function) [K]
    rho : float, optional
        Density, [kg/m^3]
    mu : float, optional
        Dynamic viscosity, [Pa*s]
    nu : float, optional
        Kinematic viscosity, [m^2/s]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Gr : float
        Grashof number []

    Notes
    -----
    .. math::
        Gr = \frac{\text{Buoyancy forces}}{\text{Viscous forces}}

    An error is raised if none of the required input sets are provided.
    Used in free convection problems only.

    Examples
    --------
    Example 4 of [1]_, p. 1-21 (matches):

    >>> Grashof(L=0.9144, beta=0.000933, T1=178.2, rho=1.1613, mu=1.9E-5)
    4656936556.178915
    >>> Grashof(L=0.9144, beta=0.000933, T1=378.2, T2=200, nu=1.636e-05)
    4657491516.530312

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers'
       Handbook, Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    if rho and mu:
        nu = mu/rho
    elif not nu:
        raise Exception('Either density and dynamic viscosity, or '
                        'kinematic viscosity, is needed')
    return g*beta*abs(T2-T1)*L**3/nu**2
[ "def", "Grashof", "(", "L", ",", "beta", ",", "T1", ",", "T2", "=", "0", ",", "rho", "=", "None", ",", "mu", "=", "None", ",", "nu", "=", "None", ",", "g", "=", "g", ")", ":", "if", "rho", "and", "mu", ":", "nu", "=", "mu", "/", "rho", "elif", "not", "nu", ":", "raise", "Exception", "(", "'Either density and viscosity, or dynamic viscosity, \\\n is needed'", ")", "return", "g", "*", "beta", "*", "abs", "(", "T2", "-", "T1", ")", "*", "L", "**", "3", "/", "nu", "**", "2" ]
r'''Calculates Grashof number or `Gr` for a fluid with the given properties, temperature difference, and characteristic length. .. math:: Gr = \frac{g\beta (T_s-T_\infty)L^3}{\nu^2} = \frac{g\beta (T_s-T_\infty)L^3\rho^2}{\mu^2} Inputs any of the following sets: * L, beta, T1 and T2, and density `rho` and dynamic viscosity `mu` * L, beta, T1 and T2, and kinematic viscosity `nu` Parameters ---------- L : float Characteristic length [m] beta : float Volumetric thermal expansion coefficient [1/K] T1 : float Temperature 1, usually a film temperature [K] T2 : float, optional Temperature 2, usually a bulk temperature (or 0 if only a difference is provided to the function) [K] rho : float, optional Density, [kg/m^3] mu : float, optional Dynamic viscosity, [Pa*s] nu : float, optional Kinematic viscosity, [m^2/s] g : float, optional Acceleration due to gravity, [m/s^2] Returns ------- Gr : float Grashof number [] Notes ----- .. math:: Gr = \frac{\text{Buoyancy forces}}{\text{Viscous forces}} An error is raised if none of the required input sets are provided. Used in free convection problems only. Examples -------- Example 4 of [1]_, p. 1-21 (matches): >>> Grashof(L=0.9144, beta=0.000933, T1=178.2, rho=1.1613, mu=1.9E-5) 4656936556.178915 >>> Grashof(L=0.9144, beta=0.000933, T1=378.2, T2=200, nu=1.636e-05) 4657491516.530312 References ---------- .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook, Eighth Edition. McGraw-Hill Professional, 2007. .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and Applications. Boston: McGraw Hill Higher Education, 2006.
[ "r", "Calculates", "Grashof", "number", "or", "Gr", "for", "a", "fluid", "with", "the", "given", "properties", "temperature", "difference", "and", "characteristic", "length", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xratingslider.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xratingslider.py#L129-L139
def setMaximum( self, value ):
    """
    Sets the maximum value for this slider. This will also adjust the
    minimum width so that it matches the combined width of the icons for
    the maximum value.

    :param value | <int>
    """
    super(XRatingSlider, self).setMaximum(value)
    self.adjustMinimumWidth()
[ "def", "setMaximum", "(", "self", ",", "value", ")", ":", "super", "(", "XRatingSlider", ",", "self", ")", ".", "setMaximum", "(", "value", ")", "self", ".", "adjustMinimumWidth", "(", ")" ]
Sets the maximum value for this slider. This will also adjust the minimum width so that it matches the combined width of the icons for the maximum value. :param value | <int>
[ "Sets", "the", "maximum", "value", "for", "this", "slider", "-", "this", "will", "also", "adjust", "the", "minimum", "size", "value", "to", "match", "the", "width", "of", "the", "icons", "by", "the", "number", "for", "the", "maximu", ".", ":", "param", "value", "|", "<int", ">" ]
python
train
saltstack/salt
salt/modules/boto_rds.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_rds.py#L736-L755
def delete_parameter_group(name, region=None, key=None, keyid=None, profile=None):
    '''
    Delete an RDS parameter group.

    CLI example::

        salt myminion boto_rds.delete_parameter_group my-param-group \
                region=us-east-1
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if not conn:
            return {'results': bool(conn)}

        r = conn.delete_db_parameter_group(DBParameterGroupName=name)
        return {'deleted': bool(r),
                'message': 'Deleted RDS parameter group {0}.'.format(name)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
[ "def", "delete_parameter_group", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "conn", ":", "return", "{", "'results'", ":", "bool", "(", "conn", ")", "}", "r", "=", "conn", ".", "delete_db_parameter_group", "(", "DBParameterGroupName", "=", "name", ")", "return", "{", "'deleted'", ":", "bool", "(", "r", ")", ",", "'message'", ":", "'Deleted RDS parameter group {0}.'", ".", "format", "(", "name", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Delete an RDS parameter group. CLI example:: salt myminion boto_rds.delete_parameter_group my-param-group \ region=us-east-1
[ "Delete", "an", "RDS", "parameter", "group", "." ]
python
train
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/worker/workers_statistics.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/worker/workers_statistics.py#L258-L284
def fetch(self, minutes=values.unset, start_date=values.unset,
          end_date=values.unset, task_queue_sid=values.unset,
          task_queue_name=values.unset, friendly_name=values.unset,
          task_channel=values.unset):
    """
    Fetch a WorkersStatisticsInstance

    :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
    :param datetime start_date: Filter cumulative statistics by a start date.
    :param datetime end_date: Filter cumulative statistics by a end date.
    :param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode friendly_name: The friendly_name
    :param unicode task_channel: Filter cumulative statistics by TaskChannel.

    :returns: Fetched WorkersStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
    """
    return self._proxy.fetch(
        minutes=minutes,
        start_date=start_date,
        end_date=end_date,
        task_queue_sid=task_queue_sid,
        task_queue_name=task_queue_name,
        friendly_name=friendly_name,
        task_channel=task_channel,
    )
[ "def", "fetch", "(", "self", ",", "minutes", "=", "values", ".", "unset", ",", "start_date", "=", "values", ".", "unset", ",", "end_date", "=", "values", ".", "unset", ",", "task_queue_sid", "=", "values", ".", "unset", ",", "task_queue_name", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "task_channel", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "fetch", "(", "minutes", "=", "minutes", ",", "start_date", "=", "start_date", ",", "end_date", "=", "end_date", ",", "task_queue_sid", "=", "task_queue_sid", ",", "task_queue_name", "=", "task_queue_name", ",", "friendly_name", "=", "friendly_name", ",", "task_channel", "=", "task_channel", ",", ")" ]
Fetch a WorkersStatisticsInstance :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past. :param datetime start_date: Filter cumulative statistics by a start date. :param datetime end_date: Filter cumulative statistics by a end date. :param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue :param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue :param unicode friendly_name: The friendly_name :param unicode task_channel: Filter cumulative statistics by TaskChannel. :returns: Fetched WorkersStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
[ "Fetch", "a", "WorkersStatisticsInstance" ]
python
train
shanbay/peeweext
peeweext/model.py
https://github.com/shanbay/peeweext/blob/ff62a3d01e4584d50fde1944b9616c3b4236ecf0/peeweext/model.py#L53-L59
def update_with(self, **query):
    """ secure update, mass assignment protected """
    for k, v in self._filter_attrs(query).items():
        setattr(self, k, v)
    return self.save()
[ "def", "update_with", "(", "self", ",", "*", "*", "query", ")", ":", "for", "k", ",", "v", "in", "self", ".", "_filter_attrs", "(", "query", ")", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")", "return", "self", ".", "save", "(", ")" ]
secure update, mass assignment protected
[ "secure", "update", "mass", "assignment", "protected" ]
python
train
jealous/stockstats
stockstats.py
https://github.com/jealous/stockstats/blob/a479a504ea1906955feeb8519c34ef40eb48ec9b/stockstats.py#L650-L661
def _get_kdjj(df, n_days):
    """ Get the J of KDJ
    J = 3K-2D
    :param df: data
    :param n_days: calculation range
    :return: None
    """
    k_column = 'kdjk_{}'.format(n_days)
    d_column = 'kdjd_{}'.format(n_days)
    j_column = 'kdjj_{}'.format(n_days)
    df[j_column] = 3 * df[k_column] - 2 * df[d_column]
[ "def", "_get_kdjj", "(", "df", ",", "n_days", ")", ":", "k_column", "=", "'kdjk_{}'", ".", "format", "(", "n_days", ")", "d_column", "=", "'kdjd_{}'", ".", "format", "(", "n_days", ")", "j_column", "=", "'kdjj_{}'", ".", "format", "(", "n_days", ")", "df", "[", "j_column", "]", "=", "3", "*", "df", "[", "k_column", "]", "-", "2", "*", "df", "[", "d_column", "]" ]
Get the J of KDJ J = 3K-2D :param df: data :param n_days: calculation range :return: None
[ "Get", "the", "J", "of", "KDJ", "J", "=", "3K", "-", "2D", ":", "param", "df", ":", "data", ":", "param", "n_days", ":", "calculation", "range", ":", "return", ":", "None" ]
python
train
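Since the J line is just a linear combination of the K and D columns, the effect of _get_kdjj is easy to reproduce on a toy frame. The sketch below needs only pandas and follows the kdjk_{n}/kdjd_{n}/kdjj_{n} column-naming convention from the function above; the numbers are invented.

import pandas as pd

# Toy K and D values for a 9-day KDJ; J = 3K - 2D is added in place,
# which is exactly what _get_kdjj(df, 9) does.
df = pd.DataFrame({'kdjk_9': [55.0, 60.0, 70.0],
                   'kdjd_9': [50.0, 55.0, 65.0]})
df['kdjj_9'] = 3 * df['kdjk_9'] - 2 * df['kdjd_9']
print(df['kdjj_9'].tolist())  # [65.0, 70.0, 80.0]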
williamjameshandley/fgivenx
fgivenx/_utils.py
https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/_utils.py#L4-L61
def _check_args(logZ, f, x, samples, weights):
    """ Sanity-check the arguments for
    :func:`fgivenx.drivers.compute_samples`.

    Parameters
    ----------
    f, x, samples, weights:
        see arguments for :func:`fgivenx.drivers.compute_samples`
    """
    # convert to arrays
    if logZ is None:
        logZ = [0]
        f = [f]
        samples = [samples]
        weights = [weights]

    # logZ
    logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
        raise ValueError("logZ should be a 1D array")

    # x
    x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
        raise ValueError("x should be a 1D array")

    # f
    if len(logZ) != len(f):
        raise ValueError("len(logZ) = %i != len(f)= %i" % (len(logZ), len(f)))
    for func in f:
        if not callable(func):
            raise ValueError("first argument f must be function "
                             "(or list of functions) of two variables")

    # samples
    if len(logZ) != len(samples):
        raise ValueError("len(logZ) = %i != len(samples)= %i"
                         % (len(logZ), len(samples)))
    samples = [numpy.array(s, dtype='double') for s in samples]
    for s in samples:
        if len(s.shape) != 2:
            raise ValueError("each set of samples should be a 2D array")

    # weights
    if len(logZ) != len(weights):
        raise ValueError("len(logZ) = %i != len(weights)= %i"
                         % (len(logZ), len(weights)))
    weights = [numpy.array(w, dtype='double')
               if w is not None else numpy.ones(len(s), dtype='double')
               for w, s in zip(weights, samples)]
    for w, s in zip(weights, samples):
        if len(w.shape) != 1:
            raise ValueError("each set of weights should be a 1D array")
        if len(w) != len(s):
            raise ValueError("len(w) = %i != len(s) = %i"
                             % (len(s), len(w)))

    return logZ, f, x, samples, weights
[ "def", "_check_args", "(", "logZ", ",", "f", ",", "x", ",", "samples", ",", "weights", ")", ":", "# convert to arrays", "if", "logZ", "is", "None", ":", "logZ", "=", "[", "0", "]", "f", "=", "[", "f", "]", "samples", "=", "[", "samples", "]", "weights", "=", "[", "weights", "]", "# logZ", "logZ", "=", "numpy", ".", "array", "(", "logZ", ",", "dtype", "=", "'double'", ")", "if", "len", "(", "logZ", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"logZ should be a 1D array\"", ")", "# x", "x", "=", "numpy", ".", "array", "(", "x", ",", "dtype", "=", "'double'", ")", "if", "len", "(", "x", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"x should be a 1D array\"", ")", "# f", "if", "len", "(", "logZ", ")", "!=", "len", "(", "f", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(f)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "f", ")", ")", ")", "for", "func", "in", "f", ":", "if", "not", "callable", "(", "func", ")", ":", "raise", "ValueError", "(", "\"first argument f must be function\"", "\"(or list of functions) of two variables\"", ")", "# samples", "if", "len", "(", "logZ", ")", "!=", "len", "(", "samples", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(samples)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "samples", ")", ")", ")", "samples", "=", "[", "numpy", ".", "array", "(", "s", ",", "dtype", "=", "'double'", ")", "for", "s", "in", "samples", "]", "for", "s", "in", "samples", ":", "if", "len", "(", "s", ".", "shape", ")", "is", "not", "2", ":", "raise", "ValueError", "(", "\"each set of samples should be a 2D array\"", ")", "# weights", "if", "len", "(", "logZ", ")", "!=", "len", "(", "weights", ")", ":", "raise", "ValueError", "(", "\"len(logZ) = %i != len(weights)= %i\"", "%", "(", "len", "(", "logZ", ")", ",", "len", "(", "weights", ")", ")", ")", "weights", "=", "[", "numpy", ".", "array", "(", "w", ",", "dtype", "=", "'double'", ")", "if", "w", "is", "not", "None", "else", "numpy", ".", "ones", "(", "len", "(", "s", ")", ",", "dtype", "=", "'double'", ")", "for", "w", ",", "s", "in", "zip", "(", "weights", ",", "samples", ")", "]", "for", "w", ",", "s", "in", "zip", "(", "weights", ",", "samples", ")", ":", "if", "len", "(", "w", ".", "shape", ")", "is", "not", "1", ":", "raise", "ValueError", "(", "\"each set of weights should be a 1D array\"", ")", "if", "len", "(", "w", ")", "!=", "len", "(", "s", ")", ":", "raise", "ValueError", "(", "\"len(w) = %i != len(s) = %i\"", "%", "(", "len", "(", "s", ")", ",", "len", "(", "w", ")", ")", ")", "return", "logZ", ",", "f", ",", "x", ",", "samples", ",", "weights" ]
Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`. Parameters ---------- f, x, samples, weights: see arguments for :func:`fgivenx.drivers.compute_samples`
[ "Sanity", "-", "check", "the", "arguments", "for", ":", "func", ":", "fgivenx", ".", "drivers", ".", "compute_samples", "." ]
python
train
kibitzr/kibitzr
kibitzr/storage.py
https://github.com/kibitzr/kibitzr/blob/749da312488f1dda1ed1093cf4c95aaac0a604f7/kibitzr/storage.py#L109-L142
def word(self):
    """Return last changes with word diff"""
    try:
        output = ensure_unicode(self.git.diff(
            '--no-color',
            '--word-diff=plain',
            'HEAD~1:content',
            'HEAD:content',
        ).stdout)
    except sh.ErrorReturnCode_128:
        result = ensure_unicode(self.git.show(
            "HEAD:content"
        ).stdout)
    else:
        ago = ensure_unicode(self.git.log(
            '-2',
            '--pretty=format:last change was %cr',
            'content'
        ).stdout).splitlines()
        lines = output.splitlines()
        result = u'\n'.join(
            itertools.chain(
                itertools.islice(
                    itertools.dropwhile(
                        lambda x: not x.startswith('@@'),
                        lines[1:],
                    ),
                    1,
                    None,
                ),
                itertools.islice(ago, 1, None),
            )
        )
    return result
[ "def", "word", "(", "self", ")", ":", "try", ":", "output", "=", "ensure_unicode", "(", "self", ".", "git", ".", "diff", "(", "'--no-color'", ",", "'--word-diff=plain'", ",", "'HEAD~1:content'", ",", "'HEAD:content'", ",", ")", ".", "stdout", ")", "except", "sh", ".", "ErrorReturnCode_128", ":", "result", "=", "ensure_unicode", "(", "self", ".", "git", ".", "show", "(", "\"HEAD:content\"", ")", ".", "stdout", ")", "else", ":", "ago", "=", "ensure_unicode", "(", "self", ".", "git", ".", "log", "(", "'-2'", ",", "'--pretty=format:last change was %cr'", ",", "'content'", ")", ".", "stdout", ")", ".", "splitlines", "(", ")", "lines", "=", "output", ".", "splitlines", "(", ")", "result", "=", "u'\\n'", ".", "join", "(", "itertools", ".", "chain", "(", "itertools", ".", "islice", "(", "itertools", ".", "dropwhile", "(", "lambda", "x", ":", "not", "x", ".", "startswith", "(", "'@@'", ")", ",", "lines", "[", "1", ":", "]", ",", ")", ",", "1", ",", "None", ",", ")", ",", "itertools", ".", "islice", "(", "ago", ",", "1", ",", "None", ")", ",", ")", ")", "return", "result" ]
Return last changes with word diff
[ "Return", "last", "changes", "with", "word", "diff" ]
python
train
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L2874-L2887
def get_vlan_brief_request(last_vlan_id):
    """ Creates a new Netconf request based on the last received vlan id
    when the hasMore flag is true
    """
    request_interface = ET.Element(
        'get-vlan-brief',
        xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
    )

    if last_vlan_id != '':
        last_received_int_el = ET.SubElement(request_interface,
                                             "last-rcvd-vlan-id")
        last_received_int_el.text = last_vlan_id
    return request_interface
[ "def", "get_vlan_brief_request", "(", "last_vlan_id", ")", ":", "request_interface", "=", "ET", ".", "Element", "(", "'get-vlan-brief'", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-interface-ext\"", ")", "if", "last_vlan_id", "!=", "''", ":", "last_received_int_el", "=", "ET", ".", "SubElement", "(", "request_interface", ",", "\"last-rcvd-vlan-id\"", ")", "last_received_int_el", ".", "text", "=", "last_vlan_id", "return", "request_interface" ]
Creates a new Netconf request based on the last received vlan id when the hasMore flag is true
[ "Creates", "a", "new", "Netconf", "request", "based", "on", "the", "last", "received", "vlan", "id", "when", "the", "hasMore", "flag", "is", "true" ]
python
train
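For readers unfamiliar with ElementTree, the request built by get_vlan_brief_request is a small XML element with an optional child. The standalone sketch below repeats the same construction outside pynos and prints the serialized request; the vlan id is a made-up example value.

import xml.etree.ElementTree as ET

request_interface = ET.Element(
    'get-vlan-brief',
    xmlns="urn:brocade.com:mgmt:brocade-interface-ext")
# Only added when a previous reply set hasMore and returned a last vlan id.
last_received_int_el = ET.SubElement(request_interface, "last-rcvd-vlan-id")
last_received_int_el.text = '100'  # hypothetical last received vlan id
print(ET.tostring(request_interface).decode())
# <get-vlan-brief xmlns="urn:brocade.com:mgmt:brocade-interface-ext"><last-rcvd-vlan-id>100</last-rcvd-vlan-id></get-vlan-brief>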
thorgate/tg-react
tg_react/language.py
https://github.com/thorgate/tg-react/blob/5a6e83d5a5c883f1a5ee4fda2226e81a468bdee3/tg_react/language.py#L28-L36
def get_catalog(self, locale):
    """Create Django translation catalogue for `locale`."""
    with translation.override(locale):
        translation_engine = DjangoTranslation(locale, domain=self.domain,
                                               localedirs=self.paths)

        trans_cat = translation_engine._catalog
        trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {}

        return trans_cat, trans_fallback_cat
[ "def", "get_catalog", "(", "self", ",", "locale", ")", ":", "with", "translation", ".", "override", "(", "locale", ")", ":", "translation_engine", "=", "DjangoTranslation", "(", "locale", ",", "domain", "=", "self", ".", "domain", ",", "localedirs", "=", "self", ".", "paths", ")", "trans_cat", "=", "translation_engine", ".", "_catalog", "trans_fallback_cat", "=", "translation_engine", ".", "_fallback", ".", "_catalog", "if", "translation_engine", ".", "_fallback", "else", "{", "}", "return", "trans_cat", ",", "trans_fallback_cat" ]
Create Django translation catalogue for `locale`.
[ "Create", "Django", "translation", "catalogue", "for", "locale", "." ]
python
train
genialis/resolwe
resolwe/flow/management/commands/collecttools.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/collecttools.py#L76-L84
def clear_dir(self):
    """Delete contents of the directory on the given path."""
    self.stdout.write("Deleting contents of '{}'.".format(self.destination_path))

    for filename in os.listdir(self.destination_path):
        # Join with the destination path so the checks and removals do not
        # depend on the current working directory.
        path = os.path.join(self.destination_path, filename)
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
[ "def", "clear_dir", "(", "self", ")", ":", "self", ".", "stdout", ".", "write", "(", "\"Deleting contents of '{}'.\"", ".", "format", "(", "self", ".", "destination_path", ")", ")", "for", "filename", "in", "os", ".", "listdir", "(", "self", ".", "destination_path", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", "or", "os", ".", "path", ".", "islink", "(", "filename", ")", ":", "os", ".", "remove", "(", "filename", ")", "elif", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "shutil", ".", "rmtree", "(", "filename", ")" ]
Delete contents of the directory on the given path.
[ "Delete", "contents", "of", "the", "directory", "on", "the", "given", "path", "." ]
python
train
pycontribs/pyrax
pyrax/base_identity.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/base_identity.py#L497-L503
def _read_credential_file(self, cfg):
    """
    Implements the default (keystone) behavior.
    """
    self.username = cfg.get("keystone", "username")
    self.password = cfg.get("keystone", "password", raw=True)
    self.tenant_id = cfg.get("keystone", "tenant_id")
[ "def", "_read_credential_file", "(", "self", ",", "cfg", ")", ":", "self", ".", "username", "=", "cfg", ".", "get", "(", "\"keystone\"", ",", "\"username\"", ")", "self", ".", "password", "=", "cfg", ".", "get", "(", "\"keystone\"", ",", "\"password\"", ",", "raw", "=", "True", ")", "self", ".", "tenant_id", "=", "cfg", ".", "get", "(", "\"keystone\"", ",", "\"tenant_id\"", ")" ]
Implements the default (keystone) behavior.
[ "Implements", "the", "default", "(", "keystone", ")", "behavior", "." ]
python
train
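_read_credential_file only calls cfg.get on a 'keystone' section, so the credential file it expects is INI-style with that one section. The sketch below builds a hypothetical credential file in memory and reads it with the standard-library ConfigParser, whose get(section, option, raw=...) signature matches the calls above; all account values are invented.

import configparser

sample = """\
[keystone]
username = demo-user
password = s3cr3t%pass
tenant_id = 123456
"""
cfg = configparser.ConfigParser()
cfg.read_string(sample)
print(cfg.get("keystone", "username"))            # demo-user
print(cfg.get("keystone", "password", raw=True))  # s3cr3t%pass, % left alone
print(cfg.get("keystone", "tenant_id"))           # 123456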
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L1167-L1177
def add_output_file(self, filename):
    """
    Add filename as a output file for this DAG node.

    @param filename: output filename to add
    """
    if filename not in self.__output_files:
        self.__output_files.append(filename)
        if not isinstance(self.job(), CondorDAGManJob):
            if self.job().get_universe() == 'grid':
                self.add_output_macro(filename)
[ "def", "add_output_file", "(", "self", ",", "filename", ")", ":", "if", "filename", "not", "in", "self", ".", "__output_files", ":", "self", ".", "__output_files", ".", "append", "(", "filename", ")", "if", "not", "isinstance", "(", "self", ".", "job", "(", ")", ",", "CondorDAGManJob", ")", ":", "if", "self", ".", "job", "(", ")", ".", "get_universe", "(", ")", "==", "'grid'", ":", "self", ".", "add_output_macro", "(", "filename", ")" ]
Add filename as a output file for this DAG node. @param filename: output filename to add
[ "Add", "filename", "as", "a", "output", "file", "for", "this", "DAG", "node", "." ]
python
train
CityOfZion/neo-python
neo/Core/State/StorageItem.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/StorageItem.py#L51-L59
def Deserialize(self, reader):
    """
    Deserialize full object.

    Args:
        reader (neocore.IO.BinaryReader):
    """
    super(StorageItem, self).Deserialize(reader)
    self.Value = reader.ReadVarBytes()
[ "def", "Deserialize", "(", "self", ",", "reader", ")", ":", "super", "(", "StorageItem", ",", "self", ")", ".", "Deserialize", "(", "reader", ")", "self", ".", "Value", "=", "reader", ".", "ReadVarBytes", "(", ")" ]
Deserialize full object. Args: reader (neocore.IO.BinaryReader):
[ "Deserialize", "full", "object", "." ]
python
train
InfoAgeTech/django-core
django_core/utils/urls.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/utils/urls.py#L99-L128
def get_query_values_from_url(url, keys=None):
    """Gets query string values from a url.

    if a list of keys are provided, then a dict will be returned. If only
    a single string key is provided, then only a single value will be
    returned.

    >>> url = 'http://helloworld.com/some/path?test=5&hello=world&john=doe'
    >>> get_query_values_from_url(url=url, keys='test')
    "5"
    >>> get_query_values_from_url(url=url, keys=['test'])
    {'test': '5'}
    >>> get_query_values_from_url(url=url, keys=['test', 'john'])
    {'test': '5', 'john': 'doe'}
    >>> get_query_values_from_url(url=url, keys=['test', 'john', 'blah'])
    {'test': '5', 'john': 'doe', 'blah': None}
    """
    if not url or '?' not in url:
        # no query params
        return None

    parsed_url = urlparse(url)
    query = dict(parse_qsl(parsed_url.query))

    if keys is None:
        return query

    if isinstance(keys, string_types):
        return query.get(keys)

    return {k: query.get(k) for k in keys}
[ "def", "get_query_values_from_url", "(", "url", ",", "keys", "=", "None", ")", ":", "if", "not", "url", "or", "'?'", "not", "in", "url", ":", "# no query params", "return", "None", "parsed_url", "=", "urlparse", "(", "url", ")", "query", "=", "dict", "(", "parse_qsl", "(", "parsed_url", ".", "query", ")", ")", "if", "keys", "is", "None", ":", "return", "query", "if", "isinstance", "(", "keys", ",", "string_types", ")", ":", "return", "query", ".", "get", "(", "keys", ")", "return", "{", "k", ":", "query", ".", "get", "(", "k", ")", "for", "k", "in", "keys", "}" ]
Gets query string values from a url. if a list of keys are provided, then a dict will be returned. If only a single string key is provided, then only a single value will be returned. >>> url = 'http://helloworld.com/some/path?test=5&hello=world&john=doe' >>> get_query_values_from_url(url=url, keys='test') "5" >>> get_query_values_from_url(url=url, keys=['test']) {'test': '5'} >>> get_query_values_from_url(url=url, keys=['test', 'john']) {'test': '5', 'john': 'doe'} >>> get_query_values_from_url(url=url, keys=['test', 'john', 'blah']) {'test': '5', 'john': 'doe', 'blah': None}
[ "Gets", "query", "string", "values", "from", "a", "url", "." ]
python
train
totalgood/pugnlp
src/pugnlp/stats.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/stats.py#L227-L240
def confusion(df, labels=['neg', 'pos']):
    """ Binary classification confusion """
    c = pd.DataFrame(np.zeros((2, 2)), dtype=int)
    a, b = df.columns[:2]  # labels[df.columns[:2]]
    c.columns = sorted(set(df[a]))[:2]
    c.columns.name = a
    c.index = list(c.columns)
    c.index.name = b
    c1, c2 = c.columns
    c[c1][c1] = ((df[a] == c1) & (df[b] == c1)).sum()
    c[c1][c2] = ((df[a] == c1) & (df[b] == c2)).sum()
    c[c2][c2] = ((df[a] == c2) & (df[b] == c2)).sum()
    c[c2][c1] = ((df[a] == c2) & (df[b] == c1)).sum()
    return c
[ "def", "confusion", "(", "df", ",", "labels", "=", "[", "'neg'", ",", "'pos'", "]", ")", ":", "c", "=", "pd", ".", "DataFrame", "(", "np", ".", "zeros", "(", "(", "2", ",", "2", ")", ")", ",", "dtype", "=", "int", ")", "a", ",", "b", "=", "df", ".", "columns", "[", ":", "2", "]", "# labels[df.columns[:2]]", "c", ".", "columns", "=", "sorted", "(", "set", "(", "df", "[", "a", "]", ")", ")", "[", ":", "2", "]", "c", ".", "columns", ".", "name", "=", "a", "c", ".", "index", "=", "list", "(", "c", ".", "columns", ")", "c", ".", "index", ".", "name", "=", "b", "c1", ",", "c2", "=", "c", ".", "columns", "c", "[", "c1", "]", "[", "c1", "]", "=", "(", "(", "df", "[", "a", "]", "==", "c1", ")", "&", "(", "df", "[", "b", "]", "==", "c1", ")", ")", ".", "sum", "(", ")", "c", "[", "c1", "]", "[", "c2", "]", "=", "(", "(", "df", "[", "a", "]", "==", "c1", ")", "&", "(", "df", "[", "b", "]", "==", "c2", ")", ")", ".", "sum", "(", ")", "c", "[", "c2", "]", "[", "c2", "]", "=", "(", "(", "df", "[", "a", "]", "==", "c2", ")", "&", "(", "df", "[", "b", "]", "==", "c2", ")", ")", ".", "sum", "(", ")", "c", "[", "c2", "]", "[", "c1", "]", "=", "(", "(", "df", "[", "a", "]", "==", "c2", ")", "&", "(", "df", "[", "b", "]", "==", "c1", ")", ")", ".", "sum", "(", ")", "return", "c" ]
Binary classification confusion
[ "Binary", "classification", "confusion" ]
python
train
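To see what confusion returns, it can be called on a small two-column frame of actual and predicted labels. The sketch assumes pugnlp is installed so the function can be imported; the label data is invented.

import pandas as pd
from pugnlp.stats import confusion  # assumes pugnlp is installed

# The first column provides one axis of the matrix, the second the other.
df = pd.DataFrame({'truth': ['neg', 'neg', 'pos', 'pos', 'pos'],
                   'pred':  ['neg', 'pos', 'pos', 'pos', 'neg']})
c = confusion(df)
# c is a 2x2 frame of counts; its columns are named after the 'truth' column
# and its index after 'pred', e.g. c['pos']['pos'] == 2 for this data.
print(c)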
jck/kya
scripts/link_pyqt.py
https://github.com/jck/kya/blob/377361a336691612ce1b86cc36dda3ab8b079789/scripts/link_pyqt.py#L16-L22
def link_pyqt(sys_python, venv_python):
    """Symlink the systemwide PyQt/sip into the venv."""
    real_site = site_dir(sys_python)
    venv_site = site_dir(venv_python)

    for f in ['sip.so', 'PyQt5']:
        (venv_site/f).symlink_to(real_site/f)
[ "def", "link_pyqt", "(", "sys_python", ",", "venv_python", ")", ":", "real_site", "=", "site_dir", "(", "sys_python", ")", "venv_site", "=", "site_dir", "(", "venv_python", ")", "for", "f", "in", "[", "'sip.so'", ",", "'PyQt5'", "]", ":", "(", "venv_site", "/", "f", ")", ".", "symlink_to", "(", "real_site", "/", "f", ")" ]
Symlink the systemwide PyQt/sip into the venv.
[ "Symlink", "the", "systemwide", "PyQt", "/", "sip", "into", "the", "venv", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2718-L2746
def getOutputDevice(self, textureType):
    """
    * Returns platform- and texture-type specific adapter identification so that applications and
    the compositor are creating textures and swap chains on the same GPU. If an error occurs the
    device will be set to 0.
    pInstance is an optional parameter that is required only when textureType is TextureType_Vulkan.
    [D3D10/11/12 Only (D3D9 Not Supported)]
      Returns the adapter LUID that identifies the GPU attached to the HMD. The user should
      enumerate all adapters using IDXGIFactory::EnumAdapters and IDXGIAdapter::GetDesc to find
      the adapter with the matching LUID, or use IDXGIFactory4::EnumAdapterByLuid.
      The discovered IDXGIAdapter should be used to create the device and swap chain.
    [Vulkan Only]
      Returns the VkPhysicalDevice that should be used by the application.
      pInstance must be the instance the application will use to query for the VkPhysicalDevice.
      The application must create the VkInstance with extensions returned by
      IVRCompositor::GetVulkanInstanceExtensionsRequired enabled.
    [macOS Only]
      For TextureType_IOSurface returns the id<MTLDevice> that should be used by the application.
      On 10.13+ for TextureType_OpenGL returns the 'registryId' of the renderer which should be
      used by the application. See Apple Technical Q&A QA1168 for information on enumerating GL
      Renderers, and the new kCGLRPRegistryIDLow and kCGLRPRegistryIDHigh CGLRendererProperty
      values in the 10.13 SDK.
      Pre 10.13 for TextureType_OpenGL returns 0, as there is no dependable way to correlate the
      HMDs MTLDevice with a GL Renderer.
    """
    fn = self.function_table.getOutputDevice
    pnDevice = c_uint64()
    pInstance = VkInstance_T()
    fn(byref(pnDevice), textureType, byref(pInstance))
    return pnDevice.value, pInstance
[ "def", "getOutputDevice", "(", "self", ",", "textureType", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOutputDevice", "pnDevice", "=", "c_uint64", "(", ")", "pInstance", "=", "VkInstance_T", "(", ")", "fn", "(", "byref", "(", "pnDevice", ")", ",", "textureType", ",", "byref", "(", "pInstance", ")", ")", "return", "pnDevice", ".", "value", ",", "pInstance" ]
* Returns platform- and texture-type specific adapter identification so that applications and the compositor are creating textures and swap chains on the same GPU. If an error occurs the device will be set to 0. pInstance is an optional parameter that is required only when textureType is TextureType_Vulkan. [D3D10/11/12 Only (D3D9 Not Supported)] Returns the adapter LUID that identifies the GPU attached to the HMD. The user should enumerate all adapters using IDXGIFactory::EnumAdapters and IDXGIAdapter::GetDesc to find the adapter with the matching LUID, or use IDXGIFactory4::EnumAdapterByLuid. The discovered IDXGIAdapter should be used to create the device and swap chain. [Vulkan Only] Returns the VkPhysicalDevice that should be used by the application. pInstance must be the instance the application will use to query for the VkPhysicalDevice. The application must create the VkInstance with extensions returned by IVRCompositor::GetVulkanInstanceExtensionsRequired enabled. [macOS Only] For TextureType_IOSurface returns the id<MTLDevice> that should be used by the application. On 10.13+ for TextureType_OpenGL returns the 'registryId' of the renderer which should be used by the application. See Apple Technical Q&A QA1168 for information on enumerating GL Renderers, and the new kCGLRPRegistryIDLow and kCGLRPRegistryIDHigh CGLRendererProperty values in the 10.13 SDK. Pre 10.13 for TextureType_OpenGL returns 0, as there is no dependable way to correlate the HMDs MTLDevice with a GL Renderer.
[ "*", "Returns", "platform", "-", "and", "texture", "-", "type", "specific", "adapter", "identification", "so", "that", "applications", "and", "the", "compositor", "are", "creating", "textures", "and", "swap", "chains", "on", "the", "same", "GPU", ".", "If", "an", "error", "occurs", "the", "device", "will", "be", "set", "to", "0", ".", "pInstance", "is", "an", "optional", "parameter", "that", "is", "required", "only", "when", "textureType", "is", "TextureType_Vulkan", ".", "[", "D3D10", "/", "11", "/", "12", "Only", "(", "D3D9", "Not", "Supported", ")", "]", "Returns", "the", "adapter", "LUID", "that", "identifies", "the", "GPU", "attached", "to", "the", "HMD", ".", "The", "user", "should", "enumerate", "all", "adapters", "using", "IDXGIFactory", "::", "EnumAdapters", "and", "IDXGIAdapter", "::", "GetDesc", "to", "find", "the", "adapter", "with", "the", "matching", "LUID", "or", "use", "IDXGIFactory4", "::", "EnumAdapterByLuid", ".", "The", "discovered", "IDXGIAdapter", "should", "be", "used", "to", "create", "the", "device", "and", "swap", "chain", ".", "[", "Vulkan", "Only", "]", "Returns", "the", "VkPhysicalDevice", "that", "should", "be", "used", "by", "the", "application", ".", "pInstance", "must", "be", "the", "instance", "the", "application", "will", "use", "to", "query", "for", "the", "VkPhysicalDevice", ".", "The", "application", "must", "create", "the", "VkInstance", "with", "extensions", "returned", "by", "IVRCompositor", "::", "GetVulkanInstanceExtensionsRequired", "enabled", ".", "[", "macOS", "Only", "]", "For", "TextureType_IOSurface", "returns", "the", "id<MTLDevice", ">", "that", "should", "be", "used", "by", "the", "application", ".", "On", "10", ".", "13", "+", "for", "TextureType_OpenGL", "returns", "the", "registryId", "of", "the", "renderer", "which", "should", "be", "used", "by", "the", "application", ".", "See", "Apple", "Technical", "Q&A", "QA1168", "for", "information", "on", "enumerating", "GL", "Renderers", "and", "the", "new", "kCGLRPRegistryIDLow", "and", "kCGLRPRegistryIDHigh", "CGLRendererProperty", "values", "in", "the", "10", ".", "13", "SDK", ".", "Pre", "10", ".", "13", "for", "TextureType_OpenGL", "returns", "0", "as", "there", "is", "no", "dependable", "way", "to", "correlate", "the", "HMDs", "MTLDevice", "with", "a", "GL", "Renderer", "." ]
python
train
wind-python/windpowerlib
windpowerlib/wind_farm.py
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_farm.py#L160-L302
def assign_power_curve(self, wake_losses_model='power_efficiency_curve',
                       smoothing=False, block_width=0.5,
                       standard_deviation_method='turbulence_intensity',
                       smoothing_order='wind_farm_power_curves',
                       turbulence_intensity=None, **kwargs):
    r"""
    Calculates the power curve of a wind farm.

    The wind farm power curve is calculated by aggregating the power
    curves of all wind turbines in the wind farm. Depending on the
    parameters the power curves are smoothed (before or after the
    aggregation) and/or a wind farm efficiency (power efficiency curve or
    constant efficiency) is applied after the aggregation.
    After the calculations the power curve is assigned to the wind farm
    object.

    Parameters
    ----------
    wake_losses_model : string
        Defines the method for taking wake losses within the farm into
        consideration. Options: 'power_efficiency_curve',
        'constant_efficiency' or None. Default: 'power_efficiency_curve'.
    smoothing : boolean
        If True the power curves will be smoothed before or after the
        aggregation of power curves depending on `smoothing_order`.
        Default: False.
    block_width : float
        Width between the wind speeds in the sum of the equation in
        :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
    standard_deviation_method : string
        Method for calculating the standard deviation for the Gauss
        distribution. Options: 'turbulence_intensity',
        'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
    smoothing_order : string
        Defines when the smoothing takes place if `smoothing` is True.
        Options: 'turbine_power_curves' (to the single turbine power
        curves), 'wind_farm_power_curves'.
        Default: 'wind_farm_power_curves'.
    turbulence_intensity : float
        Turbulence intensity at hub height of the wind farm for power
        curve smoothing with 'turbulence_intensity' method. Can be
        calculated from `roughness_length` instead. Default: None.

    Other Parameters
    ----------------
    roughness_length : float, optional.
        Roughness length. If `standard_deviation_method` is
        'turbulence_intensity' and `turbulence_intensity` is not given
        the turbulence intensity is calculated via the roughness length.

    Returns
    -------
    self
    """
    # Check if all wind turbines have a power curve as attribute
    for item in self.wind_turbine_fleet:
        if item['wind_turbine'].power_curve is None:
            raise ValueError("For an aggregated wind farm power curve " +
                             "each wind turbine needs a power curve " +
                             "but `power_curve` of wind turbine " +
                             "{} is {}.".format(
                                 item['wind_turbine'].name if
                                 item['wind_turbine'].name else '',
                                 item['wind_turbine'].power_curve))
    # Initialize data frame for power curve values
    df = pd.DataFrame()
    for turbine_type_dict in self.wind_turbine_fleet:
        # Check if all needed parameters are available and/or assign them
        if smoothing:
            if (standard_deviation_method == 'turbulence_intensity' and
                    turbulence_intensity is None):
                if 'roughness_length' in kwargs:
                    # Calculate turbulence intensity and write to kwargs
                    turbulence_intensity = (
                        tools.estimate_turbulence_intensity(
                            turbine_type_dict['wind_turbine'].hub_height,
                            kwargs['roughness_length']))
                    kwargs['turbulence_intensity'] = turbulence_intensity
                else:
                    raise ValueError(
                        "`roughness_length` must be defined for using " +
                        "'turbulence_intensity' as " +
                        "`standard_deviation_method` if " +
                        "`turbulence_intensity` is not given")
        if wake_losses_model is not None:
            if self.efficiency is None:
                raise KeyError(
                    "`efficiency` is needed if " +
                    "`wake_losses_model` is '{0}', but ".format(
                        wake_losses_model) +
                    "`efficiency` of wind farm {0} is {1}.".format(
                        self.name if self.name else '', self.efficiency))
        # Get original power curve
        power_curve = pd.DataFrame(
            turbine_type_dict['wind_turbine'].power_curve)
        # Editions to the power curves before the summation
        if smoothing and smoothing_order == 'turbine_power_curves':
            power_curve = power_curves.smooth_power_curve(
                power_curve['wind_speed'], power_curve['value'],
                standard_deviation_method=standard_deviation_method,
                block_width=block_width, **kwargs)
        else:
            # Add value zero to start and end of curve as otherwise there
            # can occur problems during the aggregation
            if power_curve.iloc[0]['wind_speed'] != 0.0:
                power_curve = pd.concat(
                    [pd.DataFrame(data={
                        'value': [0.0], 'wind_speed': [0.0]}),
                     power_curve], sort=True)
            if power_curve.iloc[-1]['value'] != 0.0:
                power_curve = pd.concat(
                    [power_curve, pd.DataFrame(data={
                        'value': [0.0], 'wind_speed': [
                            power_curve['wind_speed'].loc[
                                power_curve.index[-1]] + 0.5]})],
                    sort=True)
        # Add power curves of all turbine types to data frame
        # (multiplied by turbine amount)
        df = pd.concat(
            [df, pd.DataFrame(power_curve.set_index(['wind_speed']) *
                              turbine_type_dict['number_of_turbines'])],
            axis=1)
    # Aggregate all power curves
    wind_farm_power_curve = pd.DataFrame(
        df.interpolate(method='index').sum(axis=1))
    wind_farm_power_curve.columns = ['value']
    wind_farm_power_curve.reset_index('wind_speed', inplace=True)
    # Editions to the power curve after the summation
    if smoothing and smoothing_order == 'wind_farm_power_curves':
        wind_farm_power_curve = power_curves.smooth_power_curve(
            wind_farm_power_curve['wind_speed'],
            wind_farm_power_curve['value'],
            standard_deviation_method=standard_deviation_method,
            block_width=block_width, **kwargs)
    if (wake_losses_model == 'constant_efficiency' or
            wake_losses_model == 'power_efficiency_curve'):
        wind_farm_power_curve = (
            power_curves.wake_losses_to_power_curve(
                wind_farm_power_curve['wind_speed'].values,
                wind_farm_power_curve['value'].values,
                wake_losses_model=wake_losses_model,
                wind_farm_efficiency=self.efficiency))
    self.power_curve = wind_farm_power_curve
    return self
[ "def", "assign_power_curve", "(", "self", ",", "wake_losses_model", "=", "'power_efficiency_curve'", ",", "smoothing", "=", "False", ",", "block_width", "=", "0.5", ",", "standard_deviation_method", "=", "'turbulence_intensity'", ",", "smoothing_order", "=", "'wind_farm_power_curves'", ",", "turbulence_intensity", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Check if all wind turbines have a power curve as attribute", "for", "item", "in", "self", ".", "wind_turbine_fleet", ":", "if", "item", "[", "'wind_turbine'", "]", ".", "power_curve", "is", "None", ":", "raise", "ValueError", "(", "\"For an aggregated wind farm power curve \"", "+", "\"each wind turbine needs a power curve \"", "+", "\"but `power_curve` of wind turbine \"", "+", "\"{} is {}.\"", ".", "format", "(", "item", "[", "'wind_turbine'", "]", ".", "name", "if", "item", "[", "'wind_turbine'", "]", ".", "name", "else", "''", ",", "item", "[", "'wind_turbine'", "]", ".", "power_curve", ")", ")", "# Initialize data frame for power curve values", "df", "=", "pd", ".", "DataFrame", "(", ")", "for", "turbine_type_dict", "in", "self", ".", "wind_turbine_fleet", ":", "# Check if all needed parameters are available and/or assign them", "if", "smoothing", ":", "if", "(", "standard_deviation_method", "==", "'turbulence_intensity'", "and", "turbulence_intensity", "is", "None", ")", ":", "if", "'roughness_length'", "in", "kwargs", ":", "# Calculate turbulence intensity and write to kwargs", "turbulence_intensity", "=", "(", "tools", ".", "estimate_turbulence_intensity", "(", "turbine_type_dict", "[", "'wind_turbine'", "]", ".", "hub_height", ",", "kwargs", "[", "'roughness_length'", "]", ")", ")", "kwargs", "[", "'turbulence_intensity'", "]", "=", "turbulence_intensity", "else", ":", "raise", "ValueError", "(", "\"`roughness_length` must be defined for using \"", "+", "\"'turbulence_intensity' as \"", "+", "\"`standard_deviation_method` if \"", "+", "\"`turbulence_intensity` is not given\"", ")", "if", "wake_losses_model", "is", "not", "None", ":", "if", "self", ".", "efficiency", "is", "None", ":", "raise", "KeyError", "(", "\"`efficiency` is needed if \"", "+", "\"`wake_losses_model´ is '{0}', but \".", "f", "ormat(", "", "wake_losses_model", ")", "+", "\"`efficiency` of wind farm {0} is {1}.\"", ".", "format", "(", "self", ".", "name", "if", "self", ".", "name", "else", "''", ",", "self", ".", "efficiency", ")", ")", "# Get original power curve", "power_curve", "=", "pd", ".", "DataFrame", "(", "turbine_type_dict", "[", "'wind_turbine'", "]", ".", "power_curve", ")", "# Editions to the power curves before the summation", "if", "smoothing", "and", "smoothing_order", "==", "'turbine_power_curves'", ":", "power_curve", "=", "power_curves", ".", "smooth_power_curve", "(", "power_curve", "[", "'wind_speed'", "]", ",", "power_curve", "[", "'value'", "]", ",", "standard_deviation_method", "=", "standard_deviation_method", ",", "block_width", "=", "block_width", ",", "*", "*", "kwargs", ")", "else", ":", "# Add value zero to start and end of curve as otherwise there", "# can occure problems during the aggregation", "if", "power_curve", ".", "iloc", "[", "0", "]", "[", "'wind_speed'", "]", "!=", "0.0", ":", "power_curve", "=", "pd", ".", "concat", "(", "[", "pd", ".", "DataFrame", "(", "data", "=", "{", "'value'", ":", "[", "0.0", "]", ",", "'wind_speed'", ":", "[", "0.0", "]", "}", ")", ",", "power_curve", "]", ",", "sort", "=", "True", ")", "if", "power_curve", ".", "iloc", "[", "-", "1", "]", "[", "'value'", "]", "!=", "0.0", ":", 
"power_curve", "=", "pd", ".", "concat", "(", "[", "power_curve", ",", "pd", ".", "DataFrame", "(", "data", "=", "{", "'value'", ":", "[", "0.0", "]", ",", "'wind_speed'", ":", "[", "power_curve", "[", "'wind_speed'", "]", ".", "loc", "[", "power_curve", ".", "index", "[", "-", "1", "]", "]", "+", "0.5", "]", "}", ")", "]", ",", "sort", "=", "True", ")", "# Add power curves of all turbine types to data frame", "# (multiplied by turbine amount)", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "pd", ".", "DataFrame", "(", "power_curve", ".", "set_index", "(", "[", "'wind_speed'", "]", ")", "*", "turbine_type_dict", "[", "'number_of_turbines'", "]", ")", "]", ",", "axis", "=", "1", ")", "# Aggregate all power curves", "wind_farm_power_curve", "=", "pd", ".", "DataFrame", "(", "df", ".", "interpolate", "(", "method", "=", "'index'", ")", ".", "sum", "(", "axis", "=", "1", ")", ")", "wind_farm_power_curve", ".", "columns", "=", "[", "'value'", "]", "wind_farm_power_curve", ".", "reset_index", "(", "'wind_speed'", ",", "inplace", "=", "True", ")", "# Editions to the power curve after the summation", "if", "smoothing", "and", "smoothing_order", "==", "'wind_farm_power_curves'", ":", "wind_farm_power_curve", "=", "power_curves", ".", "smooth_power_curve", "(", "wind_farm_power_curve", "[", "'wind_speed'", "]", ",", "wind_farm_power_curve", "[", "'value'", "]", ",", "standard_deviation_method", "=", "standard_deviation_method", ",", "block_width", "=", "block_width", ",", "*", "*", "kwargs", ")", "if", "(", "wake_losses_model", "==", "'constant_efficiency'", "or", "wake_losses_model", "==", "'power_efficiency_curve'", ")", ":", "wind_farm_power_curve", "=", "(", "power_curves", ".", "wake_losses_to_power_curve", "(", "wind_farm_power_curve", "[", "'wind_speed'", "]", ".", "values", ",", "wind_farm_power_curve", "[", "'value'", "]", ".", "values", ",", "wake_losses_model", "=", "wake_losses_model", ",", "wind_farm_efficiency", "=", "self", ".", "efficiency", ")", ")", "self", ".", "power_curve", "=", "wind_farm_power_curve", "return", "self" ]
r""" Calculates the power curve of a wind farm. The wind farm power curve is calculated by aggregating the power curves of all wind turbines in the wind farm. Depending on the parameters the power curves are smoothed (before or after the aggregation) and/or a wind farm efficiency (power efficiency curve or constant efficiency) is applied after the aggregation. After the calculations the power curve is assigned to the wind farm object. Parameters ---------- wake_losses_model : string Defines the method for taking wake losses within the farm into consideration. Options: 'power_efficiency_curve', 'constant_efficiency' or None. Default: 'power_efficiency_curve'. smoothing : boolean If True the power curves will be smoothed before or after the aggregation of power curves depending on `smoothing_order`. Default: False. block_width : float Width between the wind speeds in the sum of the equation in :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5. standard_deviation_method : string Method for calculating the standard deviation for the Gauss distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'. Default: 'turbulence_intensity'. smoothing_order : string Defines when the smoothing takes place if `smoothing` is True. Options: 'turbine_power_curves' (to the single turbine power curves), 'wind_farm_power_curves'. Default: 'wind_farm_power_curves'. turbulence_intensity : float Turbulence intensity at hub height of the wind farm for power curve smoothing with 'turbulence_intensity' method. Can be calculated from `roughness_length` instead. Default: None. Other Parameters ---------------- roughness_length : float, optional. Roughness length. If `standard_deviation_method` is 'turbulence_intensity' and `turbulence_intensity` is not given the turbulence intensity is calculated via the roughness length. Returns ------- self
[ "r", "Calculates", "the", "power", "curve", "of", "a", "wind", "farm", "." ]
python
train
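A minimal pandas sketch of the alignment-and-summation step described in the record above. The turbine curves, counts, and wind speeds here are made up for illustration; this is not the windpowerlib API, only the core idea of scaling each curve by its turbine count, aligning on a shared wind-speed index, interpolating, and summing.

import pandas as pd

# Hypothetical power curves for two turbine types (wind_speed in m/s, value in W).
curve_a = pd.DataFrame({'wind_speed': [0.0, 5.0, 10.0, 15.0],
                        'value': [0.0, 100e3, 900e3, 2000e3]})
curve_b = pd.DataFrame({'wind_speed': [0.0, 4.0, 12.0, 16.0],
                        'value': [0.0, 80e3, 1500e3, 3000e3]})

# Scale each curve by its turbine count and place them side by side on a shared index.
df = pd.concat([curve_a.set_index('wind_speed') * 3,   # 3 turbines of type A
                curve_b.set_index('wind_speed') * 2],  # 2 turbines of type B
               axis=1)

# Fill the wind speeds one curve lacks by index interpolation, then sum across columns.
farm_curve = df.interpolate(method='index').sum(axis=1).to_frame('value').reset_index()
print(farm_curve)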
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2159-L2184
def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func)
[ "def", "zipWithIndex", "(", "self", ")", ":", "starts", "=", "[", "0", "]", "if", "self", ".", "getNumPartitions", "(", ")", ">", "1", ":", "nums", "=", "self", ".", "mapPartitions", "(", "lambda", "it", ":", "[", "sum", "(", "1", "for", "i", "in", "it", ")", "]", ")", ".", "collect", "(", ")", "for", "i", "in", "range", "(", "len", "(", "nums", ")", "-", "1", ")", ":", "starts", ".", "append", "(", "starts", "[", "-", "1", "]", "+", "nums", "[", "i", "]", ")", "def", "func", "(", "k", ",", "it", ")", ":", "for", "i", ",", "v", "in", "enumerate", "(", "it", ",", "starts", "[", "k", "]", ")", ":", "yield", "v", ",", "i", "return", "self", ".", "mapPartitionsWithIndex", "(", "func", ")" ]
Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
[ "Zips", "this", "RDD", "with", "its", "element", "indices", "." ]
python
train
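The offset bookkeeping in zipWithIndex above can be illustrated without Spark; the partition contents below are made up, but the cumulative-start computation mirrors the method's starts list.

# Element counts per partition become cumulative start indices, and each partition is
# then enumerated from its own start.
partitions = [["a"], ["b", "c"], ["d"]]

starts = [0]
for count in (len(part) for part in partitions[:-1]):
    starts.append(starts[-1] + count)

zipped = [(value, index)
          for part_id, part in enumerate(partitions)
          for index, value in enumerate(part, starts[part_id])]
print(zipped)  # [('a', 0), ('b', 1), ('c', 2), ('d', 3)]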
VisTrails/tej
tej/submission.py
https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L209-L218
def _connect(self): """Connects via SSH. """ ssh = self._ssh_client() logger.debug("Connecting with %s", ', '.join('%s=%r' % (k, v if k != "password" else "***") for k, v in iteritems(self.destination))) ssh.connect(**self.destination) logger.debug("Connected to %s", self.destination['hostname']) self._ssh = ssh
[ "def", "_connect", "(", "self", ")", ":", "ssh", "=", "self", ".", "_ssh_client", "(", ")", "logger", ".", "debug", "(", "\"Connecting with %s\"", ",", "', '", ".", "join", "(", "'%s=%r'", "%", "(", "k", ",", "v", "if", "k", "!=", "\"password\"", "else", "\"***\"", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "destination", ")", ")", ")", "ssh", ".", "connect", "(", "*", "*", "self", ".", "destination", ")", "logger", ".", "debug", "(", "\"Connected to %s\"", ",", "self", ".", "destination", "[", "'hostname'", "]", ")", "self", ".", "_ssh", "=", "ssh" ]
Connects via SSH.
[ "Connects", "via", "SSH", "." ]
python
train
programa-stic/barf-project
barf/analysis/gadgets/finder.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/gadgets/finder.py#L307-L325
def _build_gadgets_rec(self, gadget_tree_root): """Build a gadgets from a gadgets tree. """ root = gadget_tree_root.get_root() children = gadget_tree_root.get_children() node_list = [] root_gadget_ins = root if not children: node_list += [[root_gadget_ins]] else: for child in children: node_list_rec = self._build_gadgets_rec(child) node_list += [n + [root_gadget_ins] for n in node_list_rec] return node_list
[ "def", "_build_gadgets_rec", "(", "self", ",", "gadget_tree_root", ")", ":", "root", "=", "gadget_tree_root", ".", "get_root", "(", ")", "children", "=", "gadget_tree_root", ".", "get_children", "(", ")", "node_list", "=", "[", "]", "root_gadget_ins", "=", "root", "if", "not", "children", ":", "node_list", "+=", "[", "[", "root_gadget_ins", "]", "]", "else", ":", "for", "child", "in", "children", ":", "node_list_rec", "=", "self", ".", "_build_gadgets_rec", "(", "child", ")", "node_list", "+=", "[", "n", "+", "[", "root_gadget_ins", "]", "for", "n", "in", "node_list_rec", "]", "return", "node_list" ]
Build a gadgets from a gadgets tree.
[ "Build", "a", "gadgets", "from", "a", "gadgets", "tree", "." ]
python
train
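A self-contained toy version of the recursion in _build_gadgets_rec above; the Node class and the instruction strings are hypothetical stand-ins, not the barf API.

class Node:
    # Minimal stand-in exposing the same get_root()/get_children() shape as the gadget tree.
    def __init__(self, root, children=()):
        self._root, self._children = root, list(children)

    def get_root(self):
        return self._root

    def get_children(self):
        return self._children


def build_rec(node):
    # Leaves start a path; each parent's instruction is appended after its children's paths.
    root_ins = node.get_root()
    if not node.get_children():
        return [[root_ins]]
    return [path + [root_ins]
            for child in node.get_children()
            for path in build_rec(child)]


tree = Node("ret", [Node("pop rbx"), Node("pop rcx", [Node("mov rax, 1")])])
print(build_rec(tree))
# [['pop rbx', 'ret'], ['mov rax, 1', 'pop rcx', 'ret']]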
edx/i18n-tools
i18n/transifex.py
https://github.com/edx/i18n-tools/blob/99b20c17d1a0ca07a8839f33e0e9068248a581e5/i18n/transifex.py#L91-L100
def pull_all_rtl(configuration): """ Pulls all translations - reviewed or not - for RTL languages """ print("Pulling all translated RTL languages from transifex...") for lang in configuration.rtl_langs: print('rm -rf conf/locale/' + lang) execute('rm -rf conf/locale/' + lang) execute('tx pull -l ' + lang) clean_translated_locales(configuration, langs=configuration.rtl_langs)
[ "def", "pull_all_rtl", "(", "configuration", ")", ":", "print", "(", "\"Pulling all translated RTL languages from transifex...\"", ")", "for", "lang", "in", "configuration", ".", "rtl_langs", ":", "print", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'rm -rf conf/locale/'", "+", "lang", ")", "execute", "(", "'tx pull -l '", "+", "lang", ")", "clean_translated_locales", "(", "configuration", ",", "langs", "=", "configuration", ".", "rtl_langs", ")" ]
Pulls all translations - reviewed or not - for RTL languages
[ "Pulls", "all", "translations", "-", "reviewed", "or", "not", "-", "for", "RTL", "languages" ]
python
train
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L1050-L1075
def import_from(self, from_loc, module_name, import_loc, names): """ (2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) """ (dots_loc, dots_count), dotted_name_opt = module_name module_loc = module = None if dotted_name_opt: module_loc, module = dotted_name_opt lparen_loc, names, rparen_loc = names loc = from_loc.join(names[-1].loc) if rparen_loc: loc = loc.join(rparen_loc) if module == "__future__": self.add_flags([x.name for x in names]) return ast.ImportFrom(names=names, module=module, level=dots_count, keyword_loc=from_loc, dots_loc=dots_loc, module_loc=module_loc, import_loc=import_loc, lparen_loc=lparen_loc, rparen_loc=rparen_loc, loc=loc)
[ "def", "import_from", "(", "self", ",", "from_loc", ",", "module_name", ",", "import_loc", ",", "names", ")", ":", "(", "dots_loc", ",", "dots_count", ")", ",", "dotted_name_opt", "=", "module_name", "module_loc", "=", "module", "=", "None", "if", "dotted_name_opt", ":", "module_loc", ",", "module", "=", "dotted_name_opt", "lparen_loc", ",", "names", ",", "rparen_loc", "=", "names", "loc", "=", "from_loc", ".", "join", "(", "names", "[", "-", "1", "]", ".", "loc", ")", "if", "rparen_loc", ":", "loc", "=", "loc", ".", "join", "(", "rparen_loc", ")", "if", "module", "==", "\"__future__\"", ":", "self", ".", "add_flags", "(", "[", "x", ".", "name", "for", "x", "in", "names", "]", ")", "return", "ast", ".", "ImportFrom", "(", "names", "=", "names", ",", "module", "=", "module", ",", "level", "=", "dots_count", ",", "keyword_loc", "=", "from_loc", ",", "dots_loc", "=", "dots_loc", ",", "module_loc", "=", "module_loc", ",", "import_loc", "=", "import_loc", ",", "lparen_loc", "=", "lparen_loc", ",", "rparen_loc", "=", "rparen_loc", ",", "loc", "=", "loc", ")" ]
(2.6, 2.7) import_from: ('from' ('.'* dotted_name | '.'+) 'import' ('*' | '(' import_as_names ')' | import_as_names)) (3.0-) # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names))
[ "(", "2", ".", "6", "2", ".", "7", ")", "import_from", ":", "(", "from", "(", ".", "*", "dotted_name", "|", ".", "+", ")", "import", "(", "*", "|", "(", "import_as_names", ")", "|", "import_as_names", "))", "(", "3", ".", "0", "-", ")", "#", "note", "below", ":", "the", "(", ".", "|", "...", ")", "is", "necessary", "because", "...", "is", "tokenized", "as", "ELLIPSIS", "import_from", ":", "(", "from", "((", ".", "|", "...", ")", "*", "dotted_name", "|", "(", ".", "|", "...", ")", "+", ")", "import", "(", "*", "|", "(", "import_as_names", ")", "|", "import_as_names", "))" ]
python
valid
hazelcast/hazelcast-python-client
hazelcast/proxy/transactional_set.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/transactional_set.py#L21-L29
def remove(self, item): """ Transactional implementation of :func:`Set.remove(item) <hazelcast.proxy.set.Set.remove>` :param item: (object), the specified item to be deleted. :return: (bool), ``true`` if item is removed successfully, ``false`` otherwise. """ check_not_none(item, "item can't be none") return self._encode_invoke(transactional_set_remove_codec, item=self._to_data(item))

[ "def", "remove", "(", "self", ",", "item", ")", ":", "check_not_none", "(", "item", ",", "\"item can't be none\"", ")", "return", "self", ".", "_encode_invoke", "(", "transactional_set_remove_codec", ",", "item", "=", "self", ".", "_to_data", "(", "item", ")", ")" ]
Transactional implementation of :func:`Set.remove(item) <hazelcast.proxy.set.Set.remove>` :param item: (object), the specified item to be deleted. :return: (bool), ``true`` if item is removed successfully, ``false`` otherwise.
[ "Transactional", "implementation", "of", ":", "func", ":", "Set", ".", "remove", "(", "item", ")", "<hazelcast", ".", "proxy", ".", "set", ".", "Set", ".", "remove", ">" ]
python
train
google/transitfeed
transitfeed/util.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/util.py#L549-L562
def writerow(self, row): """Write row to the csv file. Any unicode strings in row are encoded as utf-8.""" encoded_row = [] for s in row: if isinstance(s, unicode): encoded_row.append(s.encode("utf-8")) else: encoded_row.append(s) try: self.writer.writerow(encoded_row) except Exception as e: print('error writing %s as %s' % (row, encoded_row)) raise e
[ "def", "writerow", "(", "self", ",", "row", ")", ":", "encoded_row", "=", "[", "]", "for", "s", "in", "row", ":", "if", "isinstance", "(", "s", ",", "unicode", ")", ":", "encoded_row", ".", "append", "(", "s", ".", "encode", "(", "\"utf-8\"", ")", ")", "else", ":", "encoded_row", ".", "append", "(", "s", ")", "try", ":", "self", ".", "writer", ".", "writerow", "(", "encoded_row", ")", "except", "Exception", "as", "e", ":", "print", "(", "'error writing %s as %s'", "%", "(", "row", ",", "encoded_row", ")", ")", "raise", "e" ]
Write row to the csv file. Any unicode strings in row are encoded as utf-8.
[ "Write", "row", "to", "the", "csv", "file", ".", "Any", "unicode", "strings", "in", "row", "are", "encoded", "as", "utf", "-", "8", "." ]
python
train
google/grr
grr/server/grr_response_server/rdfvalues/objects.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/rdfvalues/objects.py#L428-L443
def GetParent(self): """Constructs a path info corresponding to the parent of current path. The root path (represented by an empty list of components, corresponds to `/` on Unix-like systems) does not have a parent. Returns: Instance of `rdf_objects.PathInfo` or `None` if parent does not exist. """ if self.root: return None return PathInfo( components=self.components[:-1], path_type=self.path_type, directory=True)
[ "def", "GetParent", "(", "self", ")", ":", "if", "self", ".", "root", ":", "return", "None", "return", "PathInfo", "(", "components", "=", "self", ".", "components", "[", ":", "-", "1", "]", ",", "path_type", "=", "self", ".", "path_type", ",", "directory", "=", "True", ")" ]
Constructs a path info corresponding to the parent of current path. The root path (represented by an empty list of components, corresponds to `/` on Unix-like systems) does not have a parent. Returns: Instance of `rdf_objects.PathInfo` or `None` if parent does not exist.
[ "Constructs", "a", "path", "info", "corresponding", "to", "the", "parent", "of", "current", "path", "." ]
python
train
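The parent semantics described above reduce to dropping the last path component, with the root having no parent; a plain-tuple illustration (not the rdf_objects.PathInfo API):

def get_parent(components):
    # The root path (no components) has no parent.
    if not components:
        return None
    return components[:-1]

print(get_parent(("home", "user", "notes.txt")))  # ('home', 'user')
print(get_parent(()))                             # None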
squaresLab/BugZoo
bugzoo/mgr/container.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/container.py#L446-L484
def copy_to(self, container: Container, fn_host: str, fn_container: str ) -> None: """ Copies a file from the host machine to a specified location inside a container. Raises: FileNotFound: if the host file wasn't found. subprocess.CalledProcessError: if the file could not be copied to the container. """ logger.debug("Copying file to container, %s: %s -> %s", container.uid, fn_host, fn_container) if not os.path.exists(fn_host): logger.error("Failed to copy file [%s] to [%s] in container [%s]: not found.", # noqa: pycodestyle fn_host, fn_container, container.uid) raise FileNotFound(fn_host) cmd = "docker cp '{}' '{}:{}'".format(fn_host, container.id, fn_container) try: subprocess.check_output(cmd, shell=True) logger.debug("Copied file to container, %s: %s -> %s", container.uid, fn_host, fn_container) r = self.command(container, "sudo chown $(whoami) '{}'".format(fn_container)) if r.code != 0: m = "failed to update permissions for container file [{}] (exit code: {}): {}" # noqa: pycodestyle m = m.format(fn_container, r.code, r.output) raise BugZooException(m) # TODO implement error handling except subprocess.CalledProcessError: logger.exception("Failed to copy file to container, %s: %s -> %s", container.uid, fn_host, fn_container) raise
[ "def", "copy_to", "(", "self", ",", "container", ":", "Container", ",", "fn_host", ":", "str", ",", "fn_container", ":", "str", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"Copying file to container, %s: %s -> %s\"", ",", "container", ".", "uid", ",", "fn_host", ",", "fn_container", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fn_host", ")", ":", "logger", ".", "error", "(", "\"Failed to copy file [%s] to [%s] in container [%s]: not found.\"", ",", "# noqa: pycodestyle", "fn_host", ",", "fn_container", ",", "container", ".", "uid", ")", "raise", "FileNotFound", "(", "fn_host", ")", "cmd", "=", "\"docker cp '{}' '{}:{}'\"", ".", "format", "(", "fn_host", ",", "container", ".", "id", ",", "fn_container", ")", "try", ":", "subprocess", ".", "check_output", "(", "cmd", ",", "shell", "=", "True", ")", "logger", ".", "debug", "(", "\"Copied file to container, %s: %s -> %s\"", ",", "container", ".", "uid", ",", "fn_host", ",", "fn_container", ")", "r", "=", "self", ".", "command", "(", "container", ",", "\"sudo chown $(whoami) '{}'\"", ".", "format", "(", "fn_container", ")", ")", "if", "r", ".", "code", "!=", "0", ":", "m", "=", "\"failed to update permissions for container file [{}] (exit code: {}): {}\"", "# noqa: pycodestyle", "m", "=", "m", ".", "format", "(", "fn_container", ",", "r", ".", "code", ",", "r", ".", "output", ")", "raise", "BugZooException", "(", "m", ")", "# TODO implement error handling", "except", "subprocess", ".", "CalledProcessError", ":", "logger", ".", "exception", "(", "\"Failed to copy file to container, %s: %s -> %s\"", ",", "container", ".", "uid", ",", "fn_host", ",", "fn_container", ")", "raise" ]
Copies a file from the host machine to a specified location inside a container. Raises: FileNotFound: if the host file wasn't found. subprocess.CalledProcessError: if the file could not be copied to the container.
[ "Copies", "a", "file", "from", "the", "host", "machine", "to", "a", "specified", "location", "inside", "a", "container", "." ]
python
train
fananimi/pyzk
zk/base.py
https://github.com/fananimi/pyzk/blob/1a765d616526efdcb4c9adfcc9b1d10f6ed8b938/zk/base.py#L639-L650
def free_data(self): """ clear buffer :return: bool """ command = const.CMD_FREE_DATA cmd_response = self.__send_command(command) if cmd_response.get('status'): return True else: raise ZKErrorResponse("can't free data")
[ "def", "free_data", "(", "self", ")", ":", "command", "=", "const", ".", "CMD_FREE_DATA", "cmd_response", "=", "self", ".", "__send_command", "(", "command", ")", "if", "cmd_response", ".", "get", "(", "'status'", ")", ":", "return", "True", "else", ":", "raise", "ZKErrorResponse", "(", "\"can't free data\"", ")" ]
clear buffer :return: bool
[ "clear", "buffer" ]
python
train
MonashBI/arcana
arcana/pipeline/base.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L783-L825
def _make_outputnode(self, frequency): """ Generates an output node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the output node to retrieve """ # Check to see whether there are any outputs for the given frequency outputs = list(self.frequency_outputs(frequency)) if not outputs: raise ArcanaError( "No outputs to '{}' pipeline for requested freqency '{}'" .format(self.name, frequency)) # Get list of output names for the requested frequency, addding fields # to hold iterator IDs output_names = [o.name for o in outputs] # Generate output node and connect it to appropriate nodes outputnode = self.add('{}_outputnode'.format(frequency), IdentityInterface(fields=output_names)) # Loop through list of nodes connected to study data specs and # connect them to the newly created output node for output in outputs: # @ReservedAssignment (node, node_out, format, # @ReservedAssignment @IgnorePep8 conv_kwargs) = self._output_conns[output.name] # If fileset formats differ between study and pipeline # outputs create converter node (if one hasn't been already) # and connect output to that before connecting to outputnode if self.requires_conversion(output, format): conv = output.format.converter_from(format, **conv_kwargs) node = self.add( 'conv_{}_from_{}_format'.format(output.name, format.name), conv.interface, inputs={conv.input: (node, node_out)}, requirements=conv.requirements, mem_gb=conv.mem_gb, wall_time=conv.wall_time) node_out = conv.output self.connect(node, node_out, outputnode, output.name) return outputnode
[ "def", "_make_outputnode", "(", "self", ",", "frequency", ")", ":", "# Check to see whether there are any outputs for the given frequency", "outputs", "=", "list", "(", "self", ".", "frequency_outputs", "(", "frequency", ")", ")", "if", "not", "outputs", ":", "raise", "ArcanaError", "(", "\"No outputs to '{}' pipeline for requested freqency '{}'\"", ".", "format", "(", "self", ".", "name", ",", "frequency", ")", ")", "# Get list of output names for the requested frequency, addding fields", "# to hold iterator IDs", "output_names", "=", "[", "o", ".", "name", "for", "o", "in", "outputs", "]", "# Generate output node and connect it to appropriate nodes", "outputnode", "=", "self", ".", "add", "(", "'{}_outputnode'", ".", "format", "(", "frequency", ")", ",", "IdentityInterface", "(", "fields", "=", "output_names", ")", ")", "# Loop through list of nodes connected to study data specs and", "# connect them to the newly created output node", "for", "output", "in", "outputs", ":", "# @ReservedAssignment", "(", "node", ",", "node_out", ",", "format", ",", "# @ReservedAssignment @IgnorePep8", "conv_kwargs", ")", "=", "self", ".", "_output_conns", "[", "output", ".", "name", "]", "# If fileset formats differ between study and pipeline", "# outputs create converter node (if one hasn't been already)", "# and connect output to that before connecting to outputnode", "if", "self", ".", "requires_conversion", "(", "output", ",", "format", ")", ":", "conv", "=", "output", ".", "format", ".", "converter_from", "(", "format", ",", "*", "*", "conv_kwargs", ")", "node", "=", "self", ".", "add", "(", "'conv_{}_from_{}_format'", ".", "format", "(", "output", ".", "name", ",", "format", ".", "name", ")", ",", "conv", ".", "interface", ",", "inputs", "=", "{", "conv", ".", "input", ":", "(", "node", ",", "node_out", ")", "}", ",", "requirements", "=", "conv", ".", "requirements", ",", "mem_gb", "=", "conv", ".", "mem_gb", ",", "wall_time", "=", "conv", ".", "wall_time", ")", "node_out", "=", "conv", ".", "output", "self", ".", "connect", "(", "node", ",", "node_out", ",", "outputnode", ",", "output", ".", "name", ")", "return", "outputnode" ]
Generates an output node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the output node to retrieve
[ "Generates", "an", "output", "node", "for", "the", "given", "frequency", ".", "It", "also", "adds", "implicit", "file", "format", "conversion", "nodes", "to", "the", "pipeline", "." ]
python
train
aws/aws-encryption-sdk-python
examples/src/basic_encryption.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/examples/src/basic_encryption.py#L17-L47
def cycle_string(key_arn, source_plaintext, botocore_session=None): """Encrypts and then decrypts a string under a KMS customer master key (CMK). :param str key_arn: Amazon Resource Name (ARN) of the KMS CMK :param bytes source_plaintext: Data to encrypt :param botocore_session: existing botocore session instance :type botocore_session: botocore.session.Session """ # Create a KMS master key provider kms_kwargs = dict(key_ids=[key_arn]) if botocore_session is not None: kms_kwargs["botocore_session"] = botocore_session master_key_provider = aws_encryption_sdk.KMSMasterKeyProvider(**kms_kwargs) # Encrypt the plaintext source data ciphertext, encryptor_header = aws_encryption_sdk.encrypt(source=source_plaintext, key_provider=master_key_provider) # Decrypt the ciphertext cycled_plaintext, decrypted_header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=master_key_provider) # Verify that the "cycled" (encrypted, then decrypted) plaintext is identical to the source plaintext assert cycled_plaintext == source_plaintext # Verify that the encryption context used in the decrypt operation includes all key pairs from # the encrypt operation. (The SDK can add pairs, so don't require an exact match.) # # In production, always use a meaningful encryption context. In this sample, we omit the # encryption context (no key pairs). assert all( pair in decrypted_header.encryption_context.items() for pair in encryptor_header.encryption_context.items() )
[ "def", "cycle_string", "(", "key_arn", ",", "source_plaintext", ",", "botocore_session", "=", "None", ")", ":", "# Create a KMS master key provider", "kms_kwargs", "=", "dict", "(", "key_ids", "=", "[", "key_arn", "]", ")", "if", "botocore_session", "is", "not", "None", ":", "kms_kwargs", "[", "\"botocore_session\"", "]", "=", "botocore_session", "master_key_provider", "=", "aws_encryption_sdk", ".", "KMSMasterKeyProvider", "(", "*", "*", "kms_kwargs", ")", "# Encrypt the plaintext source data", "ciphertext", ",", "encryptor_header", "=", "aws_encryption_sdk", ".", "encrypt", "(", "source", "=", "source_plaintext", ",", "key_provider", "=", "master_key_provider", ")", "# Decrypt the ciphertext", "cycled_plaintext", ",", "decrypted_header", "=", "aws_encryption_sdk", ".", "decrypt", "(", "source", "=", "ciphertext", ",", "key_provider", "=", "master_key_provider", ")", "# Verify that the \"cycled\" (encrypted, then decrypted) plaintext is identical to the source plaintext", "assert", "cycled_plaintext", "==", "source_plaintext", "# Verify that the encryption context used in the decrypt operation includes all key pairs from", "# the encrypt operation. (The SDK can add pairs, so don't require an exact match.)", "#", "# In production, always use a meaningful encryption context. In this sample, we omit the", "# encryption context (no key pairs).", "assert", "all", "(", "pair", "in", "decrypted_header", ".", "encryption_context", ".", "items", "(", ")", "for", "pair", "in", "encryptor_header", ".", "encryption_context", ".", "items", "(", ")", ")" ]
Encrypts and then decrypts a string under a KMS customer master key (CMK). :param str key_arn: Amazon Resource Name (ARN) of the KMS CMK :param bytes source_plaintext: Data to encrypt :param botocore_session: existing botocore session instance :type botocore_session: botocore.session.Session
[ "Encrypts", "and", "then", "decrypts", "a", "string", "under", "a", "KMS", "customer", "master", "key", "(", "CMK", ")", "." ]
python
train
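A hypothetical invocation of the cycle_string example above; the key ARN is a placeholder, and a real KMS CMK plus AWS credentials would be needed for it to actually run.

key_arn = "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"  # placeholder ARN
cycle_string(key_arn, b"hello, encryption sdk")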
eternnoir/pyTelegramBotAPI
telebot/__init__.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L613-L620
def delete_message(self, chat_id, message_id): """ Use this method to delete message. Returns True on success. :param chat_id: in which chat to delete :param message_id: which message to delete :return: API reply. """ return apihelper.delete_message(self.token, chat_id, message_id)
[ "def", "delete_message", "(", "self", ",", "chat_id", ",", "message_id", ")", ":", "return", "apihelper", ".", "delete_message", "(", "self", ".", "token", ",", "chat_id", ",", "message_id", ")" ]
Use this method to delete message. Returns True on success. :param chat_id: in which chat to delete :param message_id: which message to delete :return: API reply.
[ "Use", "this", "method", "to", "delete", "message", ".", "Returns", "True", "on", "success", ".", ":", "param", "chat_id", ":", "in", "which", "chat", "to", "delete", ":", "param", "message_id", ":", "which", "message", "to", "delete", ":", "return", ":", "API", "reply", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/utilities/kvstore_json.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/kvstore_json.py#L107-L119
def remove(self, key): """Remove a key from the data store Args: key (string): The key to remove Raises: KeyError: if the key was not found """ data = self._load_file() del data[key] self._save_file(data)
[ "def", "remove", "(", "self", ",", "key", ")", ":", "data", "=", "self", ".", "_load_file", "(", ")", "del", "data", "[", "key", "]", "self", ".", "_save_file", "(", "data", ")" ]
Remove a key from the data store Args: key (string): The key to remove Raises: KeyError: if the key was not found
[ "Remove", "a", "key", "from", "the", "data", "store" ]
python
train
pantsbuild/pants
pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py#L153-L161
def precompute(config, soups): """Return info we want to compute (and preserve) before we mutate things.""" show_toc = config.get('show_toc', {}) page = {} pantsrefs = precompute_pantsrefs(soups) for p, soup in soups.items(): title = get_title(soup) or p page[p] = PrecomputedPageInfo(title=title, show_toc=show_toc.get(p, True)) return Precomputed(page=page, pantsref=pantsrefs)
[ "def", "precompute", "(", "config", ",", "soups", ")", ":", "show_toc", "=", "config", ".", "get", "(", "'show_toc'", ",", "{", "}", ")", "page", "=", "{", "}", "pantsrefs", "=", "precompute_pantsrefs", "(", "soups", ")", "for", "p", ",", "soup", "in", "soups", ".", "items", "(", ")", ":", "title", "=", "get_title", "(", "soup", ")", "or", "p", "page", "[", "p", "]", "=", "PrecomputedPageInfo", "(", "title", "=", "title", ",", "show_toc", "=", "show_toc", ".", "get", "(", "p", ",", "True", ")", ")", "return", "Precomputed", "(", "page", "=", "page", ",", "pantsref", "=", "pantsrefs", ")" ]
Return info we want to compute (and preserve) before we mutate things.
[ "Return", "info", "we", "want", "to", "compute", "(", "and", "preserve", ")", "before", "we", "mutate", "things", "." ]
python
train
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L458-L488
def set_sampling_strategies(self, filter, strategy_and_parms): """Set a strategy for all sensors matching the filter, including unseen sensors The strategy should persist across sensor disconnect/reconnect. filter : str Filter for sensor names strategy_and_params : seq of str or str As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and parameters are as defined by the KATCP spec. As str contains the same elements in space-separated form. Returns ------- done : tornado Future Resolves when done """ sensor_list = yield self.list_sensors(filter=filter) sensor_dict = {} for sens in sensor_list: # Set the strategy on each sensor try: sensor_name = sens.object.normalised_name yield self.set_sampling_strategy(sensor_name, strategy_and_parms) sensor_dict[sensor_name] = strategy_and_parms except Exception as exc: self._logger.exception( 'Unhandled exception trying to set sensor strategies {!r} for {} ({})' .format(strategy_and_parms, sens, exc)) sensor_dict[sensor_name] = None # Otherwise, depend on self._add_sensors() to handle it from the cache when the sensor appears\ raise tornado.gen.Return(sensor_dict)
[ "def", "set_sampling_strategies", "(", "self", ",", "filter", ",", "strategy_and_parms", ")", ":", "sensor_list", "=", "yield", "self", ".", "list_sensors", "(", "filter", "=", "filter", ")", "sensor_dict", "=", "{", "}", "for", "sens", "in", "sensor_list", ":", "# Set the strategy on each sensor", "try", ":", "sensor_name", "=", "sens", ".", "object", ".", "normalised_name", "yield", "self", ".", "set_sampling_strategy", "(", "sensor_name", ",", "strategy_and_parms", ")", "sensor_dict", "[", "sensor_name", "]", "=", "strategy_and_parms", "except", "Exception", "as", "exc", ":", "self", ".", "_logger", ".", "exception", "(", "'Unhandled exception trying to set sensor strategies {!r} for {} ({})'", ".", "format", "(", "strategy_and_parms", ",", "sens", ",", "exc", ")", ")", "sensor_dict", "[", "sensor_name", "]", "=", "None", "# Otherwise, depend on self._add_sensors() to handle it from the cache when the sensor appears\\", "raise", "tornado", ".", "gen", ".", "Return", "(", "sensor_dict", ")" ]
Set a strategy for all sensors matching the filter, including unseen sensors The strategy should persist across sensor disconnect/reconnect. filter : str Filter for sensor names strategy_and_params : seq of str or str As tuple contains (<strat_name>, [<strat_parm1>, ...]) where the strategy names and parameters are as defined by the KATCP spec. As str contains the same elements in space-separated form. Returns ------- done : tornado Future Resolves when done
[ "Set", "a", "strategy", "for", "all", "sensors", "matching", "the", "filter", "including", "unseen", "sensors", "The", "strategy", "should", "persist", "across", "sensor", "disconnect", "/", "reconnect", "." ]
python
train
HiPERCAM/hcam_widgets
hcam_widgets/widgets.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L749-L755
def set(self, num): """ Sets current value to num """ if self.validate(num) is not None: self.index = self.allowed.index(num) IntegerEntry.set(self, num)
[ "def", "set", "(", "self", ",", "num", ")", ":", "if", "self", ".", "validate", "(", "num", ")", "is", "not", "None", ":", "self", ".", "index", "=", "self", ".", "allowed", ".", "index", "(", "num", ")", "IntegerEntry", ".", "set", "(", "self", ",", "num", ")" ]
Sets current value to num
[ "Sets", "current", "value", "to", "num" ]
python
train
juztin/flask-tracy
flask_tracy/base.py
https://github.com/juztin/flask-tracy/blob/8a43094f0fced3c216f7b65ad6c5c7a22c14ea25/flask_tracy/base.py#L76-L107
def _after(self, response): """Calculates the request duration, and adds a transaction ID to the header. """ # Ignore excluded routes. if getattr(request, '_tracy_exclude', False): return response duration = None if getattr(request, '_tracy_start_time', None): duration = monotonic() - request._tracy_start_time # Add Trace_ID header. trace_id = None if getattr(request, '_tracy_id', None): trace_id = request._tracy_id response.headers[trace_header_id] = trace_id # Get the invoking client. trace_client = None if getattr(request, '_tracy_client', None): trace_client = request._tracy_client # Extra log kwargs. d = {'status_code': response.status_code, 'url': request.base_url, 'client_ip': request.remote_addr, 'trace_name': trace_client, 'trace_id': trace_id, 'trace_duration': duration} logger.info(None, extra=d) return response
[ "def", "_after", "(", "self", ",", "response", ")", ":", "# Ignore excluded routes.", "if", "getattr", "(", "request", ",", "'_tracy_exclude'", ",", "False", ")", ":", "return", "response", "duration", "=", "None", "if", "getattr", "(", "request", ",", "'_tracy_start_time'", ",", "None", ")", ":", "duration", "=", "monotonic", "(", ")", "-", "request", ".", "_tracy_start_time", "# Add Trace_ID header.", "trace_id", "=", "None", "if", "getattr", "(", "request", ",", "'_tracy_id'", ",", "None", ")", ":", "trace_id", "=", "request", ".", "_tracy_id", "response", ".", "headers", "[", "trace_header_id", "]", "=", "trace_id", "# Get the invoking client.", "trace_client", "=", "None", "if", "getattr", "(", "request", ",", "'_tracy_client'", ",", "None", ")", ":", "trace_client", "=", "request", ".", "_tracy_client", "# Extra log kwargs.", "d", "=", "{", "'status_code'", ":", "response", ".", "status_code", ",", "'url'", ":", "request", ".", "base_url", ",", "'client_ip'", ":", "request", ".", "remote_addr", ",", "'trace_name'", ":", "trace_client", ",", "'trace_id'", ":", "trace_id", ",", "'trace_duration'", ":", "duration", "}", "logger", ".", "info", "(", "None", ",", "extra", "=", "d", ")", "return", "response" ]
Calculates the request duration, and adds a transaction ID to the header.
[ "Calculates", "the", "request", "duration", "and", "adds", "a", "transaction", "ID", "to", "the", "header", "." ]
python
valid
rehandalal/therapist
therapist/utils/filesystem.py
https://github.com/rehandalal/therapist/blob/1995a7e396eea2ec8685bb32a779a4110b459b1f/therapist/utils/filesystem.py#L14-L23
def list_files(path): """Recursively collects a list of files at a path.""" files = [] if os.path.isdir(path): for stats in os.walk(path): for f in stats[2]: files.append(os.path.join(stats[0], f)) elif os.path.isfile(path): files = [path] return files
[ "def", "list_files", "(", "path", ")", ":", "files", "=", "[", "]", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "stats", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "f", "in", "stats", "[", "2", "]", ":", "files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "stats", "[", "0", "]", ",", "f", ")", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "files", "=", "[", "path", "]", "return", "files" ]
Recursively collects a list of files at a path.
[ "Recursively", "collects", "a", "list", "of", "files", "at", "a", "path", "." ]
python
train
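A small usage sketch for list_files above, assuming the function is in scope; it builds a throwaway directory tree and lists it recursively.

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
for relative in ("a.txt", os.path.join("sub", "b.txt")):
    with open(os.path.join(root, relative), "w") as handle:
        handle.write("x")

print(sorted(list_files(root)))                  # both files, found recursively
print(list_files(os.path.join(root, "a.txt")))   # a single file comes back as a one-item list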
pyopenapi/pyswagger
pyswagger/spec/base.py
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/spec/base.py#L217-L236
def _assign_parent(self, ctx): """ parent assignment, internal usage only """ def _assign(cls, _, obj): if obj == None: return if cls.is_produced(obj): if isinstance(obj, BaseObj): obj._parent__ = self else: raise ValueError('Object is not instance of {0} but {1}'.format(cls.__swagger_ref_object__.__name__, obj.__class__.__name__)) # set self as childrent's parent for name, (ct, ctx) in six.iteritems(ctx.__swagger_child__): obj = getattr(self, name) if obj == None: continue container_apply(ct, obj, functools.partial(_assign, ctx))
[ "def", "_assign_parent", "(", "self", ",", "ctx", ")", ":", "def", "_assign", "(", "cls", ",", "_", ",", "obj", ")", ":", "if", "obj", "==", "None", ":", "return", "if", "cls", ".", "is_produced", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "BaseObj", ")", ":", "obj", ".", "_parent__", "=", "self", "else", ":", "raise", "ValueError", "(", "'Object is not instance of {0} but {1}'", ".", "format", "(", "cls", ".", "__swagger_ref_object__", ".", "__name__", ",", "obj", ".", "__class__", ".", "__name__", ")", ")", "# set self as childrent's parent", "for", "name", ",", "(", "ct", ",", "ctx", ")", "in", "six", ".", "iteritems", "(", "ctx", ".", "__swagger_child__", ")", ":", "obj", "=", "getattr", "(", "self", ",", "name", ")", "if", "obj", "==", "None", ":", "continue", "container_apply", "(", "ct", ",", "obj", ",", "functools", ".", "partial", "(", "_assign", ",", "ctx", ")", ")" ]
parent assignment, internal usage only
[ "parent", "assignment", "internal", "usage", "only" ]
python
train
opereto/pyopereto
pyopereto/client.py
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L1148-L1159
def get_process_rca(self, pid=None): ''' get_process_rca(self, pid=None) Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process ''' pid = self._get_pid(pid) return self._call_rest_api('get', '/processes/'+pid+'/rca', error='Failed to fetch process information')
[ "def", "get_process_rca", "(", "self", ",", "pid", "=", "None", ")", ":", "pid", "=", "self", ".", "_get_pid", "(", "pid", ")", "return", "self", ".", "_call_rest_api", "(", "'get'", ",", "'/processes/'", "+", "pid", "+", "'/rca'", ",", "error", "=", "'Failed to fetch process information'", ")" ]
get_process_rca(self, pid=None) Get the RCA tree of a given failed process. The RCA tree contains all failed child processes that caused the failure of the given process. :Parameters: * *pid* (`string`) -- Identifier of an existing process
[ "get_process_rca", "(", "self", "pid", "=", "None", ")" ]
python
train
spry-group/python-vultr
vultr/v1_server.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_server.py#L63-L74
def halt(self, subid, params=None): ''' /v1/server/halt POST - account Halt a virtual machine. This is a hard power off (basically, unplugging the machine). The data on the machine will not be modified, and you will still be billed for the machine. To completely delete a machine, see v1/server/destroy Link: https://www.vultr.com/api/#server_halt ''' params = update_params(params, {'SUBID': subid}) return self.request('/v1/server/halt', params, 'POST')
[ "def", "halt", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/server/halt'", ",", "params", ",", "'POST'", ")" ]
/v1/server/halt POST - account Halt a virtual machine. This is a hard power off (basically, unplugging the machine). The data on the machine will not be modified, and you will still be billed for the machine. To completely delete a machine, see v1/server/destroy Link: https://www.vultr.com/api/#server_halt
[ "/", "v1", "/", "server", "/", "halt", "POST", "-", "account", "Halt", "a", "virtual", "machine", ".", "This", "is", "a", "hard", "power", "off", "(", "basically", "unplugging", "the", "machine", ")", ".", "The", "data", "on", "the", "machine", "will", "not", "be", "modified", "and", "you", "will", "still", "be", "billed", "for", "the", "machine", ".", "To", "completely", "delete", "a", "machine", "see", "v1", "/", "server", "/", "destroy" ]
python
train
zhanglab/psamm
psamm/commands/fluxcheck.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/commands/fluxcheck.py#L62-L157
def run(self): """Run flux consistency check command""" # Load compound information def compound_name(id): if id not in self._model.compounds: return id return self._model.compounds[id].properties.get('name', id) epsilon = self._args.epsilon if self._args.unrestricted: # Allow all exchange reactions with no flux limits for reaction in self._mm.reactions: if self._mm.is_exchange(reaction): del self._mm.limits[reaction].bounds loop_removal = self._get_loop_removal_option() enable_tfba = loop_removal == 'tfba' enable_fastcore = self._args.fastcore if enable_tfba and enable_fastcore: self.argument_error( 'Using Fastcore with thermodynamic constraints' ' is not supported!') start_time = time.time() if enable_fastcore: solver = self._get_solver() try: inconsistent = set(fastcore.fastcc( self._mm, epsilon, solver=solver)) except fluxanalysis.FluxBalanceError as e: self.report_flux_balance_error(e) else: if enable_tfba: solver = self._get_solver(integer=True) else: solver = self._get_solver() if self._args.reduce_lp: logger.info('Running with reduced number of LP problems.') try: inconsistent = set( fluxanalysis.consistency_check( self._mm, self._mm.reactions, epsilon, tfba=enable_tfba, solver=solver)) except fluxanalysis.FluxBalanceError as e: self.report_flux_balance_error(e) else: logger.info('Using flux bounds to determine consistency.') try: inconsistent = set(self._run_fva_fluxcheck( self._mm, solver, enable_tfba, epsilon)) except FluxCheckFVATaskError: self.report_flux_balance_error() logger.info('Solving took {:.2f} seconds'.format( time.time() - start_time)) # Count the number of reactions that are fixed at zero. While these # reactions are still inconsistent, they are inconsistent because they # have been explicitly disabled. disabled_exchange = 0 disabled_internal = 0 count_exchange = 0 total_exchange = 0 count_internal = 0 total_internal = 0 # Print result for reaction in sorted(self._mm.reactions): disabled = self._mm.limits[reaction].bounds == (0, 0) if self._mm.is_exchange(reaction): total_exchange += 1 count_exchange += int(reaction in inconsistent) disabled_exchange += int(disabled) else: total_internal += 1 count_internal += int(reaction in inconsistent) disabled_internal += int(disabled) if reaction in inconsistent: rx = self._mm.get_reaction(reaction) rxt = rx.translated_compounds(compound_name) print('{}\t{}'.format(reaction, rxt)) logger.info('Model has {}/{} inconsistent internal reactions' ' ({} disabled by user)'.format( count_internal, total_internal, disabled_internal)) logger.info('Model has {}/{} inconsistent exchange reactions' ' ({} disabled by user)'.format( count_exchange, total_exchange, disabled_exchange))
[ "def", "run", "(", "self", ")", ":", "# Load compound information", "def", "compound_name", "(", "id", ")", ":", "if", "id", "not", "in", "self", ".", "_model", ".", "compounds", ":", "return", "id", "return", "self", ".", "_model", ".", "compounds", "[", "id", "]", ".", "properties", ".", "get", "(", "'name'", ",", "id", ")", "epsilon", "=", "self", ".", "_args", ".", "epsilon", "if", "self", ".", "_args", ".", "unrestricted", ":", "# Allow all exchange reactions with no flux limits", "for", "reaction", "in", "self", ".", "_mm", ".", "reactions", ":", "if", "self", ".", "_mm", ".", "is_exchange", "(", "reaction", ")", ":", "del", "self", ".", "_mm", ".", "limits", "[", "reaction", "]", ".", "bounds", "loop_removal", "=", "self", ".", "_get_loop_removal_option", "(", ")", "enable_tfba", "=", "loop_removal", "==", "'tfba'", "enable_fastcore", "=", "self", ".", "_args", ".", "fastcore", "if", "enable_tfba", "and", "enable_fastcore", ":", "self", ".", "argument_error", "(", "'Using Fastcore with thermodynamic constraints'", "' is not supported!'", ")", "start_time", "=", "time", ".", "time", "(", ")", "if", "enable_fastcore", ":", "solver", "=", "self", ".", "_get_solver", "(", ")", "try", ":", "inconsistent", "=", "set", "(", "fastcore", ".", "fastcc", "(", "self", ".", "_mm", ",", "epsilon", ",", "solver", "=", "solver", ")", ")", "except", "fluxanalysis", ".", "FluxBalanceError", "as", "e", ":", "self", ".", "report_flux_balance_error", "(", "e", ")", "else", ":", "if", "enable_tfba", ":", "solver", "=", "self", ".", "_get_solver", "(", "integer", "=", "True", ")", "else", ":", "solver", "=", "self", ".", "_get_solver", "(", ")", "if", "self", ".", "_args", ".", "reduce_lp", ":", "logger", ".", "info", "(", "'Running with reduced number of LP problems.'", ")", "try", ":", "inconsistent", "=", "set", "(", "fluxanalysis", ".", "consistency_check", "(", "self", ".", "_mm", ",", "self", ".", "_mm", ".", "reactions", ",", "epsilon", ",", "tfba", "=", "enable_tfba", ",", "solver", "=", "solver", ")", ")", "except", "fluxanalysis", ".", "FluxBalanceError", "as", "e", ":", "self", ".", "report_flux_balance_error", "(", "e", ")", "else", ":", "logger", ".", "info", "(", "'Using flux bounds to determine consistency.'", ")", "try", ":", "inconsistent", "=", "set", "(", "self", ".", "_run_fva_fluxcheck", "(", "self", ".", "_mm", ",", "solver", ",", "enable_tfba", ",", "epsilon", ")", ")", "except", "FluxCheckFVATaskError", ":", "self", ".", "report_flux_balance_error", "(", ")", "logger", ".", "info", "(", "'Solving took {:.2f} seconds'", ".", "format", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "# Count the number of reactions that are fixed at zero. 
While these", "# reactions are still inconsistent, they are inconsistent because they", "# have been explicitly disabled.", "disabled_exchange", "=", "0", "disabled_internal", "=", "0", "count_exchange", "=", "0", "total_exchange", "=", "0", "count_internal", "=", "0", "total_internal", "=", "0", "# Print result", "for", "reaction", "in", "sorted", "(", "self", ".", "_mm", ".", "reactions", ")", ":", "disabled", "=", "self", ".", "_mm", ".", "limits", "[", "reaction", "]", ".", "bounds", "==", "(", "0", ",", "0", ")", "if", "self", ".", "_mm", ".", "is_exchange", "(", "reaction", ")", ":", "total_exchange", "+=", "1", "count_exchange", "+=", "int", "(", "reaction", "in", "inconsistent", ")", "disabled_exchange", "+=", "int", "(", "disabled", ")", "else", ":", "total_internal", "+=", "1", "count_internal", "+=", "int", "(", "reaction", "in", "inconsistent", ")", "disabled_internal", "+=", "int", "(", "disabled", ")", "if", "reaction", "in", "inconsistent", ":", "rx", "=", "self", ".", "_mm", ".", "get_reaction", "(", "reaction", ")", "rxt", "=", "rx", ".", "translated_compounds", "(", "compound_name", ")", "print", "(", "'{}\\t{}'", ".", "format", "(", "reaction", ",", "rxt", ")", ")", "logger", ".", "info", "(", "'Model has {}/{} inconsistent internal reactions'", "' ({} disabled by user)'", ".", "format", "(", "count_internal", ",", "total_internal", ",", "disabled_internal", ")", ")", "logger", ".", "info", "(", "'Model has {}/{} inconsistent exchange reactions'", "' ({} disabled by user)'", ".", "format", "(", "count_exchange", ",", "total_exchange", ",", "disabled_exchange", ")", ")" ]
Run flux consistency check command
[ "Run", "flux", "consistency", "check", "command" ]
python
train
s1s1ty/py-jsonq
pyjsonq/query.py
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L411-L427
def group_by(self, property): """Getting the grouped result by the given property :@param property :@type property: string :@return self """ self.__prepare() group_data = {} for data in self._json_data: if data[property] not in group_data: group_data[data[property]] = [] group_data[data[property]].append(data) self._json_data = group_data return self
[ "def", "group_by", "(", "self", ",", "property", ")", ":", "self", ".", "__prepare", "(", ")", "group_data", "=", "{", "}", "for", "data", "in", "self", ".", "_json_data", ":", "if", "data", "[", "property", "]", "not", "in", "group_data", ":", "group_data", "[", "data", "[", "property", "]", "]", "=", "[", "]", "group_data", "[", "data", "[", "property", "]", "]", ".", "append", "(", "data", ")", "self", ".", "_json_data", "=", "group_data", "return", "self" ]
Getting the grouped result by the given property :@param property :@type property: string :@return self
[ "Getting", "the", "grouped", "result", "by", "the", "given", "property" ]
python
train
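The grouping step above, reduced to plain dictionaries and made-up rows rather than the pyjsonq query object:

rows = [
    {"name": "alice", "team": "red"},
    {"name": "bob", "team": "blue"},
    {"name": "carol", "team": "red"},
]

grouped = {}
for row in rows:
    grouped.setdefault(row["team"], []).append(row)

print(grouped["red"])   # alice and carol end up under the same key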
load-tools/netort
netort/resource.py
https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/resource.py#L85-L102
def get_opener(self, path): """ Args: path: str, resource file url or resource file absolute/relative path. Returns: file object """ self.path = path opener = None # FIXME this parser/matcher should use `urlparse` stdlib for opener_name, signature in self.openers.items(): if self.path.startswith(signature[0]): opener = signature[1](self.path) break if not opener: opener = FileOpener(self.path) return opener
[ "def", "get_opener", "(", "self", ",", "path", ")", ":", "self", ".", "path", "=", "path", "opener", "=", "None", "# FIXME this parser/matcher should use `urlparse` stdlib", "for", "opener_name", ",", "signature", "in", "self", ".", "openers", ".", "items", "(", ")", ":", "if", "self", ".", "path", ".", "startswith", "(", "signature", "[", "0", "]", ")", ":", "opener", "=", "signature", "[", "1", "]", "(", "self", ".", "path", ")", "break", "if", "not", "opener", ":", "opener", "=", "FileOpener", "(", "self", ".", "path", ")", "return", "opener" ]
Args: path: str, resource file url or resource file absolute/relative path. Returns: file object
[ "Args", ":", "path", ":", "str", "resource", "file", "url", "or", "resource", "file", "absolute", "/", "relative", "path", "." ]
python
train
m32/endesive
endesive/pdf/fpdf/fpdf.py
https://github.com/m32/endesive/blob/973091dc69847fe2df594c80ac9235a8d08460ff/endesive/pdf/fpdf/fpdf.py#L329-L337
def set_fill_color(self,r,g=-1,b=-1): "Set color for all filling operations" if((r==0 and g==0 and b==0) or g==-1): self.fill_color=sprintf('%.3f g',r/255.0) else: self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0) self.color_flag=(self.fill_color!=self.text_color) if(self.page>0): self._out(self.fill_color)
[ "def", "set_fill_color", "(", "self", ",", "r", ",", "g", "=", "-", "1", ",", "b", "=", "-", "1", ")", ":", "if", "(", "(", "r", "==", "0", "and", "g", "==", "0", "and", "b", "==", "0", ")", "or", "g", "==", "-", "1", ")", ":", "self", ".", "fill_color", "=", "sprintf", "(", "'%.3f g'", ",", "r", "/", "255.0", ")", "else", ":", "self", ".", "fill_color", "=", "sprintf", "(", "'%.3f %.3f %.3f rg'", ",", "r", "/", "255.0", ",", "g", "/", "255.0", ",", "b", "/", "255.0", ")", "self", ".", "color_flag", "=", "(", "self", ".", "fill_color", "!=", "self", ".", "text_color", ")", "if", "(", "self", ".", "page", ">", "0", ")", ":", "self", ".", "_out", "(", "self", ".", "fill_color", ")" ]
Set color for all filling operations
[ "Set", "color", "for", "all", "filling", "operations" ]
python
train
ArduPilot/MAVProxy
MAVProxy/tools/MAVExplorer.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/tools/MAVExplorer.py#L279-L290
def flightmode_colours(): '''return mapping of flight mode to colours''' from MAVProxy.modules.lib.grapher import flightmode_colours mapping = {} idx = 0 for (mode,t0,t1) in flightmodes: if not mode in mapping: mapping[mode] = flightmode_colours[idx] idx += 1 if idx >= len(flightmode_colours): idx = 0 return mapping
[ "def", "flightmode_colours", "(", ")", ":", "from", "MAVProxy", ".", "modules", ".", "lib", ".", "grapher", "import", "flightmode_colours", "mapping", "=", "{", "}", "idx", "=", "0", "for", "(", "mode", ",", "t0", ",", "t1", ")", "in", "flightmodes", ":", "if", "not", "mode", "in", "mapping", ":", "mapping", "[", "mode", "]", "=", "flightmode_colours", "[", "idx", "]", "idx", "+=", "1", "if", "idx", ">=", "len", "(", "flightmode_colours", ")", ":", "idx", "=", "0", "return", "mapping" ]
return mapping of flight mode to colours
[ "return", "mapping", "of", "flight", "mode", "to", "colours" ]
python
train
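A stand-alone sketch of the colour-cycling idea in flightmode_colours above; the palette and mode names are invented, but each newly seen mode takes the next palette entry and the palette wraps when exhausted.

palette = ["red", "green", "blue"]
modes = ["AUTO", "LOITER", "AUTO", "RTL", "GUIDED"]

mapping, idx = {}, 0
for mode in modes:
    if mode not in mapping:
        mapping[mode] = palette[idx % len(palette)]
        idx += 1
print(mapping)  # {'AUTO': 'red', 'LOITER': 'green', 'RTL': 'blue', 'GUIDED': 'red'}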
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/plugins/do_symfix.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/plugins/do_symfix.py#L35-L37
def do(self, arg): ".symfix - Set the default Microsoft Symbol Store settings if missing" self.debug.system.fix_symbol_store_path(remote = True, force = False)
[ "def", "do", "(", "self", ",", "arg", ")", ":", "self", ".", "debug", ".", "system", ".", "fix_symbol_store_path", "(", "remote", "=", "True", ",", "force", "=", "False", ")" ]
.symfix - Set the default Microsoft Symbol Store settings if missing
[ ".", "symfix", "-", "Set", "the", "default", "Microsoft", "Symbol", "Store", "settings", "if", "missing" ]
python
train
twisted/vertex
vertex/q2qclient.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/q2qclient.py#L321-L343
def enregister(svc, newAddress, password): """ Register a new account and return a Deferred that fires if it worked. @param svc: a Q2QService @param newAddress: a Q2QAddress object @param password: a shared secret (str) """ return svc.connectQ2Q(q2q.Q2QAddress("",""), q2q.Q2QAddress(newAddress.domain, "accounts"), 'identity-admin', protocol.ClientFactory.forProtocol(AMP) ).addCallback( AMP.callRemote, AddUser, name=newAddress.resource, password=password ).addErrback( Failure.trap, error.ConnectionDone )
[ "def", "enregister", "(", "svc", ",", "newAddress", ",", "password", ")", ":", "return", "svc", ".", "connectQ2Q", "(", "q2q", ".", "Q2QAddress", "(", "\"\"", ",", "\"\"", ")", ",", "q2q", ".", "Q2QAddress", "(", "newAddress", ".", "domain", ",", "\"accounts\"", ")", ",", "'identity-admin'", ",", "protocol", ".", "ClientFactory", ".", "forProtocol", "(", "AMP", ")", ")", ".", "addCallback", "(", "AMP", ".", "callRemote", ",", "AddUser", ",", "name", "=", "newAddress", ".", "resource", ",", "password", "=", "password", ")", ".", "addErrback", "(", "Failure", ".", "trap", ",", "error", ".", "ConnectionDone", ")" ]
Register a new account and return a Deferred that fires if it worked. @param svc: a Q2QService @param newAddress: a Q2QAddress object @param password: a shared secret (str)
[ "Register", "a", "new", "account", "and", "return", "a", "Deferred", "that", "fires", "if", "it", "worked", "." ]
python
train
mitsei/dlkit
dlkit/services/commenting.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/commenting.py#L1225-L1233
def get_comment_form(self, *args, **kwargs): """Pass through to provider CommentAdminSession.get_comment_form_for_update""" # Implemented from kitosid template for - # osid.resource.ResourceAdminSession.get_resource_form_for_update # This method might be a bit sketchy. Time will tell. if isinstance(args[-1], list) or 'comment_record_types' in kwargs: return self.get_comment_form_for_create(*args, **kwargs) else: return self.get_comment_form_for_update(*args, **kwargs)
[ "def", "get_comment_form", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.ResourceAdminSession.get_resource_form_for_update", "# This method might be a bit sketchy. Time will tell.", "if", "isinstance", "(", "args", "[", "-", "1", "]", ",", "list", ")", "or", "'comment_record_types'", "in", "kwargs", ":", "return", "self", ".", "get_comment_form_for_create", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "get_comment_form_for_update", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Pass through to provider CommentAdminSession.get_comment_form_for_update
[ "Pass", "through", "to", "provider", "CommentAdminSession", ".", "get_comment_form_for_update" ]
python
train
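The routing rule in the record above (last positional argument is a list, or a 'comment_record_types' keyword is present, means "create"; otherwise "update") can be exercised in isolation; the two target functions below are hypothetical placeholders, not the dlkit provider sessions.

def _form_for_create(*args, **kwargs):
    return 'create'

def _form_for_update(*args, **kwargs):
    return 'update'

def get_form(*args, **kwargs):
    # Same dispatch test as the pass-through method above.
    if isinstance(args[-1], list) or 'comment_record_types' in kwargs:
        return _form_for_create(*args, **kwargs)
    return _form_for_update(*args, **kwargs)

print(get_form('book-id', []))                       # create
print(get_form('comment-id'))                        # update
print(get_form('book-id', comment_record_types=[]))  # create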
iotile/coretools
iotilegateway/iotilegateway/supervisor/service_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/supervisor/service_manager.py#L103-L134
def add_service(self, name, long_name, preregistered=False, notify=True): """Add a service to the list of tracked services. Args: name (string): A unique short service name for the service long_name (string): A longer, user friendly name for the service preregistered (bool): Whether this service is an expected preregistered service. notify (bool): Send notifications about this service to all clients Returns: awaitable: If notify is True, an awaitable for the notifications. Otherwise None. """ if name in self.services: raise ArgumentError("Could not add service because the long_name is taken", long_name=long_name) serv_state = states.ServiceState(name, long_name, preregistered) service = { 'state': serv_state, 'heartbeat_threshold': 600 } self.services[name] = service if notify: return self._notify_update(name, 'new_service', self.service_info(name)) return None
[ "def", "add_service", "(", "self", ",", "name", ",", "long_name", ",", "preregistered", "=", "False", ",", "notify", "=", "True", ")", ":", "if", "name", "in", "self", ".", "services", ":", "raise", "ArgumentError", "(", "\"Could not add service because the long_name is taken\"", ",", "long_name", "=", "long_name", ")", "serv_state", "=", "states", ".", "ServiceState", "(", "name", ",", "long_name", ",", "preregistered", ")", "service", "=", "{", "'state'", ":", "serv_state", ",", "'heartbeat_threshold'", ":", "600", "}", "self", ".", "services", "[", "name", "]", "=", "service", "if", "notify", ":", "return", "self", ".", "_notify_update", "(", "name", ",", "'new_service'", ",", "self", ".", "service_info", "(", "name", ")", ")", "return", "None" ]
Add a service to the list of tracked services. Args: name (string): A unique short service name for the service long_name (string): A longer, user friendly name for the service preregistered (bool): Whether this service is an expected preregistered service. notify (bool): Send notifications about this service to all clients Returns: awaitable: If notify is True, an awaitable for the notifications. Otherwise None.
[ "Add", "a", "service", "to", "the", "list", "of", "tracked", "services", "." ]
python
train
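A stripped-down sketch of the registry behaviour described in the docstring above, with a plain dict standing in for ServiceState and the notification call omitted (both simplifications are assumptions, not the iotile API).

class ServiceManager:
    def __init__(self):
        self.services = {}

    def add_service(self, name, long_name, preregistered=False):
        # Refuse duplicate short names, as the original does.
        if name in self.services:
            raise ValueError("service name already taken: %s" % name)
        self.services[name] = {
            'state': {'name': name, 'long_name': long_name,
                      'preregistered': preregistered},
            'heartbeat_threshold': 600,
        }

mgr = ServiceManager()
mgr.add_service('gw', 'Gateway Service')
print(mgr.services['gw']['heartbeat_threshold'])  # 600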
swistakm/graceful
src/graceful/authentication.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L166-L171
def _get_storage_key(self, identified_with, identifier): """Get key string for given user identifier in consistent manner.""" return ':'.join(( self.key_prefix, identified_with.name, self.hash_identifier(identified_with, identifier), ))
[ "def", "_get_storage_key", "(", "self", ",", "identified_with", ",", "identifier", ")", ":", "return", "':'", ".", "join", "(", "(", "self", ".", "key_prefix", ",", "identified_with", ".", "name", ",", "self", ".", "hash_identifier", "(", "identified_with", ",", "identifier", ")", ",", ")", ")" ]
Get key string for given user identifier in consistent manner.
[ "Get", "key", "string", "for", "given", "user", "identifier", "in", "consistent", "manner", "." ]
python
train
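The key layout above is just prefix:authenticator-name:hashed-identifier; the sketch below mimics it with hashlib.sha256 as a hypothetical hash_identifier (graceful lets the caller plug in its own hashing).

import hashlib

def storage_key(key_prefix, auth_name, identifier):
    # Hash the raw identifier so the storage key never contains it verbatim.
    digest = hashlib.sha256(identifier.encode('utf-8')).hexdigest()
    return ':'.join((key_prefix, auth_name, digest))

print(storage_key('users', 'basic', 'alice:secret'))
# users:basic:<64-char hex digest>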
apache/incubator-mxnet
python/mxnet/ndarray/sparse.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/sparse.py#L796-L806
def _prepare_src_array(source_array, dtype): """Prepare `source_array` so that it can be used to construct NDArray. `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \ nor an `np.ndarray`. """ if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray): try: source_array = np.array(source_array, dtype=dtype) except: raise TypeError('values must be array like object') return source_array
[ "def", "_prepare_src_array", "(", "source_array", ",", "dtype", ")", ":", "if", "not", "isinstance", "(", "source_array", ",", "NDArray", ")", "and", "not", "isinstance", "(", "source_array", ",", "np", ".", "ndarray", ")", ":", "try", ":", "source_array", "=", "np", ".", "array", "(", "source_array", ",", "dtype", "=", "dtype", ")", "except", ":", "raise", "TypeError", "(", "'values must be array like object'", ")", "return", "source_array" ]
Prepare `source_array` so that it can be used to construct NDArray. `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \ nor an `np.ndarray`.
[ "Prepare", "source_array", "so", "that", "it", "can", "be", "used", "to", "construct", "NDArray", ".", "source_array", "is", "converted", "to", "a", "np", ".", "ndarray", "if", "it", "s", "neither", "an", "NDArray", "\\", "nor", "an", "np", ".", "ndarray", "." ]
python
train
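The conversion rule above is easy to check with NumPy alone; this sketch drops the mxnet NDArray branch and keeps only the array-like coercion, so it is an illustration rather than the library function.

import numpy as np

def prepare_src_array(source_array, dtype):
    # Keep ndarrays as-is; coerce other array-likes, mapping failures to TypeError.
    if not isinstance(source_array, np.ndarray):
        try:
            source_array = np.array(source_array, dtype=dtype)
        except Exception:
            raise TypeError('values must be array like object')
    return source_array

print(prepare_src_array([1, 2, 3], 'float32').dtype)  # float32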
ArchiveTeam/wpull
wpull/processor/coprocessor/phantomjs.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/processor/coprocessor/phantomjs.py#L270-L295
def _add_warc_snapshot(self, filename, url): '''Add the snaphot to the WARC file.''' _logger.debug('Adding snapshot record.') extension = os.path.splitext(filename)[1] content_type = { '.pdf': 'application/pdf', '.html': 'text/html', '.png': 'image/png', '.gif': 'image/gif' }[extension] record = WARCRecord() record.set_common_fields('resource', content_type) record.fields['WARC-Target-URI'] = 'urn:X-wpull:snapshot?url={0}' \ .format(wpull.url.percent_encode_query_value(url)) if self._action_warc_record: record.fields['WARC-Concurrent-To'] = \ self._action_warc_record.fields[WARCRecord.WARC_RECORD_ID] with open(filename, 'rb') as in_file: record.block_file = in_file self._warc_recorder.set_length_and_maybe_checksums(record) self._warc_recorder.write_record(record)
[ "def", "_add_warc_snapshot", "(", "self", ",", "filename", ",", "url", ")", ":", "_logger", ".", "debug", "(", "'Adding snapshot record.'", ")", "extension", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "content_type", "=", "{", "'.pdf'", ":", "'application/pdf'", ",", "'.html'", ":", "'text/html'", ",", "'.png'", ":", "'image/png'", ",", "'.gif'", ":", "'image/gif'", "}", "[", "extension", "]", "record", "=", "WARCRecord", "(", ")", "record", ".", "set_common_fields", "(", "'resource'", ",", "content_type", ")", "record", ".", "fields", "[", "'WARC-Target-URI'", "]", "=", "'urn:X-wpull:snapshot?url={0}'", ".", "format", "(", "wpull", ".", "url", ".", "percent_encode_query_value", "(", "url", ")", ")", "if", "self", ".", "_action_warc_record", ":", "record", ".", "fields", "[", "'WARC-Concurrent-To'", "]", "=", "self", ".", "_action_warc_record", ".", "fields", "[", "WARCRecord", ".", "WARC_RECORD_ID", "]", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "in_file", ":", "record", ".", "block_file", "=", "in_file", "self", ".", "_warc_recorder", ".", "set_length_and_maybe_checksums", "(", "record", ")", "self", ".", "_warc_recorder", ".", "write_record", "(", "record", ")" ]
Add the snaphot to the WARC file.
[ "Add", "the", "snaphot", "to", "the", "WARC", "file", "." ]
python
train
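The content-type selection in the record above is a plain extension lookup; the sketch below reproduces just that piece, with the WARC record plumbing omitted.

import os

CONTENT_TYPES = {
    '.pdf': 'application/pdf',
    '.html': 'text/html',
    '.png': 'image/png',
    '.gif': 'image/gif',
}

def snapshot_content_type(filename):
    # Raises KeyError for unexpected extensions, matching the strict lookup above.
    extension = os.path.splitext(filename)[1]
    return CONTENT_TYPES[extension]

print(snapshot_content_type('/tmp/page.html'))  # text/html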
jasonrbriggs/stomp.py
stomp/transport.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L331-L370
def __receiver_loop(self): """ Main loop listening for incoming data. """ log.info("Starting receiver loop") notify_disconnected = True try: while self.running: try: while self.running: frames = self.__read() for frame in frames: f = utils.parse_frame(frame) if f is None: continue if self.__auto_decode: f.body = decode(f.body) self.process_frame(f, frame) except exception.ConnectionClosedException: if self.running: # # Clear out any half-received messages after losing connection # self.__recvbuf = b'' self.running = False notify_disconnected = True break finally: self.cleanup() finally: with self.__receiver_thread_exit_condition: self.__receiver_thread_exited = True self.__receiver_thread_exit_condition.notifyAll() log.info("Receiver loop ended") self.notify('receiver_loop_completed') if notify_disconnected: self.notify('disconnected') with self.__connect_wait_condition: self.__connect_wait_condition.notifyAll()
[ "def", "__receiver_loop", "(", "self", ")", ":", "log", ".", "info", "(", "\"Starting receiver loop\"", ")", "notify_disconnected", "=", "True", "try", ":", "while", "self", ".", "running", ":", "try", ":", "while", "self", ".", "running", ":", "frames", "=", "self", ".", "__read", "(", ")", "for", "frame", "in", "frames", ":", "f", "=", "utils", ".", "parse_frame", "(", "frame", ")", "if", "f", "is", "None", ":", "continue", "if", "self", ".", "__auto_decode", ":", "f", ".", "body", "=", "decode", "(", "f", ".", "body", ")", "self", ".", "process_frame", "(", "f", ",", "frame", ")", "except", "exception", ".", "ConnectionClosedException", ":", "if", "self", ".", "running", ":", "#", "# Clear out any half-received messages after losing connection", "#", "self", ".", "__recvbuf", "=", "b''", "self", ".", "running", "=", "False", "notify_disconnected", "=", "True", "break", "finally", ":", "self", ".", "cleanup", "(", ")", "finally", ":", "with", "self", ".", "__receiver_thread_exit_condition", ":", "self", ".", "__receiver_thread_exited", "=", "True", "self", ".", "__receiver_thread_exit_condition", ".", "notifyAll", "(", ")", "log", ".", "info", "(", "\"Receiver loop ended\"", ")", "self", ".", "notify", "(", "'receiver_loop_completed'", ")", "if", "notify_disconnected", ":", "self", ".", "notify", "(", "'disconnected'", ")", "with", "self", ".", "__connect_wait_condition", ":", "self", ".", "__connect_wait_condition", ".", "notifyAll", "(", ")" ]
Main loop listening for incoming data.
[ "Main", "loop", "listening", "for", "incoming", "data", "." ]
python
train
gem/oq-engine
openquake/hazardlib/geo/utils.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/utils.py#L428-L442
def get_middle_point(lon1, lat1, lon2, lat2): """ Given two points return the point exactly in the middle lying on the same great circle arc. Parameters are point coordinates in degrees. :returns: Tuple of longitude and latitude of the point in the middle. """ if lon1 == lon2 and lat1 == lat2: return lon1, lat1 dist = geodetic.geodetic_distance(lon1, lat1, lon2, lat2) azimuth = geodetic.azimuth(lon1, lat1, lon2, lat2) return geodetic.point_at(lon1, lat1, azimuth, dist / 2.0)
[ "def", "get_middle_point", "(", "lon1", ",", "lat1", ",", "lon2", ",", "lat2", ")", ":", "if", "lon1", "==", "lon2", "and", "lat1", "==", "lat2", ":", "return", "lon1", ",", "lat1", "dist", "=", "geodetic", ".", "geodetic_distance", "(", "lon1", ",", "lat1", ",", "lon2", ",", "lat2", ")", "azimuth", "=", "geodetic", ".", "azimuth", "(", "lon1", ",", "lat1", ",", "lon2", ",", "lat2", ")", "return", "geodetic", ".", "point_at", "(", "lon1", ",", "lat1", ",", "azimuth", ",", "dist", "/", "2.0", ")" ]
Given two points return the point exactly in the middle lying on the same great circle arc. Parameters are point coordinates in degrees. :returns: Tuple of longitude and latitude of the point in the middle.
[ "Given", "two", "points", "return", "the", "point", "exactly", "in", "the", "middle", "lying", "on", "the", "same", "great", "circle", "arc", "." ]
python
train
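For context, a self-contained midpoint computation using the standard great-circle midpoint formula; this is not openquake's geodetic module, just an illustration of the same geometric idea with the standard library only.

import math

def midpoint(lon1, lat1, lon2, lat2):
    # Standard great-circle midpoint formula (coordinates in degrees).
    lon1, lat1, lon2, lat2 = map(math.radians, (lon1, lat1, lon2, lat2))
    bx = math.cos(lat2) * math.cos(lon2 - lon1)
    by = math.cos(lat2) * math.sin(lon2 - lon1)
    lat_m = math.atan2(math.sin(lat1) + math.sin(lat2),
                       math.sqrt((math.cos(lat1) + bx) ** 2 + by ** 2))
    lon_m = lon1 + math.atan2(by, math.cos(lat1) + bx)
    return math.degrees(lon_m), math.degrees(lat_m)

print(midpoint(0.0, 0.0, 10.0, 0.0))  # roughly (5.0, 0.0)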
saltstack/salt
salt/modules/ethtool.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ethtool.py#L199-L235
def set_coalesce(devname, **kwargs): ''' Changes the coalescing settings of the specified network device CLI Example: .. code-block:: bash salt '*' ethtool.set_coalesce <devname> [adaptive_rx=on|off] [adaptive_tx=on|off] [rx_usecs=N] [rx_frames=N] [rx_usecs_irq=N] [rx_frames_irq=N] [tx_usecs=N] [tx_frames=N] [tx_usecs_irq=N] [tx_frames_irq=N] [stats_block_usecs=N] [pkt_rate_low=N] [rx_usecs_low=N] [rx_frames_low=N] [tx_usecs_low=N] [tx_frames_low=N] [pkt_rate_high=N] [rx_usecs_high=N] [rx_frames_high=N] [tx_usecs_high=N] [tx_frames_high=N] [sample_interval=N] ''' try: coalesce = ethtool.get_coalesce(devname) except IOError: log.error('Interrupt coalescing not supported on %s', devname) return 'Not supported' changed = False for param, value in kwargs.items(): if param in ethtool_coalesce_map: param = ethtool_coalesce_map[param] if param in coalesce: if coalesce[param] != value: coalesce[param] = value changed = True try: if changed: ethtool.set_coalesce(devname, coalesce) return show_coalesce(devname) except IOError: log.error('Invalid coalesce arguments on %s: %s', devname, coalesce) return 'Invalid arguments'
[ "def", "set_coalesce", "(", "devname", ",", "*", "*", "kwargs", ")", ":", "try", ":", "coalesce", "=", "ethtool", ".", "get_coalesce", "(", "devname", ")", "except", "IOError", ":", "log", ".", "error", "(", "'Interrupt coalescing not supported on %s'", ",", "devname", ")", "return", "'Not supported'", "changed", "=", "False", "for", "param", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "param", "in", "ethtool_coalesce_map", ":", "param", "=", "ethtool_coalesce_map", "[", "param", "]", "if", "param", "in", "coalesce", ":", "if", "coalesce", "[", "param", "]", "!=", "value", ":", "coalesce", "[", "param", "]", "=", "value", "changed", "=", "True", "try", ":", "if", "changed", ":", "ethtool", ".", "set_coalesce", "(", "devname", ",", "coalesce", ")", "return", "show_coalesce", "(", "devname", ")", "except", "IOError", ":", "log", ".", "error", "(", "'Invalid coalesce arguments on %s: %s'", ",", "devname", ",", "coalesce", ")", "return", "'Invalid arguments'" ]
Changes the coalescing settings of the specified network device CLI Example: .. code-block:: bash salt '*' ethtool.set_coalesce <devname> [adaptive_rx=on|off] [adaptive_tx=on|off] [rx_usecs=N] [rx_frames=N] [rx_usecs_irq=N] [rx_frames_irq=N] [tx_usecs=N] [tx_frames=N] [tx_usecs_irq=N] [tx_frames_irq=N] [stats_block_usecs=N] [pkt_rate_low=N] [rx_usecs_low=N] [rx_frames_low=N] [tx_usecs_low=N] [tx_frames_low=N] [pkt_rate_high=N] [rx_usecs_high=N] [rx_frames_high=N] [tx_usecs_high=N] [tx_frames_high=N] [sample_interval=N]
[ "Changes", "the", "coalescing", "settings", "of", "the", "specified", "network", "device" ]
python
train
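The update logic above (rename known kwargs, apply only real changes) works on plain dicts; this sketch drops the actual ethtool calls, and the name map and setting keys are made up for illustration, not the real ethtool_coalesce_map.

# Hypothetical subset of the kwarg -> driver-key map.
COALESCE_MAP = {'rx_usecs': 'rx-usecs', 'tx_usecs': 'tx-usecs'}

def merge_coalesce(current, **kwargs):
    # Return the updated settings and whether anything actually changed.
    changed = False
    for param, value in kwargs.items():
        param = COALESCE_MAP.get(param, param)
        if param in current and current[param] != value:
            current[param] = value
            changed = True
    return current, changed

settings, changed = merge_coalesce({'rx-usecs': 3, 'tx-usecs': 50}, rx_usecs=8)
print(settings, changed)  # {'rx-usecs': 8, 'tx-usecs': 50} True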
nicolargo/glances
glances/config.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/config.py#L304-L309
def get_float_value(self, section, option, default=0.0): """Get the float value of an option, if it exists.""" try: return self.parser.getfloat(section, option) except NoOptionError: return float(default)
[ "def", "get_float_value", "(", "self", ",", "section", ",", "option", ",", "default", "=", "0.0", ")", ":", "try", ":", "return", "self", ".", "parser", ".", "getfloat", "(", "section", ",", "option", ")", "except", "NoOptionError", ":", "return", "float", "(", "default", ")" ]
Get the float value of an option, if it exists.
[ "Get", "the", "float", "value", "of", "an", "option", "if", "it", "exists", "." ]
python
train
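The same fallback pattern with the standard-library configparser, which appears to be where the NoOptionError above comes from; the section and option names are invented for the demo.

from configparser import ConfigParser, NoOptionError

parser = ConfigParser()
parser.read_string("[cpu]\ncareful = 50.0\n")

def get_float_value(section, option, default=0.0):
    # Fall back to the default when the option is missing from the section.
    try:
        return parser.getfloat(section, option)
    except NoOptionError:
        return float(default)

print(get_float_value('cpu', 'careful'))       # 50.0
print(get_float_value('cpu', 'critical', 90))  # 90.0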
svenkreiss/pysparkling
pysparkling/streaming/dstream.py
https://github.com/svenkreiss/pysparkling/blob/596d0ef2793100f7115efe228ff9bfc17beaa08d/pysparkling/streaming/dstream.py#L94-L120
def countByValue(self): """Apply countByValue to every RDD.abs :rtype: DStream .. warning:: Implemented as a local operation. Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([[1, 1, 5, 5, 5, 2]]) ... .countByValue() ... .foreachRDD(lambda rdd: print(sorted(rdd.collect()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.15) [(1, 2), (2, 1), (5, 3)] """ return self.transform( lambda rdd: self._context._context.parallelize( rdd.countByValue().items()))
[ "def", "countByValue", "(", "self", ")", ":", "return", "self", ".", "transform", "(", "lambda", "rdd", ":", "self", ".", "_context", ".", "_context", ".", "parallelize", "(", "rdd", ".", "countByValue", "(", ")", ".", "items", "(", ")", ")", ")" ]
Apply countByValue to every RDD.abs :rtype: DStream .. warning:: Implemented as a local operation. Example: >>> import pysparkling >>> sc = pysparkling.Context() >>> ssc = pysparkling.streaming.StreamingContext(sc, 0.1) >>> ( ... ssc ... .queueStream([[1, 1, 5, 5, 5, 2]]) ... .countByValue() ... .foreachRDD(lambda rdd: print(sorted(rdd.collect()))) ... ) >>> ssc.start() >>> ssc.awaitTermination(0.15) [(1, 2), (2, 1), (5, 3)]
[ "Apply", "countByValue", "to", "every", "RDD", ".", "abs" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L253-L262
def normalize_array(q): """ Normalizes the list with len 4 so that it can be used as quaternion :param q: array of len 4 :returns: normalized array """ assert(len(q) == 4) q = np.array(q) n = QuaternionBase.norm_array(q) return q / n
[ "def", "normalize_array", "(", "q", ")", ":", "assert", "(", "len", "(", "q", ")", "==", "4", ")", "q", "=", "np", ".", "array", "(", "q", ")", "n", "=", "QuaternionBase", ".", "norm_array", "(", "q", ")", "return", "q", "/", "n" ]
Normalizes the list with len 4 so that it can be used as quaternion :param q: array of len 4 :returns: normalized array
[ "Normalizes", "the", "list", "with", "len", "4", "so", "that", "it", "can", "be", "used", "as", "quaternion", ":", "param", "q", ":", "array", "of", "len", "4", ":", "returns", ":", "normalized", "array" ]
python
train
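Normalisation itself is one NumPy division; a standalone check, assuming norm_array above is just the Euclidean norm (np.linalg.norm here).

import numpy as np

def normalize_array(q):
    # Divide the length-4 array by its Euclidean norm to get a unit quaternion.
    assert len(q) == 4
    q = np.array(q, dtype=float)
    return q / np.linalg.norm(q)

print(normalize_array([1, 0, 0, 1]))                  # [0.7071 0. 0. 0.7071]
print(np.linalg.norm(normalize_array([2, 3, 4, 5])))  # 1.0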
kisom/pypcapfile
pcapfile/protocols/linklayer/wifi.py
https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/wifi.py#L1106-L1168
def strip_vht(self, idx): """strip(12 byte) radiotap.vht :idx: int :return: int idx :return: collections.namedtuple """ vht = collections.namedtuple( 'vht', ['known_bits', 'have_stbc', 'have_txop_ps', 'have_gi', 'have_sgi_nsym_da', 'have_ldpc_extra', 'have_beamformed', 'have_bw', 'have_gid', 'have_paid', 'stbc', 'txop_ps', 'gi', 'sgi_nysm_da', 'ldpc_extra', 'group_id', 'partial_id', 'beamformed', 'user_0', 'user_1', 'user_2', 'user_3']) user = collections.namedtuple('user', ['nss', 'mcs', 'coding']) idx = Radiotap.align(idx, 2) known, flags, bw = struct.unpack_from('<HBB', self._rtap, idx) mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('<BBBB', self._rtap, idx + 4) coding, group_id, partial_id = struct.unpack_from('<BBH', self._rtap, idx + 8) known_bits = format(known, '032b')[::-1] vht.known_bits = known_bits vht.have_stbc = int(known_bits[0]) # Space Time Block Coding vht.have_txop_ps = int(known_bits[1]) # TXOP_PS_NOT_ALLOWD vht.have_gi = int(known_bits[2]) # Short/Long Guard Interval vht.have_sgi_nsym_da = int(known_bits[3]) # Short Guard Interval Nsym Disambiguation vht.have_ldpc_extra = int(known_bits[4]) # LDPC(Low Density Parity Check) vht.have_beamformed = int(known_bits[5]) # Beamformed vht.have_bw = int(known_bits[6]) # Bandwidth vht.have_gid = int(known_bits[7]) # Group ID vht.have_paid = int(known_bits[8]) # Partial AID flag_bits = format(flags, '032b')[::-1] vht.flag_bits = flag_bits vht.stbc = int(flag_bits[0]) vht.txop_ps = int(flag_bits[1]) vht.gi = int(flag_bits[2]) vht.sgi_nysm_da = int(flag_bits[3]) vht.ldpc_extra = int(flag_bits[4]) vht.beamformed = int(flag_bits[5]) vht.group_id = group_id vht.partial_id = partial_id vht.bw = bw vht.user_0 = user(None, None, None) vht.user_1 = user(None, None, None) vht.user_2 = user(None, None, None) vht.user_3 = user(None, None, None) for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]): if mcs_nss: nss = mcs_nss & 0xf0 >> 4 mcs = (mcs_nss & 0xf0) >> 4 coding = (coding & 2**i) >> i if i == 0: vht.user_0 = user(nss, mcs, coding) elif i == 1: vht.user_1 = user(nss, mcs, coding) elif i == 2: vht.user_2 = user(nss, mcs, coding) elif i == 3: vht.user_3 = user(nss, mcs, coding) return idx + 12, vht
[ "def", "strip_vht", "(", "self", ",", "idx", ")", ":", "vht", "=", "collections", ".", "namedtuple", "(", "'vht'", ",", "[", "'known_bits'", ",", "'have_stbc'", ",", "'have_txop_ps'", ",", "'have_gi'", ",", "'have_sgi_nsym_da'", ",", "'have_ldpc_extra'", ",", "'have_beamformed'", ",", "'have_bw'", ",", "'have_gid'", ",", "'have_paid'", ",", "'stbc'", ",", "'txop_ps'", ",", "'gi'", ",", "'sgi_nysm_da'", ",", "'ldpc_extra'", ",", "'group_id'", ",", "'partial_id'", ",", "'beamformed'", ",", "'user_0'", ",", "'user_1'", ",", "'user_2'", ",", "'user_3'", "]", ")", "user", "=", "collections", ".", "namedtuple", "(", "'user'", ",", "[", "'nss'", ",", "'mcs'", ",", "'coding'", "]", ")", "idx", "=", "Radiotap", ".", "align", "(", "idx", ",", "2", ")", "known", ",", "flags", ",", "bw", "=", "struct", ".", "unpack_from", "(", "'<HBB'", ",", "self", ".", "_rtap", ",", "idx", ")", "mcs_nss_0", ",", "mcs_nss_1", ",", "mcs_nss_2", ",", "mcs_nss_3", "=", "struct", ".", "unpack_from", "(", "'<BBBB'", ",", "self", ".", "_rtap", ",", "idx", "+", "4", ")", "coding", ",", "group_id", ",", "partial_id", "=", "struct", ".", "unpack_from", "(", "'<BBH'", ",", "self", ".", "_rtap", ",", "idx", "+", "8", ")", "known_bits", "=", "format", "(", "known", ",", "'032b'", ")", "[", ":", ":", "-", "1", "]", "vht", ".", "known_bits", "=", "known_bits", "vht", ".", "have_stbc", "=", "int", "(", "known_bits", "[", "0", "]", ")", "# Space Time Block Coding", "vht", ".", "have_txop_ps", "=", "int", "(", "known_bits", "[", "1", "]", ")", "# TXOP_PS_NOT_ALLOWD", "vht", ".", "have_gi", "=", "int", "(", "known_bits", "[", "2", "]", ")", "# Short/Long Guard Interval", "vht", ".", "have_sgi_nsym_da", "=", "int", "(", "known_bits", "[", "3", "]", ")", "# Short Guard Interval Nsym Disambiguation", "vht", ".", "have_ldpc_extra", "=", "int", "(", "known_bits", "[", "4", "]", ")", "# LDPC(Low Density Parity Check)", "vht", ".", "have_beamformed", "=", "int", "(", "known_bits", "[", "5", "]", ")", "# Beamformed", "vht", ".", "have_bw", "=", "int", "(", "known_bits", "[", "6", "]", ")", "# Bandwidth", "vht", ".", "have_gid", "=", "int", "(", "known_bits", "[", "7", "]", ")", "# Group ID", "vht", ".", "have_paid", "=", "int", "(", "known_bits", "[", "8", "]", ")", "# Partial AID", "flag_bits", "=", "format", "(", "flags", ",", "'032b'", ")", "[", ":", ":", "-", "1", "]", "vht", ".", "flag_bits", "=", "flag_bits", "vht", ".", "stbc", "=", "int", "(", "flag_bits", "[", "0", "]", ")", "vht", ".", "txop_ps", "=", "int", "(", "flag_bits", "[", "1", "]", ")", "vht", ".", "gi", "=", "int", "(", "flag_bits", "[", "2", "]", ")", "vht", ".", "sgi_nysm_da", "=", "int", "(", "flag_bits", "[", "3", "]", ")", "vht", ".", "ldpc_extra", "=", "int", "(", "flag_bits", "[", "4", "]", ")", "vht", ".", "beamformed", "=", "int", "(", "flag_bits", "[", "5", "]", ")", "vht", ".", "group_id", "=", "group_id", "vht", ".", "partial_id", "=", "partial_id", "vht", ".", "bw", "=", "bw", "vht", ".", "user_0", "=", "user", "(", "None", ",", "None", ",", "None", ")", "vht", ".", "user_1", "=", "user", "(", "None", ",", "None", ",", "None", ")", "vht", ".", "user_2", "=", "user", "(", "None", ",", "None", ",", "None", ")", "vht", ".", "user_3", "=", "user", "(", "None", ",", "None", ",", "None", ")", "for", "(", "i", ",", "mcs_nss", ")", "in", "enumerate", "(", "[", "mcs_nss_0", ",", "mcs_nss_1", ",", "mcs_nss_2", ",", "mcs_nss_3", "]", ")", ":", "if", "mcs_nss", ":", "nss", "=", "mcs_nss", "&", "0xf0", ">>", "4", "mcs", "=", "(", "mcs_nss", "&", "0xf0", ")", ">>", "4", 
"coding", "=", "(", "coding", "&", "2", "**", "i", ")", ">>", "i", "if", "i", "==", "0", ":", "vht", ".", "user_0", "=", "user", "(", "nss", ",", "mcs", ",", "coding", ")", "elif", "i", "==", "1", ":", "vht", ".", "user_1", "=", "user", "(", "nss", ",", "mcs", ",", "coding", ")", "elif", "i", "==", "2", ":", "vht", ".", "user_2", "=", "user", "(", "nss", ",", "mcs", ",", "coding", ")", "elif", "i", "==", "3", ":", "vht", ".", "user_3", "=", "user", "(", "nss", ",", "mcs", ",", "coding", ")", "return", "idx", "+", "12", ",", "vht" ]
strip(12 byte) radiotap.vht :idx: int :return: int idx :return: collections.namedtuple
[ "strip", "(", "12", "byte", ")", "radiotap", ".", "vht", ":", "idx", ":", "int", ":", "return", ":", "int", "idx", ":", "return", ":", "collections", ".", "namedtuple" ]
python
valid
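A small sketch of the unpacking pattern used above: fixed-width little-endian fields read with struct.unpack_from, then per-bit flags recovered by reversing the binary string. The 12-byte buffer below is hypothetical test data laid out in the same order the parser reads it, not a captured radiotap header.

import struct

# known (H), flags (B), bw (B), four MCS/NSS bytes, coding, group id, partial AID (H).
buf = struct.pack('<HBBBBBBBBH', 0x01ff, 0x04, 1, 0x12, 0, 0, 0, 0b0001, 5, 321)

known, flags, bw = struct.unpack_from('<HBB', buf, 0)
known_bits = format(known, '032b')[::-1]         # reversed, so index 0 is the LSB
print(int(known_bits[2]))                        # 1 -> guard-interval field present
print(struct.unpack_from('<BBBB', buf, 4))       # per-user MCS/NSS bytes: (18, 0, 0, 0)
print(struct.unpack_from('<BBH', buf, 8))        # coding, group id, partial AID: (1, 5, 321)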
eyeseast/python-tablefu
table_fu/formatting.py
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L41-L54
def capfirst(value, failure_string='N/A'): """ Capitalizes the first character of the value. If the submitted value isn't a string, returns the `failure_string` keyword argument. Cribbs from django's default filter set """ try: value = value.lower() return value[0].upper() + value[1:] except: return failure_string
[ "def", "capfirst", "(", "value", ",", "failure_string", "=", "'N/A'", ")", ":", "try", ":", "value", "=", "value", ".", "lower", "(", ")", "return", "value", "[", "0", "]", ".", "upper", "(", ")", "+", "value", "[", "1", ":", "]", "except", ":", "return", "failure_string" ]
Capitalizes the first character of the value. If the submitted value isn't a string, returns the `failure_string` keyword argument. Cribbs from django's default filter set
[ "Capitalizes", "the", "first", "character", "of", "the", "value", ".", "If", "the", "submitted", "value", "isn", "t", "a", "string", "returns", "the", "failure_string", "keyword", "argument", ".", "Cribbs", "from", "django", "s", "default", "filter", "set" ]
python
train
SoftwareDefinedBuildings/XBOS
apps/Data_quality_analysis/Wrapper.py
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L376-L436
def import_data(self, file_name='*', folder_name='.', head_row=0, index_col=0, convert_col=True, concat_files=False, save_file=True): """ Imports csv file(s) and stores the result in self.imported_data. Note ---- 1. If folder exists out of current directory, folder_name should contain correct regex 2. Assuming there's no file called "\*.csv" Parameters ---------- file_name : str CSV file to be imported. Defaults to '\*' - all csv files in the folder. folder_name : str Folder where file resides. Defaults to '.' - current directory. head_row : int Skips all rows from 0 to head_row-1 index_col : int Skips all columns from 0 to index_col-1 convert_col : bool Convert columns to numeric type concat_files : bool Appends data from files to result dataframe save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing imported data. """ # Create instance and import the data import_data_obj = Import_Data() import_data_obj.import_csv(file_name=file_name, folder_name=folder_name, head_row=head_row, index_col=index_col, convert_col=convert_col, concat_files=concat_files) # Store imported data in wrapper class self.imported_data = import_data_obj.data # Logging self.result['Import'] = { 'File Name': file_name, 'Folder Name': folder_name, 'Head Row': head_row, 'Index Col': index_col, 'Convert Col': convert_col, 'Concat Files': concat_files, 'Save File': save_file } if save_file: f = self.results_folder_name + '/imported_data-' + str(self.get_global_count()) + '.csv' self.imported_data.to_csv(f) self.result['Import']['Saved File'] = f else: self.result['Import']['Saved File'] = '' return self.imported_data
[ "def", "import_data", "(", "self", ",", "file_name", "=", "'*'", ",", "folder_name", "=", "'.'", ",", "head_row", "=", "0", ",", "index_col", "=", "0", ",", "convert_col", "=", "True", ",", "concat_files", "=", "False", ",", "save_file", "=", "True", ")", ":", "# Create instance and import the data", "import_data_obj", "=", "Import_Data", "(", ")", "import_data_obj", ".", "import_csv", "(", "file_name", "=", "file_name", ",", "folder_name", "=", "folder_name", ",", "head_row", "=", "head_row", ",", "index_col", "=", "index_col", ",", "convert_col", "=", "convert_col", ",", "concat_files", "=", "concat_files", ")", "# Store imported data in wrapper class", "self", ".", "imported_data", "=", "import_data_obj", ".", "data", "# Logging", "self", ".", "result", "[", "'Import'", "]", "=", "{", "'File Name'", ":", "file_name", ",", "'Folder Name'", ":", "folder_name", ",", "'Head Row'", ":", "head_row", ",", "'Index Col'", ":", "index_col", ",", "'Convert Col'", ":", "convert_col", ",", "'Concat Files'", ":", "concat_files", ",", "'Save File'", ":", "save_file", "}", "if", "save_file", ":", "f", "=", "self", ".", "results_folder_name", "+", "'/imported_data-'", "+", "str", "(", "self", ".", "get_global_count", "(", ")", ")", "+", "'.csv'", "self", ".", "imported_data", ".", "to_csv", "(", "f", ")", "self", ".", "result", "[", "'Import'", "]", "[", "'Saved File'", "]", "=", "f", "else", ":", "self", ".", "result", "[", "'Import'", "]", "[", "'Saved File'", "]", "=", "''", "return", "self", ".", "imported_data" ]
Imports csv file(s) and stores the result in self.imported_data. Note ---- 1. If folder exists out of current directory, folder_name should contain correct regex 2. Assuming there's no file called "\*.csv" Parameters ---------- file_name : str CSV file to be imported. Defaults to '\*' - all csv files in the folder. folder_name : str Folder where file resides. Defaults to '.' - current directory. head_row : int Skips all rows from 0 to head_row-1 index_col : int Skips all columns from 0 to index_col-1 convert_col : bool Convert columns to numeric type concat_files : bool Appends data from files to result dataframe save_file : bool Specifies whether to save file or not. Defaults to True. Returns ------- pd.DataFrame() Dataframe containing imported data.
[ "Imports", "csv", "file", "(", "s", ")", "and", "stores", "the", "result", "in", "self", ".", "imported_data", ".", "Note", "----", "1", ".", "If", "folder", "exists", "out", "of", "current", "directory", "folder_name", "should", "contain", "correct", "regex", "2", ".", "Assuming", "there", "s", "no", "file", "called", "\\", "*", ".", "csv" ]
python
train
sassoftware/saspy
saspy/sasiostdio.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasiostdio.py#L1639-L1818
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None, tempfile: str=None, tempkeep: bool=False, **kwargs) -> '<Pandas Data Frame object>': """ This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. table - the name of the SAS Data Set you want to export to a Pandas Data Frame libref - the libref for the SAS Data Set. dsopts - data set options for the input SAS Data Set port - port to use for socket. Defaults to 0 which uses a random available ephemeral port tempfile - file to use to store CSV, else temporary file will be used. tempkeep - if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it """ dsopts = dsopts if dsopts is not None else {} port = kwargs.get('port', 0) if port==0 and self.sascfg.tunnel: # we are using a tunnel; default to that port port = self.sascfg.tunnel if libref: tabname = libref+"."+table else: tabname = table tmpdir = None if tempfile is None: tmpdir = tf.TemporaryDirectory() tmpcsv = tmpdir.name+os.sep+"tomodsx" else: tmpcsv = tempfile code = "proc sql; create view sasdata2dataframe as select * from "+tabname+self._sb._dsopts(dsopts)+";quit;\n" code += "data _null_; file STDERR;d = open('sasdata2dataframe');\n" code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n" code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n" code += "put lr lrecl; put vn nvars; put vl;\n" code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n" code += "put vt;\n" code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n" code += "run;" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("LRECL= ") l2 = l2[2].partition("\n") lrecl = int(l2[0]) l2 = l2[2].partition("VARNUMS= ") l2 = l2[2].partition("\n") nvars = int(l2[0]) l2 = l2[2].partition("\n") varlist = l2[2].split("\n", nvars) del varlist[nvars] l2 = l2[2].partition("VARTYPE=") l2 = l2[2].partition("\n") vartype = l2[2].split("\n", nvars) del vartype[nvars] topts = dict(dsopts) topts['obs'] = 0 topts['firstobs'] = '' code = "data work._n_u_l_l_;output;run;\n" code += "data _null_; set "+tabname+self._sb._dsopts(topts)+" work._n_u_l_l_;put 'FMT_CATS=';\n" for i in range(nvars): code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n" code += "run;\nproc delete data=work._n_u_l_l_;run;" ll = self.submit(code, "text") l2 = ll['LOG'].rpartition("FMT_CATS=") l2 = l2[2].partition("\n") varcat = l2[2].split("\n", nvars) del varcat[nvars] if self.sascfg.ssh: try: sock = socks.socket() if self.sascfg.tunnel: sock.bind(('localhost', port)) else: sock.bind(('', port)) port = sock.getsockname()[1] except OSError: print('Error try to open a socket in the sasdata2dataframe method. Call failed.') return None if not self.sascfg.tunnel: host = self.sascfg.hostip #socks.gethostname() else: host = 'localhost' code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n" else: host = '' code = "filename sock '"+tmpcsv+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n" code += "data sasdata2dataframe / view=sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat " for i in range(nvars): if vartype[i] == 'N': code += "'"+varlist[i]+"'n " if varcat[i] in self._sb.sas_date_fmts: code += 'E8601DA10. ' else: if varcat[i] in self._sb.sas_time_fmts: code += 'E8601TM15.6 ' else: if varcat[i] in self._sb.sas_datetime_fmts: code += 'E8601DT26.6 ' else: code += 'best32. 
' code += ";\n run;\n" ll = self.submit(code, "text") dts = kwargs.pop('dtype', '') if dts == '': dts = {} for i in range(nvars): if vartype[i] == 'N': if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: dts[varlist[i]] = 'float' else: dts[varlist[i]] = 'str' else: dts[varlist[i]] = 'str' code = '' #code += "options nosource;\n" code = "proc export data=sasdata2dataframe outfile=sock dbms=csv replace; run\n;" #code += "options source;\n" if self.sascfg.ssh: csv = open(tmpcsv, mode='wb') sock.listen(1) self._asubmit(code, 'text') newsock = (0,0) try: newsock = sock.accept() while True: data = newsock[0].recv(4096) if not len(data): break csv.write(data) except: print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.") if newsock[0]: newsock[0].shutdown(socks.SHUT_RDWR) newsock[0].close() sock.close() ll = self.submit("", 'text') return ll['LOG'] newsock[0].shutdown(socks.SHUT_RDWR) newsock[0].close() sock.close() ll = self.submit("", 'text') csv.close() df = pd.read_csv(tmpcsv, index_col=False, engine='c', dtype=dts, **kwargs) else: ll = self.submit(code, "text") df = pd.read_csv(tmpcsv, index_col=False, engine='c', dtype=dts, **kwargs) if tmpdir: tmpdir.cleanup() else: if not tempkeep: os.remove(tmpcsv) for i in range(nvars): if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts: df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce') return df
[ "def", "sasdata2dataframeCSV", "(", "self", ",", "table", ":", "str", ",", "libref", ":", "str", "=", "''", ",", "dsopts", ":", "dict", "=", "None", ",", "tempfile", ":", "str", "=", "None", ",", "tempkeep", ":", "bool", "=", "False", ",", "*", "*", "kwargs", ")", "->", "'<Pandas Data Frame object>'", ":", "dsopts", "=", "dsopts", "if", "dsopts", "is", "not", "None", "else", "{", "}", "port", "=", "kwargs", ".", "get", "(", "'port'", ",", "0", ")", "if", "port", "==", "0", "and", "self", ".", "sascfg", ".", "tunnel", ":", "# we are using a tunnel; default to that port", "port", "=", "self", ".", "sascfg", ".", "tunnel", "if", "libref", ":", "tabname", "=", "libref", "+", "\".\"", "+", "table", "else", ":", "tabname", "=", "table", "tmpdir", "=", "None", "if", "tempfile", "is", "None", ":", "tmpdir", "=", "tf", ".", "TemporaryDirectory", "(", ")", "tmpcsv", "=", "tmpdir", ".", "name", "+", "os", ".", "sep", "+", "\"tomodsx\"", "else", ":", "tmpcsv", "=", "tempfile", "code", "=", "\"proc sql; create view sasdata2dataframe as select * from \"", "+", "tabname", "+", "self", ".", "_sb", ".", "_dsopts", "(", "dsopts", ")", "+", "\";quit;\\n\"", "code", "+=", "\"data _null_; file STDERR;d = open('sasdata2dataframe');\\n\"", "code", "+=", "\"lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\\n\"", "code", "+=", "\"lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\\n\"", "code", "+=", "\"put lr lrecl; put vn nvars; put vl;\\n\"", "code", "+=", "\"do i = 1 to nvars; var = varname(d, i); put var; end;\\n\"", "code", "+=", "\"put vt;\\n\"", "code", "+=", "\"do i = 1 to nvars; var = vartype(d, i); put var; end;\\n\"", "code", "+=", "\"run;\"", "ll", "=", "self", ".", "submit", "(", "code", ",", "\"text\"", ")", "l2", "=", "ll", "[", "'LOG'", "]", ".", "rpartition", "(", "\"LRECL= \"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "lrecl", "=", "int", "(", "l2", "[", "0", "]", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"VARNUMS= \"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "nvars", "=", "int", "(", "l2", "[", "0", "]", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "varlist", "=", "l2", "[", "2", "]", ".", "split", "(", "\"\\n\"", ",", "nvars", ")", "del", "varlist", "[", "nvars", "]", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"VARTYPE=\"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "vartype", "=", "l2", "[", "2", "]", ".", "split", "(", "\"\\n\"", ",", "nvars", ")", "del", "vartype", "[", "nvars", "]", "topts", "=", "dict", "(", "dsopts", ")", "topts", "[", "'obs'", "]", "=", "0", "topts", "[", "'firstobs'", "]", "=", "''", "code", "=", "\"data work._n_u_l_l_;output;run;\\n\"", "code", "+=", "\"data _null_; set \"", "+", "tabname", "+", "self", ".", "_sb", ".", "_dsopts", "(", "topts", ")", "+", "\" work._n_u_l_l_;put 'FMT_CATS=';\\n\"", "for", "i", "in", "range", "(", "nvars", ")", ":", "code", "+=", "\"_tom = vformatn('\"", "+", "varlist", "[", "i", "]", "+", "\"'n);put _tom;\\n\"", "code", "+=", "\"run;\\nproc delete data=work._n_u_l_l_;run;\"", "ll", "=", "self", ".", "submit", "(", "code", ",", "\"text\"", ")", "l2", "=", "ll", "[", "'LOG'", "]", ".", "rpartition", "(", "\"FMT_CATS=\"", ")", "l2", "=", "l2", "[", "2", "]", ".", "partition", "(", "\"\\n\"", ")", "varcat", "=", "l2", "[", "2", "]", ".", "split", "(", "\"\\n\"", ",", "nvars", ")", "del", "varcat", "[", "nvars", "]", "if", "self", ".", "sascfg", ".", "ssh", ":", 
"try", ":", "sock", "=", "socks", ".", "socket", "(", ")", "if", "self", ".", "sascfg", ".", "tunnel", ":", "sock", ".", "bind", "(", "(", "'localhost'", ",", "port", ")", ")", "else", ":", "sock", ".", "bind", "(", "(", "''", ",", "port", ")", ")", "port", "=", "sock", ".", "getsockname", "(", ")", "[", "1", "]", "except", "OSError", ":", "print", "(", "'Error try to open a socket in the sasdata2dataframe method. Call failed.'", ")", "return", "None", "if", "not", "self", ".", "sascfg", ".", "tunnel", ":", "host", "=", "self", ".", "sascfg", ".", "hostip", "#socks.gethostname()", "else", ":", "host", "=", "'localhost'", "code", "=", "\"filename sock socket '\"", "+", "host", "+", "\":\"", "+", "str", "(", "port", ")", "+", "\"' lrecl=\"", "+", "str", "(", "self", ".", "sascfg", ".", "lrecl", ")", "+", "\" recfm=v encoding='utf-8';\\n\"", "else", ":", "host", "=", "''", "code", "=", "\"filename sock '\"", "+", "tmpcsv", "+", "\"' lrecl=\"", "+", "str", "(", "self", ".", "sascfg", ".", "lrecl", ")", "+", "\" recfm=v encoding='utf-8';\\n\"", "code", "+=", "\"data sasdata2dataframe / view=sasdata2dataframe; set \"", "+", "tabname", "+", "self", ".", "_sb", ".", "_dsopts", "(", "dsopts", ")", "+", "\";\\nformat \"", "for", "i", "in", "range", "(", "nvars", ")", ":", "if", "vartype", "[", "i", "]", "==", "'N'", ":", "code", "+=", "\"'\"", "+", "varlist", "[", "i", "]", "+", "\"'n \"", "if", "varcat", "[", "i", "]", "in", "self", ".", "_sb", ".", "sas_date_fmts", ":", "code", "+=", "'E8601DA10. '", "else", ":", "if", "varcat", "[", "i", "]", "in", "self", ".", "_sb", ".", "sas_time_fmts", ":", "code", "+=", "'E8601TM15.6 '", "else", ":", "if", "varcat", "[", "i", "]", "in", "self", ".", "_sb", ".", "sas_datetime_fmts", ":", "code", "+=", "'E8601DT26.6 '", "else", ":", "code", "+=", "'best32. '", "code", "+=", "\";\\n run;\\n\"", "ll", "=", "self", ".", "submit", "(", "code", ",", "\"text\"", ")", "dts", "=", "kwargs", ".", "pop", "(", "'dtype'", ",", "''", ")", "if", "dts", "==", "''", ":", "dts", "=", "{", "}", "for", "i", "in", "range", "(", "nvars", ")", ":", "if", "vartype", "[", "i", "]", "==", "'N'", ":", "if", "varcat", "[", "i", "]", "not", "in", "self", ".", "_sb", ".", "sas_date_fmts", "+", "self", ".", "_sb", ".", "sas_time_fmts", "+", "self", ".", "_sb", ".", "sas_datetime_fmts", ":", "dts", "[", "varlist", "[", "i", "]", "]", "=", "'float'", "else", ":", "dts", "[", "varlist", "[", "i", "]", "]", "=", "'str'", "else", ":", "dts", "[", "varlist", "[", "i", "]", "]", "=", "'str'", "code", "=", "''", "#code += \"options nosource;\\n\"", "code", "=", "\"proc export data=sasdata2dataframe outfile=sock dbms=csv replace; run\\n;\"", "#code += \"options source;\\n\"", "if", "self", ".", "sascfg", ".", "ssh", ":", "csv", "=", "open", "(", "tmpcsv", ",", "mode", "=", "'wb'", ")", "sock", ".", "listen", "(", "1", ")", "self", ".", "_asubmit", "(", "code", ",", "'text'", ")", "newsock", "=", "(", "0", ",", "0", ")", "try", ":", "newsock", "=", "sock", ".", "accept", "(", ")", "while", "True", ":", "data", "=", "newsock", "[", "0", "]", ".", "recv", "(", "4096", ")", "if", "not", "len", "(", "data", ")", ":", "break", "csv", ".", "write", "(", "data", ")", "except", ":", "print", "(", "\"sasdata2dataframe was interupted. 
Trying to return the saslog instead of a data frame.\"", ")", "if", "newsock", "[", "0", "]", ":", "newsock", "[", "0", "]", ".", "shutdown", "(", "socks", ".", "SHUT_RDWR", ")", "newsock", "[", "0", "]", ".", "close", "(", ")", "sock", ".", "close", "(", ")", "ll", "=", "self", ".", "submit", "(", "\"\"", ",", "'text'", ")", "return", "ll", "[", "'LOG'", "]", "newsock", "[", "0", "]", ".", "shutdown", "(", "socks", ".", "SHUT_RDWR", ")", "newsock", "[", "0", "]", ".", "close", "(", ")", "sock", ".", "close", "(", ")", "ll", "=", "self", ".", "submit", "(", "\"\"", ",", "'text'", ")", "csv", ".", "close", "(", ")", "df", "=", "pd", ".", "read_csv", "(", "tmpcsv", ",", "index_col", "=", "False", ",", "engine", "=", "'c'", ",", "dtype", "=", "dts", ",", "*", "*", "kwargs", ")", "else", ":", "ll", "=", "self", ".", "submit", "(", "code", ",", "\"text\"", ")", "df", "=", "pd", ".", "read_csv", "(", "tmpcsv", ",", "index_col", "=", "False", ",", "engine", "=", "'c'", ",", "dtype", "=", "dts", ",", "*", "*", "kwargs", ")", "if", "tmpdir", ":", "tmpdir", ".", "cleanup", "(", ")", "else", ":", "if", "not", "tempkeep", ":", "os", ".", "remove", "(", "tmpcsv", ")", "for", "i", "in", "range", "(", "nvars", ")", ":", "if", "varcat", "[", "i", "]", "in", "self", ".", "_sb", ".", "sas_date_fmts", "+", "self", ".", "_sb", ".", "sas_time_fmts", "+", "self", ".", "_sb", ".", "sas_datetime_fmts", ":", "df", "[", "varlist", "[", "i", "]", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "varlist", "[", "i", "]", "]", ",", "errors", "=", "'coerce'", ")", "return", "df" ]
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object. table - the name of the SAS Data Set you want to export to a Pandas Data Frame libref - the libref for the SAS Data Set. dsopts - data set options for the input SAS Data Set port - port to use for socket. Defaults to 0 which uses a random available ephemeral port tempfile - file to use to store CSV, else temporary file will be used. tempkeep - if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
[ "This", "method", "exports", "the", "SAS", "Data", "Set", "to", "a", "Pandas", "Data", "Frame", "returning", "the", "Data", "Frame", "object", ".", "table", "-", "the", "name", "of", "the", "SAS", "Data", "Set", "you", "want", "to", "export", "to", "a", "Pandas", "Data", "Frame", "libref", "-", "the", "libref", "for", "the", "SAS", "Data", "Set", ".", "dsopts", "-", "data", "set", "options", "for", "the", "input", "SAS", "Data", "Set", "port", "-", "port", "to", "use", "for", "socket", ".", "Defaults", "to", "0", "which", "uses", "a", "random", "available", "ephemeral", "port", "tempfile", "-", "file", "to", "use", "to", "store", "CSV", "else", "temporary", "file", "will", "be", "used", ".", "tempkeep", "-", "if", "you", "specify", "your", "own", "file", "to", "use", "with", "tempfile", "=", "this", "controls", "whether", "it", "s", "cleaned", "up", "after", "using", "it" ]
python
train
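The tail end of that routine — read the CSV with explicit per-column dtypes, then coerce the date-formatted columns afterwards — can be reproduced with pandas alone; the column names and dtype map below are made up for illustration, not SAS output.

import io
import pandas as pd

csv_text = "name,height,visit\nalice,1.63,2021-03-01T09:30:00\nbob,1.80,2021-03-02T14:00:00\n"

# Numeric variables come back as float, everything else as str, as in the dts dict above.
dtypes = {'name': 'str', 'height': 'float', 'visit': 'str'}
df = pd.read_csv(io.StringIO(csv_text), index_col=False, engine='c', dtype=dtypes)

# Columns carrying date/time formats are converted afterwards; unparseable
# values become NaT rather than raising, mirroring errors='coerce' above.
df['visit'] = pd.to_datetime(df['visit'], errors='coerce')
print(df.dtypes)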
CyberReboot/vent
vent/api/tools.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/api/tools.py#L685-L696
def _start_remaining_containers(self, containers_remaining, tool_d): """ Select remaining containers that didn't have priorities to start """ s_containers = [] f_containers = [] for container in containers_remaining: s_containers, f_containers = self._start_container(container, tool_d, s_containers, f_containers) return (s_containers, f_containers)
[ "def", "_start_remaining_containers", "(", "self", ",", "containers_remaining", ",", "tool_d", ")", ":", "s_containers", "=", "[", "]", "f_containers", "=", "[", "]", "for", "container", "in", "containers_remaining", ":", "s_containers", ",", "f_containers", "=", "self", ".", "_start_container", "(", "container", ",", "tool_d", ",", "s_containers", ",", "f_containers", ")", "return", "(", "s_containers", ",", "f_containers", ")" ]
Select remaining containers that didn't have priorities to start
[ "Select", "remaining", "containers", "that", "didn", "t", "have", "priorities", "to", "start" ]
python
train
petrjasek/eve-elastic
eve_elastic/helpers.py
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/helpers.py#L81-L137
def _process_bulk_chunk(client, bulk_actions, raise_on_exception=True, raise_on_error=True, **kwargs): """ Send a bulk request to elasticsearch and process the output. """ # if raise on error is set, we need to collect errors per chunk before raising them errors = [] try: # send the actual request resp = client.bulk('\n'.join(bulk_actions) + '\n', **kwargs) except TransportError as e: # default behavior - just propagate exception if raise_on_exception: raise e # if we are not propagating, mark all actions in current chunk as failed err_message = str(e) exc_errors = [] # deserialize the data back, thisis expensive but only run on # errors if raise_on_exception is false, so shouldn't be a real # issue bulk_data = iter(map(client.transport.serializer.loads, bulk_actions)) while True: try: # collect all the information about failed actions action = next(bulk_data) op_type, action = action.popitem() info = {"error": err_message, "status": e.status_code, "exception": e} if op_type != 'delete': info['data'] = next(bulk_data) info.update(action) exc_errors.append({op_type: info}) except StopIteration: break # emulate standard behavior for failed actions if raise_on_error: raise BulkIndexError('%i document(s) failed to index.' % len(exc_errors), exc_errors) else: for err in exc_errors: yield False, err return # go through request-reponse pairs and detect failures for op_type, item in map(methodcaller('popitem'), resp['items']): ok = 200 <= item.get('status', 500) < 300 if not ok and raise_on_error: errors.append({op_type: item}) if ok or not errors: # if we are not just recording all errors to be able to raise # them all at once, yield items individually yield ok, {op_type: item} if errors: raise BulkIndexError('%i document(s) failed to index.' % len(errors), errors)
[ "def", "_process_bulk_chunk", "(", "client", ",", "bulk_actions", ",", "raise_on_exception", "=", "True", ",", "raise_on_error", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# if raise on error is set, we need to collect errors per chunk before raising them", "errors", "=", "[", "]", "try", ":", "# send the actual request", "resp", "=", "client", ".", "bulk", "(", "'\\n'", ".", "join", "(", "bulk_actions", ")", "+", "'\\n'", ",", "*", "*", "kwargs", ")", "except", "TransportError", "as", "e", ":", "# default behavior - just propagate exception", "if", "raise_on_exception", ":", "raise", "e", "# if we are not propagating, mark all actions in current chunk as failed", "err_message", "=", "str", "(", "e", ")", "exc_errors", "=", "[", "]", "# deserialize the data back, thisis expensive but only run on", "# errors if raise_on_exception is false, so shouldn't be a real", "# issue", "bulk_data", "=", "iter", "(", "map", "(", "client", ".", "transport", ".", "serializer", ".", "loads", ",", "bulk_actions", ")", ")", "while", "True", ":", "try", ":", "# collect all the information about failed actions", "action", "=", "next", "(", "bulk_data", ")", "op_type", ",", "action", "=", "action", ".", "popitem", "(", ")", "info", "=", "{", "\"error\"", ":", "err_message", ",", "\"status\"", ":", "e", ".", "status_code", ",", "\"exception\"", ":", "e", "}", "if", "op_type", "!=", "'delete'", ":", "info", "[", "'data'", "]", "=", "next", "(", "bulk_data", ")", "info", ".", "update", "(", "action", ")", "exc_errors", ".", "append", "(", "{", "op_type", ":", "info", "}", ")", "except", "StopIteration", ":", "break", "# emulate standard behavior for failed actions", "if", "raise_on_error", ":", "raise", "BulkIndexError", "(", "'%i document(s) failed to index.'", "%", "len", "(", "exc_errors", ")", ",", "exc_errors", ")", "else", ":", "for", "err", "in", "exc_errors", ":", "yield", "False", ",", "err", "return", "# go through request-reponse pairs and detect failures", "for", "op_type", ",", "item", "in", "map", "(", "methodcaller", "(", "'popitem'", ")", ",", "resp", "[", "'items'", "]", ")", ":", "ok", "=", "200", "<=", "item", ".", "get", "(", "'status'", ",", "500", ")", "<", "300", "if", "not", "ok", "and", "raise_on_error", ":", "errors", ".", "append", "(", "{", "op_type", ":", "item", "}", ")", "if", "ok", "or", "not", "errors", ":", "# if we are not just recording all errors to be able to raise", "# them all at once, yield items individually", "yield", "ok", ",", "{", "op_type", ":", "item", "}", "if", "errors", ":", "raise", "BulkIndexError", "(", "'%i document(s) failed to index.'", "%", "len", "(", "errors", ")", ",", "errors", ")" ]
Send a bulk request to elasticsearch and process the output.
[ "Send", "a", "bulk", "request", "to", "elasticsearch", "and", "process", "the", "output", "." ]
python
train
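The success test on the bulk response items is just a status-code range check; here is that piece in isolation, run over a hypothetical bulk response dict shaped like Elasticsearch's.

from operator import methodcaller

# Trimmed shape of a bulk response: one entry per action, keyed by op type.
resp = {'items': [
    {'index': {'status': 201}},
    {'index': {'status': 409, 'error': 'version conflict'}},
]}

errors = []
for op_type, item in map(methodcaller('popitem'), resp['items']):
    ok = 200 <= item.get('status', 500) < 300
    if not ok:
        errors.append({op_type: item})

print(len(errors))  # 1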
turicas/rows
rows/plugins/plugin_pdf.py
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_pdf.py#L446-L453
def selected_objects(self): """Filter out objects outside table boundaries""" return [ obj for obj in self.text_objects if contains_or_overlap(self.table_bbox, obj.bbox) ]
[ "def", "selected_objects", "(", "self", ")", ":", "return", "[", "obj", "for", "obj", "in", "self", ".", "text_objects", "if", "contains_or_overlap", "(", "self", ".", "table_bbox", ",", "obj", ".", "bbox", ")", "]" ]
Filter out objects outside table boundaries
[ "Filter", "out", "objects", "outside", "table", "boundaries" ]
python
train
sassoo/goldman
goldman/queryparams/fields.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/fields.py#L53-L76
def _validate_param(rtype, fields): """ Ensure the sparse fields exists on the models """ try: # raises ValueError if not found model = rtype_to_model(rtype) model_fields = model.all_fields except ValueError: raise InvalidQueryParams(**{ 'detail': 'The fields query param provided with a ' 'field type of "%s" is unknown.' % rtype, 'links': LINK, 'parameter': PARAM, }) for field in fields: if field not in model_fields: raise InvalidQueryParams(**{ 'detail': 'The fields query param "TYPE" of "%s" ' 'is not possible. It does not have a field ' 'by the name of "%s".' % (rtype, field), 'links': LINK, 'parameter': PARAM, })
[ "def", "_validate_param", "(", "rtype", ",", "fields", ")", ":", "try", ":", "# raises ValueError if not found", "model", "=", "rtype_to_model", "(", "rtype", ")", "model_fields", "=", "model", ".", "all_fields", "except", "ValueError", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The fields query param provided with a '", "'field type of \"%s\" is unknown.'", "%", "rtype", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")", "for", "field", "in", "fields", ":", "if", "field", "not", "in", "model_fields", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The fields query param \"TYPE\" of \"%s\" '", "'is not possible. It does not have a field '", "'by the name of \"%s\".'", "%", "(", "rtype", ",", "field", ")", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")" ]
Ensure the sparse fields exists on the models
[ "Ensure", "the", "sparse", "fields", "exists", "on", "the", "models" ]
python
train
saltstack/salt
salt/modules/out.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/out.py#L91-L115
def html_format(data, out='nested', opts=None, **kwargs): ''' Return the formatted string as HTML. data The JSON serializable object. out: ``nested`` The name of the output to use to transform the data. Default: ``nested``. opts Dictionary of configuration options. Default: ``__opts__``. kwargs Arguments to sent to the outputter module. CLI Example: .. code-block:: bash salt '*' out.html_format "{'key': 'value'}" out=yaml ''' if not opts: opts = __opts__ return salt.output.html_format(data, out, opts=opts, **kwargs)
[ "def", "html_format", "(", "data", ",", "out", "=", "'nested'", ",", "opts", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "opts", ":", "opts", "=", "__opts__", "return", "salt", ".", "output", ".", "html_format", "(", "data", ",", "out", ",", "opts", "=", "opts", ",", "*", "*", "kwargs", ")" ]
Return the formatted string as HTML. data The JSON serializable object. out: ``nested`` The name of the output to use to transform the data. Default: ``nested``. opts Dictionary of configuration options. Default: ``__opts__``. kwargs Arguments to sent to the outputter module. CLI Example: .. code-block:: bash salt '*' out.html_format "{'key': 'value'}" out=yaml
[ "Return", "the", "formatted", "string", "as", "HTML", "." ]
python
train
yeraydiazdiaz/lunr.py
lunr/tokenizer.py
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/tokenizer.py#L14-L59
def Tokenizer(obj, metadata=None, separator=SEPARATOR): """Splits a string into tokens ready to be inserted into the search index. This tokenizer will convert its parameter to a string by calling `str` and then will split this string on characters matching `separator`. Lists will have their elements converted to strings and wrapped in a lunr `Token`. Optional metadata can be passed to the tokenizer, this metadata will be cloned and added as metadata to every token that is created from the object to be tokenized. """ if obj is None: return [] metadata = metadata or {} if isinstance(obj, (list, tuple)): return [ Token(as_string(element).lower(), deepcopy(metadata)) for element in obj ] string = str(obj).strip().lower() length = len(string) tokens = [] slice_start = 0 for slice_end in range(length): char = string[slice_end] slice_length = slice_end - slice_start if separator.match(char) or slice_end == length - 1: if slice_length > 0: sl = slice(slice_start, slice_end if slice_end < length - 1 else None) token_metadata = {} token_metadata["position"] = [ slice_start, slice_length if slice_end < length - 1 else slice_length + 1, ] token_metadata["index"] = len(tokens) token_metadata.update(metadata) tokens.append(Token(string[sl], token_metadata)) slice_start = slice_end + 1 return tokens
[ "def", "Tokenizer", "(", "obj", ",", "metadata", "=", "None", ",", "separator", "=", "SEPARATOR", ")", ":", "if", "obj", "is", "None", ":", "return", "[", "]", "metadata", "=", "metadata", "or", "{", "}", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "Token", "(", "as_string", "(", "element", ")", ".", "lower", "(", ")", ",", "deepcopy", "(", "metadata", ")", ")", "for", "element", "in", "obj", "]", "string", "=", "str", "(", "obj", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "length", "=", "len", "(", "string", ")", "tokens", "=", "[", "]", "slice_start", "=", "0", "for", "slice_end", "in", "range", "(", "length", ")", ":", "char", "=", "string", "[", "slice_end", "]", "slice_length", "=", "slice_end", "-", "slice_start", "if", "separator", ".", "match", "(", "char", ")", "or", "slice_end", "==", "length", "-", "1", ":", "if", "slice_length", ">", "0", ":", "sl", "=", "slice", "(", "slice_start", ",", "slice_end", "if", "slice_end", "<", "length", "-", "1", "else", "None", ")", "token_metadata", "=", "{", "}", "token_metadata", "[", "\"position\"", "]", "=", "[", "slice_start", ",", "slice_length", "if", "slice_end", "<", "length", "-", "1", "else", "slice_length", "+", "1", ",", "]", "token_metadata", "[", "\"index\"", "]", "=", "len", "(", "tokens", ")", "token_metadata", ".", "update", "(", "metadata", ")", "tokens", ".", "append", "(", "Token", "(", "string", "[", "sl", "]", ",", "token_metadata", ")", ")", "slice_start", "=", "slice_end", "+", "1", "return", "tokens" ]
Splits a string into tokens ready to be inserted into the search index. This tokenizer will convert its parameter to a string by calling `str` and then will split this string on characters matching `separator`. Lists will have their elements converted to strings and wrapped in a lunr `Token`. Optional metadata can be passed to the tokenizer, this metadata will be cloned and added as metadata to every token that is created from the object to be tokenized.
[ "Splits", "a", "string", "into", "tokens", "ready", "to", "be", "inserted", "into", "the", "search", "index", "." ]
python
train
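As a rough, self-contained illustration of what the Tokenizer record above produces, here is a simplified sketch that lower-cases the input, splits on whitespace/hyphens, and records position and index metadata per token; it uses re.finditer rather than the library's per-character scan, and the separator pattern is an assumption, not lunr's exact default.

import re

# Assumed token pattern: anything that is not whitespace or a hyphen.
TOKEN = re.compile(r"[^\s\-]+")

def simple_tokenize(obj, metadata=None):
    """Sketch: lower-case, split, and attach position/index metadata to each token."""
    metadata = metadata or {}
    string = str(obj).strip().lower()
    tokens = []
    for match in TOKEN.finditer(string):
        token_metadata = {"position": [match.start(), len(match.group(0))],
                          "index": len(tokens)}
        token_metadata.update(metadata)
        tokens.append((match.group(0), token_metadata))
    return tokens

print(simple_tokenize("Green plant-based  salad"))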
spyder-ide/spyder
spyder/config/gui.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/gui.py#L86-L92
def set_font(font, section='appearance', option='font'): """Set font""" CONF.set(section, option+'/family', to_text_string(font.family())) CONF.set(section, option+'/size', float(font.pointSize())) CONF.set(section, option+'/italic', int(font.italic())) CONF.set(section, option+'/bold', int(font.bold())) FONT_CACHE[(section, option)] = font
[ "def", "set_font", "(", "font", ",", "section", "=", "'appearance'", ",", "option", "=", "'font'", ")", ":", "CONF", ".", "set", "(", "section", ",", "option", "+", "'/family'", ",", "to_text_string", "(", "font", ".", "family", "(", ")", ")", ")", "CONF", ".", "set", "(", "section", ",", "option", "+", "'/size'", ",", "float", "(", "font", ".", "pointSize", "(", ")", ")", ")", "CONF", ".", "set", "(", "section", ",", "option", "+", "'/italic'", ",", "int", "(", "font", ".", "italic", "(", ")", ")", ")", "CONF", ".", "set", "(", "section", ",", "option", "+", "'/bold'", ",", "int", "(", "font", ".", "bold", "(", ")", ")", ")", "FONT_CACHE", "[", "(", "section", ",", "option", ")", "]", "=", "font" ]
Set font
[ "Set", "font" ]
python
train
tamasgal/km3pipe
km3pipe/stats.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L170-L187
def bootstrap_params(rv_cont, data, n_iter=5, **kwargs): """Bootstrap the fit params of a distribution. Parameters ========== rv_cont: scipy.stats.rv_continuous instance The distribution which to fit. data: array-like, 1d The data on which to fit. n_iter: int [default=10] Number of bootstrap iterations. """ fit_res = [] for _ in range(n_iter): params = rv_cont.fit(resample_1d(data, **kwargs)) fit_res.append(params) fit_res = np.array(fit_res) return fit_res
[ "def", "bootstrap_params", "(", "rv_cont", ",", "data", ",", "n_iter", "=", "5", ",", "*", "*", "kwargs", ")", ":", "fit_res", "=", "[", "]", "for", "_", "in", "range", "(", "n_iter", ")", ":", "params", "=", "rv_cont", ".", "fit", "(", "resample_1d", "(", "data", ",", "*", "*", "kwargs", ")", ")", "fit_res", ".", "append", "(", "params", ")", "fit_res", "=", "np", ".", "array", "(", "fit_res", ")", "return", "fit_res" ]
Bootstrap the fit params of a distribution. Parameters ========== rv_cont: scipy.stats.rv_continuous instance The distribution which to fit. data: array-like, 1d The data on which to fit. n_iter: int [default=5] Number of bootstrap iterations.
[ "Bootstrap", "the", "fit", "params", "of", "a", "distribution", "." ]
python
train
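A runnable sketch of the bootstrap idea in the record above, using scipy.stats.norm and a plain numpy resample in place of km3pipe's resample_1d helper (the distribution and sample data are made up for illustration):

import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
data = rng.normal(loc=5.0, scale=2.0, size=500)    # toy dataset

fit_res = []
for _ in range(5):                                 # n_iter bootstrap rounds
    sample = rng.choice(data, size=len(data), replace=True)
    fit_res.append(stats.norm.fit(sample))         # (loc, scale) per round
fit_res = np.array(fit_res)

print(fit_res.mean(axis=0))   # bootstrap estimate of the fit params
print(fit_res.std(axis=0))    # and their spread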
kislyuk/ensure
ensure/main.py
https://github.com/kislyuk/ensure/blob/0a562a4b469ffbaf71c75dc4d394e94334c831f0/ensure/main.py#L608-L616
def called_with(self, *args, **kwargs): """ Before evaluating subsequent predicates, calls :attr:`subject` with given arguments (but unlike a direct call, catches and transforms any exceptions that arise during the call). """ self._args = args self._kwargs = kwargs self._call_subject = True return CallableInspector(self)
[ "def", "called_with", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_args", "=", "args", "self", ".", "_kwargs", "=", "kwargs", "self", ".", "_call_subject", "=", "True", "return", "CallableInspector", "(", "self", ")" ]
Before evaluating subsequent predicates, calls :attr:`subject` with given arguments (but unlike a direct call, catches and transforms any exceptions that arise during the call).
[ "Before", "evaluating", "subsequent", "predicates", "calls", ":", "attr", ":", "subject", "with", "given", "arguments", "(", "but", "unlike", "a", "direct", "call", "catches", "and", "transforms", "any", "exceptions", "that", "arise", "during", "the", "call", ")", "." ]
python
train
mistio/mist.client
src/mistclient/model.py
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/model.py#L159-L167
def images(self): """ Available images to be used when creating a new machine. :returns: A list of all available images. """ req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/images') images = req.get().json() return images
[ "def", "images", "(", "self", ")", ":", "req", "=", "self", ".", "request", "(", "self", ".", "mist_client", ".", "uri", "+", "'/clouds/'", "+", "self", ".", "id", "+", "'/images'", ")", "images", "=", "req", ".", "get", "(", ")", ".", "json", "(", ")", "return", "images" ]
Available images to be used when creating a new machine. :returns: A list of all available images.
[ "Available", "images", "to", "be", "used", "when", "creating", "a", "new", "machine", "." ]
python
train
juju/charm-helpers
charmhelpers/core/host.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/host.py#L1056-L1077
def install_ca_cert(ca_cert, name=None): """ Install the given cert as a trusted CA. The ``name`` is the stem of the filename where the cert is written, and if not provided, it will default to ``juju-{charm_name}``. If the cert is empty or None, or is unchanged, nothing is done. """ if not ca_cert: return if not isinstance(ca_cert, bytes): ca_cert = ca_cert.encode('utf8') if not name: name = 'juju-{}'.format(charm_name()) cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name) new_hash = hashlib.md5(ca_cert).hexdigest() if file_hash(cert_file) == new_hash: return log("Installing new CA cert at: {}".format(cert_file), level=INFO) write_file(cert_file, ca_cert) subprocess.check_call(['update-ca-certificates', '--fresh'])
[ "def", "install_ca_cert", "(", "ca_cert", ",", "name", "=", "None", ")", ":", "if", "not", "ca_cert", ":", "return", "if", "not", "isinstance", "(", "ca_cert", ",", "bytes", ")", ":", "ca_cert", "=", "ca_cert", ".", "encode", "(", "'utf8'", ")", "if", "not", "name", ":", "name", "=", "'juju-{}'", ".", "format", "(", "charm_name", "(", ")", ")", "cert_file", "=", "'/usr/local/share/ca-certificates/{}.crt'", ".", "format", "(", "name", ")", "new_hash", "=", "hashlib", ".", "md5", "(", "ca_cert", ")", ".", "hexdigest", "(", ")", "if", "file_hash", "(", "cert_file", ")", "==", "new_hash", ":", "return", "log", "(", "\"Installing new CA cert at: {}\"", ".", "format", "(", "cert_file", ")", ",", "level", "=", "INFO", ")", "write_file", "(", "cert_file", ",", "ca_cert", ")", "subprocess", ".", "check_call", "(", "[", "'update-ca-certificates'", ",", "'--fresh'", "]", ")" ]
Install the given cert as a trusted CA. The ``name`` is the stem of the filename where the cert is written, and if not provided, it will default to ``juju-{charm_name}``. If the cert is empty or None, or is unchanged, nothing is done.
[ "Install", "the", "given", "cert", "as", "a", "trusted", "CA", "." ]
python
train
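The install_ca_cert record above avoids rewriting the cert (and refreshing the CA store) when the content hash is unchanged. A stripped-down sketch of that guard using only the standard library and an ordinary file path, with the update-ca-certificates call left as a comment:

import hashlib
import os

def install_cert(ca_cert, cert_file="/tmp/example-ca.crt"):
    """Write ca_cert to cert_file only if it is non-empty and actually changed."""
    if not ca_cert:
        return
    if not isinstance(ca_cert, bytes):
        ca_cert = ca_cert.encode("utf8")
    new_hash = hashlib.md5(ca_cert).hexdigest()
    if os.path.exists(cert_file):
        with open(cert_file, "rb") as f:
            if hashlib.md5(f.read()).hexdigest() == new_hash:
                return  # unchanged: skip the write and the CA store refresh
    with open(cert_file, "wb") as f:
        f.write(ca_cert)
    # a real installer would now run: update-ca-certificates --fresh

install_cert("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----")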
a1ezzz/wasp-general
wasp_general/network/clients/file.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/clients/file.py#L114-L122
def make_directory(self, directory_name, *args, **kwargs): """ :meth:`.WNetworkClientProto.make_directory` method implementation """ previous_path = self.session_path() try: self.session_path(directory_name) os.mkdir(self.full_path()) finally: self.session_path(previous_path)
[ "def", "make_directory", "(", "self", ",", "directory_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "previous_path", "=", "self", ".", "session_path", "(", ")", "try", ":", "self", ".", "session_path", "(", "directory_name", ")", "os", ".", "mkdir", "(", "self", ".", "full_path", "(", ")", ")", "finally", ":", "self", ".", "session_path", "(", "previous_path", ")" ]
:meth:`.WNetworkClientProto.make_directory` method implementation
[ ":", "meth", ":", ".", "WNetworkClientProto", ".", "make_directory", "method", "implementation" ]
python
train
bcbio/bcbio-nextgen
bcbio/rnaseq/gtf.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L284-L305
def tx2genedict(gtf, keep_version=False): """ produce a tx2gene dictionary from a GTF file """ d = {} with open_gzipsafe(gtf) as in_handle: for line in in_handle: if "gene_id" not in line or "transcript_id" not in line: continue geneid = line.split("gene_id")[1].split(" ")[1] geneid = _strip_non_alphanumeric(geneid) txid = line.split("transcript_id")[1].split(" ")[1] txid = _strip_non_alphanumeric(txid) if keep_version and "transcript_version" in line: txversion = line.split("transcript_version")[1].split(" ")[1] txversion = _strip_non_alphanumeric(txversion) txid += "." + txversion if has_transcript_version(line) and not keep_version: txid = _strip_feature_version(txid) geneid = _strip_feature_version(geneid) d[txid] = geneid return d
[ "def", "tx2genedict", "(", "gtf", ",", "keep_version", "=", "False", ")", ":", "d", "=", "{", "}", "with", "open_gzipsafe", "(", "gtf", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "if", "\"gene_id\"", "not", "in", "line", "or", "\"transcript_id\"", "not", "in", "line", ":", "continue", "geneid", "=", "line", ".", "split", "(", "\"gene_id\"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", "geneid", "=", "_strip_non_alphanumeric", "(", "geneid", ")", "txid", "=", "line", ".", "split", "(", "\"transcript_id\"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", "txid", "=", "_strip_non_alphanumeric", "(", "txid", ")", "if", "keep_version", "and", "\"transcript_version\"", "in", "line", ":", "txversion", "=", "line", ".", "split", "(", "\"transcript_version\"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "[", "1", "]", "txversion", "=", "_strip_non_alphanumeric", "(", "txversion", ")", "txid", "+=", "\".\"", "+", "txversion", "if", "has_transcript_version", "(", "line", ")", "and", "not", "keep_version", ":", "txid", "=", "_strip_feature_version", "(", "txid", ")", "geneid", "=", "_strip_feature_version", "(", "geneid", ")", "d", "[", "txid", "]", "=", "geneid", "return", "d" ]
produce a tx2gene dictionary from a GTF file
[ "produce", "a", "tx2gene", "dictionary", "from", "a", "GTF", "file" ]
python
train
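To make the split-based attribute parsing in tx2genedict concrete, here is a self-contained run over one made-up GTF line (the IDs are illustrative, and the helper below is a simplified stand-in for _strip_non_alphanumeric):

line = ('1\thavana\ttranscript\t11869\t14409\t.\t+\t.\t'
        'gene_id "ENSG00000223972"; transcript_id "ENST00000456328"; transcript_version "2";')

def strip_quotes(value):
    # simplified stand-in for _strip_non_alphanumeric
    return value.strip('";')

geneid = strip_quotes(line.split("gene_id")[1].split(" ")[1])
txid = strip_quotes(line.split("transcript_id")[1].split(" ")[1])
print({txid: geneid})   # {'ENST00000456328': 'ENSG00000223972'}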
quantumlib/Cirq
cirq/contrib/acquaintance/executor.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/contrib/acquaintance/executor.py#L61-L65
def get_operations(self, indices: Sequence[LogicalIndex], qubits: Sequence[ops.Qid] ) -> ops.OP_TREE: """Gets the logical operations to apply to qubits."""
[ "def", "get_operations", "(", "self", ",", "indices", ":", "Sequence", "[", "LogicalIndex", "]", ",", "qubits", ":", "Sequence", "[", "ops", ".", "Qid", "]", ")", "->", "ops", ".", "OP_TREE", ":" ]
Gets the logical operations to apply to qubits.
[ "Gets", "the", "logical", "operations", "to", "apply", "to", "qubits", "." ]
python
train
nerdvegas/rez
src/rez/resolved_context.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/resolved_context.py#L1358-L1473
def from_dict(cls, d, identifier_str=None): """Load a `ResolvedContext` from a dict. Args: d (dict): Dict containing context data. identifier_str (str): String identifying the context, this is only used to display in an error string if a serialization version mismatch is detected. Returns: `ResolvedContext` object. """ # check serialization version def _print_version(value): return '.'.join(str(x) for x in value) toks = str(d["serialize_version"]).split('.') load_ver = tuple(int(x) for x in toks) curr_ver = ResolvedContext.serialize_version if load_ver[0] > curr_ver[0]: msg = ["The context"] if identifier_str: msg.append("in %s" % identifier_str) msg.append("was written by a newer version of Rez. The load may " "fail (serialize version %d > %d)" % (_print_version(load_ver), _print_version(curr_ver))) print >> sys.stderr, ' '.join(msg) # create and init the context r = ResolvedContext.__new__(ResolvedContext) r.load_path = None r.pre_resolve_bindings = None r.timestamp = d["timestamp"] r.building = d["building"] r.caching = d["caching"] r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]] r._package_requests = [PackageRequest(x) for x in d["package_requests"]] r.package_paths = d["package_paths"] r.rez_version = d["rez_version"] r.rez_path = d["rez_path"] r.user = d["user"] r.host = d["host"] r.platform = d["platform"] r.arch = d["arch"] r.os = d["os"] r.created = d["created"] r.verbosity = d.get("verbosity", 0) r.status_ = ResolverStatus[d["status"]] r.failure_description = d["failure_description"] r.solve_time = d["solve_time"] r.load_time = d["load_time"] r.graph_string = d["graph"] r.graph_ = None r._resolved_packages = [] for d_ in d["resolved_packages"]: variant_handle = d_ if load_ver < (4, 0): # -- SINCE SERIALIZE VERSION 4.0 from rez.utils.backcompat import convert_old_variant_handle variant_handle = convert_old_variant_handle(variant_handle) variant = get_variant(variant_handle) variant.set_context(r) r._resolved_packages.append(variant) # -- SINCE SERIALIZE VERSION 1 r.requested_timestamp = d.get("requested_timestamp", 0) # -- SINCE SERIALIZE VERSION 2 r.parent_suite_path = d.get("parent_suite_path") r.suite_context_name = d.get("suite_context_name") # -- SINCE SERIALIZE VERSION 3 r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")] patch_locks = d.get("patch_locks", {}) r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks) # -- SINCE SERIALIZE VERSION 4.0 r.from_cache = d.get("from_cache", False) # -- SINCE SERIALIZE VERSION 4.1 data = d.get("package_filter", []) r.package_filter = PackageFilterList.from_pod(data) # -- SINCE SERIALIZE VERSION 4.2 data = d.get("package_orderers") if data: r.package_orderers = [package_order.from_pod(x) for x in data] else: r.package_orderers = None # -- SINCE SERIALIZE VERSION 4.3 r.num_loaded_packages = d.get("num_loaded_packages", -1) # track context usage if config.context_tracking_host: data = dict((k, v) for k, v in d.iteritems() if k in config.context_tracking_context_fields) r._track_context(data, action="sourced") return r
[ "def", "from_dict", "(", "cls", ",", "d", ",", "identifier_str", "=", "None", ")", ":", "# check serialization version", "def", "_print_version", "(", "value", ")", ":", "return", "'.'", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "value", ")", "toks", "=", "str", "(", "d", "[", "\"serialize_version\"", "]", ")", ".", "split", "(", "'.'", ")", "load_ver", "=", "tuple", "(", "int", "(", "x", ")", "for", "x", "in", "toks", ")", "curr_ver", "=", "ResolvedContext", ".", "serialize_version", "if", "load_ver", "[", "0", "]", ">", "curr_ver", "[", "0", "]", ":", "msg", "=", "[", "\"The context\"", "]", "if", "identifier_str", ":", "msg", ".", "append", "(", "\"in %s\"", "%", "identifier_str", ")", "msg", ".", "append", "(", "\"was written by a newer version of Rez. The load may \"", "\"fail (serialize version %d > %d)\"", "%", "(", "_print_version", "(", "load_ver", ")", ",", "_print_version", "(", "curr_ver", ")", ")", ")", "print", ">>", "sys", ".", "stderr", ",", "' '", ".", "join", "(", "msg", ")", "# create and init the context", "r", "=", "ResolvedContext", ".", "__new__", "(", "ResolvedContext", ")", "r", ".", "load_path", "=", "None", "r", ".", "pre_resolve_bindings", "=", "None", "r", ".", "timestamp", "=", "d", "[", "\"timestamp\"", "]", "r", ".", "building", "=", "d", "[", "\"building\"", "]", "r", ".", "caching", "=", "d", "[", "\"caching\"", "]", "r", ".", "implicit_packages", "=", "[", "PackageRequest", "(", "x", ")", "for", "x", "in", "d", "[", "\"implicit_packages\"", "]", "]", "r", ".", "_package_requests", "=", "[", "PackageRequest", "(", "x", ")", "for", "x", "in", "d", "[", "\"package_requests\"", "]", "]", "r", ".", "package_paths", "=", "d", "[", "\"package_paths\"", "]", "r", ".", "rez_version", "=", "d", "[", "\"rez_version\"", "]", "r", ".", "rez_path", "=", "d", "[", "\"rez_path\"", "]", "r", ".", "user", "=", "d", "[", "\"user\"", "]", "r", ".", "host", "=", "d", "[", "\"host\"", "]", "r", ".", "platform", "=", "d", "[", "\"platform\"", "]", "r", ".", "arch", "=", "d", "[", "\"arch\"", "]", "r", ".", "os", "=", "d", "[", "\"os\"", "]", "r", ".", "created", "=", "d", "[", "\"created\"", "]", "r", ".", "verbosity", "=", "d", ".", "get", "(", "\"verbosity\"", ",", "0", ")", "r", ".", "status_", "=", "ResolverStatus", "[", "d", "[", "\"status\"", "]", "]", "r", ".", "failure_description", "=", "d", "[", "\"failure_description\"", "]", "r", ".", "solve_time", "=", "d", "[", "\"solve_time\"", "]", "r", ".", "load_time", "=", "d", "[", "\"load_time\"", "]", "r", ".", "graph_string", "=", "d", "[", "\"graph\"", "]", "r", ".", "graph_", "=", "None", "r", ".", "_resolved_packages", "=", "[", "]", "for", "d_", "in", "d", "[", "\"resolved_packages\"", "]", ":", "variant_handle", "=", "d_", "if", "load_ver", "<", "(", "4", ",", "0", ")", ":", "# -- SINCE SERIALIZE VERSION 4.0", "from", "rez", ".", "utils", ".", "backcompat", "import", "convert_old_variant_handle", "variant_handle", "=", "convert_old_variant_handle", "(", "variant_handle", ")", "variant", "=", "get_variant", "(", "variant_handle", ")", "variant", ".", "set_context", "(", "r", ")", "r", ".", "_resolved_packages", ".", "append", "(", "variant", ")", "# -- SINCE SERIALIZE VERSION 1", "r", ".", "requested_timestamp", "=", "d", ".", "get", "(", "\"requested_timestamp\"", ",", "0", ")", "# -- SINCE SERIALIZE VERSION 2", "r", ".", "parent_suite_path", "=", "d", ".", "get", "(", "\"parent_suite_path\"", ")", "r", ".", "suite_context_name", "=", "d", ".", "get", "(", "\"suite_context_name\"", ")", "# -- SINCE 
SERIALIZE VERSION 3", "r", ".", "default_patch_lock", "=", "PatchLock", "[", "d", ".", "get", "(", "\"default_patch_lock\"", ",", "\"no_lock\"", ")", "]", "patch_locks", "=", "d", ".", "get", "(", "\"patch_locks\"", ",", "{", "}", ")", "r", ".", "patch_locks", "=", "dict", "(", "(", "k", ",", "PatchLock", "[", "v", "]", ")", "for", "k", ",", "v", "in", "patch_locks", ")", "# -- SINCE SERIALIZE VERSION 4.0", "r", ".", "from_cache", "=", "d", ".", "get", "(", "\"from_cache\"", ",", "False", ")", "# -- SINCE SERIALIZE VERSION 4.1", "data", "=", "d", ".", "get", "(", "\"package_filter\"", ",", "[", "]", ")", "r", ".", "package_filter", "=", "PackageFilterList", ".", "from_pod", "(", "data", ")", "# -- SINCE SERIALIZE VERSION 4.2", "data", "=", "d", ".", "get", "(", "\"package_orderers\"", ")", "if", "data", ":", "r", ".", "package_orderers", "=", "[", "package_order", ".", "from_pod", "(", "x", ")", "for", "x", "in", "data", "]", "else", ":", "r", ".", "package_orderers", "=", "None", "# -- SINCE SERIALIZE VERSION 4.3", "r", ".", "num_loaded_packages", "=", "d", ".", "get", "(", "\"num_loaded_packages\"", ",", "-", "1", ")", "# track context usage", "if", "config", ".", "context_tracking_host", ":", "data", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "d", ".", "iteritems", "(", ")", "if", "k", "in", "config", ".", "context_tracking_context_fields", ")", "r", ".", "_track_context", "(", "data", ",", "action", "=", "\"sourced\"", ")", "return", "r" ]
Load a `ResolvedContext` from a dict. Args: d (dict): Dict containing context data. identifier_str (str): String identifying the context, this is only used to display in an error string if a serialization version mismatch is detected. Returns: `ResolvedContext` object.
[ "Load", "a", "ResolvedContext", "from", "a", "dict", "." ]
python
train
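The load path above starts by comparing the stored serialize version's major number against the current one and warning when the data was written by a newer serializer. A small standalone sketch of that check (the current version tuple here is illustrative, not rez's actual constant):

import sys

def check_serialize_version(stored, current=(4, 3)):
    """Warn if the stored context was serialized by a newer major version."""
    load_ver = tuple(int(x) for x in str(stored).split("."))
    if load_ver[0] > current[0]:
        print("warning: context written by a newer serializer (%s > %s); load may fail"
              % (".".join(map(str, load_ver)), ".".join(map(str, current))),
              file=sys.stderr)

check_serialize_version("5.0")   # warns
check_serialize_version("4.1")   # silent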
happyleavesaoc/python-motorparts
motorparts/__init__.py
https://github.com/happyleavesaoc/python-motorparts/blob/4a6b4dc72dd45524dd64a7a079478bd98c55215c/motorparts/__init__.py#L168-L173
def _get_model(vehicle): """Clean the model field. Best guess.""" model = vehicle['model'] model = model.replace(vehicle['year'], '') model = model.replace(vehicle['make'], '') return model.strip().split(' ')[0]
[ "def", "_get_model", "(", "vehicle", ")", ":", "model", "=", "vehicle", "[", "'model'", "]", "model", "=", "model", ".", "replace", "(", "vehicle", "[", "'year'", "]", ",", "''", ")", "model", "=", "model", ".", "replace", "(", "vehicle", "[", "'make'", "]", ",", "''", ")", "return", "model", ".", "strip", "(", ")", ".", "split", "(", "' '", ")", "[", "0", "]" ]
Clean the model field. Best guess.
[ "Clean", "the", "model", "field", ".", "Best", "guess", "." ]
python
train
limodou/uliweb
uliweb/orm/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L2677-L2693
def filter(self, *condition): """ If there are multple condition, then treats them *and* relastion. """ if not condition: return self cond = true() for c in condition: if c is not None: if isinstance(c, (str, unicode)): c = text(c) cond = and_(c, cond) if self.condition is not None: self.condition = and_(cond, self.condition) else: self.condition = cond return self
[ "def", "filter", "(", "self", ",", "*", "condition", ")", ":", "if", "not", "condition", ":", "return", "self", "cond", "=", "true", "(", ")", "for", "c", "in", "condition", ":", "if", "c", "is", "not", "None", ":", "if", "isinstance", "(", "c", ",", "(", "str", ",", "unicode", ")", ")", ":", "c", "=", "text", "(", "c", ")", "cond", "=", "and_", "(", "c", ",", "cond", ")", "if", "self", ".", "condition", "is", "not", "None", ":", "self", ".", "condition", "=", "and_", "(", "cond", ",", "self", ".", "condition", ")", "else", ":", "self", ".", "condition", "=", "cond", "return", "self" ]
If there are multiple conditions, then treat them as an *and* relation.
[ "If", "there", "are", "multiple", "conditions", "then", "treat", "them", "as", "an", "*", "and", "*", "relation", "." ]
python
train
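The filter record above accumulates conditions into a single AND clause, wrapping bare strings in text(). A hedged sketch of the same accumulation written directly against SQLAlchemy's and_/true/text primitives (outside uliweb, and with made-up condition strings):

from sqlalchemy import and_, text, true

def combine(*condition):
    cond = true()
    for c in condition:
        if c is not None:
            if isinstance(c, str):
                c = text(c)          # wrap raw SQL fragments
            cond = and_(c, cond)
    return cond

print(combine("age > 18", None, "name = 'bob'"))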
ozgur/python-firebase
firebase/firebase.py
https://github.com/ozgur/python-firebase/blob/6b96b326f6d8f477503ca42fdfbd81bcbe1f9e0d/firebase/firebase.py#L319-L329
def post(self, url, data, params=None, headers=None, connection=None): """ Synchronous POST request. ``data`` must be a JSONable value. """ params = params or {} headers = headers or {} endpoint = self._build_endpoint_url(url, None) self._authenticate(params, headers) data = json.dumps(data, cls=JSONEncoder) return make_post_request(endpoint, data, params, headers, connection=connection)
[ "def", "post", "(", "self", ",", "url", ",", "data", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "connection", "=", "None", ")", ":", "params", "=", "params", "or", "{", "}", "headers", "=", "headers", "or", "{", "}", "endpoint", "=", "self", ".", "_build_endpoint_url", "(", "url", ",", "None", ")", "self", ".", "_authenticate", "(", "params", ",", "headers", ")", "data", "=", "json", ".", "dumps", "(", "data", ",", "cls", "=", "JSONEncoder", ")", "return", "make_post_request", "(", "endpoint", ",", "data", ",", "params", ",", "headers", ",", "connection", "=", "connection", ")" ]
Synchronous POST request. ``data`` must be a JSONable value.
[ "Synchronous", "POST", "request", ".", "data", "must", "be", "a", "JSONable", "value", "." ]
python
valid
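A hedged usage sketch for the synchronous post above; the FirebaseApplication entry point, URL, and node path are assumptions about a typical setup for this client library, not verified against its docs:

from firebase import firebase

# Assumed constructor: database URL plus no authentication.
app = firebase.FirebaseApplication('https://example.firebaseio.com', None)
result = app.post('/messages', {'text': 'hello', 'priority': 1})
print(result)   # typically the generated node name, e.g. {'name': '-Jqst...'}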
PythonCharmers/python-future
src/future/backports/email/message.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/message.py#L600-L620
def get_params(self, failobj=None, header='content-type', unquote=True): """Return the message's Content-Type parameters, as a list. The elements of the returned list are 2-tuples of key/value pairs, as split on the `=' sign. The left hand side of the `=' is the key, while the right hand side is the value. If there is no `=' sign in the parameter the value is the empty string. The value is as described in the get_param() method. Optional failobj is the object to return if there is no Content-Type header. Optional header is the header to search instead of Content-Type. If unquote is True, the value is unquoted. """ missing = object() params = self._get_params_preserve(missing, header) if params is missing: return failobj if unquote: return [(k, _unquotevalue(v)) for k, v in params] else: return params
[ "def", "get_params", "(", "self", ",", "failobj", "=", "None", ",", "header", "=", "'content-type'", ",", "unquote", "=", "True", ")", ":", "missing", "=", "object", "(", ")", "params", "=", "self", ".", "_get_params_preserve", "(", "missing", ",", "header", ")", "if", "params", "is", "missing", ":", "return", "failobj", "if", "unquote", ":", "return", "[", "(", "k", ",", "_unquotevalue", "(", "v", ")", ")", "for", "k", ",", "v", "in", "params", "]", "else", ":", "return", "params" ]
Return the message's Content-Type parameters, as a list. The elements of the returned list are 2-tuples of key/value pairs, as split on the `=' sign. The left hand side of the `=' is the key, while the right hand side is the value. If there is no `=' sign in the parameter the value is the empty string. The value is as described in the get_param() method. Optional failobj is the object to return if there is no Content-Type header. Optional header is the header to search instead of Content-Type. If unquote is True, the value is unquoted.
[ "Return", "the", "message", "s", "Content", "-", "Type", "parameters", "as", "a", "list", "." ]
python
train
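Because this record wraps the standard library's email machinery, a short runnable example shows the shape of the returned key/value pairs:

from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="utf-8"; format=flowed'
print(msg.get_params())
# [('text/plain', ''), ('charset', 'utf-8'), ('format', 'flowed')]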
jkenlooper/chill
src/chill/migrations.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/migrations.py#L10-L87
def migrate1(): "Migrate from version 0 to 1" initial = [ "create table Chill (version integer);", "insert into Chill (version) values (1);", "alter table SelectSQL rename to Query;", "alter table Node add column template integer references Template (id) on delete set null;", "alter table Node add column query integer references Query (id) on delete set null;" ] cleanup = [ "drop table SelectSQL_Node;", "drop table Template_Node;" ] c = db.cursor() try: c.execute("select version from Chill limit 1;") except sqlite3.DatabaseError as err: pass result = c.fetchone() if result: version = result[0] if version == 1: current_app.logger.warn("Migration from version 0 to 1 is not needed.") else: current_app.logger.warn("Migration from version 0 to {0} is not supported.".format(version)) return try: for query in initial: c.execute(query) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: c.execute(fetch_query_string('select_all_nodes.sql')) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) result = c.fetchall() if result: (result, col_names) = rowify(result, c.description) for kw in result: try: c.execute(""" update Node set template = ( select t.id from Template as t join Template_Node as tn on ( tn.template_id = t.id ) join Node as n on ( n.id = tn.node_id ) where n.id is :node_id group by t.id) where id is :node_id; """, {'node_id':kw['id']}) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: c.execute(""" update Node set query = ( select s.id from Query as s join SelectSQL_Node as sn on ( sn.selectsql_id = s.id ) join Node as n on ( n.id = sn.node_id ) where n.id is :node_id group by s.id) where id is :node_id; """, {'node_id':kw['id']}) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) try: for query in cleanup: c.execute(query) except sqlite3.DatabaseError as err: current_app.logger.error("DatabaseError: %s", err) db.commit()
[ "def", "migrate1", "(", ")", ":", "initial", "=", "[", "\"create table Chill (version integer);\"", ",", "\"insert into Chill (version) values (1);\"", ",", "\"alter table SelectSQL rename to Query;\"", ",", "\"alter table Node add column template integer references Template (id) on delete set null;\"", ",", "\"alter table Node add column query integer references Query (id) on delete set null;\"", "]", "cleanup", "=", "[", "\"drop table SelectSQL_Node;\"", ",", "\"drop table Template_Node;\"", "]", "c", "=", "db", ".", "cursor", "(", ")", "try", ":", "c", ".", "execute", "(", "\"select version from Chill limit 1;\"", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "pass", "result", "=", "c", ".", "fetchone", "(", ")", "if", "result", ":", "version", "=", "result", "[", "0", "]", "if", "version", "==", "1", ":", "current_app", ".", "logger", ".", "warn", "(", "\"Migration from version 0 to 1 is not needed.\"", ")", "else", ":", "current_app", ".", "logger", ".", "warn", "(", "\"Migration from version 0 to {0} is not supported.\"", ".", "format", "(", "version", ")", ")", "return", "try", ":", "for", "query", "in", "initial", ":", "c", ".", "execute", "(", "query", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "try", ":", "c", ".", "execute", "(", "fetch_query_string", "(", "'select_all_nodes.sql'", ")", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "result", "=", "c", ".", "fetchall", "(", ")", "if", "result", ":", "(", "result", ",", "col_names", ")", "=", "rowify", "(", "result", ",", "c", ".", "description", ")", "for", "kw", "in", "result", ":", "try", ":", "c", ".", "execute", "(", "\"\"\"\n update Node set template = (\n select t.id from Template as t\n join Template_Node as tn on ( tn.template_id = t.id )\n join Node as n on ( n.id = tn.node_id )\n where n.id is :node_id\n group by t.id)\n where id is :node_id;\n \"\"\"", ",", "{", "'node_id'", ":", "kw", "[", "'id'", "]", "}", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "try", ":", "c", ".", "execute", "(", "\"\"\"\n update Node set query = (\n select s.id from Query as s\n join SelectSQL_Node as sn on ( sn.selectsql_id = s.id )\n join Node as n on ( n.id = sn.node_id )\n where n.id is :node_id\n group by s.id)\n where id is :node_id;\n \"\"\"", ",", "{", "'node_id'", ":", "kw", "[", "'id'", "]", "}", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "try", ":", "for", "query", "in", "cleanup", ":", "c", ".", "execute", "(", "query", ")", "except", "sqlite3", ".", "DatabaseError", "as", "err", ":", "current_app", ".", "logger", ".", "error", "(", "\"DatabaseError: %s\"", ",", "err", ")", "db", ".", "commit", "(", ")" ]
Migrate from version 0 to 1
[ "Migrate", "from", "version", "0", "to", "1" ]
python
train
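A self-contained sketch of the version-gated migration pattern used by migrate1, run against an in-memory SQLite database with a made-up schema (only the detect-version-then-alter flow is kept):

import sqlite3

db = sqlite3.connect(":memory:")
c = db.cursor()
c.execute("create table Node (id integer primary key, name text);")

def migrate(cursor):
    """Create the version table and alter Node, but only on a version-0 database."""
    try:
        cursor.execute("select version from Chill limit 1;")
        row = cursor.fetchone()
    except sqlite3.DatabaseError:
        row = None                      # no Chill table yet: version 0
    if row:
        return                          # already migrated
    cursor.execute("create table Chill (version integer);")
    cursor.execute("insert into Chill (version) values (1);")
    cursor.execute("alter table Node add column template integer;")

migrate(c)
db.commit()
print(c.execute("select version from Chill;").fetchone())   # (1,)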
bububa/pyTOP
pyTOP/campaign.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/campaign.py#L456-L466
def recommend_get(self, adgroup_id, **kwargs): '''xxxxx.xxxxx.keywords.recommend.get =================================== 取得一个推广组的推荐关键词列表''' request = TOPRequest('xxxxx.xxxxx.keywords.recommend.get') request['adgroup_id'] = adgroup_id for k, v in kwargs.iteritems(): if k not in ('nick', 'order_by', 'search', 'pertinence', 'page_size', 'page_no') and v==None: continue request[k] = v self.create(self.execute(request), models = {'result':RecommendWordPage}) return self.result
[ "def", "recommend_get", "(", "self", ",", "adgroup_id", ",", "*", "*", "kwargs", ")", ":", "request", "=", "TOPRequest", "(", "'xxxxx.xxxxx.keywords.recommend.get'", ")", "request", "[", "'adgroup_id'", "]", "=", "adgroup_id", "for", "k", ",", "v", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "k", "not", "in", "(", "'nick'", ",", "'order_by'", ",", "'search'", ",", "'pertinence'", ",", "'page_size'", ",", "'page_no'", ")", "and", "v", "==", "None", ":", "continue", "request", "[", "k", "]", "=", "v", "self", ".", "create", "(", "self", ".", "execute", "(", "request", ")", ",", "models", "=", "{", "'result'", ":", "RecommendWordPage", "}", ")", "return", "self", ".", "result" ]
xxxxx.xxxxx.keywords.recommend.get =================================== Get the list of recommended keywords for an ad group
[ "xxxxx", ".", "xxxxx", ".", "keywords", ".", "recommend", ".", "get", "===================================", "Get", "the", "list", "of", "recommended", "keywords", "for", "an", "ad", "group" ]
python
train
newville/wxmplot
wxmplot/plotframe.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/plotframe.py#L45-L47
def scatterplot(self, x, y, **kw): """plot after clearing current plot """ self.panel.scatterplot(x, y, **kw)
[ "def", "scatterplot", "(", "self", ",", "x", ",", "y", ",", "*", "*", "kw", ")", ":", "self", ".", "panel", ".", "scatterplot", "(", "x", ",", "y", ",", "*", "*", "kw", ")" ]
plot after clearing current plot
[ "plot", "after", "clearing", "current", "plot" ]
python
train
yyuu/botornado
boto/ec2/autoscale/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/autoscale/__init__.py#L344-L359
def delete_scheduled_action(self, scheduled_action_name, autoscale_group=None): """ Deletes a previously scheduled action. :type scheduled_action_name: str :param scheduled_action_name: The name of the action you want to delete. :type autoscale_group: str :param autoscale_group: The name of the autoscale group. """ params = {'ScheduledActionName': scheduled_action_name} if autoscale_group: params['AutoScalingGroupName'] = autoscale_group return self.get_status('DeleteScheduledAction', params)
[ "def", "delete_scheduled_action", "(", "self", ",", "scheduled_action_name", ",", "autoscale_group", "=", "None", ")", ":", "params", "=", "{", "'ScheduledActionName'", ":", "scheduled_action_name", "}", "if", "autoscale_group", ":", "params", "[", "'AutoScalingGroupName'", "]", "=", "autoscale_group", "return", "self", ".", "get_status", "(", "'DeleteScheduledAction'", ",", "params", ")" ]
Deletes a previously scheduled action. :type scheduled_action_name: str :param scheduled_action_name: The name of the action you want to delete. :type autoscale_group: str :param autoscale_group: The name of the autoscale group.
[ "Deletes", "a", "previously", "scheduled", "action", "." ]
python
train
deepmind/pysc2
pysc2/lib/remote_controller.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/remote_controller.py#L248-L255
def actions(self, req_action): """Send a `sc_pb.RequestAction`, which may include multiple actions.""" if FLAGS.sc2_log_actions: for action in req_action.actions: sys.stderr.write(str(action)) sys.stderr.flush() return self._client.send(action=req_action)
[ "def", "actions", "(", "self", ",", "req_action", ")", ":", "if", "FLAGS", ".", "sc2_log_actions", ":", "for", "action", "in", "req_action", ".", "actions", ":", "sys", ".", "stderr", ".", "write", "(", "str", "(", "action", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "return", "self", ".", "_client", ".", "send", "(", "action", "=", "req_action", ")" ]
Send a `sc_pb.RequestAction`, which may include multiple actions.
[ "Send", "a", "sc_pb", ".", "RequestAction", "which", "may", "include", "multiple", "actions", "." ]
python
train
arista-eosplus/pyeapi
pyeapi/api/routemaps.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/routemaps.py#L222-L257
def set_match_statements(self, name, action, seqno, statements): """Configures the match statements within the routemap clause. The final configuration of match statements will reflect the list of statements passed into the statements attribute. This implies match statements found in the routemap that are not specified in the statements attribute will be removed. Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. statements (list): A list of the match-related statements. Note that the statements should omit the leading match. Returns: True if the operation succeeds otherwise False """ try: current_statements = self.get(name)[action][seqno]['match'] except: current_statements = [] commands = list() # remove set statements from current routemap for entry in set(current_statements).difference(statements): commands.append('route-map %s %s %s' % (name, action, seqno)) commands.append('no match %s' % entry) # add new set statements to the routemap for entry in set(statements).difference(current_statements): commands.append('route-map %s %s %s' % (name, action, seqno)) commands.append('match %s' % entry) return self.configure(commands) if commands else True
[ "def", "set_match_statements", "(", "self", ",", "name", ",", "action", ",", "seqno", ",", "statements", ")", ":", "try", ":", "current_statements", "=", "self", ".", "get", "(", "name", ")", "[", "action", "]", "[", "seqno", "]", "[", "'match'", "]", "except", ":", "current_statements", "=", "[", "]", "commands", "=", "list", "(", ")", "# remove set statements from current routemap", "for", "entry", "in", "set", "(", "current_statements", ")", ".", "difference", "(", "statements", ")", ":", "commands", ".", "append", "(", "'route-map %s %s %s'", "%", "(", "name", ",", "action", ",", "seqno", ")", ")", "commands", ".", "append", "(", "'no match %s'", "%", "entry", ")", "# add new set statements to the routemap", "for", "entry", "in", "set", "(", "statements", ")", ".", "difference", "(", "current_statements", ")", ":", "commands", ".", "append", "(", "'route-map %s %s %s'", "%", "(", "name", ",", "action", ",", "seqno", ")", ")", "commands", ".", "append", "(", "'match %s'", "%", "entry", ")", "return", "self", ".", "configure", "(", "commands", ")", "if", "commands", "else", "True" ]
Configures the match statements within the routemap clause. The final configuration of match statements will reflect the list of statements passed into the statements attribute. This implies match statements found in the routemap that are not specified in the statements attribute will be removed. Args: name (string): The full name of the routemap. action (string): The action to take for this routemap clause. seqno (integer): The sequence number for the routemap clause. statements (list): A list of the match-related statements. Note that the statements should omit the leading match. Returns: True if the operation succeeds otherwise False
[ "Configures", "the", "match", "statements", "within", "the", "routemap", "clause", ".", "The", "final", "configuration", "of", "match", "statements", "will", "reflect", "the", "list", "of", "statements", "passed", "into", "the", "statements", "attribute", ".", "This", "implies", "match", "statements", "found", "in", "the", "routemap", "that", "are", "not", "specified", "in", "the", "statements", "attribute", "will", "be", "removed", "." ]
python
train
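The core of set_match_statements is a set difference between the clause's current match statements and the desired ones; a pure-Python sketch with illustrative route-map and statement names:

current = {"ip address prefix-list PL-A", "metric 100"}
desired = {"ip address prefix-list PL-A", "as-path ASP-1"}
name, action, seqno = "RM-IN", "permit", 10        # illustrative clause

commands = []
for entry in current - desired:                    # statements to drop
    commands += ["route-map %s %s %s" % (name, action, seqno), "no match %s" % entry]
for entry in desired - current:                    # statements to add
    commands += ["route-map %s %s %s" % (name, action, seqno), "match %s" % entry]

print(commands)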
user-cont/conu
conu/backend/docker/container.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/docker/container.py#L378-L400
def get_port_mappings(self, port=None): """ Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None """ port_mappings = self.inspect(refresh=True)["NetworkSettings"]["Ports"] if not port: return port_mappings if str(port) not in self.get_ports(): return [] for p in port_mappings: if p.split("/")[0] == str(port): return port_mappings[p]
[ "def", "get_port_mappings", "(", "self", ",", "port", "=", "None", ")", ":", "port_mappings", "=", "self", ".", "inspect", "(", "refresh", "=", "True", ")", "[", "\"NetworkSettings\"", "]", "[", "\"Ports\"", "]", "if", "not", "port", ":", "return", "port_mappings", "if", "str", "(", "port", ")", "not", "in", "self", ".", "get_ports", "(", ")", ":", "return", "[", "]", "for", "p", "in", "port_mappings", ":", "if", "p", ".", "split", "(", "\"/\"", ")", "[", "0", "]", "==", "str", "(", "port", ")", ":", "return", "port_mappings", "[", "p", "]" ]
Get list of port mappings between container and host. The format of dicts is: {"HostIp": XX, "HostPort": YY}; When port is None - return all port mappings. The container needs to be running, otherwise this returns an empty list. :param port: int or None, container port :return: list of dict or None; dict when port=None
[ "Get", "list", "of", "port", "mappings", "between", "container", "and", "host", ".", "The", "format", "of", "dicts", "is", ":" ]
python
train
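The port-mapping lookup above just filters a docker-inspect style NetworkSettings.Ports dict by container port. A sketch with a hard-coded sample mapping instead of a live container:

port_mappings = {
    "8080/tcp": [{"HostIp": "0.0.0.0", "HostPort": "32768"}],
    "5432/tcp": None,          # exposed but not published
}

def mappings_for(port):
    if port is None:
        return port_mappings
    for key, value in port_mappings.items():
        if key.split("/")[0] == str(port):
            return value
    return []

print(mappings_for(8080))   # [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]
print(mappings_for(9000))   # []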
benoitkugler/abstractDataLibrary
pyDLib/GUI/app.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/app.py#L160-L172
def init_login(self, from_local=False): """Display login screen. May ask for local data loading if from_local is True.""" if self.toolbar: self.removeToolBar(self.toolbar) widget_login = login.Loading(self.statusBar(), self.theory_main) self.centralWidget().addWidget(widget_login) widget_login.loaded.connect(self.init_tabs) widget_login.canceled.connect(self._quit) widget_login.updated.connect(self.on_update_at_launch) if from_local: widget_login.propose_load_local() else: self.statusBar().showMessage("Données chargées depuis le serveur.", 5000)
[ "def", "init_login", "(", "self", ",", "from_local", "=", "False", ")", ":", "if", "self", ".", "toolbar", ":", "self", ".", "removeToolBar", "(", "self", ".", "toolbar", ")", "widget_login", "=", "login", ".", "Loading", "(", "self", ".", "statusBar", "(", ")", ",", "self", ".", "theory_main", ")", "self", ".", "centralWidget", "(", ")", ".", "addWidget", "(", "widget_login", ")", "widget_login", ".", "loaded", ".", "connect", "(", "self", ".", "init_tabs", ")", "widget_login", ".", "canceled", ".", "connect", "(", "self", ".", "_quit", ")", "widget_login", ".", "updated", ".", "connect", "(", "self", ".", "on_update_at_launch", ")", "if", "from_local", ":", "widget_login", ".", "propose_load_local", "(", ")", "else", ":", "self", ".", "statusBar", "(", ")", ".", "showMessage", "(", "\"Données chargées depuis le serveur.\", ", "5", "00)", "" ]
Display login screen. May ask for local data loading if from_local is True.
[ "Display", "login", "screen", ".", "May", "ask", "for", "local", "data", "loading", "if", "from_local", "is", "True", "." ]
python
train
materialsproject/pymatgen
pymatgen/electronic_structure/plotter.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/plotter.py#L120-L214
def get_plot(self, xlim=None, ylim=None): """ Get a matplotlib plot showing the DOS. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. """ ncolors = max(3, len(self._doses)) ncolors = min(9, ncolors) import palettable colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors y = None alldensities = [] allenergies = [] plt = pretty_plot(12, 8) # Note that this complicated processing of energies is to allow for # stacked plots in matplotlib. for key, dos in self._doses.items(): energies = dos['energies'] densities = dos['densities'] if not y: y = {Spin.up: np.zeros(energies.shape), Spin.down: np.zeros(energies.shape)} newdens = {} for spin in [Spin.up, Spin.down]: if spin in densities: if self.stack: y[spin] += densities[spin] newdens[spin] = y[spin].copy() else: newdens[spin] = densities[spin] allenergies.append(energies) alldensities.append(newdens) keys = list(self._doses.keys()) keys.reverse() alldensities.reverse() allenergies.reverse() allpts = [] for i, key in enumerate(keys): x = [] y = [] for spin in [Spin.up, Spin.down]: if spin in alldensities[i]: densities = list(int(spin) * alldensities[i][spin]) energies = list(allenergies[i]) if spin == Spin.down: energies.reverse() densities.reverse() x.extend(energies) y.extend(densities) allpts.extend(list(zip(x, y))) if self.stack: plt.fill(x, y, color=colors[i % ncolors], label=str(key)) else: plt.plot(x, y, color=colors[i % ncolors], label=str(key), linewidth=3) if not self.zero_at_efermi: ylim = plt.ylim() plt.plot([self._doses[key]['efermi'], self._doses[key]['efermi']], ylim, color=colors[i % ncolors], linestyle='--', linewidth=2) if xlim: plt.xlim(xlim) if ylim: plt.ylim(ylim) else: xlim = plt.xlim() relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]] plt.ylim((min(relevanty), max(relevanty))) if self.zero_at_efermi: ylim = plt.ylim() plt.plot([0, 0], ylim, 'k--', linewidth=2) plt.xlabel('Energies (eV)') plt.ylabel('Density of states') plt.legend() leg = plt.gca().get_legend() ltext = leg.get_texts() # all the text.Text instance in the legend plt.setp(ltext, fontsize=30) plt.tight_layout() return plt
[ "def", "get_plot", "(", "self", ",", "xlim", "=", "None", ",", "ylim", "=", "None", ")", ":", "ncolors", "=", "max", "(", "3", ",", "len", "(", "self", ".", "_doses", ")", ")", "ncolors", "=", "min", "(", "9", ",", "ncolors", ")", "import", "palettable", "colors", "=", "palettable", ".", "colorbrewer", ".", "qualitative", ".", "Set1_9", ".", "mpl_colors", "y", "=", "None", "alldensities", "=", "[", "]", "allenergies", "=", "[", "]", "plt", "=", "pretty_plot", "(", "12", ",", "8", ")", "# Note that this complicated processing of energies is to allow for", "# stacked plots in matplotlib.", "for", "key", ",", "dos", "in", "self", ".", "_doses", ".", "items", "(", ")", ":", "energies", "=", "dos", "[", "'energies'", "]", "densities", "=", "dos", "[", "'densities'", "]", "if", "not", "y", ":", "y", "=", "{", "Spin", ".", "up", ":", "np", ".", "zeros", "(", "energies", ".", "shape", ")", ",", "Spin", ".", "down", ":", "np", ".", "zeros", "(", "energies", ".", "shape", ")", "}", "newdens", "=", "{", "}", "for", "spin", "in", "[", "Spin", ".", "up", ",", "Spin", ".", "down", "]", ":", "if", "spin", "in", "densities", ":", "if", "self", ".", "stack", ":", "y", "[", "spin", "]", "+=", "densities", "[", "spin", "]", "newdens", "[", "spin", "]", "=", "y", "[", "spin", "]", ".", "copy", "(", ")", "else", ":", "newdens", "[", "spin", "]", "=", "densities", "[", "spin", "]", "allenergies", ".", "append", "(", "energies", ")", "alldensities", ".", "append", "(", "newdens", ")", "keys", "=", "list", "(", "self", ".", "_doses", ".", "keys", "(", ")", ")", "keys", ".", "reverse", "(", ")", "alldensities", ".", "reverse", "(", ")", "allenergies", ".", "reverse", "(", ")", "allpts", "=", "[", "]", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "x", "=", "[", "]", "y", "=", "[", "]", "for", "spin", "in", "[", "Spin", ".", "up", ",", "Spin", ".", "down", "]", ":", "if", "spin", "in", "alldensities", "[", "i", "]", ":", "densities", "=", "list", "(", "int", "(", "spin", ")", "*", "alldensities", "[", "i", "]", "[", "spin", "]", ")", "energies", "=", "list", "(", "allenergies", "[", "i", "]", ")", "if", "spin", "==", "Spin", ".", "down", ":", "energies", ".", "reverse", "(", ")", "densities", ".", "reverse", "(", ")", "x", ".", "extend", "(", "energies", ")", "y", ".", "extend", "(", "densities", ")", "allpts", ".", "extend", "(", "list", "(", "zip", "(", "x", ",", "y", ")", ")", ")", "if", "self", ".", "stack", ":", "plt", ".", "fill", "(", "x", ",", "y", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "label", "=", "str", "(", "key", ")", ")", "else", ":", "plt", ".", "plot", "(", "x", ",", "y", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "label", "=", "str", "(", "key", ")", ",", "linewidth", "=", "3", ")", "if", "not", "self", ".", "zero_at_efermi", ":", "ylim", "=", "plt", ".", "ylim", "(", ")", "plt", ".", "plot", "(", "[", "self", ".", "_doses", "[", "key", "]", "[", "'efermi'", "]", ",", "self", ".", "_doses", "[", "key", "]", "[", "'efermi'", "]", "]", ",", "ylim", ",", "color", "=", "colors", "[", "i", "%", "ncolors", "]", ",", "linestyle", "=", "'--'", ",", "linewidth", "=", "2", ")", "if", "xlim", ":", "plt", ".", "xlim", "(", "xlim", ")", "if", "ylim", ":", "plt", ".", "ylim", "(", "ylim", ")", "else", ":", "xlim", "=", "plt", ".", "xlim", "(", ")", "relevanty", "=", "[", "p", "[", "1", "]", "for", "p", "in", "allpts", "if", "xlim", "[", "0", "]", "<", "p", "[", "0", "]", "<", "xlim", "[", "1", "]", "]", "plt", ".", "ylim", "(", "(", "min", 
"(", "relevanty", ")", ",", "max", "(", "relevanty", ")", ")", ")", "if", "self", ".", "zero_at_efermi", ":", "ylim", "=", "plt", ".", "ylim", "(", ")", "plt", ".", "plot", "(", "[", "0", ",", "0", "]", ",", "ylim", ",", "'k--'", ",", "linewidth", "=", "2", ")", "plt", ".", "xlabel", "(", "'Energies (eV)'", ")", "plt", ".", "ylabel", "(", "'Density of states'", ")", "plt", ".", "legend", "(", ")", "leg", "=", "plt", ".", "gca", "(", ")", ".", "get_legend", "(", ")", "ltext", "=", "leg", ".", "get_texts", "(", ")", "# all the text.Text instance in the legend", "plt", ".", "setp", "(", "ltext", ",", "fontsize", "=", "30", ")", "plt", ".", "tight_layout", "(", ")", "return", "plt" ]
Get a matplotlib plot showing the DOS. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits.
[ "Get", "a", "matplotlib", "plot", "showing", "the", "DOS", "." ]
python
train
Kortemme-Lab/klab
klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py#L464-L570
def create_dataframe(self, pdb_data = {}, verbose = True): '''This function creates a dataframe (a matrix with one row per dataset record and one column for fields of interest) from the benchmark run and the dataset data. For rows with multiple mutations, there may be multiple values for some fields e.g. wildtype residue exposure. We take the approach of marking these records as None (to be read as: N/A). Another approach is to take averages of continuous and binary values. This function also determines scalar_adjustments used to scale the predictions to try to improve the fraction correct score and the MAE. ''' if self.use_existing_benchmark_data and self.store_data_on_disk and os.path.exists(self.analysis_pandas_input_filepath): self.read_dataframe(self.analysis_pandas_input_filepath) return analysis_data = self.analysis_data dataset_cases = self.dataset_cases # Create XY data if self.store_data_on_disk: self.log('Creating the analysis input file %s and human-readable CSV and JSON versions %s and %s.' % (self.analysis_pandas_input_filepath, self.analysis_csv_input_filepath, self.analysis_json_input_filepath)) if len(analysis_data) > len(dataset_cases): raise colortext.Exception('ERROR: There seems to be an error - there are more predictions than cases in the dataset. Exiting.') elif len(analysis_data) < len(dataset_cases): self.log('\nWARNING: %d cases missing for analysis; there are %d predictions in the output directory but %d cases in the dataset. The analysis below does not cover the complete dataset.\n' % (len(dataset_cases) - len(analysis_data), len(analysis_data), len(dataset_cases)), colortext.error) # ddg_analysis_type can be set to 'DDG', 'DDG_Top[x]' (e.g. 'DDG_Top3'), eyc. # 'DDG' uses the value reported by the application. For the Rosetta application ddg_monomer by Kellogg et al., this is the value output at the end of a run (which is not the recommended value - the publication uses take_lowest := 3). # 'DDG_Top3' (generated by default) uses the metric from Kellogg et al. based on the three lowest scoring mutant structures and the three lowest scoring wildtype structures if self.use_single_reported_value or self.ddg_analysis_type == 'DDG': assert( self.ddg_analysis_type == 'DDG' ) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is the single DDG value reported by the application.' elif self.ddg_analysis_type[4:].startswith('Top') and int(self.ddg_analysis_type[7:]) == 3: take_lowest = int(self.ddg_analysis_type[7:]) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures as in the paper by Kellogg et al.'.format(take_lowest) elif self.ddg_analysis_type[4:].startswith('Top'): take_lowest = int(self.ddg_analysis_type[7:]) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures.'.format(take_lowest) elif self.ddg_analysis_type[4:].startswith('Random'): ddg_analysis_type = self.ddg_analysis_type[4:] if len( ddg_analysis_type ) > len('Random'): self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by pairing {0} random mutant structures with {0} random wildtype structures.'.format( int(ddg_analysis_type[len('Random'):]) ) else: self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by pairing random mutant structures with random wildtype structures.' 
elif self.ddg_analysis_type[4:] == 'AvgAllPairs': self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by constructing all pairs of all mutant structures with all wildtype structures.' elif self.ddg_analysis_type[4:] == 'MatchPairs': self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by matching each wildtype structure with its corresponding (round number) mutant structure.' elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzWT' ): assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzWT' ) ) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the wildtype complex score (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzWT'):]) elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzMut' ): assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzMut' ) ) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the mutant complex score (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzMut'):]) elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzBoth' ): assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzBoth' ) ) self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on both the mutant complex score and the wildtype complex score, added together (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzBoth'):]) else: raise Exception("Couldn't parse ddg_analysis_type: " + str(ddg_analysis_type)) self.log(self.ddg_analysis_type_description) # Initialize the data structures #csv_file = [] # Set the PDB input path if not pdb_data: try: pdb_data_ = json.loads(read_file('../../input/json/pdbs.json')) for k, v in pdb_data_.iteritems(): pdb_data[k.upper()] = v except Exception, e: self.log('input/json/pdbs.json could not be found - PDB-specific analysis cannot be performed.', colortext.error) else: # Normalize to upper case to avoid matching problems later new_pdb_data = {} for k, v in pdb_data.iteritems(): assert(k.upper() not in new_pdb_data) new_pdb_data[k.upper()] = v pdb_data = new_pdb_data # Determine columns specific to the prediction data to be added additional_prediction_data_columns = set() for adv in analysis_data.values(): additional_prediction_data_columns = additional_prediction_data_columns.union(set(adv.keys())) assert(len(additional_prediction_data_columns.intersection(set(self.csv_headers))) == 0) assert(self.ddg_analysis_type in additional_prediction_data_columns) additional_prediction_data_columns.remove(self.ddg_analysis_type) additional_prediction_data_columns = sorted(additional_prediction_data_columns) # Initialize the dataframe self.reset_csv_headers() # this is necessary for the DBBenchmarkRun class which is missing the Experimental, AbsoluteError, and StabilityClassification columns since it adds new columns per analysis set. 
res = pandas.DataFrame(columns=(self.csv_headers + additional_prediction_data_columns)) dataframe_columns = self.csv_headers + additional_prediction_data_columns additional_prediction_data_columns = tuple(additional_prediction_data_columns) # Create the dataframe dataframe_table = {} indices = [] for record_id, predicted_data in sorted(analysis_data.iteritems()): dataframe_record = self.get_dataframe_row(dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns) if dataframe_record: indices.append(dataframe_record['DatasetID']) for h in dataframe_columns: dataframe_table[h] = dataframe_table.get(h, []) dataframe_table[h].append(dataframe_record[h]) assert(sorted(dataframe_columns) == sorted(dataframe_record.keys())) dataframe = pandas.DataFrame(dataframe_table, index = indices) return self.set_dataframe(dataframe, verbose = verbose)
[ "def", "create_dataframe", "(", "self", ",", "pdb_data", "=", "{", "}", ",", "verbose", "=", "True", ")", ":", "if", "self", ".", "use_existing_benchmark_data", "and", "self", ".", "store_data_on_disk", "and", "os", ".", "path", ".", "exists", "(", "self", ".", "analysis_pandas_input_filepath", ")", ":", "self", ".", "read_dataframe", "(", "self", ".", "analysis_pandas_input_filepath", ")", "return", "analysis_data", "=", "self", ".", "analysis_data", "dataset_cases", "=", "self", ".", "dataset_cases", "# Create XY data", "if", "self", ".", "store_data_on_disk", ":", "self", ".", "log", "(", "'Creating the analysis input file %s and human-readable CSV and JSON versions %s and %s.'", "%", "(", "self", ".", "analysis_pandas_input_filepath", ",", "self", ".", "analysis_csv_input_filepath", ",", "self", ".", "analysis_json_input_filepath", ")", ")", "if", "len", "(", "analysis_data", ")", ">", "len", "(", "dataset_cases", ")", ":", "raise", "colortext", ".", "Exception", "(", "'ERROR: There seems to be an error - there are more predictions than cases in the dataset. Exiting.'", ")", "elif", "len", "(", "analysis_data", ")", "<", "len", "(", "dataset_cases", ")", ":", "self", ".", "log", "(", "'\\nWARNING: %d cases missing for analysis; there are %d predictions in the output directory but %d cases in the dataset. The analysis below does not cover the complete dataset.\\n'", "%", "(", "len", "(", "dataset_cases", ")", "-", "len", "(", "analysis_data", ")", ",", "len", "(", "analysis_data", ")", ",", "len", "(", "dataset_cases", ")", ")", ",", "colortext", ".", "error", ")", "# ddg_analysis_type can be set to 'DDG', 'DDG_Top[x]' (e.g. 'DDG_Top3'), eyc.", "# 'DDG' uses the value reported by the application. For the Rosetta application ddg_monomer by Kellogg et al., this is the value output at the end of a run (which is not the recommended value - the publication uses take_lowest := 3).", "# 'DDG_Top3' (generated by default) uses the metric from Kellogg et al. 
based on the three lowest scoring mutant structures and the three lowest scoring wildtype structures", "if", "self", ".", "use_single_reported_value", "or", "self", ".", "ddg_analysis_type", "==", "'DDG'", ":", "assert", "(", "self", ".", "ddg_analysis_type", "==", "'DDG'", ")", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is the single DDG value reported by the application.'", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'Top'", ")", "and", "int", "(", "self", ".", "ddg_analysis_type", "[", "7", ":", "]", ")", "==", "3", ":", "take_lowest", "=", "int", "(", "self", ".", "ddg_analysis_type", "[", "7", ":", "]", ")", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures as in the paper by Kellogg et al.'", ".", "format", "(", "take_lowest", ")", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'Top'", ")", ":", "take_lowest", "=", "int", "(", "self", ".", "ddg_analysis_type", "[", "7", ":", "]", ")", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures.'", ".", "format", "(", "take_lowest", ")", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'Random'", ")", ":", "ddg_analysis_type", "=", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", "if", "len", "(", "ddg_analysis_type", ")", ">", "len", "(", "'Random'", ")", ":", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by pairing {0} random mutant structures with {0} random wildtype structures.'", ".", "format", "(", "int", "(", "ddg_analysis_type", "[", "len", "(", "'Random'", ")", ":", "]", ")", ")", "else", ":", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by pairing random mutant structures with random wildtype structures.'", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", "==", "'AvgAllPairs'", ":", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by constructing all pairs of all mutant structures with all wildtype structures.'", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", "==", "'MatchPairs'", ":", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by matching each wildtype structure with its corresponding (round number) mutant structure.'", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'CplxBoltzWT'", ")", ":", "assert", "(", "len", "(", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ")", ">", "len", "(", "'CplxBoltzWT'", ")", ")", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the wildtype complex score (temperature %.2f).'", "%", "float", "(", "self", ".", "ddg_analysis_type", "[", "4", "+", "len", "(", "'CplxBoltzWT'", ")", ":", "]", ")", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'CplxBoltzMut'", ")", ":", "assert", "(", "len", "(", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ")", ">", "len", "(", "'CplxBoltzMut'", ")", ")", "self", ".", 
"ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the mutant complex score (temperature %.2f).'", "%", "float", "(", "self", ".", "ddg_analysis_type", "[", "4", "+", "len", "(", "'CplxBoltzMut'", ")", ":", "]", ")", "elif", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ".", "startswith", "(", "'CplxBoltzBoth'", ")", ":", "assert", "(", "len", "(", "self", ".", "ddg_analysis_type", "[", "4", ":", "]", ")", ">", "len", "(", "'CplxBoltzBoth'", ")", ")", "self", ".", "ddg_analysis_type_description", "=", "'\\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on both the mutant complex score and the wildtype complex score, added together (temperature %.2f).'", "%", "float", "(", "self", ".", "ddg_analysis_type", "[", "4", "+", "len", "(", "'CplxBoltzBoth'", ")", ":", "]", ")", "else", ":", "raise", "Exception", "(", "\"Couldn't parse ddg_analysis_type: \"", "+", "str", "(", "ddg_analysis_type", ")", ")", "self", ".", "log", "(", "self", ".", "ddg_analysis_type_description", ")", "# Initialize the data structures", "#csv_file = []", "# Set the PDB input path", "if", "not", "pdb_data", ":", "try", ":", "pdb_data_", "=", "json", ".", "loads", "(", "read_file", "(", "'../../input/json/pdbs.json'", ")", ")", "for", "k", ",", "v", "in", "pdb_data_", ".", "iteritems", "(", ")", ":", "pdb_data", "[", "k", ".", "upper", "(", ")", "]", "=", "v", "except", "Exception", ",", "e", ":", "self", ".", "log", "(", "'input/json/pdbs.json could not be found - PDB-specific analysis cannot be performed.'", ",", "colortext", ".", "error", ")", "else", ":", "# Normalize to upper case to avoid matching problems later", "new_pdb_data", "=", "{", "}", "for", "k", ",", "v", "in", "pdb_data", ".", "iteritems", "(", ")", ":", "assert", "(", "k", ".", "upper", "(", ")", "not", "in", "new_pdb_data", ")", "new_pdb_data", "[", "k", ".", "upper", "(", ")", "]", "=", "v", "pdb_data", "=", "new_pdb_data", "# Determine columns specific to the prediction data to be added", "additional_prediction_data_columns", "=", "set", "(", ")", "for", "adv", "in", "analysis_data", ".", "values", "(", ")", ":", "additional_prediction_data_columns", "=", "additional_prediction_data_columns", ".", "union", "(", "set", "(", "adv", ".", "keys", "(", ")", ")", ")", "assert", "(", "len", "(", "additional_prediction_data_columns", ".", "intersection", "(", "set", "(", "self", ".", "csv_headers", ")", ")", ")", "==", "0", ")", "assert", "(", "self", ".", "ddg_analysis_type", "in", "additional_prediction_data_columns", ")", "additional_prediction_data_columns", ".", "remove", "(", "self", ".", "ddg_analysis_type", ")", "additional_prediction_data_columns", "=", "sorted", "(", "additional_prediction_data_columns", ")", "# Initialize the dataframe", "self", ".", "reset_csv_headers", "(", ")", "# this is necessary for the DBBenchmarkRun class which is missing the Experimental, AbsoluteError, and StabilityClassification columns since it adds new columns per analysis set.", "res", "=", "pandas", ".", "DataFrame", "(", "columns", "=", "(", "self", ".", "csv_headers", "+", "additional_prediction_data_columns", ")", ")", "dataframe_columns", "=", "self", ".", "csv_headers", "+", "additional_prediction_data_columns", "additional_prediction_data_columns", "=", "tuple", "(", "additional_prediction_data_columns", ")", "# Create the dataframe", "dataframe_table", "=", "{", "}", "indices", "=", "[", "]", "for", "record_id", ",", 
"predicted_data", "in", "sorted", "(", "analysis_data", ".", "iteritems", "(", ")", ")", ":", "dataframe_record", "=", "self", ".", "get_dataframe_row", "(", "dataset_cases", ",", "predicted_data", ",", "pdb_data", ",", "record_id", ",", "additional_prediction_data_columns", ")", "if", "dataframe_record", ":", "indices", ".", "append", "(", "dataframe_record", "[", "'DatasetID'", "]", ")", "for", "h", "in", "dataframe_columns", ":", "dataframe_table", "[", "h", "]", "=", "dataframe_table", ".", "get", "(", "h", ",", "[", "]", ")", "dataframe_table", "[", "h", "]", ".", "append", "(", "dataframe_record", "[", "h", "]", ")", "assert", "(", "sorted", "(", "dataframe_columns", ")", "==", "sorted", "(", "dataframe_record", ".", "keys", "(", ")", ")", ")", "dataframe", "=", "pandas", ".", "DataFrame", "(", "dataframe_table", ",", "index", "=", "indices", ")", "return", "self", ".", "set_dataframe", "(", "dataframe", ",", "verbose", "=", "verbose", ")" ]
This function creates a dataframe (a matrix with one row per dataset record and one column for fields of interest) from the benchmark run and the dataset data. For rows with multiple mutations, there may be multiple values for some fields e.g. wildtype residue exposure. We take the approach of marking these records as None (to be read as: N/A). Another approach is to take averages of continuous and binary values. This function also determines scalar_adjustments used to scale the predictions to try to improve the fraction correct score and the MAE.
[ "This", "function", "creates", "a", "dataframe", "(", "a", "matrix", "with", "one", "row", "per", "dataset", "record", "and", "one", "column", "for", "fields", "of", "interest", ")", "from", "the", "benchmark", "run", "and", "the", "dataset", "data", ".", "For", "rows", "with", "multiple", "mutations", "there", "may", "be", "multiple", "values", "for", "some", "fields", "e", ".", "g", ".", "wildtype", "residue", "exposure", ".", "We", "take", "the", "approach", "of", "marking", "these", "records", "as", "None", "(", "to", "be", "read", "as", ":", "N", "/", "A", ")", ".", "Another", "approach", "is", "to", "take", "averages", "of", "continuous", "and", "binary", "values", ".", "This", "function", "also", "determines", "scalar_adjustments", "used", "to", "scale", "the", "predictions", "to", "try", "to", "improve", "the", "fraction", "correct", "score", "and", "the", "MAE", "." ]
python
train
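The create_dataframe record above branches on the ddg_analysis_type string ('DDG', 'DDG_Top3', 'DDG_Random10', and so on) to decide how per-case predictions are aggregated. The standalone sketch below is not part of the benchmark code; the helper name is hypothetical and only illustrates how such a string can be parsed into a description of the aggregation scheme.

def describe_ddg_analysis_type(ddg_analysis_type):
    # Hypothetical helper, independent of the benchmark class above.
    if ddg_analysis_type == 'DDG':
        return 'single DDG value reported by the application'
    suffix = ddg_analysis_type[4:]  # strip the leading 'DDG_'
    if suffix.startswith('Top'):
        take_lowest = int(suffix[len('Top'):])  # e.g. 3 for 'DDG_Top3'
        return 'uses the %d lowest-scoring wildtype and mutant structures' % take_lowest
    if suffix.startswith('Random'):
        count = suffix[len('Random'):]
        return 'random wildtype/mutant pairing' + (' (%d pairs)' % int(count) if count else '')
    raise ValueError('Unrecognised ddg_analysis_type: %s' % ddg_analysis_type)

print(describe_ddg_analysis_type('DDG_Top3'))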
WZBSocialScienceCenter/tmtoolkit
tmtoolkit/topicmod/model_stats.py
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/model_stats.py#L153-L166
def get_topic_word_relevance(topic_word_distrib, doc_topic_distrib, doc_lengths, lambda_): """ Calculate the topic-word relevance score with a lambda parameter `lambda_` according to Sievert and Shirley 2014. relevance(w,T|lambda) = lambda * log phi_{w,t} + (1-lambda) * log (phi_{w,t} / p(w)) with phi .. topic-word distribution p(w) .. marginal word probability """ p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths) p_w = get_marginal_word_distrib(topic_word_distrib, p_t) logtw = np.log(topic_word_distrib) loglift = np.log(topic_word_distrib / p_w) return lambda_ * logtw + (1-lambda_) * loglift
[ "def", "get_topic_word_relevance", "(", "topic_word_distrib", ",", "doc_topic_distrib", ",", "doc_lengths", ",", "lambda_", ")", ":", "p_t", "=", "get_marginal_topic_distrib", "(", "doc_topic_distrib", ",", "doc_lengths", ")", "p_w", "=", "get_marginal_word_distrib", "(", "topic_word_distrib", ",", "p_t", ")", "logtw", "=", "np", ".", "log", "(", "topic_word_distrib", ")", "loglift", "=", "np", ".", "log", "(", "topic_word_distrib", "/", "p_w", ")", "return", "lambda_", "*", "logtw", "+", "(", "1", "-", "lambda_", ")", "*", "loglift" ]
Calculate the topic-word relevance score with a lambda parameter `lambda_` according to Sievert and Shirley 2014. relevance(w,T|lambda) = lambda * log phi_{w,t} + (1-lambda) * log (phi_{w,t} / p(w)) with phi .. topic-word distribution p(w) .. marginal word probability
[ "Calculate", "the", "topic", "-", "word", "relevance", "score", "with", "a", "lambda", "parameter", "lambda_", "according", "to", "Sievert", "and", "Shirley", "2014", ".", "relevance", "(", "w", "T|lambda", ")", "=", "lambda", "*", "log", "phi_", "{", "w", "t", "}", "+", "(", "1", "-", "lambda", ")", "*", "log", "(", "phi_", "{", "w", "t", "}", "/", "p", "(", "w", "))", "with", "phi", "..", "topic", "-", "word", "distribution", "p", "(", "w", ")", "..", "marginal", "word", "probability" ]
python
train
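The get_topic_word_relevance record above implements the relevance score of Sievert and Shirley (2014). As an illustration, the toy example below (made-up numbers, plain NumPy, independent of tmtoolkit) evaluates relevance(w,t|lambda) = lambda * log phi_{w,t} + (1-lambda) * log(phi_{w,t} / p(w)) for two topics over three words.

import numpy as np

phi = np.array([[0.6, 0.3, 0.1],   # topic-word distribution: 2 topics x 3 words
                [0.2, 0.2, 0.6]])
p_t = np.array([0.5, 0.5])         # marginal topic probabilities
p_w = p_t.dot(phi)                 # marginal word probabilities p(w)
lambda_ = 0.6
relevance = lambda_ * np.log(phi) + (1 - lambda_) * np.log(phi / p_w)
print(relevance)                   # lambda=1 reduces to log phi; lambda=0 to the log lift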
titusjan/argos
argos/inspector/pgplugins/lineplot1d.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/pgplugins/lineplot1d.py#L106-L111
def setAutoRangeOn(self, axisNumber): """ Sets the auto-range of the axis on. :param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (Both X and Y axes). """ setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.yAxisRangeCti, axisNumber)
[ "def", "setAutoRangeOn", "(", "self", ",", "axisNumber", ")", ":", "setXYAxesAutoRangeOn", "(", "self", ",", "self", ".", "xAxisRangeCti", ",", "self", ".", "yAxisRangeCti", ",", "axisNumber", ")" ]
Sets the auto-range of the axis on. :param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (Both X and Y axes).
[ "Sets", "the", "auto", "-", "range", "of", "the", "axis", "on", "." ]
python
train
imjoey/pyhaproxy
pyhaproxy/render.py
https://github.com/imjoey/pyhaproxy/blob/4f0904acfc6bdb29ba6104ce2f6724c0330441d3/pyhaproxy/render.py#L103-L133
def __render_config_block(self, config_block): """Summary Args: config_block [config.Item, ...]: config lines Returns: str: config block str """ config_block_str = '' for line in config_block: if isinstance(line, config.Option): line_str = self.__render_option(line) elif isinstance(line, config.Config): line_str = self.__render_config(line) elif isinstance(line, config.Server): line_str = self.__render_server(line) elif isinstance(line, config.Bind): line_str = self.__render_bind(line) elif isinstance(line, config.Acl): line_str = self.__render_acl(line) elif isinstance(line, config.UseBackend): line_str = self.__render_usebackend(line) elif isinstance(line, config.User): line_str = self.__render_user(line) elif isinstance(line, config.Group): line_str = self.__render_group(line) # append line str config_block_str = config_block_str + line_str return config_block_str
[ "def", "__render_config_block", "(", "self", ",", "config_block", ")", ":", "config_block_str", "=", "''", "for", "line", "in", "config_block", ":", "if", "isinstance", "(", "line", ",", "config", ".", "Option", ")", ":", "line_str", "=", "self", ".", "__render_option", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "Config", ")", ":", "line_str", "=", "self", ".", "__render_config", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "Server", ")", ":", "line_str", "=", "self", ".", "__render_server", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "Bind", ")", ":", "line_str", "=", "self", ".", "__render_bind", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "Acl", ")", ":", "line_str", "=", "self", ".", "__render_acl", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "UseBackend", ")", ":", "line_str", "=", "self", ".", "__render_usebackend", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "User", ")", ":", "line_str", "=", "self", ".", "__render_user", "(", "line", ")", "elif", "isinstance", "(", "line", ",", "config", ".", "Group", ")", ":", "line_str", "=", "self", ".", "__render_group", "(", "line", ")", "# append line str", "config_block_str", "=", "config_block_str", "+", "line_str", "return", "config_block_str" ]
Summary Args: config_block [config.Item, ...]: config lines Returns: str: config block str
[ "Summary" ]
python
train
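The __render_config_block record above dispatches on the concrete config line type with an if/elif chain. The sketch below uses stand-in Option and Server classes (not pyhaproxy's real config module) to show the same dispatch written as a type-to-renderer mapping, a design that keeps the rendering loop unchanged when new line types are registered.

class Option(object):
    def __init__(self, text):
        self.text = text

class Server(object):
    def __init__(self, name):
        self.name = name

RENDERERS = {
    Option: lambda line: '    option %s\n' % line.text,
    Server: lambda line: '    server %s\n' % line.name,
}

def render_config_block(config_block):
    # Concatenate the rendering of every line whose type is registered.
    return ''.join(RENDERERS[type(line)](line) for line in config_block)

print(render_config_block([Option('httplog'), Server('web01 10.0.0.1:80 check')]))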
jkenlooper/chill
src/chill/api.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/api.py#L120-L154
def render_node(_node_id, value=None, noderequest={}, **kw): "Recursively render a node's value" if value == None: kw.update( noderequest ) results = _query(_node_id, **kw) current_app.logger.debug("results: %s", results) if results: values = [] for (result, cols) in results: if set(cols) == set(['node_id', 'name', 'value']): for subresult in result: #if subresult.get('name') == kw.get('name'): # This is a link node current_app.logger.debug("sub: %s", subresult) name = subresult['name'] if noderequest.get('_no_template'): # For debugging or just simply viewing with the # operate script we append the node_id to the name # of each. This doesn't work with templates. name = "{0} ({1})".format(name, subresult['node_id']) values.append( {name: render_node( subresult['node_id'], noderequest=noderequest, **subresult )} ) #elif 'node_id' and 'name' in cols: # for subresult in result: # current_app.logger.debug("sub2: %s", subresult) # values.append( {subresult.get('name'): render_node( subresult.get('node_id'), **subresult )} ) else: values.append( result ) value = values value = _short_circuit(value) if not noderequest.get('_no_template'): value = _template(_node_id, value) return value
[ "def", "render_node", "(", "_node_id", ",", "value", "=", "None", ",", "noderequest", "=", "{", "}", ",", "*", "*", "kw", ")", ":", "if", "value", "==", "None", ":", "kw", ".", "update", "(", "noderequest", ")", "results", "=", "_query", "(", "_node_id", ",", "*", "*", "kw", ")", "current_app", ".", "logger", ".", "debug", "(", "\"results: %s\"", ",", "results", ")", "if", "results", ":", "values", "=", "[", "]", "for", "(", "result", ",", "cols", ")", "in", "results", ":", "if", "set", "(", "cols", ")", "==", "set", "(", "[", "'node_id'", ",", "'name'", ",", "'value'", "]", ")", ":", "for", "subresult", "in", "result", ":", "#if subresult.get('name') == kw.get('name'):", "# This is a link node", "current_app", ".", "logger", ".", "debug", "(", "\"sub: %s\"", ",", "subresult", ")", "name", "=", "subresult", "[", "'name'", "]", "if", "noderequest", ".", "get", "(", "'_no_template'", ")", ":", "# For debugging or just simply viewing with the", "# operate script we append the node_id to the name", "# of each. This doesn't work with templates.", "name", "=", "\"{0} ({1})\"", ".", "format", "(", "name", ",", "subresult", "[", "'node_id'", "]", ")", "values", ".", "append", "(", "{", "name", ":", "render_node", "(", "subresult", "[", "'node_id'", "]", ",", "noderequest", "=", "noderequest", ",", "*", "*", "subresult", ")", "}", ")", "#elif 'node_id' and 'name' in cols:", "# for subresult in result:", "# current_app.logger.debug(\"sub2: %s\", subresult)", "# values.append( {subresult.get('name'): render_node( subresult.get('node_id'), **subresult )} )", "else", ":", "values", ".", "append", "(", "result", ")", "value", "=", "values", "value", "=", "_short_circuit", "(", "value", ")", "if", "not", "noderequest", ".", "get", "(", "'_no_template'", ")", ":", "value", "=", "_template", "(", "_node_id", ",", "value", ")", "return", "value" ]
Recursively render a node's value
[ "Recursively", "render", "a", "node", "s", "value" ]
python
train
ranaroussi/pywallet
pywallet/utils/bip32.py
https://github.com/ranaroussi/pywallet/blob/206ff224389c490d8798f660c9e79fe97ebb64cf/pywallet/utils/bip32.py#L501-L578
def deserialize(cls, key, network="bitcoin_testnet"): """Load the ExtendedBip32Key from a hex key. The key consists of * 4 byte version bytes (network key) * 1 byte depth: - 0x00 for master nodes, - 0x01 for level-1 descendants, .... * 4 byte fingerprint of the parent's key (0x00000000 if master key) * 4 byte child number. This is the number i in x_i = x_{par}/i, with x_i the key being serialized. This is encoded in MSB order. (0x00000000 if master key) * 32 bytes: the chain code * 33 bytes: the public key or private key data (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys) (Note that this also supports 0x04 + X + Y uncompressed points, but this is totally non-standard and this library won't even generate such data.) """ network = Wallet.get_network(network) if len(key) in [78, (78 + 32)]: # we have a byte array, so pass pass else: key = ensure_bytes(key) if len(key) in [78 * 2, (78 + 32) * 2]: # we have a hexlified non-base58 key, continue! key = unhexlify(key) elif len(key) == 111: # We have a base58 encoded string key = base58.b58decode_check(key) # Now that we double checkd the values, convert back to bytes because # they're easier to slice version, depth, parent_fingerprint, child, chain_code, key_data = ( key[:4], key[4], key[5:9], key[9:13], key[13:45], key[45:]) version_long = long_or_int(hexlify(version), 16) exponent = None pubkey = None point_type = key_data[0] if not isinstance(point_type, six.integer_types): point_type = ord(point_type) if point_type == 0: # Private key if version_long != network.EXT_SECRET_KEY: raise incompatible_network_exception_factory( network.NAME, network.EXT_SECRET_KEY, version) exponent = key_data[1:] elif point_type in [2, 3, 4]: # Compressed public coordinates if version_long != network.EXT_PUBLIC_KEY: raise incompatible_network_exception_factory( network.NAME, network.EXT_PUBLIC_KEY, version) pubkey = PublicKey.from_hex_key(key_data, network=network) # Even though this was generated from a compressed pubkey, we # want to store it as an uncompressed pubkey pubkey.compressed = False else: raise ValueError("Invalid key_data prefix, got %s" % point_type) def l(byte_seq): if byte_seq is None: return byte_seq elif isinstance(byte_seq, six.integer_types): return byte_seq return long_or_int(hexlify(byte_seq), 16) return cls(depth=l(depth), parent_fingerprint=l(parent_fingerprint), child_number=l(child), chain_code=l(chain_code), private_exponent=l(exponent), public_key=pubkey, network=network)
[ "def", "deserialize", "(", "cls", ",", "key", ",", "network", "=", "\"bitcoin_testnet\"", ")", ":", "network", "=", "Wallet", ".", "get_network", "(", "network", ")", "if", "len", "(", "key", ")", "in", "[", "78", ",", "(", "78", "+", "32", ")", "]", ":", "# we have a byte array, so pass", "pass", "else", ":", "key", "=", "ensure_bytes", "(", "key", ")", "if", "len", "(", "key", ")", "in", "[", "78", "*", "2", ",", "(", "78", "+", "32", ")", "*", "2", "]", ":", "# we have a hexlified non-base58 key, continue!", "key", "=", "unhexlify", "(", "key", ")", "elif", "len", "(", "key", ")", "==", "111", ":", "# We have a base58 encoded string", "key", "=", "base58", ".", "b58decode_check", "(", "key", ")", "# Now that we double checkd the values, convert back to bytes because", "# they're easier to slice", "version", ",", "depth", ",", "parent_fingerprint", ",", "child", ",", "chain_code", ",", "key_data", "=", "(", "key", "[", ":", "4", "]", ",", "key", "[", "4", "]", ",", "key", "[", "5", ":", "9", "]", ",", "key", "[", "9", ":", "13", "]", ",", "key", "[", "13", ":", "45", "]", ",", "key", "[", "45", ":", "]", ")", "version_long", "=", "long_or_int", "(", "hexlify", "(", "version", ")", ",", "16", ")", "exponent", "=", "None", "pubkey", "=", "None", "point_type", "=", "key_data", "[", "0", "]", "if", "not", "isinstance", "(", "point_type", ",", "six", ".", "integer_types", ")", ":", "point_type", "=", "ord", "(", "point_type", ")", "if", "point_type", "==", "0", ":", "# Private key", "if", "version_long", "!=", "network", ".", "EXT_SECRET_KEY", ":", "raise", "incompatible_network_exception_factory", "(", "network", ".", "NAME", ",", "network", ".", "EXT_SECRET_KEY", ",", "version", ")", "exponent", "=", "key_data", "[", "1", ":", "]", "elif", "point_type", "in", "[", "2", ",", "3", ",", "4", "]", ":", "# Compressed public coordinates", "if", "version_long", "!=", "network", ".", "EXT_PUBLIC_KEY", ":", "raise", "incompatible_network_exception_factory", "(", "network", ".", "NAME", ",", "network", ".", "EXT_PUBLIC_KEY", ",", "version", ")", "pubkey", "=", "PublicKey", ".", "from_hex_key", "(", "key_data", ",", "network", "=", "network", ")", "# Even though this was generated from a compressed pubkey, we", "# want to store it as an uncompressed pubkey", "pubkey", ".", "compressed", "=", "False", "else", ":", "raise", "ValueError", "(", "\"Invalid key_data prefix, got %s\"", "%", "point_type", ")", "def", "l", "(", "byte_seq", ")", ":", "if", "byte_seq", "is", "None", ":", "return", "byte_seq", "elif", "isinstance", "(", "byte_seq", ",", "six", ".", "integer_types", ")", ":", "return", "byte_seq", "return", "long_or_int", "(", "hexlify", "(", "byte_seq", ")", ",", "16", ")", "return", "cls", "(", "depth", "=", "l", "(", "depth", ")", ",", "parent_fingerprint", "=", "l", "(", "parent_fingerprint", ")", ",", "child_number", "=", "l", "(", "child", ")", ",", "chain_code", "=", "l", "(", "chain_code", ")", ",", "private_exponent", "=", "l", "(", "exponent", ")", ",", "public_key", "=", "pubkey", ",", "network", "=", "network", ")" ]
Load the ExtendedBip32Key from a hex key. The key consists of * 4 byte version bytes (network key) * 1 byte depth: - 0x00 for master nodes, - 0x01 for level-1 descendants, .... * 4 byte fingerprint of the parent's key (0x00000000 if master key) * 4 byte child number. This is the number i in x_i = x_{par}/i, with x_i the key being serialized. This is encoded in MSB order. (0x00000000 if master key) * 32 bytes: the chain code * 33 bytes: the public key or private key data (0x02 + X or 0x03 + X for public keys, 0x00 + k for private keys) (Note that this also supports 0x04 + X + Y uncompressed points, but this is totally non-standard and this library won't even generate such data.)
[ "Load", "the", "ExtendedBip32Key", "from", "a", "hex", "key", "." ]
python
train
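The deserialize record above documents the 78-byte BIP32 serialization layout (version, depth, parent fingerprint, child number, chain code, key data). The slicing below only illustrates those offsets on a dummy zero-filled buffer; it is not real key material and not pywallet's API.

raw = b'\x00' * 78                  # dummy 78-byte buffer standing in for a decoded key
version = raw[0:4]                  # 4-byte network version bytes
depth = raw[4]                      # 1-byte depth (0x00 for master nodes)
parent_fingerprint = raw[5:9]       # 4-byte fingerprint of the parent key
child_number = raw[9:13]            # 4-byte child index, MSB order
chain_code = raw[13:45]             # 32-byte chain code
key_data = raw[45:]                 # 33 bytes: 0x00 + k (private) or 0x02/0x03 + X (public)
print(len(chain_code), len(key_data))   # 32 33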
wagtail/django-modelcluster
modelcluster/models.py
https://github.com/wagtail/django-modelcluster/blob/bfc8bd755af0ddd49e2aee2f2ca126921573d38b/modelcluster/models.py#L135-L143
def get_all_child_m2m_relations(model): """ Return a list of ParentalManyToManyFields on the given model, including ones attached to ancestors of the model """ return [ field for field in model._meta.get_fields() if isinstance(field, ParentalManyToManyField) ]
[ "def", "get_all_child_m2m_relations", "(", "model", ")", ":", "return", "[", "field", "for", "field", "in", "model", ".", "_meta", ".", "get_fields", "(", ")", "if", "isinstance", "(", "field", ",", "ParentalManyToManyField", ")", "]" ]
Return a list of ParentalManyToManyFields on the given model, including ones attached to ancestors of the model
[ "Return", "a", "list", "of", "ParentalManyToManyFields", "on", "the", "given", "model", "including", "ones", "attached", "to", "ancestors", "of", "the", "model" ]
python
test
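The get_all_child_m2m_relations record above inspects model._meta.get_fields() for ParentalManyToManyField instances. The usage sketch below is hedged: Gallery and Photo are hypothetical models, and running it requires a configured Django project with django-modelcluster installed.

from django.db import models
from modelcluster.models import ClusterableModel, get_all_child_m2m_relations
from modelcluster.fields import ParentalManyToManyField

class Photo(models.Model):
    title = models.CharField(max_length=100)

class Gallery(ClusterableModel):
    photos = ParentalManyToManyField(Photo, blank=True)

print([f.name for f in get_all_child_m2m_relations(Gallery)])   # expected: ['photos']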
openstack/quark
quark/plugin_modules/mac_address_ranges.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/mac_address_ranges.py#L120-L136
def delete_mac_address_range(context, id): """Delete a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to delete. """ LOG.info("delete_mac_address_range %s for tenant %s" % (id, context.tenant_id)) if not context.is_admin: raise n_exc.NotAuthorized() with context.session.begin(): mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE) if not mar: raise q_exc.MacAddressRangeNotFound( mac_address_range_id=id) _delete_mac_address_range(context, mar)
[ "def", "delete_mac_address_range", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_mac_address_range %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "mar", "=", "db_api", ".", "mac_address_range_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "mar", ":", "raise", "q_exc", ".", "MacAddressRangeNotFound", "(", "mac_address_range_id", "=", "id", ")", "_delete_mac_address_range", "(", "context", ",", "mar", ")" ]
Delete a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to delete.
[ "Delete", "a", "mac_address_range", "." ]
python
valid