Dataset schema (one row per function; observed value lengths from the viewer):

  nwo                string, 5 to 106 chars (repository as owner/name)
  sha                string, 40 chars (commit hash)
  path               string, 4 to 174 chars (file path within the repository)
  language           string, 1 class (always "python")
  identifier         string, 1 to 140 chars (function or method name)
  parameters         string, 0 to 87.7k chars
  argument_list      string, 1 class
  return_statement   string, 0 to 426k chars
  docstring          string, 0 to 64.3k chars
  docstring_summary  string, 0 to 26.3k chars
  docstring_tokens   list
  function           string, 18 to 4.83M chars
  function_tokens    list
  url                string, 83 to 304 chars
mne-tools/mne-python
f90b303ce66a8415e64edd4605b09ac0179c1ebf
mne/io/bti/read.py
python
read_float_matrix
(fid, rows, cols)
return _unpack_matrix(fid, rows, cols, dtype='>f4', out_dtype=np.float32)
Read 32bit float matrix from bti file.
Read 32bit float matrix from bti file.
[ "Read", "32bit", "float", "matrix", "from", "bti", "file", "." ]
def read_float_matrix(fid, rows, cols):
    """Read 32bit float matrix from bti file."""
    return _unpack_matrix(fid, rows, cols, dtype='>f4', out_dtype=np.float32)
[ "def", "read_float_matrix", "(", "fid", ",", "rows", ",", "cols", ")", ":", "return", "_unpack_matrix", "(", "fid", ",", "rows", ",", "cols", ",", "dtype", "=", "'>f4'", ",", "out_dtype", "=", "np", ".", "float32", ")" ]
https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/io/bti/read.py#L96-L99
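The `_unpack_matrix` helper this function delegates to is not part of this record. A minimal sketch of what such a reader plausibly does with numpy, given the big-endian '>f4' dtype above (the helper's real signature and behavior in mne-python may differ; this is an assumption):

import numpy as np

def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
    # Read rows*cols values of the given (big-endian) dtype from the open
    # file object, then reshape to (rows, cols) and cast to out_dtype.
    count = rows * cols
    data = np.fromfile(fid, dtype=np.dtype(dtype), count=count)
    return data.reshape((rows, cols)).astype(out_dtype)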
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/contrib/gis/db/models/lookups.py
python
DistanceLookupBase.process_rhs
(self, compiler, connection)
return (rhs, params)
def process_rhs(self, compiler, connection):
    if not isinstance(self.rhs, (tuple, list)) or not 2 <= len(self.rhs) <= 4:
        raise ValueError("2, 3, or 4-element tuple required for '%s' lookup." % self.lookup_name)
    elif len(self.rhs) == 4 and not self.rhs[3] == 'spheroid':
        raise ValueError("For 4-element tuples the last argument must be the 'spheroid' directive.")

    # Check if the second parameter is a band index.
    if len(self.rhs) > 2 and not self.rhs[2] == 'spheroid':
        self.process_band_indices()

    params = [connection.ops.Adapter(self.rhs[0])]

    # Getting the distance parameter in the units of the field.
    dist_param = self.rhs[1]
    if hasattr(dist_param, 'resolve_expression'):
        dist_param = dist_param.resolve_expression(compiler.query)
        sql, expr_params = compiler.compile(dist_param)
        self.template_params['value'] = sql
        params.extend(expr_params)
    else:
        params += connection.ops.get_distance(
            self.lhs.output_field, (dist_param,) + self.rhs[2:],
            self.lookup_name, handle_spheroid=False
        )

    rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, params[0], compiler)
    return (rhs, params)
[ "def", "process_rhs", "(", "self", ",", "compiler", ",", "connection", ")", ":", "if", "not", "isinstance", "(", "self", ".", "rhs", ",", "(", "tuple", ",", "list", ")", ")", "or", "not", "2", "<=", "len", "(", "self", ".", "rhs", ")", "<=", "4", ":", "raise", "ValueError", "(", "\"2, 3, or 4-element tuple required for '%s' lookup.\"", "%", "self", ".", "lookup_name", ")", "elif", "len", "(", "self", ".", "rhs", ")", "==", "4", "and", "not", "self", ".", "rhs", "[", "3", "]", "==", "'spheroid'", ":", "raise", "ValueError", "(", "\"For 4-element tuples the last argument must be the 'speroid' directive.\"", ")", "# Check if the second parameter is a band index.", "if", "len", "(", "self", ".", "rhs", ")", ">", "2", "and", "not", "self", ".", "rhs", "[", "2", "]", "==", "'spheroid'", ":", "self", ".", "process_band_indices", "(", ")", "params", "=", "[", "connection", ".", "ops", ".", "Adapter", "(", "self", ".", "rhs", "[", "0", "]", ")", "]", "# Getting the distance parameter in the units of the field.", "dist_param", "=", "self", ".", "rhs", "[", "1", "]", "if", "hasattr", "(", "dist_param", ",", "'resolve_expression'", ")", ":", "dist_param", "=", "dist_param", ".", "resolve_expression", "(", "compiler", ".", "query", ")", "sql", ",", "expr_params", "=", "compiler", ".", "compile", "(", "dist_param", ")", "self", ".", "template_params", "[", "'value'", "]", "=", "sql", "params", ".", "extend", "(", "expr_params", ")", "else", ":", "params", "+=", "connection", ".", "ops", ".", "get_distance", "(", "self", ".", "lhs", ".", "output_field", ",", "(", "dist_param", ",", ")", "+", "self", ".", "rhs", "[", "2", ":", "]", ",", "self", ".", "lookup_name", ",", "handle_spheroid", "=", "False", ")", "rhs", "=", "connection", ".", "ops", ".", "get_geom_placeholder", "(", "self", ".", "lhs", ".", "output_field", ",", "params", "[", "0", "]", ",", "compiler", ")", "return", "(", "rhs", ",", "params", ")" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/contrib/gis/db/models/lookups.py#L420-L445
yandex/yandex-tank
b41bcc04396c4ed46fc8b28a261197320854fd33
yandextank/plugins/DataUploader/cli.py
python
check_log
(log_name)
def check_log(log_name):
    assert os.path.exists(log_name), \
        'Data log {} not found\n'.format(log_name) + \
        'JsonReport plugin should be enabled when launching Yandex-tank'
[ "def", "check_log", "(", "log_name", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "log_name", ")", ",", "'Data log {} not found\\n'", ".", "format", "(", "log_name", ")", "+", "'JsonReport plugin should be enabled when launching Yandex-tank'" ]
https://github.com/yandex/yandex-tank/blob/b41bcc04396c4ed46fc8b28a261197320854fd33/yandextank/plugins/DataUploader/cli.py#L64-L67
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/wireless/v1/sim/__init__.py
python
SimInstance.data_sessions
(self)
return self._proxy.data_sessions
Access the data_sessions

:returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList
:rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList
Access the data_sessions
[ "Access", "the", "data_sessions" ]
def data_sessions(self):
    """
    Access the data_sessions

    :returns: twilio.rest.wireless.v1.sim.data_session.DataSessionList
    :rtype: twilio.rest.wireless.v1.sim.data_session.DataSessionList
    """
    return self._proxy.data_sessions
[ "def", "data_sessions", "(", "self", ")", ":", "return", "self", ".", "_proxy", ".", "data_sessions" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/wireless/v1/sim/__init__.py#L711-L718
nltk/nltk_contrib
c9da2c29777ca9df650740145f1f4a375ccac961
nltk_contrib/tiger/query/ast_utils.py
python
create_vardef
(name, expression, var_type=ast.VariableTypes.NodeIdentifier, container_type=ast.ContainerTypes.Single)
return ast.VariableDefinition(ast.Variable(name, var_type, container_type), expression)
Creates a new `VariableDefinition` of a variable `name` and a RHS `expression`.

*Parameters*:
 * `name`: the name of the variable
 * `expression`: the expression which is assigned to the variable
 * `var_type`: the type of the variable, a member of `nltk_contrib.tiger.query.ast.VariableTypes`
 * `container_type`: the container type, a member of `nltk_contrib.tiger.query.ast.ContainerTypes`
Creates a new `VariableDefinition` of a variable `name` and a RHS `expression`.
[ "Creates", "a", "new", "VariableDefinition", "of", "a", "variable", "name", "and", "a", "RHS", "expression", ".", "*", "Parameters", "*", ":", "*", "name", ":", "the", "name", "of", "the", "variable", "*", "expression", ":", "the", "expression", "which", "is", "assigned", "to", "the", "variable", "*", "var_type", ":", "the", "type", "of", "the", "variable", "a", "member", "of", "nltk_contrib", ".", "tiger", ".", "query", ".", "ast", ".", "VariableTypes", "*", "container_type", ":", "the", "container", "type", "a", "member", "of", "nltk_contrib", ".", "tiger", ".", "query", ".", "ast", ".", "ContainerTypes" ]
def create_vardef(name, expression, var_type=ast.VariableTypes.NodeIdentifier,
                  container_type=ast.ContainerTypes.Single):
    """Creates a new `VariableDefinition` of a variable `name` and a RHS `expression`.

    *Parameters*:
     * `name`: the name of the variable
     * `expression`: the expression which is assigned to the variable
     * `var_type`: the type of the variable, a member of `nltk_contrib.tiger.query.ast.VariableTypes`
     * `container_type`: the container type, a member of `nltk_contrib.tiger.query.ast.ContainerTypes`
    """
    return ast.VariableDefinition(ast.Variable(name, var_type, container_type), expression)
[ "def", "create_vardef", "(", "name", ",", "expression", ",", "var_type", "=", "ast", ".", "VariableTypes", ".", "NodeIdentifier", ",", "container_type", "=", "ast", ".", "ContainerTypes", ".", "Single", ")", ":", "return", "ast", ".", "VariableDefinition", "(", "ast", ".", "Variable", "(", "name", ",", "var_type", ",", "container_type", ")", ",", "expression", ")" ]
https://github.com/nltk/nltk_contrib/blob/c9da2c29777ca9df650740145f1f4a375ccac961/nltk_contrib/tiger/query/ast_utils.py#L29-L39
xuehy/pytorch-maddpg
b7c1acf027a64a492eee6ab1c304ed3b8e69b5c4
pursuit/waterworld.py
python
MAWaterWorld._caught
(self, is_colliding_N1_N2, n_coop)
return is_caught_cN2, who_caught_cN1
Check whether collision results in catching the object.

This is because you need `n_coop` agents to collide with the object to actually catch it
Check whether collision results in catching the object.
[ "Checke", "whether", "collision", "results", "in", "catching", "the", "object" ]
def _caught(self, is_colliding_N1_N2, n_coop):
    """
    Check whether collision results in catching the object.

    This is because you need `n_coop` agents to collide with the object
    to actually catch it
    """
    # number of N1 colliding with given N2
    n_collisions_N2 = is_colliding_N1_N2.sum(axis=0)
    is_caught_cN2 = np.where(n_collisions_N2 >= n_coop)[0]

    # number of N2 colliding with given N1
    who_collisions_N1_cN2 = is_colliding_N1_N2[:, is_caught_cN2]
    who_caught_cN1 = np.where(who_collisions_N1_cN2 >= 1)[0]

    return is_caught_cN2, who_caught_cN1
[ "def", "_caught", "(", "self", ",", "is_colliding_N1_N2", ",", "n_coop", ")", ":", "# number of N1 colliding with given N2", "n_collisions_N2", "=", "is_colliding_N1_N2", ".", "sum", "(", "axis", "=", "0", ")", "is_caught_cN2", "=", "np", ".", "where", "(", "n_collisions_N2", ">=", "n_coop", ")", "[", "0", "]", "# number of N2 colliding with given N1", "who_collisions_N1_cN2", "=", "is_colliding_N1_N2", "[", ":", ",", "is_caught_cN2", "]", "who_caught_cN1", "=", "np", ".", "where", "(", "who_collisions_N1_cN2", ">=", "1", ")", "[", "0", "]", "return", "is_caught_cN2", ",", "who_caught_cN1" ]
https://github.com/xuehy/pytorch-maddpg/blob/b7c1acf027a64a492eee6ab1c304ed3b8e69b5c4/pursuit/waterworld.py#L180-L193
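A toy run of the catching logic above on a hand-built boolean collision matrix (the shapes and `n_coop` value here are illustrative, not from the source):

import numpy as np

# 3 pursuers (N1) x 2 evaders (N2); True where pursuer i touches evader j.
is_colliding_N1_N2 = np.array([[True, False],
                               [True, False],
                               [False, True]])
n_coop = 2

n_collisions_N2 = is_colliding_N1_N2.sum(axis=0)        # array([2, 1])
is_caught_cN2 = np.where(n_collisions_N2 >= n_coop)[0]  # array([0]): evader 0 is caught
who_collisions_N1_cN2 = is_colliding_N1_N2[:, is_caught_cN2]
who_caught_cN1 = np.where(who_collisions_N1_cN2 >= 1)[0]
print(is_caught_cN2, who_caught_cN1)                    # [0] [0 1]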
dragonfly/dragonfly
a579b5eadf452e23b07d4caf27b402703b0012b7
dragonfly/opt/ga_optimiser.py
python
GAOptimiser._add_data_to_model
(self, qinfos)
Update the optimisation model.
Update the optimisation model.
[ "Update", "the", "optimisation", "model", "." ]
def _add_data_to_model(self, qinfos):
    """ Update the optimisation model. """
    pass
[ "def", "_add_data_to_model", "(", "self", ",", "qinfos", ")", ":", "pass" ]
https://github.com/dragonfly/dragonfly/blob/a579b5eadf452e23b07d4caf27b402703b0012b7/dragonfly/opt/ga_optimiser.py#L62-L64
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_vendor/html5lib/treebuilders/base.py
python
Node.reparentChildren
(self, newParent)
Move all the children of the current node to newParent.

This is needed so that trees that don't store text as nodes move the
text in the correct way

:arg newParent: the node to move all this node's children to
Move all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way
[ "Move", "all", "the", "children", "of", "the", "current", "node", "to", "newParent", ".", "This", "is", "needed", "so", "that", "trees", "that", "don", "t", "store", "text", "as", "nodes", "move", "the", "text", "in", "the", "correct", "way" ]
def reparentChildren(self, newParent):
    """Move all the children of the current node to newParent.

    This is needed so that trees that don't store text as nodes move the
    text in the correct way

    :arg newParent: the node to move all this node's children to
    """
    # XXX - should this method be made more general?
    for child in self.childNodes:
        newParent.appendChild(child)
    self.childNodes = []
[ "def", "reparentChildren", "(", "self", ",", "newParent", ")", ":", "# XXX - should this method be made more general?", "for", "child", "in", "self", ".", "childNodes", ":", "newParent", ".", "appendChild", "(", "child", ")", "self", ".", "childNodes", "=", "[", "]" ]
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/html5lib/treebuilders/base.py#L97-L108
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/home_connect/binary_sensor.py
python
HomeConnectBinarySensor.__init__
(self, device, desc, sensor_type, device_class=None)
Initialize the entity.
Initialize the entity.
[ "Initialize", "the", "entity", "." ]
def __init__(self, device, desc, sensor_type, device_class=None):
    """Initialize the entity."""
    super().__init__(device, desc)
    self._state = None
    self._device_class = device_class
    self._type = sensor_type
    if self._type == "door":
        self._update_key = BSH_DOOR_STATE
        self._false_value_list = (BSH_DOOR_STATE_CLOSED, BSH_DOOR_STATE_LOCKED)
        self._true_value_list = [BSH_DOOR_STATE_OPEN]
    elif self._type == "remote_control":
        self._update_key = BSH_REMOTE_CONTROL_ACTIVATION_STATE
        self._false_value_list = [False]
        self._true_value_list = [True]
    elif self._type == "remote_start":
        self._update_key = BSH_REMOTE_START_ALLOWANCE_STATE
        self._false_value_list = [False]
        self._true_value_list = [True]
[ "def", "__init__", "(", "self", ",", "device", ",", "desc", ",", "sensor_type", ",", "device_class", "=", "None", ")", ":", "super", "(", ")", ".", "__init__", "(", "device", ",", "desc", ")", "self", ".", "_state", "=", "None", "self", ".", "_device_class", "=", "device_class", "self", ".", "_type", "=", "sensor_type", "if", "self", ".", "_type", "==", "\"door\"", ":", "self", ".", "_update_key", "=", "BSH_DOOR_STATE", "self", ".", "_false_value_list", "=", "(", "BSH_DOOR_STATE_CLOSED", ",", "BSH_DOOR_STATE_LOCKED", ")", "self", ".", "_true_value_list", "=", "[", "BSH_DOOR_STATE_OPEN", "]", "elif", "self", ".", "_type", "==", "\"remote_control\"", ":", "self", ".", "_update_key", "=", "BSH_REMOTE_CONTROL_ACTIVATION_STATE", "self", ".", "_false_value_list", "=", "[", "False", "]", "self", ".", "_true_value_list", "=", "[", "True", "]", "elif", "self", ".", "_type", "==", "\"remote_start\"", ":", "self", ".", "_update_key", "=", "BSH_REMOTE_START_ALLOWANCE_STATE", "self", ".", "_false_value_list", "=", "[", "False", "]", "self", ".", "_true_value_list", "=", "[", "True", "]" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/home_connect/binary_sensor.py#L46-L63
readthedocs/readthedocs.org
0852d7c10d725d954d3e9a93513171baa1116d9f
readthedocs/search/faceted_search.py
python
RTDFacetedSearch._is_advanced_query
(self, query)
return not tokens.isdisjoint(query_tokens)
Check if the query looks like it is using the syntax from a simple query string.

.. note::

   We don't check if the syntax is valid.
   The tokens used aren't very common in a normal query, so checking
   if the query contains any of them should be enough to determine
   if it's an advanced query.

Simple query syntax:
https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html#simple-query-string-syntax
Check if the query looks like it is using the syntax from a simple query string.
[ "Check", "if", "query", "looks", "like", "to", "be", "using", "the", "syntax", "from", "a", "simple", "query", "string", "." ]
def _is_advanced_query(self, query):
    """
    Check if the query looks like it is using the syntax
    from a simple query string.

    .. note::

       We don't check if the syntax is valid.
       The tokens used aren't very common in a normal query, so checking
       if the query contains any of them should be enough to determine
       if it's an advanced query.

    Simple query syntax:
    https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html#simple-query-string-syntax
    """
    tokens = {'+', '|', '-', '"', '*', '(', ')', '~'}
    query_tokens = set(query)
    return not tokens.isdisjoint(query_tokens)
[ "def", "_is_advanced_query", "(", "self", ",", "query", ")", ":", "tokens", "=", "{", "'+'", ",", "'|'", ",", "'-'", ",", "'\"'", ",", "'*'", ",", "'('", ",", "')'", ",", "'~'", "}", "query_tokens", "=", "set", "(", "query", ")", "return", "not", "tokens", ".", "isdisjoint", "(", "query_tokens", ")" ]
https://github.com/readthedocs/readthedocs.org/blob/0852d7c10d725d954d3e9a93513171baa1116d9f/readthedocs/search/faceted_search.py#L201-L218
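A quick check of the token-disjointness test above on two made-up queries:

tokens = {'+', '|', '-', '"', '*', '(', ')', '~'}
for query in ['read the docs', '"read the docs" -sphinx']:
    # Advanced if the query shares at least one character with the
    # simple-query-string operator set.
    print(query, '->', not tokens.isdisjoint(set(query)))
# read the docs -> False
# "read the docs" -sphinx -> True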
computationalprivacy/bandicoot
86e5f192a67dff690a06f28ed2f7b1ffdd141efb
bandicoot/io.py
python
read_orange
(user_id, records_path, antennas_path=None, attributes_path=None, recharges_path=None, network=False, describe=True, warnings=True, errors=False)
return user
Load user records from a CSV file in *orange* format:

``call_record_type;basic_service;user_msisdn;call_partner_identity;datetime;call_duration;longitude;latitude``

``basic_service`` takes one of the following values:

- 11: telephony;
- 12: emergency calls;
- 21: short message (in)
- 22: short message (out)

Parameters
----------
user_id : str
    ID of the user (filename)
records_path : str
    Path of the directory containing all the user files.
antennas_path : str, optional
    Path of the CSV file containing (antenna_id, latitude, longitude)
    values. This allows antennas to be mapped to their locations.
attributes_path : str, optional
    Path of the directory containing attributes files (``key, value`` CSV
    file). Attributes can for instance be variables such as age or gender.
    Attributes can be helpful to compute specific metrics.
network : bool, optional
    If network is True, bandicoot loads the network of the user's
    correspondents from the same path. Defaults to False.
describe : boolean
    If describe is True, it will print a description of the loaded user
    to the standard output.
errors : boolean
    If errors is True, returns a tuple (user, errors), where user is the
    user object and errors are the records which could not be loaded.
Load user records from a CSV file in *orange* format:
[ "Load", "user", "records", "from", "a", "CSV", "file", "in", "*", "orange", "*", "format", ":" ]
def read_orange(user_id, records_path, antennas_path=None,
                attributes_path=None, recharges_path=None, network=False,
                describe=True, warnings=True, errors=False):
    """
    Load user records from a CSV file in *orange* format:

    ``call_record_type;basic_service;user_msisdn;call_partner_identity;datetime;call_duration;longitude;latitude``

    ``basic_service`` takes one of the following values:

    - 11: telephony;
    - 12: emergency calls;
    - 21: short message (in)
    - 22: short message (out)

    Parameters
    ----------
    user_id : str
        ID of the user (filename)
    records_path : str
        Path of the directory containing all the user files.
    antennas_path : str, optional
        Path of the CSV file containing (antenna_id, latitude, longitude)
        values. This allows antennas to be mapped to their locations.
    attributes_path : str, optional
        Path of the directory containing attributes files (``key, value``
        CSV file). Attributes can for instance be variables such as age
        or gender. Attributes can be helpful to compute specific metrics.
    network : bool, optional
        If network is True, bandicoot loads the network of the user's
        correspondents from the same path. Defaults to False.
    describe : boolean
        If describe is True, it will print a description of the loaded
        user to the standard output.
    errors : boolean
        If errors is True, returns a tuple (user, errors), where user is
        the user object and errors are the records which could not be
        loaded.
    """

    def _parse(reader):
        records = []
        antennas = dict()

        for row in reader:
            direction = 'out' if row['call_record_type'] == '1' else 'in'
            interaction = 'call' if row['basic_service'] in ['11', '12'] else 'text'
            contact = row['call_partner_identity']
            date = datetime.strptime(row['datetime'], "%Y-%m-%d %H:%M:%S")
            call_duration = float(row['call_duration']) if row['call_duration'] != "" else None
            lon, lat = float(row['longitude']), float(row['latitude'])
            latlon = (lat, lon)

            antenna = None
            for key, value in antennas.items():
                if latlon == value:
                    antenna = key
                    break
            if antenna is None:
                antenna = len(antennas) + 1
                antennas[antenna] = latlon

            position = Position(antenna=antenna, location=latlon)
            record = Record(direction=direction,
                            interaction=interaction,
                            correspondent_id=contact,
                            call_duration=call_duration,
                            datetime=date,
                            position=position)
            records.append(record)

        return records, antennas

    user_records = os.path.join(records_path, user_id + ".csv")
    fields = ['call_record_type', 'basic_service', 'user_msisdn',
              'call_partner_identity', 'datetime', 'call_duration',
              'longitude', 'latitude']
    with open(user_records, 'r') as f:
        reader = csv.DictReader(f, delimiter=";", fieldnames=fields)
        records, antennas = _parse(reader)

    attributes = None
    if attributes_path is not None:
        user_attributes = os.path.join(attributes_path, user_id + '.csv')
        attributes = _load_attributes(user_attributes)

    recharges = None
    if recharges_path is not None:
        user_recharges = os.path.join(recharges_path, user_id + '.csv')
        recharges = _load_recharges(user_recharges)

    user, bad_records = load(user_id, records, antennas, attributes, recharges,
                             antennas_path, attributes_path, recharges_path,
                             describe=False, warnings=warnings)

    if network is True:
        user.network = _read_network(user, records_path, attributes_path,
                                     read_orange, antennas_path, warnings)
        user.recompute_missing_neighbors()

    if describe:
        user.describe()

    if errors:
        return user, bad_records
    return user
[ "def", "read_orange", "(", "user_id", ",", "records_path", ",", "antennas_path", "=", "None", ",", "attributes_path", "=", "None", ",", "recharges_path", "=", "None", ",", "network", "=", "False", ",", "describe", "=", "True", ",", "warnings", "=", "True", ",", "errors", "=", "False", ")", ":", "def", "_parse", "(", "reader", ")", ":", "records", "=", "[", "]", "antennas", "=", "dict", "(", ")", "for", "row", "in", "reader", ":", "direction", "=", "'out'", "if", "row", "[", "'call_record_type'", "]", "==", "'1'", "else", "'in'", "interaction", "=", "'call'", "if", "row", "[", "'basic_service'", "]", "in", "[", "'11'", ",", "'12'", "]", "else", "'text'", "contact", "=", "row", "[", "'call_partner_identity'", "]", "date", "=", "datetime", ".", "strptime", "(", "row", "[", "'datetime'", "]", ",", "\"%Y-%m-%d %H:%M:%S\"", ")", "call_duration", "=", "float", "(", "row", "[", "'call_duration'", "]", ")", "if", "row", "[", "'call_duration'", "]", "!=", "\"\"", "else", "None", "lon", ",", "lat", "=", "float", "(", "row", "[", "'longitude'", "]", ")", ",", "float", "(", "row", "[", "'latitude'", "]", ")", "latlon", "=", "(", "lat", ",", "lon", ")", "antenna", "=", "None", "for", "key", ",", "value", "in", "antennas", ".", "items", "(", ")", ":", "if", "latlon", "==", "value", ":", "antenna", "=", "key", "break", "if", "antenna", "is", "None", ":", "antenna", "=", "len", "(", "antennas", ")", "+", "1", "antennas", "[", "antenna", "]", "=", "latlon", "position", "=", "Position", "(", "antenna", "=", "antenna", ",", "location", "=", "latlon", ")", "record", "=", "Record", "(", "direction", "=", "direction", ",", "interaction", "=", "interaction", ",", "correspondent_id", "=", "contact", ",", "call_duration", "=", "call_duration", ",", "datetime", "=", "date", ",", "position", "=", "position", ")", "records", ".", "append", "(", "record", ")", "return", "records", ",", "antennas", "user_records", "=", "os", ".", "path", ".", "join", "(", "records_path", ",", "user_id", "+", "\".csv\"", ")", "fields", "=", "[", "'call_record_type'", ",", "'basic_service'", ",", "'user_msisdn'", ",", "'call_partner_identity'", ",", "'datetime'", ",", "'call_duration'", ",", "'longitude'", ",", "'latitude'", "]", "with", "open", "(", "user_records", ",", "'r'", ")", "as", "f", ":", "reader", "=", "csv", ".", "DictReader", "(", "f", ",", "delimiter", "=", "\";\"", ",", "fieldnames", "=", "fields", ")", "records", ",", "antennas", "=", "_parse", "(", "reader", ")", "attributes", "=", "None", "if", "attributes_path", "is", "not", "None", ":", "user_attributes", "=", "os", ".", "path", ".", "join", "(", "attributes_path", ",", "user_id", "+", "'.csv'", ")", "attributes", "=", "_load_attributes", "(", "user_attributes", ")", "recharges", "=", "None", "if", "recharges_path", "is", "not", "None", ":", "user_recharges", "=", "os", ".", "path", ".", "join", "(", "recharges_path", ",", "user_id", "+", "'.csv'", ")", "recharges", "=", "_load_recharges", "(", "user_recharges", ")", "user", ",", "bad_records", "=", "load", "(", "user_id", ",", "records", ",", "antennas", ",", "attributes", ",", "recharges", ",", "antennas_path", ",", "attributes_path", ",", "recharges_path", ",", "describe", "=", "False", ",", "warnings", "=", "warnings", ")", "if", "network", "is", "True", ":", "user", ".", "network", "=", "_read_network", "(", "user", ",", "records_path", ",", "attributes_path", ",", "read_orange", ",", "antennas_path", ",", "warnings", ")", "user", ".", "recompute_missing_neighbors", "(", ")", "if", "describe", ":", "user", ".", 
"describe", "(", ")", "if", "errors", ":", "return", "user", ",", "bad_records", "return", "user" ]
https://github.com/computationalprivacy/bandicoot/blob/86e5f192a67dff690a06f28ed2f7b1ffdd141efb/bandicoot/io.py#L607-L721
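A hypothetical call, assuming a directory of per-user CSV files named after the user id (the paths and id here are made up):

user = read_orange('user_01', 'data/records/')
user, bad = read_orange('user_01', 'data/records/', errors=True)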
conda/conda-build
2a19925f2b2ca188f80ffa625cd783e7d403793f
conda_build/utils.py
python
sort_list_in_nested_structure
(dictionary, omissions='')
Recurse through a nested dictionary and sort any lists that are found. If the list that is found contains anything but strings, it is skipped as we can't compare lists containing different types. The omissions argument allows for certain sections of the dictionary to be omitted from sorting.
Recurse through a nested dictionary and sort any lists that are found.
[ "Recurse", "through", "a", "nested", "dictionary", "and", "sort", "any", "lists", "that", "are", "found", "." ]
def sort_list_in_nested_structure(dictionary, omissions=''):
    """Recurse through a nested dictionary and sort any lists that are found.

    If the list that is found contains anything but strings, it is skipped
    as we can't compare lists containing different types. The omissions
    argument allows for certain sections of the dictionary to be omitted
    from sorting.
    """
    for field, value in dictionary.items():
        if isinstance(value, dict):
            for key in value.keys():
                section = dictionary[field][key]
                if isinstance(section, dict):
                    sort_list_in_nested_structure(section)
                elif (isinstance(section, list) and
                      '{}/{}'.format(field, key) not in omissions and
                      all(isinstance(item, str) for item in section)):
                    section.sort()
        # there's a possibility for nested lists containing dictionaries
        # in this case we recurse until we find a list to sort
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    sort_list_in_nested_structure(element)
            try:
                value.sort()
            except TypeError:
                pass
[ "def", "sort_list_in_nested_structure", "(", "dictionary", ",", "omissions", "=", "''", ")", ":", "for", "field", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "key", "in", "value", ".", "keys", "(", ")", ":", "section", "=", "dictionary", "[", "field", "]", "[", "key", "]", "if", "isinstance", "(", "section", ",", "dict", ")", ":", "sort_list_in_nested_structure", "(", "section", ")", "elif", "(", "isinstance", "(", "section", ",", "list", ")", "and", "'{}/{}'", ".", "format", "(", "field", ",", "key", ")", "not", "in", "omissions", "and", "all", "(", "isinstance", "(", "item", ",", "str", ")", "for", "item", "in", "section", ")", ")", ":", "section", ".", "sort", "(", ")", "# there's a possibility for nested lists containing dictionaries", "# in this case we recurse until we find a list to sort", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "element", "in", "value", ":", "if", "isinstance", "(", "element", ",", "dict", ")", ":", "sort_list_in_nested_structure", "(", "element", ")", "try", ":", "value", ".", "sort", "(", ")", "except", "TypeError", ":", "pass" ]
https://github.com/conda/conda-build/blob/2a19925f2b2ca188f80ffa625cd783e7d403793f/conda_build/utils.py#L1805-L1832
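A small demonstration on a made-up nested structure; note the mixed-type list is left in its original order thanks to the TypeError guard:

cfg = {
    'requirements': {'build': ['zlib', 'python', 'cmake']},
    'extra': [{'tags': ['b', 'a']}, 'note'],
}
sort_list_in_nested_structure(cfg)
print(cfg['requirements']['build'])  # ['cmake', 'python', 'zlib']
print(cfg['extra'][0]['tags'])       # ['a', 'b'] (sorted via the recursion)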
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/tool_shed/metadata/repository_metadata_manager.py
python
RepositoryMetadataManager.new_tool_dependency_metadata_required
(self, repository_metadata)
Compare the last saved metadata for each tool dependency in the repository with the new metadata in self.metadata_dict to determine if a new repository_metadata table record is required or if the last saved metadata record can be updated for tool_dependencies instead.
Compare the last saved metadata for each tool dependency in the repository with the new metadata in self.metadata_dict to determine if a new repository_metadata table record is required or if the last saved metadata record can be updated for tool_dependencies instead.
[ "Compare", "the", "last", "saved", "metadata", "for", "each", "tool", "dependency", "in", "the", "repository", "with", "the", "new", "metadata", "in", "self", ".", "metadata_dict", "to", "determine", "if", "a", "new", "repository_metadata", "table", "record", "is", "required", "or", "if", "the", "last", "saved", "metadata", "record", "can", "be", "updated", "for", "tool_dependencies", "instead", "." ]
def new_tool_dependency_metadata_required(self, repository_metadata):
    """
    Compare the last saved metadata for each tool dependency in the
    repository with the new metadata in self.metadata_dict to determine if
    a new repository_metadata table record is required or if the last saved
    metadata record can be updated for tool_dependencies instead.
    """
    if repository_metadata:
        metadata = repository_metadata.metadata
        if metadata:
            if 'tool_dependencies' in metadata:
                saved_tool_dependencies = metadata['tool_dependencies']
                new_tool_dependencies = self.metadata_dict.get('tool_dependencies', None)
                if new_tool_dependencies:
                    # TODO: We used to include the following here to handle the case where
                    # tool dependency definitions were deleted. However, this erroneously
                    # returned True in cases where it should not have done so. This usually
                    # occurred where multiple single files were uploaded when a single tarball
                    # should have been. We need to implement support for handling deleted
                    # tool dependency definitions so that we can guarantee reproducibility,
                    # but we need to do it in a way that is better than the following.
                    # for new_tool_dependency in new_tool_dependencies:
                    #     if new_tool_dependency not in saved_tool_dependencies:
                    #         return True
                    # The saved metadata must be a subset of the new metadata.
                    for saved_tool_dependency in saved_tool_dependencies:
                        if saved_tool_dependency not in new_tool_dependencies:
                            return True
                    return False
                else:
                    # The tool_dependencies.xml file must have been deleted, so create a new
                    # repository_metadata record so we always have
                    # access to the deleted file.
                    return True
            else:
                return False
        else:
            # We have repository metadata that does not include metadata for any tool dependencies
            # in the repository, so we can update the existing repository metadata.
            return False
    else:
        if 'tool_dependencies' in self.metadata_dict:
            # There is no saved repository metadata, so we need to create a new repository_metadata
            # record.
            return True
        else:
            # self.metadata_dict includes no metadata for tool dependencies, so a new repository_metadata
            # record is not needed.
            return False
[ "def", "new_tool_dependency_metadata_required", "(", "self", ",", "repository_metadata", ")", ":", "if", "repository_metadata", ":", "metadata", "=", "repository_metadata", ".", "metadata", "if", "metadata", ":", "if", "'tool_dependencies'", "in", "metadata", ":", "saved_tool_dependencies", "=", "metadata", "[", "'tool_dependencies'", "]", "new_tool_dependencies", "=", "self", ".", "metadata_dict", ".", "get", "(", "'tool_dependencies'", ",", "None", ")", "if", "new_tool_dependencies", ":", "# TODO: We used to include the following here to handle the case where", "# tool dependency definitions were deleted. However, this erroneously", "# returned True in cases where is should not have done so. This usually", "# occurred where multiple single files were uploaded when a single tarball", "# should have been. We need to implement support for handling deleted", "# tool dependency definitions so that we can guarantee reproducibility,", "# but we need to do it in a way that is better than the following.", "# for new_tool_dependency in new_tool_dependencies:", "# if new_tool_dependency not in saved_tool_dependencies:", "# return True", "# The saved metadata must be a subset of the new metadata.", "for", "saved_tool_dependency", "in", "saved_tool_dependencies", ":", "if", "saved_tool_dependency", "not", "in", "new_tool_dependencies", ":", "return", "True", "return", "False", "else", ":", "# The tool_dependencies.xml file must have been deleted, so create a new", "# repository_metadata record so we always have", "# access to the deleted file.", "return", "True", "else", ":", "return", "False", "else", ":", "# We have repository metadata that does not include metadata for any tool dependencies", "# in the repository, so we can update the existing repository metadata.", "return", "False", "else", ":", "if", "'tool_dependencies'", "in", "self", ".", "metadata_dict", ":", "# There is no saved repository metadata, so we need to create a new repository_metadata", "# record.", "return", "True", "else", ":", "# self.metadata_dict includes no metadata for tool dependencies, so a new repository_metadata", "# record is not needed.", "return", "False" ]
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/tool_shed/metadata/repository_metadata_manager.py#L671-L718
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/modules/keystoneng.py
python
project_create
(auth=None, **kwargs)
return cloud.create_project(**kwargs)
Create a project

CLI Example:

.. code-block:: bash

    salt '*' keystoneng.project_create name=project1
    salt '*' keystoneng.project_create name=project2 domain_id=b62e76fbeeff4e8fb77073f591cf211e
    salt '*' keystoneng.project_create name=project3 enabled=False description='my project3'
Create a project
[ "Create", "a", "project" ]
def project_create(auth=None, **kwargs):
    """
    Create a project

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.project_create name=project1
        salt '*' keystoneng.project_create name=project2 domain_id=b62e76fbeeff4e8fb77073f591cf211e
        salt '*' keystoneng.project_create name=project3 enabled=False description='my project3'
    """
    cloud = get_openstack_cloud(auth)
    kwargs = _clean_kwargs(keep_name=True, **kwargs)
    return cloud.create_project(**kwargs)
[ "def", "project_create", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_openstack_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "keep_name", "=", "True", ",", "*", "*", "kwargs", ")", "return", "cloud", ".", "create_project", "(", "*", "*", "kwargs", ")" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/keystoneng.py#L235-L249
Tencent/PocketFlow
53b82cba5a34834400619e7c335a23995d45c2a6
learners/uniform_quantization/utils.py
python
UniformQuantization.__safe_check
(self)
TODO: Check the name of bucket_type, the value of bucket_size
TODO: Check the name of bucket_type, the value of bucket_size
[ "TODO", ":", "Check", "the", "name", "of", "bucket_type", "the", "value", "of", "bucket_size" ]
def __safe_check(self):
    """ TODO: Check the name of bucket_type, the value of bucket_size """
    if self.bucket_size < 0:
        raise ValueError("Bucket size must be a positive integer")
    if self.bucket_type != 'split' and self.bucket_type != 'channel':
        raise ValueError("Unrecognized bucket type, must be 'split' or 'channel'.")
[ "def", "__safe_check", "(", "self", ")", ":", "if", "self", ".", "bucket_size", "<", "0", ":", "raise", "ValueError", "(", "\"Bucket size must be a postive integer\"", ")", "if", "self", ".", "bucket_type", "!=", "'split'", "and", "self", ".", "bucket_type", "!=", "'channel'", ":", "raise", "ValueError", "(", "\"Unrecognized bucket type, must be 'weight' or 'channel'.\"", ")" ]
https://github.com/Tencent/PocketFlow/blob/53b82cba5a34834400619e7c335a23995d45c2a6/learners/uniform_quantization/utils.py#L291-L297
david8862/keras-YOLOv3-model-set
e9f0f94109430973525219e66eeafe8a2f51363d
common/yolo_postprocess_np.py
python
yolo_decode
(prediction, anchors, num_classes, input_shape, scale_x_y=None, use_softmax=False)
return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)
Decode final layer features to bounding box parameters.
Decode final layer features to bounding box parameters.
[ "Decode", "final", "layer", "features", "to", "bounding", "box", "parameters", "." ]
def yolo_decode(prediction, anchors, num_classes, input_shape, scale_x_y=None, use_softmax=False):
    '''Decode final layer features to bounding box parameters.'''
    batch_size = np.shape(prediction)[0]
    num_anchors = len(anchors)

    grid_shape = np.shape(prediction)[1:3]
    # check if stride on height & width are same
    assert input_shape[0]//grid_shape[0] == input_shape[1]//grid_shape[1], 'model stride mismatch.'
    stride = input_shape[0] // grid_shape[0]

    prediction = np.reshape(prediction,
                            (batch_size, grid_shape[0] * grid_shape[1] * num_anchors, num_classes + 5))

    ################################
    # generate x_y_offset grid map
    grid_y = np.arange(grid_shape[0])
    grid_x = np.arange(grid_shape[1])
    x_offset, y_offset = np.meshgrid(grid_x, grid_y)

    x_offset = np.reshape(x_offset, (-1, 1))
    y_offset = np.reshape(y_offset, (-1, 1))

    x_y_offset = np.concatenate((x_offset, y_offset), axis=1)
    x_y_offset = np.tile(x_y_offset, (1, num_anchors))
    x_y_offset = np.reshape(x_y_offset, (-1, 2))
    x_y_offset = np.expand_dims(x_y_offset, 0)

    ################################
    # Log space transform of the height and width
    anchors = np.tile(anchors, (grid_shape[0] * grid_shape[1], 1))
    anchors = np.expand_dims(anchors, 0)

    if scale_x_y:
        # Eliminate grid sensitivity trick involved in YOLOv4
        #
        # Reference Paper & code:
        #     "YOLOv4: Optimal Speed and Accuracy of Object Detection"
        #     https://arxiv.org/abs/2004.10934
        #     https://github.com/opencv/opencv/issues/17148
        #
        box_xy_tmp = expit(prediction[..., :2]) * scale_x_y - (scale_x_y - 1) / 2
        box_xy = (box_xy_tmp + x_y_offset) / np.array(grid_shape)[::-1]
    else:
        box_xy = (expit(prediction[..., :2]) + x_y_offset) / np.array(grid_shape)[::-1]
    box_wh = (np.exp(prediction[..., 2:4]) * anchors) / np.array(input_shape)[::-1]

    # Sigmoid objectness scores
    objectness = expit(prediction[..., 4])  # p_o (objectness score)
    objectness = np.expand_dims(objectness, -1)  # To make the same number of values for axis 0 and 1

    if use_softmax:
        # Softmax class scores
        class_scores = softmax(prediction[..., 5:], axis=-1)
    else:
        # Sigmoid class scores
        class_scores = expit(prediction[..., 5:])

    return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)
[ "def", "yolo_decode", "(", "prediction", ",", "anchors", ",", "num_classes", ",", "input_shape", ",", "scale_x_y", "=", "None", ",", "use_softmax", "=", "False", ")", ":", "batch_size", "=", "np", ".", "shape", "(", "prediction", ")", "[", "0", "]", "num_anchors", "=", "len", "(", "anchors", ")", "grid_shape", "=", "np", ".", "shape", "(", "prediction", ")", "[", "1", ":", "3", "]", "#check if stride on height & width are same", "assert", "input_shape", "[", "0", "]", "//", "grid_shape", "[", "0", "]", "==", "input_shape", "[", "1", "]", "//", "grid_shape", "[", "1", "]", ",", "'model stride mismatch.'", "stride", "=", "input_shape", "[", "0", "]", "//", "grid_shape", "[", "0", "]", "prediction", "=", "np", ".", "reshape", "(", "prediction", ",", "(", "batch_size", ",", "grid_shape", "[", "0", "]", "*", "grid_shape", "[", "1", "]", "*", "num_anchors", ",", "num_classes", "+", "5", ")", ")", "################################", "# generate x_y_offset grid map", "grid_y", "=", "np", ".", "arange", "(", "grid_shape", "[", "0", "]", ")", "grid_x", "=", "np", ".", "arange", "(", "grid_shape", "[", "1", "]", ")", "x_offset", ",", "y_offset", "=", "np", ".", "meshgrid", "(", "grid_x", ",", "grid_y", ")", "x_offset", "=", "np", ".", "reshape", "(", "x_offset", ",", "(", "-", "1", ",", "1", ")", ")", "y_offset", "=", "np", ".", "reshape", "(", "y_offset", ",", "(", "-", "1", ",", "1", ")", ")", "x_y_offset", "=", "np", ".", "concatenate", "(", "(", "x_offset", ",", "y_offset", ")", ",", "axis", "=", "1", ")", "x_y_offset", "=", "np", ".", "tile", "(", "x_y_offset", ",", "(", "1", ",", "num_anchors", ")", ")", "x_y_offset", "=", "np", ".", "reshape", "(", "x_y_offset", ",", "(", "-", "1", ",", "2", ")", ")", "x_y_offset", "=", "np", ".", "expand_dims", "(", "x_y_offset", ",", "0", ")", "################################", "# Log space transform of the height and width", "anchors", "=", "np", ".", "tile", "(", "anchors", ",", "(", "grid_shape", "[", "0", "]", "*", "grid_shape", "[", "1", "]", ",", "1", ")", ")", "anchors", "=", "np", ".", "expand_dims", "(", "anchors", ",", "0", ")", "if", "scale_x_y", ":", "# Eliminate grid sensitivity trick involved in YOLOv4", "#", "# Reference Paper & code:", "# \"YOLOv4: Optimal Speed and Accuracy of Object Detection\"", "# https://arxiv.org/abs/2004.10934", "# https://github.com/opencv/opencv/issues/17148", "#", "box_xy_tmp", "=", "expit", "(", "prediction", "[", "...", ",", ":", "2", "]", ")", "*", "scale_x_y", "-", "(", "scale_x_y", "-", "1", ")", "/", "2", "box_xy", "=", "(", "box_xy_tmp", "+", "x_y_offset", ")", "/", "np", ".", "array", "(", "grid_shape", ")", "[", ":", ":", "-", "1", "]", "else", ":", "box_xy", "=", "(", "expit", "(", "prediction", "[", "...", ",", ":", "2", "]", ")", "+", "x_y_offset", ")", "/", "np", ".", "array", "(", "grid_shape", ")", "[", ":", ":", "-", "1", "]", "box_wh", "=", "(", "np", ".", "exp", "(", "prediction", "[", "...", ",", "2", ":", "4", "]", ")", "*", "anchors", ")", "/", "np", ".", "array", "(", "input_shape", ")", "[", ":", ":", "-", "1", "]", "# Sigmoid objectness scores", "objectness", "=", "expit", "(", "prediction", "[", "...", ",", "4", "]", ")", "# p_o (objectness score)", "objectness", "=", "np", ".", "expand_dims", "(", "objectness", ",", "-", "1", ")", "# To make the same number of values for axis 0 and 1", "if", "use_softmax", ":", "# Softmax class scores", "class_scores", "=", "softmax", "(", "prediction", "[", "...", ",", "5", ":", "]", ",", "axis", "=", "-", "1", ")", "else", ":", "# Sigmoid class 
scores", "class_scores", "=", "expit", "(", "prediction", "[", "...", ",", "5", ":", "]", ")", "return", "np", ".", "concatenate", "(", "[", "box_xy", ",", "box_wh", ",", "objectness", ",", "class_scores", "]", ",", "axis", "=", "2", ")" ]
https://github.com/david8862/keras-YOLOv3-model-set/blob/e9f0f94109430973525219e66eeafe8a2f51363d/common/yolo_postprocess_np.py#L9-L67
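A shape sanity-check for the decoder above, with random inputs (sizes and anchor values are illustrative; `expit` and `softmax` are assumed to come from scipy.special, as the free names in the source suggest):

import numpy as np
from scipy.special import expit, softmax

# One 13x13 YOLOv3-style head: 3 anchors, 80 classes -> 3*(80+5) channels.
prediction = np.random.rand(1, 13, 13, 3 * (80 + 5))
anchors = np.array([[116, 90], [156, 198], [373, 326]])
boxes = yolo_decode(prediction, anchors, num_classes=80,
                    input_shape=(416, 416), scale_x_y=1.05)
print(boxes.shape)  # (1, 507, 85): 13*13*3 boxes, xywh + objectness + 80 scores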
beetbox/beets
2fea53c34dd505ba391cb345424e0613901c8025
beetsplug/bucket.py
python
complete_year_spans
(spans)
Set the `to` value of spans if empty and sort them chronologically.
Set the `to` value of spans if empty and sort them chronologically.
[ "Set", "the", "to", "value", "of", "spans", "if", "empty", "and", "sort", "them", "chronologically", "." ]
def complete_year_spans(spans):
    """Set the `to` value of spans if empty and sort them chronologically."""
    spans.sort(key=lambda x: x['from'])
    for (x, y) in pairwise(spans):
        if 'to' not in x:
            x['to'] = y['from'] - 1
    if spans and 'to' not in spans[-1]:
        spans[-1]['to'] = datetime.now().year
[ "def", "complete_year_spans", "(", "spans", ")", ":", "spans", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "'from'", "]", ")", "for", "(", "x", ",", "y", ")", "in", "pairwise", "(", "spans", ")", ":", "if", "'to'", "not", "in", "x", ":", "x", "[", "'to'", "]", "=", "y", "[", "'from'", "]", "-", "1", "if", "spans", "and", "'to'", "not", "in", "spans", "[", "-", "1", "]", ":", "spans", "[", "-", "1", "]", "[", "'to'", "]", "=", "datetime", ".", "now", "(", ")", ".", "year" ]
https://github.com/beetbox/beets/blob/2fea53c34dd505ba391cb345424e0613901c8025/beetsplug/bucket.py#L76-L84
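A worked example on made-up spans; `pairwise` here is the usual itertools recipe, which the plugin presumably imports from elsewhere:

from datetime import datetime
from itertools import tee

def pairwise(iterable):
    # itertools-recipe pairwise: s -> (s0, s1), (s1, s2), ...
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

spans = [{'from': 1990}, {'from': 2000}, {'from': 1960}]
complete_year_spans(spans)
print(spans)
# [{'from': 1960, 'to': 1989}, {'from': 1990, 'to': 1999},
#  {'from': 2000, 'to': <current year>}]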
c0rv4x/project-black
2d3df00ba1b1453c99ec5a247793a74e11adba2a
black/workers/dirsearch/dirsearch_ext/thirdparty/requests/auth.py
python
_basic_auth_str
(username, password)
return authstr
Returns a Basic Auth string.
Returns a Basic Auth string.
[ "Returns", "a", "Basic", "Auth", "string", "." ]
def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""
    authstr = 'Basic ' + to_native_string(
        b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
    )
    return authstr
[ "def", "_basic_auth_str", "(", "username", ",", "password", ")", ":", "authstr", "=", "'Basic '", "+", "to_native_string", "(", "b64encode", "(", "(", "'%s:%s'", "%", "(", "username", ",", "password", ")", ")", ".", "encode", "(", "'latin1'", ")", ")", ".", "strip", "(", ")", ")", "return", "authstr" ]
https://github.com/c0rv4x/project-black/blob/2d3df00ba1b1453c99ec5a247793a74e11adba2a/black/workers/dirsearch/dirsearch_ext/thirdparty/requests/auth.py#L26-L33
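Since the base64 encoding of a fixed string is fixed, the function is easy to check by hand (credentials here are made up):

>>> _basic_auth_str('user', 'pass')
'Basic dXNlcjpwYXNz'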
scrapinghub/spidermon
f2b21e45e70796f583bbb97f39b823c31d242b17
spidermon/results/monitor.py
python
MonitorResult.monitors_passed_action_results
(self)
return self._step_monitors_passed.all_items
def monitors_passed_action_results(self):
    return self._step_monitors_passed.all_items
[ "def", "monitors_passed_action_results", "(", "self", ")", ":", "return", "self", ".", "_step_monitors_passed", ".", "all_items" ]
https://github.com/scrapinghub/spidermon/blob/f2b21e45e70796f583bbb97f39b823c31d242b17/spidermon/results/monitor.py#L57-L58
fake-name/ReadableWebProxy
ed5c7abe38706acc2684a1e6cd80242a03c5f010
WebMirror/management/rss_parser_funcs/feed_parse_extractAutumnpondreflectionWordpressCom.py
python
extractAutumnpondreflectionWordpressCom
(item)
return False
Parser for 'autumnpondreflection.wordpress.com'
Parser for 'autumnpondreflection.wordpress.com'
[ "Parser", "for", "autumnpondreflection", ".", "wordpress", ".", "com" ]
def extractAutumnpondreflectionWordpressCom(item):
    '''
    Parser for 'autumnpondreflection.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tagmap = [
        ('tgp', 'The Grand Princess', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp,
                                               frag=frag, postfix=postfix,
                                               tl_type=tl_type)

    return False
[ "def", "extractAutumnpondreflectionWordpressCom", "(", "item", ")", ":", "vol", ",", "chp", ",", "frag", ",", "postfix", "=", "extractVolChapterFragmentPostfix", "(", "item", "[", "'title'", "]", ")", "if", "not", "(", "chp", "or", "vol", ")", "or", "\"preview\"", "in", "item", "[", "'title'", "]", ".", "lower", "(", ")", ":", "return", "None", "tagmap", "=", "[", "(", "'tgp'", ",", "'The Grand Princess'", ",", "'translated'", ")", ",", "(", "'PRC'", ",", "'PRC'", ",", "'translated'", ")", ",", "(", "'Loiterous'", ",", "'Loiterous'", ",", "'oel'", ")", ",", "]", "for", "tagname", ",", "name", ",", "tl_type", "in", "tagmap", ":", "if", "tagname", "in", "item", "[", "'tags'", "]", ":", "return", "buildReleaseMessageWithType", "(", "item", ",", "name", ",", "vol", ",", "chp", ",", "frag", "=", "frag", ",", "postfix", "=", "postfix", ",", "tl_type", "=", "tl_type", ")", "return", "False" ]
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractAutumnpondreflectionWordpressCom.py#L1-L21
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/geometry/polygon.py
python
RegularPolygon.angles
(self)
return ret
Returns a dictionary with keys, the vertices of the Polygon,
and values, the interior angle at each vertex.

Examples
========

>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
 Point2D(-5/2, 5*sqrt(3)/2): pi/3,
 Point2D(5, 0): pi/3}
Returns a dictionary with keys, the vertices of the Polygon, and values, the interior angle at each vertex.
[ "Returns", "a", "dictionary", "with", "keys", "the", "vertices", "of", "the", "Polygon", "and", "values", "the", "interior", "angle", "at", "each", "vertex", "." ]
def angles(self):
    """
    Returns a dictionary with keys, the vertices of the Polygon,
    and values, the interior angle at each vertex.

    Examples
    ========

    >>> from sympy import RegularPolygon, Point
    >>> r = RegularPolygon(Point(0, 0), 5, 3)
    >>> r.angles
    {Point2D(-5/2, -5*sqrt(3)/2): pi/3,
     Point2D(-5/2, 5*sqrt(3)/2): pi/3,
     Point2D(5, 0): pi/3}
    """
    ret = {}
    ang = self.interior_angle
    for v in self.vertices:
        ret[v] = ang
    return ret
[ "def", "angles", "(", "self", ")", ":", "ret", "=", "{", "}", "ang", "=", "self", ".", "interior_angle", "for", "v", "in", "self", ".", "vertices", ":", "ret", "[", "v", "]", "=", "ang", "return", "ret" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/geometry/polygon.py#L1821-L1840
svinota/pyroute2
d320acd67067206b4217bb862afdae23bcb55266
pyroute2.ipdb/pr2modules/ipdb/routes.py
python
MPLSRoute.make_key
(cls, msg)
return ret
Construct from a netlink message a key that can be used to locate the route in the table
Construct from a netlink message a key that can be used to locate the route in the table
[ "Construct", "from", "a", "netlink", "message", "a", "key", "that", "can", "be", "used", "to", "locate", "the", "route", "in", "the", "table" ]
def make_key(cls, msg):
    '''
    Construct from a netlink message a key that can be used
    to locate the route in the table
    '''
    ret = None
    if isinstance(msg, nlmsg):
        ret = msg.get_attr('RTA_DST')
    elif isinstance(msg, dict):
        ret = msg.get('dst', None)
    else:
        raise TypeError('prime not supported')
    if isinstance(ret, list):
        ret = ret[0]['label']
    return ret
[ "def", "make_key", "(", "cls", ",", "msg", ")", ":", "ret", "=", "None", "if", "isinstance", "(", "msg", ",", "nlmsg", ")", ":", "ret", "=", "msg", ".", "get_attr", "(", "'RTA_DST'", ")", "elif", "isinstance", "(", "msg", ",", "dict", ")", ":", "ret", "=", "msg", ".", "get", "(", "'dst'", ",", "None", ")", "else", ":", "raise", "TypeError", "(", "'prime not supported'", ")", "if", "isinstance", "(", "ret", ",", "list", ")", ":", "ret", "=", "ret", "[", "0", "]", "[", "'label'", "]", "return", "ret" ]
https://github.com/svinota/pyroute2/blob/d320acd67067206b4217bb862afdae23bcb55266/pyroute2.ipdb/pr2modules/ipdb/routes.py#L781-L795
shiweibsw/Translation-Tools
2fbbf902364e557fa7017f9a74a8797b7440c077
venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/html5lib/serializer.py
python
serialize
(input, tree="etree", encoding=None, **serializer_opts)
return s.render(walker(input), encoding)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
    # XXX: Should we cache this?
    walker = treewalkers.getTreeWalker(tree)
    s = HTMLSerializer(**serializer_opts)
    return s.render(walker(input), encoding)
[ "def", "serialize", "(", "input", ",", "tree", "=", "\"etree\"", ",", "encoding", "=", "None", ",", "*", "*", "serializer_opts", ")", ":", "# XXX: Should we cache this?", "walker", "=", "treewalkers", ".", "getTreeWalker", "(", "tree", ")", "s", "=", "HTMLSerializer", "(", "*", "*", "serializer_opts", ")", "return", "s", ".", "render", "(", "walker", "(", "input", ")", ",", "encoding", ")" ]
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/html5lib/serializer.py#L74-L78
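A hypothetical round-trip, assuming html5lib's top-level parse() with its default etree tree builder (omit_optional_tags is a real HTMLSerializer option; the input markup is made up):

import html5lib

doc = html5lib.parse('<p>Hello <b>world</b></p>')
print(serialize(doc, tree='etree', omit_optional_tags=False))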
twitter/zktraffic
82db04d9aafa13f694d4f5c7265069db42c0307c
zktraffic/omni/omni_sniffer.py
python
OmniSniffer._is_packet_fle_initial
(self, packet)
return True
def _is_packet_fle_initial(self, packet):
    data = get_ip_packet(packet.load).data.data
    proto, offset = read_long(data, 0)
    if proto != FLE.Initial.PROTO_VER:
        return False

    server_id, offset = read_long(data, offset)
    if server_id < 0:
        return False

    election_addr, offset = read_string(data, offset)
    if election_addr.count(":") != 1:
        return False

    expected_len = 8 + 8 + 4 + len(election_addr)
    if len(data) != expected_len:
        return False

    return True
[ "def", "_is_packet_fle_initial", "(", "self", ",", "packet", ")", ":", "data", "=", "get_ip_packet", "(", "packet", ".", "load", ")", ".", "data", ".", "data", "proto", ",", "offset", "=", "read_long", "(", "data", ",", "0", ")", "if", "proto", "!=", "FLE", ".", "Initial", ".", "PROTO_VER", ":", "return", "False", "server_id", ",", "offset", "=", "read_long", "(", "data", ",", "offset", ")", "if", "server_id", "<", "0", ":", "return", "False", "election_addr", ",", "offset", "=", "read_string", "(", "data", ",", "offset", ")", "if", "election_addr", ".", "count", "(", "\":\"", ")", "!=", "1", ":", "return", "False", "expected_len", "=", "8", "+", "8", "+", "4", "+", "len", "(", "election_addr", ")", "if", "len", "(", "data", ")", "!=", "expected_len", ":", "return", "False", "return", "True" ]
https://github.com/twitter/zktraffic/blob/82db04d9aafa13f694d4f5c7265069db42c0307c/zktraffic/omni/omni_sniffer.py#L177-L192
snarfed/granary
ab085de2aef0cff8ac31a99b5e21443a249e8419
granary/mastodon.py
python
Mastodon.status_url
(self, id)
return urllib.parse.urljoin(self.instance, f'/web/statuses/{id}')
Returns the local instance URL for a status with a given id.
Returns the local instance URL for a status with a given id.
[ "Returns", "the", "local", "instance", "URL", "for", "a", "status", "with", "a", "given", "id", "." ]
def status_url(self, id):
    """Returns the local instance URL for a status with a given id."""
    return urllib.parse.urljoin(self.instance, f'/web/statuses/{id}')
[ "def", "status_url", "(", "self", ",", "id", ")", ":", "return", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "instance", ",", "f'/web/statuses/{id}'", ")" ]
https://github.com/snarfed/granary/blob/ab085de2aef0cff8ac31a99b5e21443a249e8419/granary/mastodon.py#L750-L752
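Because this is a plain urljoin, the behavior is easy to preview (the instance URL and id here are made up):

>>> from urllib.parse import urljoin
>>> urljoin('https://mastodon.example', '/web/statuses/123')
'https://mastodon.example/web/statuses/123'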
GNS3/gns3-gui
da8adbaa18ab60e053af2a619efd468f4c8950f3
gns3/modules/builtin/pages/cloud_configuration_page.py
python
CloudConfigurationPage._loadNetworkInterfaces
(self, interfaces)
Loads Ethernet and TAP interfaces.
Loads Ethernet and TAP interfaces.
[ "Loads", "Ethernet", "and", "TAP", "interfaces", "." ]
def _loadNetworkInterfaces(self, interfaces):
    """
    Loads Ethernet and TAP interfaces.
    """

    self.uiEthernetComboBox.clear()
    index = 0
    for interface in interfaces:
        if interface["type"] == "ethernet" and not interface["special"]:
            self.uiEthernetComboBox.addItem(interface["name"])
            index += 1

    # load all TAP interfaces
    self.uiTAPComboBox.clear()
    index = 0
    for interface in interfaces:
        if interface["type"] == "tap":
            self.uiTAPComboBox.addItem(interface["name"])
            index += 1
[ "def", "_loadNetworkInterfaces", "(", "self", ",", "interfaces", ")", ":", "self", ".", "uiEthernetComboBox", ".", "clear", "(", ")", "index", "=", "0", "for", "interface", "in", "interfaces", ":", "if", "interface", "[", "\"type\"", "]", "==", "\"ethernet\"", "and", "not", "interface", "[", "\"special\"", "]", ":", "self", ".", "uiEthernetComboBox", ".", "addItem", "(", "interface", "[", "\"name\"", "]", ")", "index", "+=", "1", "# load all TAP interfaces", "self", ".", "uiTAPComboBox", ".", "clear", "(", ")", "index", "=", "0", "for", "interface", "in", "interfaces", ":", "if", "interface", "[", "\"type\"", "]", "==", "\"tap\"", ":", "self", ".", "uiTAPComboBox", ".", "addItem", "(", "interface", "[", "\"name\"", "]", ")", "index", "+=", "1" ]
https://github.com/GNS3/gns3-gui/blob/da8adbaa18ab60e053af2a619efd468f4c8950f3/gns3/modules/builtin/pages/cloud_configuration_page.py#L385-L403
YosaiProject/yosai
7f96aa6b837ceae9bf3d7387cd7e35f5ab032575
yosai/core/mgt/mgt.py
python
NativeSecurityManager.save
(self, subject)
Saves the subject's state to a persistent location for future reference. This implementation merely delegates saving to the internal subject_store.
Saves the subject's state to a persistent location for future reference. This implementation merely delegates saving to the internal subject_store.
[ "Saves", "the", "subject", "s", "state", "to", "a", "persistent", "location", "for", "future", "reference", ".", "This", "implementation", "merely", "delegates", "saving", "to", "the", "internal", "subject_store", "." ]
def save(self, subject):
    """
    Saves the subject's state to a persistent location for future reference.
    This implementation merely delegates saving to the internal subject_store.
    """
    self.subject_store.save(subject)
[ "def", "save", "(", "self", ",", "subject", ")", ":", "self", ".", "subject_store", ".", "save", "(", "subject", ")" ]
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L723-L728
prody/ProDy
b24bbf58aa8fffe463c8548ae50e3955910e5b7f
prody/ensemble/conformation.py
python
Conformation.getDeviations
(self)
Returns deviations from the ensemble reference coordinates. Deviations are calculated for (selected) atoms.
Returns deviations from the ensemble reference coordinates. Deviations are calculated for (selected) atoms.
[ "Returns", "deviations", "from", "the", "ensemble", "reference", "coordinates", ".", "Deviations", "are", "calculated", "for", "(", "selected", ")", "atoms", "." ]
def getDeviations(self): """Returns deviations from the ensemble reference coordinates. Deviations are calculated for (selected) atoms.""" ensemble = self._ensemble if ensemble._confs is None: return None indices = ensemble._indices if indices is None: return ensemble._confs[self._index] - ensemble._coords else: return (ensemble._confs[self._index, indices].copy() - ensemble._coords[indices])
[ "def", "getDeviations", "(", "self", ")", ":", "ensemble", "=", "self", ".", "_ensemble", "if", "ensemble", ".", "_confs", "is", "None", ":", "return", "None", "indices", "=", "ensemble", ".", "_indices", "if", "indices", "is", "None", ":", "return", "ensemble", ".", "_confs", "[", "self", ".", "_index", "]", "-", "ensemble", ".", "_coords", "else", ":", "return", "(", "ensemble", ".", "_confs", "[", "self", ".", "_index", ",", "indices", "]", ".", "copy", "(", ")", "-", "ensemble", ".", "_coords", "[", "indices", "]", ")" ]
https://github.com/prody/ProDy/blob/b24bbf58aa8fffe463c8548ae50e3955910e5b7f/prody/ensemble/conformation.py#L122-L134
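The two branches of `getDeviations` are plain array arithmetic; here is a minimal NumPy sketch of the same computation outside ProDy, assuming `_confs` is shaped (n_confs, n_atoms, 3) and `_coords` is (n_atoms, 3), with made-up coordinates:

```python
import numpy as np

confs = np.random.rand(2, 3, 3)      # 2 conformations x 3 atoms x 3 coords
coords = confs.mean(axis=0)          # reference coordinates
indices = np.array([0, 2])           # an atom selection, as in _indices

dev_all = confs[0] - coords                           # indices-is-None branch
dev_sel = confs[0, indices].copy() - coords[indices]  # selection branch

assert dev_all.shape == (3, 3) and dev_sel.shape == (2, 3)
```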
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/crypto/block_cipher/miniaes.py
python
MiniAES.encrypt
(self, P, key)
return ciphertext
r""" Use Phan's Mini-AES to encrypt the plaintext ``P`` with the secret key ``key``. Both ``P`` and ``key`` must be `2 \times 2` matrices over the finite field `\GF{2^4}`. Let `\gamma` denote the operation of nibble-sub, `\pi` denote shift-row, `\theta` denote mix-column, and `\sigma_{K_i}` denote add-key with the round key `K_i`. Then encryption `E` using Phan's Mini-AES is the function composition .. MATH:: E = \sigma_{K_2} \circ \pi \circ \gamma \circ \sigma_{K_1} \circ \theta \circ \pi \circ \gamma \circ \sigma_{K_0} where the order of execution is from right to left. Note that `\gamma` is the nibble-sub operation that uses the S-box for encryption. INPUT: - ``P`` -- a plaintext block; must be a `2 \times 2` matrix over the finite field `\GF{2^4}` - ``key`` -- a secret key for this Mini-AES block cipher; must be a `2 \times 2` matrix over the finite field `\GF{2^4}` OUTPUT: - The ciphertext corresponding to ``P``. EXAMPLES: Here we work with elements of `\GF{2^4}`:: sage: from sage.crypto.block_cipher.miniaes import MiniAES sage: maes = MiniAES() sage: K = FiniteField(16, "x") sage: MS = MatrixSpace(K, 2, 2) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]); P <BLANKLINE> [ x^3 + 1 x^2 + x] [x^3 + x^2 x + 1] sage: key = MS([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")], [K("x + 1"), K("0")] ]); key <BLANKLINE> [ x^3 + x^2 x^3 + x^2 + x + 1] [ x + 1 0] sage: maes.encrypt(P, key) <BLANKLINE> [x^2 + x + 1 x^3 + x^2] [ x x^2 + x] But we can also work with binary strings:: sage: bin = BinaryStrings() sage: P = bin.encoding("de"); P 0110010001100101 sage: P = MS(maes.binary_to_GF(P)); P <BLANKLINE> [x^2 + x x^2] [x^2 + x x^2 + 1] sage: key = bin.encoding("ke"); key 0110101101100101 sage: key = MS(maes.binary_to_GF(key)); key <BLANKLINE> [ x^2 + x x^3 + x + 1] [ x^2 + x x^2 + 1] sage: C = maes.encrypt(P, key) sage: plaintxt = maes.decrypt(C, key) sage: plaintxt == P True Now we work with integers `n` such that `0 \leq n \leq 15`:: sage: P = [1, 5, 8, 12]; P [1, 5, 8, 12] sage: key = [5, 9, 15, 0]; key [5, 9, 15, 0] sage: P = MS(maes.integer_to_GF(P)); P <BLANKLINE> [ 1 x^2 + 1] [ x^3 x^3 + x^2] sage: key = MS(maes.integer_to_GF(key)); key <BLANKLINE> [ x^2 + 1 x^3 + 1] [x^3 + x^2 + x + 1 0] sage: C = maes.encrypt(P, key) sage: plaintxt = maes.decrypt(C, key) sage: plaintxt == P True TESTS: The input block must be a matrix:: sage: from sage.crypto.block_cipher.miniaes import MiniAES sage: maes = MiniAES() sage: K = FiniteField(16, "x") sage: MS = MatrixSpace(K, 2, 2) sage: key = MS([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")], [K("x + 1"), K("0")] ]) sage: maes.encrypt("P", key) Traceback (most recent call last): ... TypeError: plaintext block must be a 2 x 2 matrix over GF(16) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]) sage: maes.encrypt(P, "key") Traceback (most recent call last): ... TypeError: secret key must be a 2 x 2 matrix over GF(16) In addition, the dimensions of the input matrices must be `2 \times 2`:: sage: MS = MatrixSpace(K, 1, 2) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")]]) sage: maes.encrypt(P, "key") Traceback (most recent call last): ... TypeError: plaintext block must be a 2 x 2 matrix over GF(16) sage: MSP = MatrixSpace(K, 2, 2) sage: P = MSP([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]) sage: MSK = MatrixSpace(K, 1, 2) sage: key = MSK([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")]]) sage: maes.encrypt(P, key) Traceback (most recent call last): ... TypeError: secret key must be a 2 x 2 matrix over GF(16)
r""" Use Phan's Mini-AES to encrypt the plaintext ``P`` with the secret key ``key``. Both ``P`` and ``key`` must be `2 \times 2` matrices over the finite field `\GF{2^4}`. Let `\gamma` denote the operation of nibble-sub, `\pi` denote shift-row, `\theta` denote mix-column, and `\sigma_{K_i}` denote add-key with the round key `K_i`. Then encryption `E` using Phan's Mini-AES is the function composition
[ "r", "Use", "Phan", "s", "Mini", "-", "AES", "to", "encrypt", "the", "plaintext", "P", "with", "the", "secret", "key", "key", ".", "Both", "P", "and", "key", "must", "be", "2", "\\", "times", "2", "matrices", "over", "the", "finite", "field", "\\", "GF", "{", "2^4", "}", ".", "Let", "\\", "gamma", "denote", "the", "operation", "of", "nibble", "-", "sub", "\\", "pi", "denote", "shift", "-", "row", "\\", "theta", "denote", "mix", "-", "column", "and", "\\", "sigma_", "{", "K_i", "}", "denote", "add", "-", "key", "with", "the", "round", "key", "K_i", ".", "Then", "encryption", "E", "using", "Phan", "s", "Mini", "-", "AES", "is", "the", "function", "composition" ]
def encrypt(self, P, key): r""" Use Phan's Mini-AES to encrypt the plaintext ``P`` with the secret key ``key``. Both ``P`` and ``key`` must be `2 \times 2` matrices over the finite field `\GF{2^4}`. Let `\gamma` denote the operation of nibble-sub, `\pi` denote shift-row, `\theta` denote mix-column, and `\sigma_{K_i}` denote add-key with the round key `K_i`. Then encryption `E` using Phan's Mini-AES is the function composition .. MATH:: E = \sigma_{K_2} \circ \pi \circ \gamma \circ \sigma_{K_1} \circ \theta \circ \pi \circ \gamma \circ \sigma_{K_0} where the order of execution is from right to left. Note that `\gamma` is the nibble-sub operation that uses the S-box for encryption. INPUT: - ``P`` -- a plaintext block; must be a `2 \times 2` matrix over the finite field `\GF{2^4}` - ``key`` -- a secret key for this Mini-AES block cipher; must be a `2 \times 2` matrix over the finite field `\GF{2^4}` OUTPUT: - The ciphertext corresponding to ``P``. EXAMPLES: Here we work with elements of `\GF{2^4}`:: sage: from sage.crypto.block_cipher.miniaes import MiniAES sage: maes = MiniAES() sage: K = FiniteField(16, "x") sage: MS = MatrixSpace(K, 2, 2) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]); P <BLANKLINE> [ x^3 + 1 x^2 + x] [x^3 + x^2 x + 1] sage: key = MS([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")], [K("x + 1"), K("0")] ]); key <BLANKLINE> [ x^3 + x^2 x^3 + x^2 + x + 1] [ x + 1 0] sage: maes.encrypt(P, key) <BLANKLINE> [x^2 + x + 1 x^3 + x^2] [ x x^2 + x] But we can also work with binary strings:: sage: bin = BinaryStrings() sage: P = bin.encoding("de"); P 0110010001100101 sage: P = MS(maes.binary_to_GF(P)); P <BLANKLINE> [x^2 + x x^2] [x^2 + x x^2 + 1] sage: key = bin.encoding("ke"); key 0110101101100101 sage: key = MS(maes.binary_to_GF(key)); key <BLANKLINE> [ x^2 + x x^3 + x + 1] [ x^2 + x x^2 + 1] sage: C = maes.encrypt(P, key) sage: plaintxt = maes.decrypt(C, key) sage: plaintxt == P True Now we work with integers `n` such that `0 \leq n \leq 15`:: sage: P = [1, 5, 8, 12]; P [1, 5, 8, 12] sage: key = [5, 9, 15, 0]; key [5, 9, 15, 0] sage: P = MS(maes.integer_to_GF(P)); P <BLANKLINE> [ 1 x^2 + 1] [ x^3 x^3 + x^2] sage: key = MS(maes.integer_to_GF(key)); key <BLANKLINE> [ x^2 + 1 x^3 + 1] [x^3 + x^2 + x + 1 0] sage: C = maes.encrypt(P, key) sage: plaintxt = maes.decrypt(C, key) sage: plaintxt == P True TESTS: The input block must be a matrix:: sage: from sage.crypto.block_cipher.miniaes import MiniAES sage: maes = MiniAES() sage: K = FiniteField(16, "x") sage: MS = MatrixSpace(K, 2, 2) sage: key = MS([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")], [K("x + 1"), K("0")] ]) sage: maes.encrypt("P", key) Traceback (most recent call last): ... TypeError: plaintext block must be a 2 x 2 matrix over GF(16) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]) sage: maes.encrypt(P, "key") Traceback (most recent call last): ... TypeError: secret key must be a 2 x 2 matrix over GF(16) In addition, the dimensions of the input matrices must be `2 \times 2`:: sage: MS = MatrixSpace(K, 1, 2) sage: P = MS([ [K("x^3 + 1"), K("x^2 + x")]]) sage: maes.encrypt(P, "key") Traceback (most recent call last): ... TypeError: plaintext block must be a 2 x 2 matrix over GF(16) sage: MSP = MatrixSpace(K, 2, 2) sage: P = MSP([ [K("x^3 + 1"), K("x^2 + x")], [K("x^3 + x^2"), K("x + 1")] ]) sage: MSK = MatrixSpace(K, 1, 2) sage: key = MSK([ [K("x^3 + x^2"), K("x^3 + x^2 + x + 1")]]) sage: maes.encrypt(P, key) Traceback (most recent call last): ... TypeError: secret key must be a 2 x 2 matrix over GF(16) """ if not isinstance(P, Matrix_dense) or \ not (P.base_ring().order() == 16 and P.base_ring().is_field()): raise TypeError("plaintext block must be a 2 x 2 matrix over GF(16)") if not (P.nrows() == P.ncols() == 2): raise TypeError("plaintext block must be a 2 x 2 matrix over GF(16)") if not isinstance(key, Matrix_dense) or \ not (key.base_ring().order() == 16 and key.base_ring().is_field()): raise TypeError("secret key must be a 2 x 2 matrix over GF(16)") if not (key.nrows() == key.ncols() == 2): raise TypeError("secret key must be a 2 x 2 matrix over GF(16)") # pre-compute the round keys rkey0 = self.round_key(key, 0) rkey1 = self.round_key(key, 1) rkey2 = self.round_key(key, 2) # now proceed with encrypting the plaintext # round 0 ciphertext = self.add_key(P, rkey0) # round 1 ciphertext = self.nibble_sub(ciphertext, algorithm="encrypt") ciphertext = self.shift_row(ciphertext) ciphertext = self.mix_column(ciphertext) ciphertext = self.add_key(ciphertext, rkey1) # round 2 ciphertext = self.nibble_sub(ciphertext, algorithm="encrypt") ciphertext = self.shift_row(ciphertext) ciphertext = self.add_key(ciphertext, rkey2) return ciphertext
[ "def", "encrypt", "(", "self", ",", "P", ",", "key", ")", ":", "if", "not", "isinstance", "(", "P", ",", "Matrix_dense", ")", "or", "not", "(", "P", ".", "base_ring", "(", ")", ".", "order", "(", ")", "==", "16", "and", "P", ".", "base_ring", "(", ")", ".", "is_field", "(", ")", ")", ":", "raise", "TypeError", "(", "\"plaintext block must be a 2 x 2 matrix over GF(16)\"", ")", "if", "not", "(", "P", ".", "nrows", "(", ")", "==", "P", ".", "ncols", "(", ")", "==", "2", ")", ":", "raise", "TypeError", "(", "\"plaintext block must be a 2 x 2 matrix over GF(16)\"", ")", "if", "not", "isinstance", "(", "key", ",", "Matrix_dense", ")", "or", "not", "(", "key", ".", "base_ring", "(", ")", ".", "order", "(", ")", "==", "16", "and", "key", ".", "base_ring", "(", ")", ".", "is_field", "(", ")", ")", ":", "raise", "TypeError", "(", "\"secret key must be a 2 x 2 matrix over GF(16)\"", ")", "if", "not", "(", "key", ".", "nrows", "(", ")", "==", "key", ".", "ncols", "(", ")", "==", "2", ")", ":", "raise", "TypeError", "(", "\"secret key must be a 2 x 2 matrix over GF(16)\"", ")", "# pre-compute the round keys", "rkey0", "=", "self", ".", "round_key", "(", "key", ",", "0", ")", "rkey1", "=", "self", ".", "round_key", "(", "key", ",", "1", ")", "rkey2", "=", "self", ".", "round_key", "(", "key", ",", "2", ")", "# now proceed with encrypting the plaintext", "# round 0", "ciphertext", "=", "self", ".", "add_key", "(", "P", ",", "rkey0", ")", "# round 1", "ciphertext", "=", "self", ".", "nibble_sub", "(", "ciphertext", ",", "algorithm", "=", "\"encrypt\"", ")", "ciphertext", "=", "self", ".", "shift_row", "(", "ciphertext", ")", "ciphertext", "=", "self", ".", "mix_column", "(", "ciphertext", ")", "ciphertext", "=", "self", ".", "add_key", "(", "ciphertext", ",", "rkey1", ")", "# round 2", "ciphertext", "=", "self", ".", "nibble_sub", "(", "ciphertext", ",", "algorithm", "=", "\"encrypt\"", ")", "ciphertext", "=", "self", ".", "shift_row", "(", "ciphertext", ")", "ciphertext", "=", "self", ".", "add_key", "(", "ciphertext", ",", "rkey2", ")", "return", "ciphertext" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/crypto/block_cipher/miniaes.py#L740-L899
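The record's own doctest condenses into a short round-trip check of the round composition described above; this is a Sage session, not plain Python, so `FiniteField` and `MatrixSpace` come from the Sage globals:

```python
# Sage session: condensed from the doctest in the record above
from sage.crypto.block_cipher.miniaes import MiniAES

maes = MiniAES()
K = FiniteField(16, "x")
MS = MatrixSpace(K, 2, 2)

P = MS(maes.integer_to_GF([1, 5, 8, 12]))      # plaintext block
key = MS(maes.integer_to_GF([5, 9, 15, 0]))    # secret key

C = maes.encrypt(P, key)
assert maes.decrypt(C, key) == P               # decrypt inverts encrypt
```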
dtmilano/AndroidViewClient
421b86e3f1a57683557fc0173951cd0332ab43f4
src/com/dtmilano/android/adb/adbclient.py
python
AdbClient.startActivity
(self, component=None, flags=None, uri=None, package=None)
Starts an Activity. If package is specified instead of component the corresponding MAIN activity for the package will be resolved and used.
Starts an Activity. If package is specified instead of component the corresponding MAIN activity for the package will be resolved and used.
[ "Starts", "an", "Activity", ".", "If", "package", "is", "specified", "instead", "of", "component", "the", "corresponding", "MAIN", "activity", "for", "the", "package", "will", "be", "resolved", "and", "used", "." ]
def startActivity(self, component=None, flags=None, uri=None, package=None): """ Starts an Activity. If package is specified instead of component the corresponding MAIN activity for the package will be resolved and used. """ self.__checkTransport() cmd = 'am start' if package and not component: version = self.getSdkVersion() if version >= 24: component = self.resolveActivity(package) else: component = self.dumpsys(Dumpsys.PACKAGE, package).package['main-activity'] if component: cmd += ' -n %s' % component if flags: cmd += ' -f %s' % flags if uri: cmd += ' %s' % uri if DEBUG: print("Starting activity: %s" % cmd, file=sys.stderr) out = self.shell(cmd) if re.search(r"(Error type)|(Error: )|(Cannot find 'App')", out, re.IGNORECASE | re.MULTILINE): raise RuntimeError(out)
[ "def", "startActivity", "(", "self", ",", "component", "=", "None", ",", "flags", "=", "None", ",", "uri", "=", "None", ",", "package", "=", "None", ")", ":", "self", ".", "__checkTransport", "(", ")", "cmd", "=", "'am start'", "if", "package", "and", "not", "component", ":", "version", "=", "self", ".", "getSdkVersion", "(", ")", "if", "version", ">=", "24", ":", "component", "=", "self", ".", "resolveActivity", "(", "package", ")", "else", ":", "component", "=", "self", ".", "dumpsys", "(", "Dumpsys", ".", "PACKAGE", ",", "package", ")", ".", "package", "[", "'main-activity'", "]", "if", "component", ":", "cmd", "+=", "' -n %s'", "%", "component", "if", "flags", ":", "cmd", "+=", "' -f %s'", "%", "flags", "if", "uri", ":", "cmd", "+=", "' %s'", "%", "uri", "if", "DEBUG", ":", "print", "(", "\"Starting activity: %s\"", "%", "cmd", ",", "file", "=", "sys", ".", "stderr", ")", "out", "=", "self", ".", "shell", "(", "cmd", ")", "if", "re", ".", "search", "(", "r\"(Error type)|(Error: )|(Cannot find 'App')\"", ",", "out", ",", "re", ".", "IGNORECASE", "|", "re", ".", "MULTILINE", ")", ":", "raise", "RuntimeError", "(", "out", ")" ]
https://github.com/dtmilano/AndroidViewClient/blob/421b86e3f1a57683557fc0173951cd0332ab43f4/src/com/dtmilano/android/adb/adbclient.py#L787-L811
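The core of `startActivity` is string assembly for the `am start` shell command; a standalone sketch of just that assembly, with hypothetical component and flag values:

```python
def build_am_start(component=None, flags=None, uri=None):
    # mirrors the command construction in AdbClient.startActivity
    cmd = 'am start'
    if component:
        cmd += ' -n %s' % component
    if flags:
        cmd += ' -f %s' % flags
    if uri:
        cmd += ' %s' % uri
    return cmd

# hypothetical values, for illustration only
print(build_am_start('com.example.app/.MainActivity', flags='0x10000000'))
# -> am start -n com.example.app/.MainActivity -f 0x10000000
```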
etetoolkit/ete
2b207357dc2a40ccad7bfd8f54964472c72e4726
ete3/coretype/tree.py
python
TreeNode.convert_to_ultrametric
(self, tree_length=None, strategy='balanced')
.. versionadded: 2.1 Converts a tree into ultrametric topology (all leaves must have the same distance to root). Note that, for visual inspection of ultrametric trees, node.img_style["size"] should be set to 0.
.. versionadded: 2.1
[ "..", "versionadded", ":", "2", ".", "1" ]
def convert_to_ultrametric(self, tree_length=None, strategy='balanced'): """ .. versionadded: 2.1 Converts a tree into ultrametric topology (all leaves must have the same distance to root). Note that, for visual inspection of ultrametric trees, node.img_style["size"] should be set to 0. """ # Could something like this replace the old algorithm? #most_distant_leaf, tree_length = self.get_farthest_leaf() #for leaf in self: # d = leaf.get_distance(self) # leaf.dist += (tree_length - d) #return # pre-calculate how many splits remain under each node node2max_depth = {} for node in self.traverse("postorder"): if not node.is_leaf(): max_depth = max([node2max_depth[c] for c in node.children]) + 1 node2max_depth[node] = max_depth else: node2max_depth[node] = 1 node2dist = {self: 0.0} if not tree_length: most_distant_leaf, tree_length = self.get_farthest_leaf() else: tree_length = float(tree_length) step = tree_length / node2max_depth[self] for node in self.iter_descendants("levelorder"): if strategy == "balanced": node.dist = (tree_length - node2dist[node.up]) / node2max_depth[node] node2dist[node] = node.dist + node2dist[node.up] elif strategy == "fixed": if not node.is_leaf(): node.dist = step else: node.dist = tree_length - ((node2dist[node.up]) * step) node2dist[node] = node2dist[node.up] + 1 node.dist = node.dist
[ "def", "convert_to_ultrametric", "(", "self", ",", "tree_length", "=", "None", ",", "strategy", "=", "'balanced'", ")", ":", "# Could something like this replace the old algorithm?", "#most_distant_leaf, tree_length = self.get_farthest_leaf()", "#for leaf in self:", "# d = leaf.get_distance(self)", "# leaf.dist += (tree_length - d)", "#return", "# pre-calculate how many splits remain under each node", "node2max_depth", "=", "{", "}", "for", "node", "in", "self", ".", "traverse", "(", "\"postorder\"", ")", ":", "if", "not", "node", ".", "is_leaf", "(", ")", ":", "max_depth", "=", "max", "(", "[", "node2max_depth", "[", "c", "]", "for", "c", "in", "node", ".", "children", "]", ")", "+", "1", "node2max_depth", "[", "node", "]", "=", "max_depth", "else", ":", "node2max_depth", "[", "node", "]", "=", "1", "node2dist", "=", "{", "self", ":", "0.0", "}", "if", "not", "tree_length", ":", "most_distant_leaf", ",", "tree_length", "=", "self", ".", "get_farthest_leaf", "(", ")", "else", ":", "tree_length", "=", "float", "(", "tree_length", ")", "step", "=", "tree_length", "/", "node2max_depth", "[", "self", "]", "for", "node", "in", "self", ".", "iter_descendants", "(", "\"levelorder\"", ")", ":", "if", "strategy", "==", "\"balanced\"", ":", "node", ".", "dist", "=", "(", "tree_length", "-", "node2dist", "[", "node", ".", "up", "]", ")", "/", "node2max_depth", "[", "node", "]", "node2dist", "[", "node", "]", "=", "node", ".", "dist", "+", "node2dist", "[", "node", ".", "up", "]", "elif", "strategy", "==", "\"fixed\"", ":", "if", "not", "node", ".", "is_leaf", "(", ")", ":", "node", ".", "dist", "=", "step", "else", ":", "node", ".", "dist", "=", "tree_length", "-", "(", "(", "node2dist", "[", "node", ".", "up", "]", ")", "*", "step", ")", "node2dist", "[", "node", "]", "=", "node2dist", "[", "node", ".", "up", "]", "+", "1", "node", ".", "dist", "=", "node", ".", "dist" ]
https://github.com/etetoolkit/ete/blob/2b207357dc2a40ccad7bfd8f54964472c72e4726/ete3/coretype/tree.py#L2084-L2129
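A usage sketch, assuming a standard ete3 install: after the conversion, every leaf should sit at the same distance from the root, which is easy to verify with `get_distance`:

```python
from ete3 import Tree

t = Tree("((a:1,b:2):1,(c:3,(d:1,e:4):2):1);")
t.convert_to_ultrametric(tree_length=1.0, strategy="balanced")

# iterating a TreeNode yields its leaves
depths = {leaf.name: round(t.get_distance(leaf), 6) for leaf in t}
print(depths)                          # all values equal 1.0
assert len(set(depths.values())) == 1
```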
spotify/luigi
c3b66f4a5fa7eaa52f9a72eb6704b1049035c789
luigi/task_register.py
python
Register._missing_task_msg
(cls, task_name)
[]
def _missing_task_msg(cls, task_name): weighted_tasks = [(Register._editdistance(task_name, task_name_2), task_name_2) for task_name_2 in cls.task_names()] ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0]) candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)] if candidates: return "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates)) else: return "No task %s. Candidates are: %s" % (task_name, cls.tasks_str())
[ "def", "_missing_task_msg", "(", "cls", ",", "task_name", ")", ":", "weighted_tasks", "=", "[", "(", "Register", ".", "_editdistance", "(", "task_name", ",", "task_name_2", ")", ",", "task_name_2", ")", "for", "task_name_2", "in", "cls", ".", "task_names", "(", ")", "]", "ordered_tasks", "=", "sorted", "(", "weighted_tasks", ",", "key", "=", "lambda", "pair", ":", "pair", "[", "0", "]", ")", "candidates", "=", "[", "task", "for", "(", "dist", ",", "task", ")", "in", "ordered_tasks", "if", "dist", "<=", "5", "and", "dist", "<", "len", "(", "task", ")", "]", "if", "candidates", ":", "return", "\"No task %s. Did you mean:\\n%s\"", "%", "(", "task_name", ",", "'\\n'", ".", "join", "(", "candidates", ")", ")", "else", ":", "return", "\"No task %s. Candidates are: %s\"", "%", "(", "task_name", ",", "cls", ".", "tasks_str", "(", ")", ")" ]
https://github.com/spotify/luigi/blob/c3b66f4a5fa7eaa52f9a72eb6704b1049035c789/luigi/task_register.py#L215-L222
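`_missing_task_msg` suggests near misses by edit distance, keeping candidates within distance 5 whose distance is also smaller than their own length; a self-contained sketch with a hand-rolled Levenshtein standing in for `Register._editdistance` and a plain list standing in for the registry:

```python
def editdistance(a, b):
    # classic dynamic-programming Levenshtein distance
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

task_names = ["CleanData", "TrainModel", "ScoreModel"]   # stand-in registry
task_name = "TrainModle"                                 # misspelled task

weighted = sorted((editdistance(task_name, t), t) for t in task_names)
candidates = [t for dist, t in weighted if dist <= 5 and dist < len(t)]
print(candidates)   # closest plausible names first, e.g. ['TrainModel', ...]
```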
wistbean/fxxkpython
88e16d79d8dd37236ba6ecd0d0ff11d63143968c
vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/_inputstream.py
python
lookupEncoding
(encoding)
Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.
Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.
[ "Return", "the", "python", "codec", "name", "corresponding", "to", "an", "encoding", "or", "None", "if", "the", "string", "doesn", "t", "correspond", "to", "a", "valid", "encoding", "." ]
def lookupEncoding(encoding): """Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.""" if isinstance(encoding, binary_type): try: encoding = encoding.decode("ascii") except UnicodeDecodeError: return None if encoding is not None: try: return webencodings.lookup(encoding) except AttributeError: return None else: return None
[ "def", "lookupEncoding", "(", "encoding", ")", ":", "if", "isinstance", "(", "encoding", ",", "binary_type", ")", ":", "try", ":", "encoding", "=", "encoding", ".", "decode", "(", "\"ascii\"", ")", "except", "UnicodeDecodeError", ":", "return", "None", "if", "encoding", "is", "not", "None", ":", "try", ":", "return", "webencodings", ".", "lookup", "(", "encoding", ")", "except", "AttributeError", ":", "return", "None", "else", ":", "return", "None" ]
https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/Snake/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/html5lib/_inputstream.py#L908-L923
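A usage sketch, assuming the standalone `webencodings` package (the same one html5lib depends on): byte labels are decoded as ASCII first, and labels `webencodings` does not recognize come back as None:

```python
import webencodings

def lookup(label):
    # same contract as lookupEncoding: an Encoding object or None
    if isinstance(label, bytes):
        try:
            label = label.decode("ascii")
        except UnicodeDecodeError:
            return None
    if label is None:
        return None
    return webencodings.lookup(label)

print(lookup(b"UTF-8"))        # <Encoding utf-8>
print(lookup("no-such-enc"))   # None
```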
google-research/language
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
language/nqg/model/induction/induction_utils.py
python
_example_to_rule
(source_str, target_str)
return qcfg_rule.QCFGRule( tuple(source_str.split()), tuple(target_str.split()), arity=0)
Convert (source, target) example to a QCFGRule.
Convert (source, target) example to a QCFGRule.
[ "Convert", "(", "source", "target", ")", "example", "to", "a", "QCFGRule", "." ]
def _example_to_rule(source_str, target_str): """Convert (source, target) example to a QCFGRule.""" return qcfg_rule.QCFGRule( tuple(source_str.split()), tuple(target_str.split()), arity=0)
[ "def", "_example_to_rule", "(", "source_str", ",", "target_str", ")", ":", "return", "qcfg_rule", ".", "QCFGRule", "(", "tuple", "(", "source_str", ".", "split", "(", ")", ")", ",", "tuple", "(", "target_str", ".", "split", "(", ")", ")", ",", "arity", "=", "0", ")" ]
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/nqg/model/induction/induction_utils.py#L229-L232
daoluan/decode-Django
d46a858b45b56de48b0355f50dd9e45402d04cfd
Django-1.5.1/django/views/i18n.py
python
null_javascript_catalog
(request, domain=None, packages=None)
return http.HttpResponse(''.join(src), 'text/javascript')
Returns "identity" versions of the JavaScript i18n functions -- i.e., versions that don't actually do anything.
Returns "identity" versions of the JavaScript i18n functions -- i.e., versions that don't actually do anything.
[ "Returns", "identity", "versions", "of", "the", "JavaScript", "i18n", "functions", "--", "i", ".", "e", ".", "versions", "that", "don", "t", "actually", "do", "anything", "." ]
def null_javascript_catalog(request, domain=None, packages=None): """ Returns "identity" versions of the JavaScript i18n functions -- i.e., versions that don't actually do anything. """ src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot] return http.HttpResponse(''.join(src), 'text/javascript')
[ "def", "null_javascript_catalog", "(", "request", ",", "domain", "=", "None", ",", "packages", "=", "None", ")", ":", "src", "=", "[", "NullSource", ",", "InterPolate", ",", "LibFormatHead", ",", "get_formats", "(", ")", ",", "LibFormatFoot", "]", "return", "http", ".", "HttpResponse", "(", "''", ".", "join", "(", "src", ")", ",", "'text/javascript'", ")" ]
https://github.com/daoluan/decode-Django/blob/d46a858b45b56de48b0355f50dd9e45402d04cfd/Django-1.5.1/django/views/i18n.py#L165-L171
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/crystals/kirillov_reshetikhin.py
python
KR_type_C.classical_decomposition
(self)
return CrystalOfTableaux(self.cartan_type().classical(), shapes=horizontal_dominoes_removed(self.r(), self.s()))
r""" Return the classical crystal underlying the Kirillov-Reshetikhin crystal of type `C_n^{(1)}`. It is given by `B^{r,s} \cong \bigoplus_{\Lambda} B(\Lambda)`, where `\Lambda` are weights obtained from a rectangle of width `s` and height `r` by removing horizontal dominoes. Here we identify the fundamental weight `\Lambda_i` with a column of height `i`. EXAMPLES:: sage: K = crystals.KirillovReshetikhin(['C',3,1], 2,2) sage: K.classical_decomposition() The crystal of tableaux of type ['C', 3] and shape(s) [[], [2], [2, 2]]
r""" Return the classical crystal underlying the Kirillov-Reshetikhin crystal of type `C_n^{(1)}`.
[ "r", "Return", "the", "classical", "crystal", "underlying", "the", "Kirillov", "-", "Reshetikhin", "crystal", "of", "type", "C_n^", "{", "(", "1", ")", "}", "." ]
def classical_decomposition(self): r""" Return the classical crystal underlying the Kirillov-Reshetikhin crystal of type `C_n^{(1)}`. It is given by `B^{r,s} \cong \bigoplus_{\Lambda} B(\Lambda)`, where `\Lambda` are weights obtained from a rectangle of width `s` and height `r` by removing horizontal dominoes. Here we identify the fundamental weight `\Lambda_i` with a column of height `i`. EXAMPLES:: sage: K = crystals.KirillovReshetikhin(['C',3,1], 2,2) sage: K.classical_decomposition() The crystal of tableaux of type ['C', 3] and shape(s) [[], [2], [2, 2]] """ return CrystalOfTableaux(self.cartan_type().classical(), shapes=horizontal_dominoes_removed(self.r(), self.s()))
[ "def", "classical_decomposition", "(", "self", ")", ":", "return", "CrystalOfTableaux", "(", "self", ".", "cartan_type", "(", ")", ".", "classical", "(", ")", ",", "shapes", "=", "horizontal_dominoes_removed", "(", "self", ".", "r", "(", ")", ",", "self", ".", "s", "(", ")", ")", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/crystals/kirillov_reshetikhin.py#L1319-L1336
brechtm/rinohtype
d03096f9b1b0ba2d821a25356d84dc6d3028c96c
src/rinoh/text.py
python
StyledText.y_offset
(self, container)
return offset
Vertical baseline offset (up is positive).
Vertical baseline offset (up is positive).
[ "Vertical", "baseline", "offset", "(", "up", "is", "positive", ")", "." ]
def y_offset(self, container): """Vertical baseline offset (up is positive).""" offset = (self.parent.y_offset(container) if hasattr(self.parent, 'y_offset') else 0) if self.is_script(container): position = self.get_style('position', container) offset += self.parent.height(container) * self.position[position] return offset
[ "def", "y_offset", "(", "self", ",", "container", ")", ":", "offset", "=", "(", "self", ".", "parent", ".", "y_offset", "(", "container", ")", "if", "hasattr", "(", "self", ".", "parent", ",", "'y_offset'", ")", "else", "0", ")", "if", "self", ".", "is_script", "(", "container", ")", ":", "position", "=", "self", ".", "get_style", "(", "'position'", ",", "container", ")", "offset", "+=", "self", ".", "parent", ".", "height", "(", "container", ")", "*", "self", ".", "position", "[", "position", "]", "return", "offset" ]
https://github.com/brechtm/rinohtype/blob/d03096f9b1b0ba2d821a25356d84dc6d3028c96c/src/rinoh/text.py#L300-L307
cbrgm/telegram-robot-rss
58fe98de427121fdc152c8df0721f1891174e6c9
venv/lib/python2.7/site-packages/setuptools/command/build_py.py
python
build_py.check_package
(self, package, package_dir)
return init_py
Check namespace packages' __init__ for declare_namespace
Check namespace packages' __init__ for declare_namespace
[ "Check", "namespace", "packages", "__init__", "for", "declare_namespace" ]
def check_package(self, package, package_dir): """Check namespace packages' __init__ for declare_namespace""" try: return self.packages_checked[package] except KeyError: pass init_py = orig.build_py.check_package(self, package, package_dir) self.packages_checked[package] = init_py if not init_py or not self.distribution.namespace_packages: return init_py for pkg in self.distribution.namespace_packages: if pkg == package or pkg.startswith(package + '.'): break else: return init_py with io.open(init_py, 'rb') as f: contents = f.read() if b'declare_namespace' not in contents: raise distutils.errors.DistutilsError( "Namespace package problem: %s is a namespace package, but " "its\n__init__.py does not call declare_namespace()! Please " 'fix it.\n(See the setuptools manual under ' '"Namespace Packages" for details.)\n"' % (package,) ) return init_py
[ "def", "check_package", "(", "self", ",", "package", ",", "package_dir", ")", ":", "try", ":", "return", "self", ".", "packages_checked", "[", "package", "]", "except", "KeyError", ":", "pass", "init_py", "=", "orig", ".", "build_py", ".", "check_package", "(", "self", ",", "package", ",", "package_dir", ")", "self", ".", "packages_checked", "[", "package", "]", "=", "init_py", "if", "not", "init_py", "or", "not", "self", ".", "distribution", ".", "namespace_packages", ":", "return", "init_py", "for", "pkg", "in", "self", ".", "distribution", ".", "namespace_packages", ":", "if", "pkg", "==", "package", "or", "pkg", ".", "startswith", "(", "package", "+", "'.'", ")", ":", "break", "else", ":", "return", "init_py", "with", "io", ".", "open", "(", "init_py", ",", "'rb'", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "if", "b'declare_namespace'", "not", "in", "contents", ":", "raise", "distutils", ".", "errors", ".", "DistutilsError", "(", "\"Namespace package problem: %s is a namespace package, but \"", "\"its\\n__init__.py does not call declare_namespace()! Please \"", "'fix it.\\n(See the setuptools manual under '", "'\"Namespace Packages\" for details.)\\n\"'", "%", "(", "package", ",", ")", ")", "return", "init_py" ]
https://github.com/cbrgm/telegram-robot-rss/blob/58fe98de427121fdc152c8df0721f1891174e6c9/venv/lib/python2.7/site-packages/setuptools/command/build_py.py#L156-L184
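What the check enforces is the classic setuptools convention for namespace packages: each portion of the namespace ships an `__init__.py` whose only job is to declare the namespace. A minimal sketch of such a file, with a hypothetical package name:

```python
# mypkg/__init__.py -- the declaration check_package scans for
__import__('pkg_resources').declare_namespace(__name__)
```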
GoSecure/pyrdp
abd8b8762b6d7fd0e49d4a927b529f892b412743
pyrdp/parser/rdp/orders/primary.py
python
MultiScrBlt.__init__
(self, ctx: PrimaryContext)
[]
def __init__(self, ctx: PrimaryContext): self.ctx = ctx self.nLeftRect = 0 self.nTopRect = 0 self.nWidth = 0 self.nHeight = 0 self.bRop = 0 self.nXSrc = 0 self.nYSrc = 0 self.numRectangles = 0 self.cbData = 0 self.rectangles = []
[ "def", "__init__", "(", "self", ",", "ctx", ":", "PrimaryContext", ")", ":", "self", ".", "ctx", "=", "ctx", "self", ".", "nLeftRect", "=", "0", "self", ".", "nTopRect", "=", "0", "self", ".", "nWidth", "=", "0", "self", ".", "nHeight", "=", "0", "self", ".", "bRop", "=", "0", "self", ".", "nXSrc", "=", "0", "self", ".", "nYSrc", "=", "0", "self", ".", "numRectangles", "=", "0", "self", ".", "cbData", "=", "0", "self", ".", "rectangles", "=", "[", "]" ]
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/parser/rdp/orders/primary.py#L724-L736
airbnb/streamalert
26cf1d08432ca285fd4f7410511a6198ca104bbb
streamalert/shared/lookup_tables/core.py
python
LookupTablesCore.get
(self, table_name, key, default=None)
return self.get_table(table_name).get(key, default)
Syntax sugar for get_table().get() Params: table_name (str) key (str) default (mixed) Returns: mixed
Syntax sugar for get_table().get()
[ "Syntax", "sugar", "for", "get_table", "()", ".", "get", "()" ]
def get(self, table_name, key, default=None): """ Syntax sugar for get_table().get() Params: table_name (str) key (str) default (mixed) Returns: mixed """ return self.get_table(table_name).get(key, default)
[ "def", "get", "(", "self", ",", "table_name", ",", "key", ",", "default", "=", "None", ")", ":", "return", "self", ".", "get_table", "(", "table_name", ")", ".", "get", "(", "key", ",", "default", ")" ]
https://github.com/airbnb/streamalert/blob/26cf1d08432ca285fd4f7410511a6198ca104bbb/streamalert/shared/lookup_tables/core.py#L141-L153
wangheda/youtube-8m
07e54b387ee027cb58b0c14f5eb7c88cfa516d58
youtube-8m-wangheda/eval_util.py
python
EvaluationMetrics.clear
(self)
Clear the evaluation metrics and reset the EvaluationMetrics object.
Clear the evaluation metrics and reset the EvaluationMetrics object.
[ "Clear", "the", "evaluation", "metrics", "and", "reset", "the", "EvaluationMetrics", "object", "." ]
def clear(self): """Clear the evaluation metrics and reset the EvaluationMetrics object.""" self.sum_hit_at_one = 0.0 self.sum_perr = 0.0 self.sum_loss = 0.0 self.map_calculator.clear() self.global_ap_calculator.clear() self.num_examples = 0
[ "def", "clear", "(", "self", ")", ":", "self", ".", "sum_hit_at_one", "=", "0.0", "self", ".", "sum_perr", "=", "0.0", "self", ".", "sum_loss", "=", "0.0", "self", ".", "map_calculator", ".", "clear", "(", ")", "self", ".", "global_ap_calculator", ".", "clear", "(", ")", "self", ".", "num_examples", "=", "0" ]
https://github.com/wangheda/youtube-8m/blob/07e54b387ee027cb58b0c14f5eb7c88cfa516d58/youtube-8m-wangheda/eval_util.py#L247-L254
jaywink/socialhome
c3178b044936a5c57a502ab6ed2b4f43c8e076ca
socialhome/contrib/sites/migrations/0002_set_site_domain_and_name.py
python
update_site_forward
(apps, schema_editor)
Set site domain and name.
Set site domain and name.
[ "Set", "site", "domain", "and", "name", "." ]
def update_site_forward(apps, schema_editor): """Set site domain and name.""" Site = apps.get_model("sites", "Site") Site.objects.update_or_create( id=settings.SITE_ID, defaults={ "domain": "socialhome.network", "name": "Socialhome" } )
[ "def", "update_site_forward", "(", "apps", ",", "schema_editor", ")", ":", "Site", "=", "apps", ".", "get_model", "(", "\"sites\"", ",", "\"Site\"", ")", "Site", ".", "objects", ".", "update_or_create", "(", "id", "=", "settings", ".", "SITE_ID", ",", "defaults", "=", "{", "\"domain\"", ":", "\"socialhome.network\"", ",", "\"name\"", ":", "\"Socialhome\"", "}", ")" ]
https://github.com/jaywink/socialhome/blob/c3178b044936a5c57a502ab6ed2b4f43c8e076ca/socialhome/contrib/sites/migrations/0002_set_site_domain_and_name.py#L14-L23
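As a data migration, this function is meant to be referenced from the migration's `operations` list via `RunPython`; a sketch of that wiring, where the reverse function name is an assumption about the rest of the file:

```python
from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("sites", "0001_initial")]

    operations = [
        # forward sets domain/name; the reverse function would undo it
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
```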
nlloyd/SubliminalCollaborator
5c619e17ddbe8acb9eea8996ec038169ddcd50a1
libs/twisted/mail/maildir.py
python
MaildirMailbox.appendMessage
(self, txt)
return result
Appends a message into the mailbox. @param txt: A C{str} or file-like object giving the message to append. @return: A L{Deferred} which fires when the message has been appended to the mailbox.
Appends a message into the mailbox.
[ "Appends", "a", "message", "into", "the", "mailbox", "." ]
def appendMessage(self, txt): """ Appends a message into the mailbox. @param txt: A C{str} or file-like object giving the message to append. @return: A L{Deferred} which fires when the message has been appended to the mailbox. """ task = self.AppendFactory(self, txt) result = task.defer task.startUp() return result
[ "def", "appendMessage", "(", "self", ",", "txt", ")", ":", "task", "=", "self", ".", "AppendFactory", "(", "self", ",", "txt", ")", "result", "=", "task", ".", "defer", "task", ".", "startUp", "(", ")", "return", "result" ]
https://github.com/nlloyd/SubliminalCollaborator/blob/5c619e17ddbe8acb9eea8996ec038169ddcd50a1/libs/twisted/mail/maildir.py#L344-L356
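Since `appendMessage` returns a `Deferred`, callers attach callbacks instead of blocking; a usage sketch, assuming a Twisted install and a hypothetical maildir path (initialized first so the new/cur/tmp directories exist):

```python
from twisted.internet import reactor
from twisted.mail.maildir import MaildirMailbox, initializeMaildir

path = "/tmp/example-maildir"   # hypothetical location
initializeMaildir(path)         # creates new/, cur/, tmp/
mbox = MaildirMailbox(path)

d = mbox.appendMessage("From: a@example.com\r\n\r\nhello\r\n")
d.addCallback(lambda _: print("message appended"))
d.addErrback(lambda f: print("append failed:", f))
d.addBoth(lambda _: reactor.stop())

reactor.run()
```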
wangheda/youtube-8m
07e54b387ee027cb58b0c14f5eb7c88cfa516d58
youtube-8m-zhangteng/frame_level_models.py
python
LstmMultiscale3Model.rnn_standard
(self, model_input, lstm_size, num_frames,sub_scope="", **unused_params)
return state_out
Creates a model which uses a stack of LSTMs to represent the video. Args: model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of input features. vocab_size: The number of classes in the dataset. num_frames: A vector of length 'batch' which indicates the number of frames for each video (before padding). Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are 'batch_size' x 'num_classes'.
Creates a model which uses a stack of LSTMs to represent the video.
[ "Creates", "a", "model", "which", "uses", "a", "stack", "of", "LSTMs", "to", "represent", "the", "video", "." ]
def rnn_standard(self, model_input, lstm_size, num_frames,sub_scope="", **unused_params): """Creates a model which uses a stack of LSTMs to represent the video. Args: model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of input features. vocab_size: The number of classes in the dataset. num_frames: A vector of length 'batch' which indicates the number of frames for each video (before padding). Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are 'batch_size' x 'num_classes'. """ ## Batch normalize the input stacked_lstm = tf.contrib.rnn.MultiRNNCell( [ tf.contrib.rnn.BasicLSTMCell( lstm_size, forget_bias=1.0, state_is_tuple=True) for _ in range(1) ], state_is_tuple=True) with tf.variable_scope("RNN-"+sub_scope): outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input, sequence_length=num_frames, swap_memory=True, dtype=tf.float32) state_out = tf.concat(map(lambda x: x.c, state), axis=1) return state_out
[ "def", "rnn_standard", "(", "self", ",", "model_input", ",", "lstm_size", ",", "num_frames", ",", "sub_scope", "=", "\"\"", ",", "*", "*", "unused_params", ")", ":", "## Batch normalize the input", "stacked_lstm", "=", "tf", ".", "contrib", ".", "rnn", ".", "MultiRNNCell", "(", "[", "tf", ".", "contrib", ".", "rnn", ".", "BasicLSTMCell", "(", "lstm_size", ",", "forget_bias", "=", "1.0", ",", "state_is_tuple", "=", "True", ")", "for", "_", "in", "range", "(", "1", ")", "]", ",", "state_is_tuple", "=", "True", ")", "with", "tf", ".", "variable_scope", "(", "\"RNN-\"", "+", "sub_scope", ")", ":", "outputs", ",", "state", "=", "tf", ".", "nn", ".", "dynamic_rnn", "(", "stacked_lstm", ",", "model_input", ",", "sequence_length", "=", "num_frames", ",", "swap_memory", "=", "True", ",", "dtype", "=", "tf", ".", "float32", ")", "state_out", "=", "tf", ".", "concat", "(", "map", "(", "lambda", "x", ":", "x", ".", "c", ",", "state", ")", ",", "axis", "=", "1", ")", "return", "state_out" ]
https://github.com/wangheda/youtube-8m/blob/07e54b387ee027cb58b0c14f5eb7c88cfa516d58/youtube-8m-zhangteng/frame_level_models.py#L3523-L3554
GoSecure/pyrdp
abd8b8762b6d7fd0e49d4a927b529f892b412743
pyrdp/logging/observers.py
python
MCSLogger.onPDUReceived
(self, pdu: MCSPDU)
[]
def onPDUReceived(self, pdu: MCSPDU): if pdu.header in [MCSPDUType.SEND_DATA_REQUEST, MCSPDUType.SEND_DATA_INDICATION]: self.log.debug("Received %(type)s", {"type": pdu.header}) else: self.logPDU(pdu) super().onPDUReceived(pdu)
[ "def", "onPDUReceived", "(", "self", ",", "pdu", ":", "MCSPDU", ")", ":", "if", "pdu", ".", "header", "in", "[", "MCSPDUType", ".", "SEND_DATA_REQUEST", ",", "MCSPDUType", ".", "SEND_DATA_INDICATION", "]", ":", "self", ".", "log", ".", "debug", "(", "\"Received %(type)s\"", ",", "{", "\"type\"", ":", "pdu", ".", "header", "}", ")", "else", ":", "self", ".", "logPDU", "(", "pdu", ")", "super", "(", ")", ".", "onPDUReceived", "(", "pdu", ")" ]
https://github.com/GoSecure/pyrdp/blob/abd8b8762b6d7fd0e49d4a927b529f892b412743/pyrdp/logging/observers.py#L53-L59
RaRe-Technologies/gensim
8b8203d8df354673732dff635283494a33d0d422
gensim/models/word2vec.py
python
Word2Vec.score
(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1)
return sentence_scores[:sentence_count]
Score the log probability for a sequence of sentences. This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that). Gensim has currently only implemented score for the hierarchical softmax scheme, so you need to have run word2vec with `hs=1` and `negative=0` for this to work. Note that you should specify `total_sentences`; you'll run into problems if you ask to score more than this number of sentences but it is inefficient to set the value too high. See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations" <https://arxiv.org/pdf/1504.07295.pdf>`_ and the `gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of how to use such scores in document classification. Parameters ---------- sentences : iterable of list of str The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora, consider an iterable that streams the sentences directly from disk/network. See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples. total_sentences : int, optional Count of sentences. chunksize : int, optional Chunksize of jobs queue_factor : int, optional Multiplier for size of queue (number of workers * queue_factor). report_delay : float, optional Seconds to wait before reporting progress.
Score the log probability for a sequence of sentences. This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that).
[ "Score", "the", "log", "probability", "for", "a", "sequence", "of", "sentences", ".", "This", "does", "not", "change", "the", "fitted", "model", "in", "any", "way", "(", "see", ":", "meth", ":", "~gensim", ".", "models", ".", "word2vec", ".", "Word2Vec", ".", "train", "for", "that", ")", "." ]
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1): """Score the log probability for a sequence of sentences. This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that). Gensim has currently only implemented score for the hierarchical softmax scheme, so you need to have run word2vec with `hs=1` and `negative=0` for this to work. Note that you should specify `total_sentences`; you'll run into problems if you ask to score more than this number of sentences but it is inefficient to set the value too high. See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations" <https://arxiv.org/pdf/1504.07295.pdf>`_ and the `gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of how to use such scores in document classification. Parameters ---------- sentences : iterable of list of str The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora, consider an iterable that streams the sentences directly from disk/network. See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples. total_sentences : int, optional Count of sentences. chunksize : int, optional Chunksize of jobs queue_factor : int, optional Multiplier for size of queue (number of workers * queue_factor). report_delay : float, optional Seconds to wait before reporting progress. """ logger.info( "scoring sentences with %i workers on %i vocabulary and %i features, " "using sg=%s hs=%s sample=%s and negative=%s", self.workers, len(self.wv), self.layer1_size, self.sg, self.hs, self.sample, self.negative ) if not self.wv.key_to_index: raise RuntimeError("you must first build vocabulary before scoring new data") if not self.hs: raise RuntimeError( "We have currently only implemented score for the hierarchical softmax scheme, " "so you need to have run word2vec with hs=1 and negative=0 for this to work." ) def worker_loop(): """Compute log probability for each sentence, lifting lists of sentences from the jobs queue.""" work = np.zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum) neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL) while True: job = job_queue.get() if job is None: # signal to finish break ns = 0 for sentence_id, sentence in job: if sentence_id >= total_sentences: break if self.sg: score = score_sentence_sg(self, sentence, work) else: score = score_sentence_cbow(self, sentence, work, neu1) sentence_scores[sentence_id] = score ns += 1 progress_queue.put(ns) # report progress start, next_report = default_timer(), 1.0 # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :( job_queue = Queue(maxsize=queue_factor * self.workers) progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers) workers = [threading.Thread(target=worker_loop) for _ in range(self.workers)] for thread in workers: thread.daemon = True # make interrupting the process with ctrl+c easier thread.start() sentence_count = 0 sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL) push_done = False done_jobs = 0 jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize)) # fill jobs queue with (id, sentence) job items while True: try: job_no, items = next(jobs_source) if (job_no - 1) * chunksize > total_sentences: logger.warning( "terminating after %i sentences (set higher total_sentences if you want more).", total_sentences ) job_no -= 1 raise StopIteration() logger.debug("putting job #%i in the queue", job_no) job_queue.put(items) except StopIteration: logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1) for _ in range(self.workers): job_queue.put(None) # give the workers heads up that they can finish -- no more work! push_done = True try: while done_jobs < (job_no + 1) or not push_done: ns = progress_queue.get(push_done) # only block after all jobs pushed sentence_count += ns done_jobs += 1 elapsed = default_timer() - start if elapsed >= next_report: logger.info( "PROGRESS: at %.2f%% sentences, %.0f sentences/s", 100.0 * sentence_count, sentence_count / elapsed ) next_report = elapsed + report_delay # don't flood log, wait report_delay seconds else: # loop ended by job count; really done break except Empty: pass # already out of loop; continue to next push elapsed = default_timer() - start self.wv.norms = None # clear any cached lengths logger.info( "scoring %i sentences took %.1fs, %.0f sentences/s", sentence_count, elapsed, sentence_count / elapsed ) return sentence_scores[:sentence_count]
[ "def", "score", "(", "self", ",", "sentences", ",", "total_sentences", "=", "int", "(", "1e6", ")", ",", "chunksize", "=", "100", ",", "queue_factor", "=", "2", ",", "report_delay", "=", "1", ")", ":", "logger", ".", "info", "(", "\"scoring sentences with %i workers on %i vocabulary and %i features, \"", "\"using sg=%s hs=%s sample=%s and negative=%s\"", ",", "self", ".", "workers", ",", "len", "(", "self", ".", "wv", ")", ",", "self", ".", "layer1_size", ",", "self", ".", "sg", ",", "self", ".", "hs", ",", "self", ".", "sample", ",", "self", ".", "negative", ")", "if", "not", "self", ".", "wv", ".", "key_to_index", ":", "raise", "RuntimeError", "(", "\"you must first build vocabulary before scoring new data\"", ")", "if", "not", "self", ".", "hs", ":", "raise", "RuntimeError", "(", "\"We have currently only implemented score for the hierarchical softmax scheme, \"", "\"so you need to have run word2vec with hs=1 and negative=0 for this to work.\"", ")", "def", "worker_loop", "(", ")", ":", "\"\"\"Compute log probability for each sentence, lifting lists of sentences from the jobs queue.\"\"\"", "work", "=", "np", ".", "zeros", "(", "1", ",", "dtype", "=", "REAL", ")", "# for sg hs, we actually only need one memory loc (running sum)", "neu1", "=", "matutils", ".", "zeros_aligned", "(", "self", ".", "layer1_size", ",", "dtype", "=", "REAL", ")", "while", "True", ":", "job", "=", "job_queue", ".", "get", "(", ")", "if", "job", "is", "None", ":", "# signal to finish", "break", "ns", "=", "0", "for", "sentence_id", ",", "sentence", "in", "job", ":", "if", "sentence_id", ">=", "total_sentences", ":", "break", "if", "self", ".", "sg", ":", "score", "=", "score_sentence_sg", "(", "self", ",", "sentence", ",", "work", ")", "else", ":", "score", "=", "score_sentence_cbow", "(", "self", ",", "sentence", ",", "work", ",", "neu1", ")", "sentence_scores", "[", "sentence_id", "]", "=", "score", "ns", "+=", "1", "progress_queue", ".", "put", "(", "ns", ")", "# report progress", "start", ",", "next_report", "=", "default_timer", "(", ")", ",", "1.0", "# buffer ahead only a limited number of jobs.. 
this is the reason we can't simply use ThreadPool :(", "job_queue", "=", "Queue", "(", "maxsize", "=", "queue_factor", "*", "self", ".", "workers", ")", "progress_queue", "=", "Queue", "(", "maxsize", "=", "(", "queue_factor", "+", "1", ")", "*", "self", ".", "workers", ")", "workers", "=", "[", "threading", ".", "Thread", "(", "target", "=", "worker_loop", ")", "for", "_", "in", "range", "(", "self", ".", "workers", ")", "]", "for", "thread", "in", "workers", ":", "thread", ".", "daemon", "=", "True", "# make interrupting the process with ctrl+c easier", "thread", ".", "start", "(", ")", "sentence_count", "=", "0", "sentence_scores", "=", "matutils", ".", "zeros_aligned", "(", "total_sentences", ",", "dtype", "=", "REAL", ")", "push_done", "=", "False", "done_jobs", "=", "0", "jobs_source", "=", "enumerate", "(", "utils", ".", "grouper", "(", "enumerate", "(", "sentences", ")", ",", "chunksize", ")", ")", "# fill jobs queue with (id, sentence) job items", "while", "True", ":", "try", ":", "job_no", ",", "items", "=", "next", "(", "jobs_source", ")", "if", "(", "job_no", "-", "1", ")", "*", "chunksize", ">", "total_sentences", ":", "logger", ".", "warning", "(", "\"terminating after %i sentences (set higher total_sentences if you want more).\"", ",", "total_sentences", ")", "job_no", "-=", "1", "raise", "StopIteration", "(", ")", "logger", ".", "debug", "(", "\"putting job #%i in the queue\"", ",", "job_no", ")", "job_queue", ".", "put", "(", "items", ")", "except", "StopIteration", ":", "logger", ".", "info", "(", "\"reached end of input; waiting to finish %i outstanding jobs\"", ",", "job_no", "-", "done_jobs", "+", "1", ")", "for", "_", "in", "range", "(", "self", ".", "workers", ")", ":", "job_queue", ".", "put", "(", "None", ")", "# give the workers heads up that they can finish -- no more work!", "push_done", "=", "True", "try", ":", "while", "done_jobs", "<", "(", "job_no", "+", "1", ")", "or", "not", "push_done", ":", "ns", "=", "progress_queue", ".", "get", "(", "push_done", ")", "# only block after all jobs pushed", "sentence_count", "+=", "ns", "done_jobs", "+=", "1", "elapsed", "=", "default_timer", "(", ")", "-", "start", "if", "elapsed", ">=", "next_report", ":", "logger", ".", "info", "(", "\"PROGRESS: at %.2f%% sentences, %.0f sentences/s\"", ",", "100.0", "*", "sentence_count", ",", "sentence_count", "/", "elapsed", ")", "next_report", "=", "elapsed", "+", "report_delay", "# don't flood log, wait report_delay seconds", "else", ":", "# loop ended by job count; really done", "break", "except", "Empty", ":", "pass", "# already out of loop; continue to next push", "elapsed", "=", "default_timer", "(", ")", "-", "start", "self", ".", "wv", ".", "norms", "=", "None", "# clear any cached lengths", "logger", ".", "info", "(", "\"scoring %i sentences took %.1fs, %.0f sentences/s\"", ",", "sentence_count", ",", "elapsed", ",", "sentence_count", "/", "elapsed", ")", "return", "sentence_scores", "[", ":", "sentence_count", "]" ]
https://github.com/RaRe-Technologies/gensim/blob/8b8203d8df354673732dff635283494a33d0d422/gensim/models/word2vec.py#L1679-L1806
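A usage sketch for `score`, assuming gensim 4.x (the `wv.key_to_index` check above implies that era): the model must be trained with `hs=1, negative=0`, and the return value is one log probability per scored sentence:

```python
from gensim.models import Word2Vec

sentences = [
    "the quick brown fox".split(),
    "jumped over the lazy dog".split(),
]

# hierarchical softmax is required for score(), per the docstring
model = Word2Vec(sentences, vector_size=10, min_count=1, hs=1, negative=0)

log_probs = model.score(sentences, total_sentences=len(sentences))
print(log_probs)   # array of per-sentence log probabilities
```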
chapmanb/bcbb
dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027
nextgen/bcbio/variation/genotype.py
python
haplotype_caller
(align_bams, ref_file, config, dbsnp=None, region=None, out_file=None)
return out_file
Call variation with GATK's HaplotypeCaller. This requires the full non open-source version of GATK.
Call variation with GATK's HaplotypeCaller.
[ "Call", "variation", "with", "GATK", "s", "HaplotypeCaller", "." ]
def haplotype_caller(align_bams, ref_file, config, dbsnp=None, region=None, out_file=None): """Call variation with GATK's HaplotypeCaller. This requires the full non open-source version of GATK. """ broad_runner, params, out_file = \ _shared_gatk_call_prep(align_bams, ref_file, config, dbsnp, region, out_file) assert broad_runner.has_gatk_full(), \ "Require full version of GATK 2.0 for haplotype based calling" if not file_exists(out_file): if not all(has_aligned_reads(x, region) for x in align_bams): write_empty_vcf(out_file) else: with file_transaction(out_file) as tx_out_file: params += ["-T", "HaplotypeCaller", "-o", tx_out_file] broad_runner.run_gatk(params) return out_file
[ "def", "haplotype_caller", "(", "align_bam", ",", "ref_file", ",", "config", ",", "dbsnp", "=", "None", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "broad_runner", ",", "params", ",", "out_file", "=", "_shared_gatk_call_prep", "(", "align_bams", ",", "ref_file", ",", "config", ",", "dbsnp", ",", "region", ",", "out_file", ")", "assert", "broad_runner", ".", "has_gatk_full", "(", ")", ",", "\"Require full version of GATK 2.0 for haplotype based calling\"", "if", "not", "file_exists", "(", "out_file", ")", ":", "if", "not", "all", "(", "has_aligned_reads", "(", "x", ",", "region", ")", "for", "x", "in", "align_bams", ")", ":", "write_empty_vcf", "(", "out_file", ")", "else", ":", "with", "file_transaction", "(", "out_file", ")", "as", "tx_out_file", ":", "params", "+=", "[", "\"-T\"", ",", "\"HaplotypeCaller\"", ",", "\"-o\"", ",", "tx_out_file", "]", "broad_runner", ".", "run_gatk", "(", "params", ")", "return", "out_file" ]
https://github.com/chapmanb/bcbb/blob/dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027/nextgen/bcbio/variation/genotype.py#L81-L100
onaio/onadata
89ad16744e8f247fb748219476f6ac295869a95f
onadata/libs/serializers/metadata_serializer.py
python
MetaDataSerializer.get_content_object
(self, validated_data)
Returns the validated 'xform' or 'project' or 'instance' ids being linked to the metadata.
Returns the validated 'xform' or 'project' or 'instance' ids being linked to the metadata.
[ "Returns", "the", "validated", "xform", "or", "project", "or", "instance", "ids", "being", "linked", "to", "the", "metadata", "." ]
def get_content_object(self, validated_data): """ Returns the validated 'xform' or 'project' or 'instance' ids being linked to the metadata. """ if validated_data: return (validated_data.get('xform') or validated_data.get('project') or validated_data.get('instance'))
[ "def", "get_content_object", "(", "self", ",", "validated_data", ")", ":", "if", "validated_data", ":", "return", "(", "validated_data", ".", "get", "(", "'xform'", ")", "or", "validated_data", ".", "get", "(", "'project'", ")", "or", "validated_data", ".", "get", "(", "'instance'", ")", ")" ]
https://github.com/onaio/onadata/blob/89ad16744e8f247fb748219476f6ac295869a95f/onadata/libs/serializers/metadata_serializer.py#L211-L220
ntalekt/homeassistant
8fb6da881564430a3324125ddc2bd43cb7c8680f
custom_components/hacs/operational/setup_actions/categories.py
python
_setup_extra_stores
()
Set up extra stores in HACS if enabled in Home Assistant.
Set up extra stores in HACS if enabled in Home Assistant.
[ "Set", "up", "extra", "stores", "in", "HACS", "if", "enabled", "in", "Home", "Assistant", "." ]
def _setup_extra_stores(): """Set up extra stores in HACS if enabled in Home Assistant.""" hacs = get_hacs() hacs.log.debug("Starting setup task: Extra stores") hacs.common.categories = set() for category in ELEMENT_TYPES: enable_category(hacs, HacsCategory(category)) if HacsCategory.PYTHON_SCRIPT in hacs.hass.config.components: if HacsCategory.PYTHON_SCRIPT not in hacs.common.categories: enable_category(hacs, HacsCategory.PYTHON_SCRIPT) if ( hacs.hass.services._services.get("frontend", {}).get("reload_themes") is not None ): if HacsCategory.THEME not in hacs.common.categories: enable_category(hacs, HacsCategory.THEME) if hacs.configuration.appdaemon: enable_category(hacs, HacsCategory.APPDAEMON) if hacs.configuration.netdaemon: enable_category(hacs, HacsCategory.NETDAEMON)
[ "def", "_setup_extra_stores", "(", ")", ":", "hacs", "=", "get_hacs", "(", ")", "hacs", ".", "log", ".", "debug", "(", "\"Starting setup task: Extra stores\"", ")", "hacs", ".", "common", ".", "categories", "=", "set", "(", ")", "for", "category", "in", "ELEMENT_TYPES", ":", "enable_category", "(", "hacs", ",", "HacsCategory", "(", "category", ")", ")", "if", "HacsCategory", ".", "PYTHON_SCRIPT", "in", "hacs", ".", "hass", ".", "config", ".", "components", ":", "if", "HacsCategory", ".", "PYTHON_SCRIPT", "not", "in", "hacs", ".", "common", ".", "categories", ":", "enable_category", "(", "hacs", ",", "HacsCategory", ".", "PYTHON_SCRIPT", ")", "if", "(", "hacs", ".", "hass", ".", "services", ".", "_services", ".", "get", "(", "\"frontend\"", ",", "{", "}", ")", ".", "get", "(", "\"reload_themes\"", ")", "is", "not", "None", ")", ":", "if", "HacsCategory", ".", "THEME", "not", "in", "hacs", ".", "common", ".", "categories", ":", "enable_category", "(", "hacs", ",", "HacsCategory", ".", "THEME", ")", "if", "hacs", ".", "configuration", ".", "appdaemon", ":", "enable_category", "(", "hacs", ",", "HacsCategory", ".", "APPDAEMON", ")", "if", "hacs", ".", "configuration", ".", "netdaemon", ":", "enable_category", "(", "hacs", ",", "HacsCategory", ".", "NETDAEMON", ")" ]
https://github.com/ntalekt/homeassistant/blob/8fb6da881564430a3324125ddc2bd43cb7c8680f/custom_components/hacs/operational/setup_actions/categories.py#L8-L30
halcy/Mastodon.py
e9d2c3d53f7b1d371e5dc5bf47e5fe335b698c85
mastodon/Mastodon.py
python
Mastodon.timeline_home
(self, max_id=None, min_id=None, since_id=None, limit=None)
return self.timeline('home', max_id=max_id, min_id=min_id, since_id=since_id, limit=limit)
Fetch the logged-in user's home timeline (i.e. followed users and self). Returns a list of `toot dicts`_.
Fetch the logged-in user's home timeline (i.e. followed users and self).
[ "Fetch", "the", "logged", "-", "in", "user", "s", "home", "timeline", "(", "i", ".", "e", ".", "followed", "users", "and", "self", ")", "." ]
def timeline_home(self, max_id=None, min_id=None, since_id=None, limit=None): """ Fetch the logged-in user's home timeline (i.e. followed users and self). Returns a list of `toot dicts`_. """ return self.timeline('home', max_id=max_id, min_id=min_id, since_id=since_id, limit=limit)
[ "def", "timeline_home", "(", "self", ",", "max_id", "=", "None", ",", "min_id", "=", "None", ",", "since_id", "=", "None", ",", "limit", "=", "None", ")", ":", "return", "self", ".", "timeline", "(", "'home'", ",", "max_id", "=", "max_id", ",", "min_id", "=", "min_id", ",", "since_id", "=", "since_id", ",", "limit", "=", "limit", ")" ]
https://github.com/halcy/Mastodon.py/blob/e9d2c3d53f7b1d371e5dc5bf47e5fe335b698c85/mastodon/Mastodon.py#L737-L744
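A quick usage sketch for the timeline_home record above. The instance URL and token are placeholders, not values from the source; it also shows paging back with max_id:

from mastodon import Mastodon

# Placeholder instance and token: substitute real values.
api = Mastodon(access_token="YOUR_TOKEN", api_base_url="https://mastodon.example")

# Fetch the most recent home-timeline statuses, then page back past the oldest one.
statuses = api.timeline_home(limit=20)
if statuses:
    older = api.timeline_home(max_id=statuses[-1]["id"], limit=20)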
darxtrix/ptop
653e969c9ac865e38b7235aa5d656b36ac1f9e0d
ptop/statistics/statistics.py
python
Statistics.generate
(self)
Generate the stats using the plugins list periodically
Generate the stats using the plugins list periodically
[ "Generate", "the", "stats", "using", "the", "plugins", "list", "periodically" ]
def generate(self): ''' Generate the stats using the plugins list periodically ''' for sensor in self.plugins: # update the sensors value periodically job = ThreadJob(sensor.update,self.stop_event,self.sensor_refresh_rates[sensor]/1000) job.start()
[ "def", "generate", "(", "self", ")", ":", "for", "sensor", "in", "self", ".", "plugins", ":", "# update the sensors value periodically", "job", "=", "ThreadJob", "(", "sensor", ".", "update", ",", "self", ".", "stop_event", ",", "self", ".", "sensor_refresh_rates", "[", "sensor", "]", "/", "1000", ")", "job", ".", "start", "(", ")" ]
https://github.com/darxtrix/ptop/blob/653e969c9ac865e38b7235aa5d656b36ac1f9e0d/ptop/statistics/statistics.py#L29-L36
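ThreadJob is ptop's own helper and is not shown in this record; a minimal stand-in under the same apparent contract (callable, threading.Event, interval in seconds) could look like the sketch below. This is an assumption about the interface, not ptop's actual implementation:

import threading

class ThreadJob(threading.Thread):
    """Call `callback` every `interval` seconds until `event` is set (sketch)."""
    def __init__(self, callback, event, interval):
        super().__init__(daemon=True)
        self.callback = callback
        self.event = event
        self.interval = interval

    def run(self):
        # wait() doubles as the sleep and returns True once the event is set.
        while not self.event.wait(self.interval):
            self.callback()

stop = threading.Event()
ThreadJob(lambda: print("tick"), stop, 0.5).start()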
cleverhans-lab/cleverhans
e5d00e537ce7ad6119ed5a8db1f0e9736d1f6e1d
cleverhans_v3.1.0/examples/RL-attack/train.py
python
parse_args
()
return parser.parse_args()
[]
def parse_args(): parser = argparse.ArgumentParser("DQN experiments for Atari games") # Environment parser.add_argument("--env", type=str, default="Pong", help="name of the game") parser.add_argument("--seed", type=int, default=42, help="which seed to use") # Core DQN parameters parser.add_argument( "--replay-buffer-size", type=int, default=int(1e6), help="replay buffer size" ) parser.add_argument( "--lr", type=float, default=1e-4, help="learning rate for Adam optimizer" ) parser.add_argument( "--num-steps", type=int, default=int(2e8), help="total number of steps to \ run the environment for", ) parser.add_argument( "--batch-size", type=int, default=32, help="number of transitions to optimize \ at the same time", ) parser.add_argument( "--learning-freq", type=int, default=4, help="number of iterations between \ every optimization step", ) parser.add_argument( "--target-update-freq", type=int, default=40000, help="number of iterations between \ every target network update", ) # Bells and whistles boolean_flag(parser, "noisy", default=False, help="whether or not to use NoisyNetwork") boolean_flag( parser, "double-q", default=True, help="whether or not to use double q learning" ) boolean_flag( parser, "dueling", default=False, help="whether or not to use dueling model" ) boolean_flag( parser, "prioritized", default=False, help="whether or not to use prioritized replay buffer", ) parser.add_argument( "--prioritized-alpha", type=float, default=0.6, help="alpha parameter for prioritized replay buffer", ) parser.add_argument( "--prioritized-beta0", type=float, default=0.4, help="initial value of beta \ parameters for prioritized replay", ) parser.add_argument( "--prioritized-eps", type=float, default=1e-6, help="eps parameter for prioritized replay buffer", ) # Checkpointing parser.add_argument( "--save-dir", type=str, default=None, required=True, help="directory in which \ training state and model should be saved.", ) parser.add_argument( "--save-azure-container", type=str, default=None, help="If present, data will be saved/loaded from Azure. \ Should be in format ACCOUNT_NAME:ACCOUNT_KEY:\ CONTAINER", ) parser.add_argument( "--save-freq", type=int, default=1e6, help="save model once every time this many \ iterations are completed", ) boolean_flag( parser, "load-on-start", default=True, help="if true and model was previously saved then training \ will be resumed", ) # V: Attack Arguments # parser.add_argument( "--attack", type=str, default=None, help="Method to attack the model." ) parser.add_argument( "--attack-init", type=int, default=0, help="Iteration no. to begin attacks" ) parser.add_argument( "--attack-prob", type=float, default=0.0, help="Probability of attack at each step, \ float in range 0 - 1.0", ) return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "\"DQN experiments for Atari games\"", ")", "# Environment", "parser", ".", "add_argument", "(", "\"--env\"", ",", "type", "=", "str", ",", "default", "=", "\"Pong\"", ",", "help", "=", "\"name of the game\"", ")", "parser", ".", "add_argument", "(", "\"--seed\"", ",", "type", "=", "int", ",", "default", "=", "42", ",", "help", "=", "\"which seed to use\"", ")", "# Core DQN parameters", "parser", ".", "add_argument", "(", "\"--replay-buffer-size\"", ",", "type", "=", "int", ",", "default", "=", "int", "(", "1e6", ")", ",", "help", "=", "\"replay buffer size\"", ")", "parser", ".", "add_argument", "(", "\"--lr\"", ",", "type", "=", "float", ",", "default", "=", "1e-4", ",", "help", "=", "\"learning rate for Adam optimizer\"", ")", "parser", ".", "add_argument", "(", "\"--num-steps\"", ",", "type", "=", "int", ",", "default", "=", "int", "(", "2e8", ")", ",", "help", "=", "\"total number of steps to \\\n run the environment for\"", ",", ")", "parser", ".", "add_argument", "(", "\"--batch-size\"", ",", "type", "=", "int", ",", "default", "=", "32", ",", "help", "=", "\"number of transitions to optimize \\\n at the same time\"", ",", ")", "parser", ".", "add_argument", "(", "\"--learning-freq\"", ",", "type", "=", "int", ",", "default", "=", "4", ",", "help", "=", "\"number of iterations between \\\n every optimization step\"", ",", ")", "parser", ".", "add_argument", "(", "\"--target-update-freq\"", ",", "type", "=", "int", ",", "default", "=", "40000", ",", "help", "=", "\"number of iterations between \\\n every target network update\"", ",", ")", "# Bells and whistles", "boolean_flag", "(", "parser", ",", "\"noisy\"", ",", "default", "=", "False", ",", "help", "=", "\"whether or not to NoisyNetwork\"", ")", "boolean_flag", "(", "parser", ",", "\"double-q\"", ",", "default", "=", "True", ",", "help", "=", "\"whether or not to use double q learning\"", ")", "boolean_flag", "(", "parser", ",", "\"dueling\"", ",", "default", "=", "False", ",", "help", "=", "\"whether or not to use dueling model\"", ")", "boolean_flag", "(", "parser", ",", "\"prioritized\"", ",", "default", "=", "False", ",", "help", "=", "\"whether or not to use prioritized replay buffer\"", ",", ")", "parser", ".", "add_argument", "(", "\"--prioritized-alpha\"", ",", "type", "=", "float", ",", "default", "=", "0.6", ",", "help", "=", "\"alpha parameter for prioritized replay buffer\"", ",", ")", "parser", ".", "add_argument", "(", "\"--prioritized-beta0\"", ",", "type", "=", "float", ",", "default", "=", "0.4", ",", "help", "=", "\"initial value of beta \\\n parameters for prioritized replay\"", ",", ")", "parser", ".", "add_argument", "(", "\"--prioritized-eps\"", ",", "type", "=", "float", ",", "default", "=", "1e-6", ",", "help", "=", "\"eps parameter for prioritized replay buffer\"", ",", ")", "# Checkpointing", "parser", ".", "add_argument", "(", "\"--save-dir\"", ",", "type", "=", "str", ",", "default", "=", "None", ",", "required", "=", "True", ",", "help", "=", "\"directory in which \\\n training state and model should be saved.\"", ",", ")", "parser", ".", "add_argument", "(", "\"--save-azure-container\"", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "\"It present data will saved/loaded from Azure. 
\\\n Should be in format ACCOUNT_NAME:ACCOUNT_KEY:\\\n CONTAINER\"", ",", ")", "parser", ".", "add_argument", "(", "\"--save-freq\"", ",", "type", "=", "int", ",", "default", "=", "1e6", ",", "help", "=", "\"save model once every time this many \\\n iterations are completed\"", ",", ")", "boolean_flag", "(", "parser", ",", "\"load-on-start\"", ",", "default", "=", "True", ",", "help", "=", "\"if true and model was previously saved then training \\\n will be resumed\"", ",", ")", "# V: Attack Arguments #", "parser", ".", "add_argument", "(", "\"--attack\"", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "\"Method to attack the model.\"", ")", "parser", ".", "add_argument", "(", "\"--attack-init\"", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "\"Iteration no. to begin attacks\"", ")", "parser", ".", "add_argument", "(", "\"--attack-prob\"", ",", "type", "=", "float", ",", "default", "=", "0.0", ",", "help", "=", "\"Probability of attack at each step, \\\n float in range 0 - 1.0\"", ",", ")", "return", "parser", ".", "parse_args", "(", ")" ]
https://github.com/cleverhans-lab/cleverhans/blob/e5d00e537ce7ad6119ed5a8db1f0e9736d1f6e1d/cleverhans_v3.1.0/examples/RL-attack/train.py#L35-L154
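boolean_flag comes from the baselines utilities and is not defined in this record; the conventional implementation registers paired --name/--no-name switches. A sketch of that pattern, not necessarily the exact upstream code:

import argparse

def boolean_flag(parser, name, default=False, help=None):
    # Register --name (sets True) and --no-name (sets False) on the same dest.
    dest = name.replace("-", "_")
    parser.add_argument("--" + name, action="store_true", default=default,
                        dest=dest, help=help)
    parser.add_argument("--no-" + name, action="store_false", dest=dest)

parser = argparse.ArgumentParser()
boolean_flag(parser, "double-q", default=True, help="use double Q-learning")
print(parser.parse_args(["--no-double-q"]).double_q)  # False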
plastex/plastex
af1628719b50cf25fbe80f16a3e100d566e9bc32
plasTeX/__init__.py
python
Macro.postParse
(self, tex)
Do operations that must be done immediately after parsing arguments Required Arguments: tex -- the TeX instance containing the current context
Do operations that must be done immediately after parsing arguments
[ "Do", "operations", "that", "must", "be", "done", "immediately", "after", "parsing", "arguments" ]
def postParse(self, tex): """ Do operations that must be done immediately after parsing arguments Required Arguments: tex -- the TeX instance containing the current context """ if self.counter: try: secnumdepth = self.config['document']['sec-num-depth'] except: secnumdepth = 10 if secnumdepth >= self.level or self.level > self.ENDSECTIONS_LEVEL: self.ref = self.ownerDocument.createElement('the' + self.counter).expand(tex) self.captionName = self.ownerDocument.createElement(self.counter + 'name').expand(tex)
[ "def", "postParse", "(", "self", ",", "tex", ")", ":", "if", "self", ".", "counter", ":", "try", ":", "secnumdepth", "=", "self", ".", "config", "[", "'document'", "]", "[", "'sec-num-depth'", "]", "except", ":", "secnumdepth", "=", "10", "if", "secnumdepth", ">=", "self", ".", "level", "or", "self", ".", "level", ">", "self", ".", "ENDSECTIONS_LEVEL", ":", "self", ".", "ref", "=", "self", ".", "ownerDocument", ".", "createElement", "(", "'the'", "+", "self", ".", "counter", ")", ".", "expand", "(", "tex", ")", "self", ".", "captionName", "=", "self", ".", "ownerDocument", ".", "createElement", "(", "self", ".", "counter", "+", "'name'", ")", ".", "expand", "(", "tex", ")" ]
https://github.com/plastex/plastex/blob/af1628719b50cf25fbe80f16a3e100d566e9bc32/plasTeX/__init__.py#L570-L583
spl0k/supysonic
62bad3b9878a1d22cf040f25dab0fa28a252ba38
supysonic/config.py
python
IniConfig.__init__
(self, paths)
[]
def __init__(self, paths): super().__init__() parser = RawConfigParser() parser.read(paths) for section in parser.sections(): options = {k: self.__try_parse(v) for k, v in parser.items(section)} section = section.upper() if hasattr(self, section): getattr(self, section).update(options) else: setattr(self, section, options)
[ "def", "__init__", "(", "self", ",", "paths", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "parser", "=", "RawConfigParser", "(", ")", "parser", ".", "read", "(", "paths", ")", "for", "section", "in", "parser", ".", "sections", "(", ")", ":", "options", "=", "{", "k", ":", "self", ".", "__try_parse", "(", "v", ")", "for", "k", ",", "v", "in", "parser", ".", "items", "(", "section", ")", "}", "section", "=", "section", ".", "upper", "(", ")", "if", "hasattr", "(", "self", ",", "section", ")", ":", "getattr", "(", "self", ",", "section", ")", ".", "update", "(", "options", ")", "else", ":", "setattr", "(", "self", ",", "section", ",", "options", ")" ]
https://github.com/spl0k/supysonic/blob/62bad3b9878a1d22cf040f25dab0fa28a252ba38/supysonic/config.py#L67-L80
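Given the record above, each INI section becomes an upper-cased attribute holding a dict of parsed options. An illustrative run; the file name, the section name, and the assumption that __try_parse coerces numeric and boolean strings are mine, not supysonic's documented behavior:

# settings.ini (illustrative):
# [base]
# debug = true
# port = 5000

config = IniConfig(["settings.ini"])
print(config.BASE["port"])  # 5000, assuming __try_parse coerces numeric strings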
playframework/play1
0ecac3bc2421ae2dbec27a368bf671eda1c9cba5
python/Lib/codecs.py
python
StreamReaderWriter.__init__
(self, stream, Reader, Writer, errors='strict')
Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers.
Creates a StreamReaderWriter instance.
[ "Creates", "a", "StreamReaderWriter", "instance", "." ]
def __init__(self, stream, Reader, Writer, errors='strict'): """ Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers. """ self.stream = stream self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors
[ "def", "__init__", "(", "self", ",", "stream", ",", "Reader", ",", "Writer", ",", "errors", "=", "'strict'", ")", ":", "self", ".", "stream", "=", "stream", "self", ".", "reader", "=", "Reader", "(", "stream", ",", "errors", ")", "self", ".", "writer", "=", "Writer", "(", "stream", ",", "errors", ")", "self", ".", "errors", "=", "errors" ]
https://github.com/playframework/play1/blob/0ecac3bc2421ae2dbec27a368bf671eda1c9cba5/python/Lib/codecs.py#L666-L682
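The Reader/Writer factories are exactly what the stdlib's codecs.getreader/codecs.getwriter return, so a StreamReaderWriter can be assembled around any binary stream; a small self-contained example:

import codecs
import io

raw = io.BytesIO()
srw = codecs.StreamReaderWriter(
    raw,
    codecs.getreader("utf-8"),   # StreamReader factory
    codecs.getwriter("utf-8"),   # StreamWriter factory
    errors="strict",
)
srw.write("héllo")               # encoded on the way out
print(raw.getvalue())            # b'h\xc3\xa9llo'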
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_route.py
python
Yedit.separator
(self, inc_sep)
setter method for separator
setter method for separator
[ "setter", "method", "for", "separator" ]
def separator(self, inc_sep): ''' setter method for separator ''' self._separator = inc_sep
[ "def", "separator", "(", "self", ",", "inc_sep", ")", ":", "self", ".", "_separator", "=", "inc_sep" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/library/oc_route.py#L233-L235
vispy/vispy
26256fdc2574259dd227022fbce0767cae4e244b
vispy/gloo/gl/pyopengl2.py
python
_patch
()
Monkey-patch pyopengl to fix a bug in glBufferSubData.
Monkey-patch pyopengl to fix a bug in glBufferSubData.
[ "Monkey", "-", "patch", "pyopengl", "to", "fix", "a", "bug", "in", "glBufferSubData", "." ]
def _patch(): """Monkey-patch pyopengl to fix a bug in glBufferSubData.""" import sys from OpenGL import GL if sys.version_info > (3,): buffersubdatafunc = GL.glBufferSubData if hasattr(buffersubdatafunc, 'wrapperFunction'): buffersubdatafunc = buffersubdatafunc.wrapperFunction _m = sys.modules[buffersubdatafunc.__module__] _m.long = int # Fix missing enum try: from OpenGL.GL.VERSION import GL_2_0 GL_2_0.GL_OBJECT_SHADER_SOURCE_LENGTH = GL_2_0.GL_SHADER_SOURCE_LENGTH except Exception: pass
[ "def", "_patch", "(", ")", ":", "import", "sys", "from", "OpenGL", "import", "GL", "if", "sys", ".", "version_info", ">", "(", "3", ",", ")", ":", "buffersubdatafunc", "=", "GL", ".", "glBufferSubData", "if", "hasattr", "(", "buffersubdatafunc", ",", "'wrapperFunction'", ")", ":", "buffersubdatafunc", "=", "buffersubdatafunc", ".", "wrapperFunction", "_m", "=", "sys", ".", "modules", "[", "buffersubdatafunc", ".", "__module__", "]", "_m", ".", "long", "=", "int", "# Fix missing enum", "try", ":", "from", "OpenGL", ".", "GL", ".", "VERSION", "import", "GL_2_0", "GL_2_0", ".", "GL_OBJECT_SHADER_SOURCE_LENGTH", "=", "GL_2_0", ".", "GL_SHADER_SOURCE_LENGTH", "except", "Exception", ":", "pass" ]
https://github.com/vispy/vispy/blob/26256fdc2574259dd227022fbce0767cae4e244b/vispy/gloo/gl/pyopengl2.py#L18-L34
rucio/rucio
6d0d358e04f5431f0b9a98ae40f31af0ddff4833
lib/rucio/core/permission/generic_multi_vo.py
python
perm_approve_rule
(issuer, kwargs)
return False
Checks if an issuer can approve a replication rule. :param issuer: Account identifier which issues the command. :param kwargs: List of arguments for the action. :returns: True if account is allowed to make the API call, otherwise False
Checks if an issuer can approve a replication rule.
[ "Checks", "if", "an", "issuer", "can", "approve", "a", "replication", "rule", "." ]
def perm_approve_rule(issuer, kwargs): """ Checks if an issuer can approve a replication rule. :param issuer: Account identifier which issues the command. :param kwargs: List of arguments for the action. :returns: True if account is allowed to make the API call, otherwise False """ if _is_root(issuer) or has_account_attribute(account=issuer, key='admin'): return True return False
[ "def", "perm_approve_rule", "(", "issuer", ",", "kwargs", ")", ":", "if", "_is_root", "(", "issuer", ")", "or", "has_account_attribute", "(", "account", "=", "issuer", ",", "key", "=", "'admin'", ")", ":", "return", "True", "return", "False" ]
https://github.com/rucio/rucio/blob/6d0d358e04f5431f0b9a98ae40f31af0ddff4833/lib/rucio/core/permission/generic_multi_vo.py#L478-L488
DataBiosphere/toil
2e148eee2114ece8dcc3ec8a83f36333266ece0d
src/toil/lib/retry.py
python
get_error_message
(e: Exception)
Get the error message string from a Boto 2 or 3 error, or compatible types. Note that error message conditions also check more than this; this function does not fall back to the traceback for incompatible types.
Get the error message string from a Boto 2 or 3 error, or compatible types.
[ "Get", "the", "error", "message", "string", "from", "a", "Boto", "2", "or", "3", "error", "or", "compatible", "types", "." ]
def get_error_message(e: Exception) -> str: """ Get the error message string from a Boto 2 or 3 error, or compatible types. Note that error message conditions also check more than this; this function does not fall back to the traceback for incompatible types. """ if hasattr(e, 'error_message') and isinstance(e.error_message, str): # A Boto 2 error return e.error_message elif hasattr(e, 'response') and hasattr(e.response, 'get'): # A Boto 3 error message = e.response.get('Error', {}).get('Message') if isinstance(message, str): return message else: return '' else: return ''
[ "def", "get_error_message", "(", "e", ":", "Exception", ")", "->", "str", ":", "if", "hasattr", "(", "e", ",", "'error_message'", ")", "and", "isinstance", "(", "e", ".", "error_message", ",", "str", ")", ":", "# A Boto 2 error", "return", "e", ".", "error_message", "elif", "hasattr", "(", "e", ",", "'response'", ")", "and", "hasattr", "(", "e", ".", "response", ",", "'get'", ")", ":", "# A Boto 3 error", "message", "=", "e", ".", "response", ".", "get", "(", "'Error'", ",", "{", "}", ")", ".", "get", "(", "'Message'", ")", "if", "isinstance", "(", "message", ",", "str", ")", ":", "return", "message", "else", ":", "return", "''", "else", ":", "return", "''" ]
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/lib/retry.py#L349-L367
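Because get_error_message only duck-types its argument, it can be exercised without boto installed; the classes below are test doubles mimicking the Boto 2 and Boto 3 shapes, not real boto types:

class Boto2Error(Exception):
    def __init__(self, msg):
        self.error_message = msg  # Boto 2 exposes .error_message

class Boto3Error(Exception):
    def __init__(self, msg):
        self.response = {"Error": {"Message": msg}}  # Boto 3 response shape

assert get_error_message(Boto2Error("throttled")) == "throttled"
assert get_error_message(Boto3Error("denied")) == "denied"
assert get_error_message(ValueError("plain")) == ""  # no traceback fallback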
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/setuptools/setuptools/_vendor/packaging/version.py
python
_BaseVersion.__lt__
(self, other)
return self._compare(other, lambda s, o: s < o)
[]
def __lt__(self, other): return self._compare(other, lambda s, o: s < o)
[ "def", "__lt__", "(", "self", ",", "other", ")", ":", "return", "self", ".", "_compare", "(", "other", ",", "lambda", "s", ",", "o", ":", "s", "<", "o", ")" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/setuptools/_vendor/packaging/version.py#L43-L44
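The _compare idiom pairs each rich-comparison dunder with a small lambda over a precomputed sort key; a self-contained sketch of the pattern, deliberately simplified relative to packaging's real Version:

class _BaseVersion:
    # Subclasses populate self._key with a tuple that sorts correctly.
    def _compare(self, other, method):
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return method(self._key, other._key)

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

class SimpleVersion(_BaseVersion):
    def __init__(self, text):
        self._key = tuple(int(part) for part in text.split("."))

assert SimpleVersion("1.9") < SimpleVersion("1.10")  # numeric, not lexicographic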
otsaloma/gaupol
6dec7826654d223c71a8d3279dcd967e95c46714
gaupol/dialogs/spell_check.py
python
SpellCheckDialog._on_join_back_button_clicked
(self, *args)
Join the current word with the previous.
Join the current word with the previous.
[ "Join", "the", "current", "word", "with", "the", "previous", "." ]
def _on_join_back_button_clicked(self, *args): """Join the current word with the previous.""" self._navigator.join_with_previous() self._proceed()
[ "def", "_on_join_back_button_clicked", "(", "self", ",", "*", "args", ")", ":", "self", ".", "_navigator", ".", "join_with_previous", "(", ")", "self", ".", "_proceed", "(", ")" ]
https://github.com/otsaloma/gaupol/blob/6dec7826654d223c71a8d3279dcd967e95c46714/gaupol/dialogs/spell_check.py#L166-L169
deanishe/zothero
5b057ef080ee730d82d5dd15e064d2a4730c2b11
src/lib/workflow/workflow.py
python
Workflow.open_cachedir
(self)
Open the workflow's :attr:`cachedir` in Finder.
Open the workflow's :attr:`cachedir` in Finder.
[ "Open", "the", "workflow", "s", ":", "attr", ":", "cachedir", "in", "Finder", "." ]
def open_cachedir(self): """Open the workflow's :attr:`cachedir` in Finder.""" subprocess.call(['open', self.cachedir])
[ "def", "open_cachedir", "(", "self", ")", ":", "subprocess", ".", "call", "(", "[", "'open'", ",", "self", ".", "cachedir", "]", ")" ]
https://github.com/deanishe/zothero/blob/5b057ef080ee730d82d5dd15e064d2a4730c2b11/src/lib/workflow/workflow.py#L2644-L2646
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/python27/1.0/lib/difflib.py
python
SequenceMatcher.ratio
(self)
return _calculate_ratio(matches, len(self.a) + len(self.b))
Return a measure of the sequences' similarity (float in [0,1]). Where T is the total number of elements in both sequences, and M is the number of matches, this is 2.0*M / T. Note that this is 1 if the sequences are identical, and 0 if they have nothing in common. .ratio() is expensive to compute if you haven't already computed .get_matching_blocks() or .get_opcodes(), in which case you may want to try .quick_ratio() or .real_quick_ratio() first to get an upper bound. >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() 0.75 >>> s.quick_ratio() 0.75 >>> s.real_quick_ratio() 1.0
Return a measure of the sequences' similarity (float in [0,1]).
[ "Return", "a", "measure", "of", "the", "sequences", "similarity", "(", "float", "in", "[", "0", "1", "]", ")", "." ]
def ratio(self): """Return a measure of the sequences' similarity (float in [0,1]). Where T is the total number of elements in both sequences, and M is the number of matches, this is 2.0*M / T. Note that this is 1 if the sequences are identical, and 0 if they have nothing in common. .ratio() is expensive to compute if you haven't already computed .get_matching_blocks() or .get_opcodes(), in which case you may want to try .quick_ratio() or .real_quick_ratio() first to get an upper bound. >>> s = SequenceMatcher(None, "abcd", "bcde") >>> s.ratio() 0.75 >>> s.quick_ratio() 0.75 >>> s.real_quick_ratio() 1.0 """ matches = reduce(lambda sum, triple: sum + triple[-1], self.get_matching_blocks(), 0) return _calculate_ratio(matches, len(self.a) + len(self.b))
[ "def", "ratio", "(", "self", ")", ":", "matches", "=", "reduce", "(", "lambda", "sum", ",", "triple", ":", "sum", "+", "triple", "[", "-", "1", "]", ",", "self", ".", "get_matching_blocks", "(", ")", ",", "0", ")", "return", "_calculate_ratio", "(", "matches", ",", "len", "(", "self", ".", "a", ")", "+", "len", "(", "self", ".", "b", ")", ")" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/difflib.py#L634-L658
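Since this is the stdlib difflib, the docstring example runs as-is; the practical point is the chain of cheap upper bounds: filter with real_quick_ratio()/quick_ratio() before paying for ratio():

from difflib import SequenceMatcher

def similar(a, b, cutoff=0.8):
    s = SequenceMatcher(None, a, b)
    # Each check is an upper bound on the next, cheapest first.
    return (s.real_quick_ratio() >= cutoff
            and s.quick_ratio() >= cutoff
            and s.ratio() >= cutoff)

print(similar("abcd", "bcde", cutoff=0.7))  # True: ratio() is 0.75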
aaronportnoy/toolbag
2d39457a7617b2f334d203d8c8cf88a5a25ef1fa
toolbag/agent/dbg/envi/registers.py
python
RegisterContext.setRegisterSnap
(self, snap)
Use this to bulk restore the register state. NOTE: This may only be used under the assumption that the RegisterContext has been initialized the same way (like context switches in tracers, or emulation snaps)
Use this to bulk restore the register state.
[ "Use", "this", "to", "bulk", "restore", "the", "register", "state", "." ]
def setRegisterSnap(self, snap): """ Use this to bulk restore the register state. NOTE: This may only be used under the assumption that the RegisterContext has been initialized the same way (like context switches in tracers, or emulation snaps) """ self._rctx_vals = list(snap)
[ "def", "setRegisterSnap", "(", "self", ",", "snap", ")", ":", "self", ".", "_rctx_vals", "=", "list", "(", "snap", ")" ]
https://github.com/aaronportnoy/toolbag/blob/2d39457a7617b2f334d203d8c8cf88a5a25ef1fa/toolbag/agent/dbg/envi/registers.py#L30-L38
bcbio/bcbio-nextgen
c80f9b6b1be3267d1f981b7035e3b72441d258f2
bcbio/cwl/workflow.py
python
_get_variable
(vid, variables)
Retrieve an input variable from our existing pool of options.
Retrieve an input variable from our existing pool of options.
[ "Retrieve", "an", "input", "variable", "from", "our", "existing", "pool", "of", "options", "." ]
def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
[ "def", "_get_variable", "(", "vid", ",", "variables", ")", ":", "if", "isinstance", "(", "vid", ",", "six", ".", "string_types", ")", ":", "vid", "=", "get_base_id", "(", "vid", ")", "else", ":", "vid", "=", "_get_string_vid", "(", "vid", ")", "for", "v", "in", "variables", ":", "if", "vid", "==", "get_base_id", "(", "v", "[", "\"id\"", "]", ")", ":", "return", "copy", ".", "deepcopy", "(", "v", ")", "raise", "ValueError", "(", "\"Did not find variable %s in \\n%s\"", "%", "(", "vid", ",", "pprint", ".", "pformat", "(", "variables", ")", ")", ")" ]
https://github.com/bcbio/bcbio-nextgen/blob/c80f9b6b1be3267d1f981b7035e3b72441d258f2/bcbio/cwl/workflow.py#L280-L290
lsbardel/python-stdnet
78db5320bdedc3f28c5e4f38cda13a4469e35db7
stdnet/utils/fallbacks/_collections.py
python
OrderedDict.clear
(self)
[]
def clear(self): self.__end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.__map = {} # key --> [key, prev, next] dict.clear(self)
[ "def", "clear", "(", "self", ")", ":", "self", ".", "__end", "=", "end", "=", "[", "]", "end", "+=", "[", "None", ",", "end", ",", "end", "]", "# sentinel node for doubly linked list", "self", ".", "__map", "=", "{", "}", "# key --> [key, prev, next]", "dict", ".", "clear", "(", "self", ")" ]
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/utils/fallbacks/_collections.py#L23-L27
JetBrains/python-skeletons
95ad24b666e475998e5d1cc02ed53a2188036167
__builtin__.py
python
set.intersection_update
(self, *other)
Update a set with the intersection of itself and other collections. :type other: collections.Iterable[T] :rtype: None
Update a set with the intersection of itself and other collections.
[ "Update", "a", "set", "with", "the", "intersection", "of", "itself", "and", "other", "collections", "." ]
def intersection_update(self, *other): """Update a set with the intersection of itself and other collections. :type other: collections.Iterable[T] :rtype: None """ pass
[ "def", "intersection_update", "(", "self", ",", "*", "other", ")", ":", "pass" ]
https://github.com/JetBrains/python-skeletons/blob/95ad24b666e475998e5d1cc02ed53a2188036167/__builtin__.py#L2327-L2333
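The skeleton above documents only the signature; the real built-in accepts any number of iterables and mutates in place:

s = {1, 2, 3, 4}
s.intersection_update([2, 3, 9], (3, 2, 1))  # keep elements common to all
print(s)                          # {2, 3}
print(s.intersection_update([]))  # None: mutators return None, not the set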
onaio/onadata
89ad16744e8f247fb748219476f6ac295869a95f
onadata/libs/utils/export_builder.py
python
ExportBuilder.to_zipped_sav
(self, path, data, *args, **kwargs)
[]
def to_zipped_sav(self, path, data, *args, **kwargs): total_records = kwargs.get('total_records') def write_row(row, csv_writer, fields): # replace character for osm fields fields = [field.replace(':', '_') for field in fields] sav_writer.writerow( [encode_if_str(row, field, sav_writer=sav_writer) for field in fields]) sav_defs = {} # write headers for section in self.sections: sav_options = self._get_sav_options(section['elements']) sav_file = NamedTemporaryFile(suffix='.sav') sav_writer = SavWriter(sav_file.name, ioLocale=str('en_US.UTF-8'), **sav_options) sav_defs[section['name']] = { 'sav_file': sav_file, 'sav_writer': sav_writer} media_xpaths = [] if not self.INCLUDE_IMAGES \ else self.dd.get_media_survey_xpaths() index = 1 indices = {} survey_name = self.survey.name for i, d in enumerate(data, start=1): # decode mongo section names joined_export = dict_to_joined_export(d, index, indices, survey_name, self.survey, d, media_xpaths) output = decode_mongo_encoded_section_names(joined_export) # attach meta fields (index, parent_index, parent_table) # output has keys for every section if survey_name not in output: output[survey_name] = {} output[survey_name][INDEX] = index output[survey_name][PARENT_INDEX] = -1 for section in self.sections: # get data for this section and write to csv section_name = section['name'] sav_def = sav_defs[section_name] fields = [ element['xpath'] for element in section['elements']] sav_writer = sav_def['sav_writer'] row = output.get(section_name, None) if isinstance(row, dict): write_row( self.pre_process_row(row, section), sav_writer, fields) elif isinstance(row, list): for child_row in row: write_row( self.pre_process_row(child_row, section), sav_writer, fields) index += 1 track_task_progress(i, total_records) for (section_name, sav_def) in iteritems(sav_defs): sav_def['sav_writer'].closeSavFile( sav_def['sav_writer'].fh, mode='wb') # write zipfile with ZipFile(path, 'w', ZIP_DEFLATED, allowZip64=True) as zip_file: for (section_name, sav_def) in iteritems(sav_defs): sav_file = sav_def['sav_file'] sav_file.seek(0) zip_file.write( sav_file.name, '_'.join(section_name.split('/')) + '.sav') # close files when we are done for (section_name, sav_def) in iteritems(sav_defs): sav_def['sav_file'].close()
[ "def", "to_zipped_sav", "(", "self", ",", "path", ",", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "total_records", "=", "kwargs", ".", "get", "(", "'total_records'", ")", "def", "write_row", "(", "row", ",", "csv_writer", ",", "fields", ")", ":", "# replace character for osm fields", "fields", "=", "[", "field", ".", "replace", "(", "':'", ",", "'_'", ")", "for", "field", "in", "fields", "]", "sav_writer", ".", "writerow", "(", "[", "encode_if_str", "(", "row", ",", "field", ",", "sav_writer", "=", "sav_writer", ")", "for", "field", "in", "fields", "]", ")", "sav_defs", "=", "{", "}", "# write headers", "for", "section", "in", "self", ".", "sections", ":", "sav_options", "=", "self", ".", "_get_sav_options", "(", "section", "[", "'elements'", "]", ")", "sav_file", "=", "NamedTemporaryFile", "(", "suffix", "=", "'.sav'", ")", "sav_writer", "=", "SavWriter", "(", "sav_file", ".", "name", ",", "ioLocale", "=", "str", "(", "'en_US.UTF-8'", ")", ",", "*", "*", "sav_options", ")", "sav_defs", "[", "section", "[", "'name'", "]", "]", "=", "{", "'sav_file'", ":", "sav_file", ",", "'sav_writer'", ":", "sav_writer", "}", "media_xpaths", "=", "[", "]", "if", "not", "self", ".", "INCLUDE_IMAGES", "else", "self", ".", "dd", ".", "get_media_survey_xpaths", "(", ")", "index", "=", "1", "indices", "=", "{", "}", "survey_name", "=", "self", ".", "survey", ".", "name", "for", "i", ",", "d", "in", "enumerate", "(", "data", ",", "start", "=", "1", ")", ":", "# decode mongo section names", "joined_export", "=", "dict_to_joined_export", "(", "d", ",", "index", ",", "indices", ",", "survey_name", ",", "self", ".", "survey", ",", "d", ",", "media_xpaths", ")", "output", "=", "decode_mongo_encoded_section_names", "(", "joined_export", ")", "# attach meta fields (index, parent_index, parent_table)", "# output has keys for every section", "if", "survey_name", "not", "in", "output", ":", "output", "[", "survey_name", "]", "=", "{", "}", "output", "[", "survey_name", "]", "[", "INDEX", "]", "=", "index", "output", "[", "survey_name", "]", "[", "PARENT_INDEX", "]", "=", "-", "1", "for", "section", "in", "self", ".", "sections", ":", "# get data for this section and write to csv", "section_name", "=", "section", "[", "'name'", "]", "sav_def", "=", "sav_defs", "[", "section_name", "]", "fields", "=", "[", "element", "[", "'xpath'", "]", "for", "element", "in", "section", "[", "'elements'", "]", "]", "sav_writer", "=", "sav_def", "[", "'sav_writer'", "]", "row", "=", "output", ".", "get", "(", "section_name", ",", "None", ")", "if", "isinstance", "(", "row", ",", "dict", ")", ":", "write_row", "(", "self", ".", "pre_process_row", "(", "row", ",", "section", ")", ",", "sav_writer", ",", "fields", ")", "elif", "isinstance", "(", "row", ",", "list", ")", ":", "for", "child_row", "in", "row", ":", "write_row", "(", "self", ".", "pre_process_row", "(", "child_row", ",", "section", ")", ",", "sav_writer", ",", "fields", ")", "index", "+=", "1", "track_task_progress", "(", "i", ",", "total_records", ")", "for", "(", "section_name", ",", "sav_def", ")", "in", "iteritems", "(", "sav_defs", ")", ":", "sav_def", "[", "'sav_writer'", "]", ".", "closeSavFile", "(", "sav_def", "[", "'sav_writer'", "]", ".", "fh", ",", "mode", "=", "'wb'", ")", "# write zipfile", "with", "ZipFile", "(", "path", ",", "'w'", ",", "ZIP_DEFLATED", ",", "allowZip64", "=", "True", ")", "as", "zip_file", ":", "for", "(", "section_name", ",", "sav_def", ")", "in", "iteritems", "(", "sav_defs", ")", ":", "sav_file", "=", "sav_def", "[", 
"'sav_file'", "]", "sav_file", ".", "seek", "(", "0", ")", "zip_file", ".", "write", "(", "sav_file", ".", "name", ",", "'_'", ".", "join", "(", "section_name", ".", "split", "(", "'/'", ")", ")", "+", "'.sav'", ")", "# close files when we are done", "for", "(", "section_name", ",", "sav_def", ")", "in", "iteritems", "(", "sav_defs", ")", ":", "sav_def", "[", "'sav_file'", "]", ".", "close", "(", ")" ]
https://github.com/onaio/onadata/blob/89ad16744e8f247fb748219476f6ac295869a95f/onadata/libs/utils/export_builder.py#L1192-L1267
VirtueSecurity/aws-extender
d123b7e1a845847709ba3a481f11996bddc68a1c
BappModules/boto/ec2/elb/loadbalancer.py
python
LoadBalancer.disable_cross_zone_load_balancing
(self)
return success
Turns off CrossZone Load Balancing for this ELB. :rtype: bool :return: True if successful, False if not.
Turns off CrossZone Load Balancing for this ELB.
[ "Turns", "off", "CrossZone", "Load", "Balancing", "for", "this", "ELB", "." ]
def disable_cross_zone_load_balancing(self): """ Turns off CrossZone Load Balancing for this ELB. :rtype: bool :return: True if successful, False if not. """ success = self.connection.modify_lb_attribute( self.name, 'crossZoneLoadBalancing', False) if success and self._attributes: self._attributes.cross_zone_load_balancing.enabled = False return success
[ "def", "disable_cross_zone_load_balancing", "(", "self", ")", ":", "success", "=", "self", ".", "connection", ".", "modify_lb_attribute", "(", "self", ".", "name", ",", "'crossZoneLoadBalancing'", ",", "False", ")", "if", "success", "and", "self", ".", "_attributes", ":", "self", ".", "_attributes", ".", "cross_zone_load_balancing", ".", "enabled", "=", "False", "return", "success" ]
https://github.com/VirtueSecurity/aws-extender/blob/d123b7e1a845847709ba3a481f11996bddc68a1c/BappModules/boto/ec2/elb/loadbalancer.py#L250-L261
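A usage sketch against the long-deprecated boto 2 API; the region and load-balancer name are placeholders, and this assumes working AWS credentials:

import boto.ec2.elb

conn = boto.ec2.elb.connect_to_region("us-east-1")
lb = conn.get_all_load_balancers(load_balancer_names=["my-elb"])[0]
if lb.disable_cross_zone_load_balancing():
    print("cross-zone load balancing disabled for", lb.name)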
OpenMDAO/OpenMDAO-Framework
f2e37b7de3edeaaeb2d251b375917adec059db9b
openmdao.main/src/openmdao/main/interfaces.py
python
IContainer.contains
(path)
Return True if the child specified by the given dotted path name is contained in this Container.
Return True if the child specified by the given dotted path name is contained in this Container.
[ "Return", "True", "if", "the", "child", "specified", "by", "the", "given", "dotted", "path", "name", "is", "contained", "in", "this", "Container", "." ]
def contains(path): """Return True if the child specified by the given dotted path name is contained in this Container. """
[ "def", "contains", "(", "path", ")", ":" ]
https://github.com/OpenMDAO/OpenMDAO-Framework/blob/f2e37b7de3edeaaeb2d251b375917adec059db9b/openmdao.main/src/openmdao/main/interfaces.py#L50-L53
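A toy class satisfying the interface, resolving one dotted-path segment at a time (purely illustrative; OpenMDAO's real Container is far richer):

class Container:
    def __init__(self, **children):
        self._children = children

    def contains(self, path):
        obj = self
        for name in path.split("."):
            kids = getattr(obj, "_children", {})
            if name not in kids:
                return False
            obj = kids[name]
        return True

root = Container(engine=Container(pump=Container()))
assert root.contains("engine.pump")
assert not root.contains("engine.fan")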
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/rfc822.py
python
Message.__setitem__
(self, name, value)
Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was.
Set the value of a header.
[ "Set", "the", "value", "of", "a", "header", "." ]
def __setitem__(self, name, value): """Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was. """ del self[name] # Won't fail if it doesn't exist self.dict[name.lower()] = value text = name + ": " + value for line in text.split("\n"): self.headers.append(line + "\n")
[ "def", "__setitem__", "(", "self", ",", "name", ",", "value", ")", ":", "del", "self", "[", "name", "]", "# Won't fail if it doesn't exist", "self", ".", "dict", "[", "name", ".", "lower", "(", ")", "]", "=", "value", "text", "=", "name", "+", "\": \"", "+", "value", "for", "line", "in", "text", ".", "split", "(", "\"\\n\"", ")", ":", "self", ".", "headers", ".", "append", "(", "line", "+", "\"\\n\"", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/rfc822.py#L390-L401
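rfc822 is Python 2-only; its modern stdlib counterpart email.message.Message makes the opposite choice (its __setitem__ appends without deleting), so the delete-then-set shown above must be done by the caller:

from email.message import Message

msg = Message()
msg["X-Tag"] = "one"
msg["X-Tag"] = "two"           # appends; both values remain
print(msg.get_all("X-Tag"))    # ['one', 'two']

del msg["X-Tag"]               # rfc822-style replacement, done manually
msg["X-Tag"] = "three"
print(msg.get_all("X-Tag"))    # ['three']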
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/utils/jinja.py
python
ensure_sequence_filter
(data)
return data
Ensure sequenced data. **sequence** ensure that parsed data is a sequence .. code-block:: jinja {% set my_string = "foo" %} {% set my_list = ["bar", ] %} {% set my_dict = {"baz": "qux"} %} {{ my_string|sequence|first }} {{ my_list|sequence|first }} {{ my_dict|sequence|first }} will be rendered as: .. code-block:: yaml foo bar baz
Ensure sequenced data.
[ "Ensure", "sequenced", "data", "." ]
def ensure_sequence_filter(data): """ Ensure sequenced data. **sequence** ensure that parsed data is a sequence .. code-block:: jinja {% set my_string = "foo" %} {% set my_list = ["bar", ] %} {% set my_dict = {"baz": "qux"} %} {{ my_string|sequence|first }} {{ my_list|sequence|first }} {{ my_dict|sequence|first }} will be rendered as: .. code-block:: yaml foo bar baz """ if not isinstance(data, (list, tuple, set, dict)): return [data] return data
[ "def", "ensure_sequence_filter", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ",", "set", ",", "dict", ")", ")", ":", "return", "[", "data", "]", "return", "data" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/utils/jinja.py#L287-L316
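Outside Jinja the filter is an ordinary function; note that a dict is deliberately treated as already sequenced (iteration yields its keys), which is why baz comes out first in the docstring example:

assert ensure_sequence_filter("foo") == ["foo"]       # scalar gets wrapped
assert ensure_sequence_filter(["bar"]) == ["bar"]     # sequences pass through
assert next(iter(ensure_sequence_filter({"baz": "qux"}))) == "baz"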
modoboa/modoboa
9065b7a5679fee149fc6f6f0e1760699c194cf89
modoboa/core/apps.py
python
load_core_settings
()
Load core settings. This function must be manually called (see :file:`urls.py`) in order to load base settings.
Load core settings.
[ "Load", "core", "settings", "." ]
def load_core_settings(): """Load core settings. This function must be manually called (see :file:`urls.py`) in order to load base settings. """ from modoboa.parameters import tools as param_tools from . import app_settings from .api.v2 import serializers param_tools.registry.add( "global", app_settings.GeneralParametersForm, ugettext_lazy("General")) param_tools.registry.add2( "global", "core", ugettext_lazy("General"), app_settings.GLOBAL_PARAMETERS_STRUCT, serializers.CoreGlobalParametersSerializer)
[ "def", "load_core_settings", "(", ")", ":", "from", "modoboa", ".", "parameters", "import", "tools", "as", "param_tools", "from", ".", "import", "app_settings", "from", ".", "api", ".", "v2", "import", "serializers", "param_tools", ".", "registry", ".", "add", "(", "\"global\"", ",", "app_settings", ".", "GeneralParametersForm", ",", "ugettext_lazy", "(", "\"General\"", ")", ")", "param_tools", ".", "registry", ".", "add2", "(", "\"global\"", ",", "\"core\"", ",", "ugettext_lazy", "(", "\"General\"", ")", ",", "app_settings", ".", "GLOBAL_PARAMETERS_STRUCT", ",", "serializers", ".", "CoreGlobalParametersSerializer", ")" ]
https://github.com/modoboa/modoboa/blob/9065b7a5679fee149fc6f6f0e1760699c194cf89/modoboa/core/apps.py#L8-L23
django-oscar/django-oscar-accounts
8a6dc3b42306979779f048b4d3ed0a9fd4a2f794
src/oscar_accounts/core.py
python
redemptions_account
()
return Account.objects.get(name=names.REDEMPTIONS)
[]
def redemptions_account(): return Account.objects.get(name=names.REDEMPTIONS)
[ "def", "redemptions_account", "(", ")", ":", "return", "Account", ".", "objects", ".", "get", "(", "name", "=", "names", ".", "REDEMPTIONS", ")" ]
https://github.com/django-oscar/django-oscar-accounts/blob/8a6dc3b42306979779f048b4d3ed0a9fd4a2f794/src/oscar_accounts/core.py#L8-L9
python/mypy
17850b3bd77ae9efb5d21f656c4e4e05ac48d894
mypy/memprofile.py
python
find_recursive_objects
(objs: List[object])
Find additional objects referenced by objs and append them to objs. We use this since gc.get_objects() does not return objects without pointers in them such as strings.
Find additional objects referenced by objs and append them to objs.
[ "Find", "additional", "objects", "referenced", "by", "objs", "and", "append", "them", "to", "objs", "." ]
def find_recursive_objects(objs: List[object]) -> None: """Find additional objects referenced by objs and append them to objs. We use this since gc.get_objects() does not return objects without pointers in them such as strings. """ seen = set(id(o) for o in objs) def visit(o: object) -> None: if id(o) not in seen: objs.append(o) seen.add(id(o)) for obj in objs[:]: if type(obj) is FakeInfo: # Processing these would cause a crash. continue if type(obj) in (dict, defaultdict): for key, val in cast(Dict[object, object], obj).items(): visit(key) visit(val) if type(obj) in (list, tuple, set): for x in cast(Iterable[object], obj): visit(x) if hasattr(obj, '__slots__'): for base in type.mro(type(obj)): for slot in getattr(base, '__slots__', ()): if hasattr(obj, slot): visit(getattr(obj, slot))
[ "def", "find_recursive_objects", "(", "objs", ":", "List", "[", "object", "]", ")", "->", "None", ":", "seen", "=", "set", "(", "id", "(", "o", ")", "for", "o", "in", "objs", ")", "def", "visit", "(", "o", ":", "object", ")", "->", "None", ":", "if", "id", "(", "o", ")", "not", "in", "seen", ":", "objs", ".", "append", "(", "o", ")", "seen", ".", "add", "(", "id", "(", "o", ")", ")", "for", "obj", "in", "objs", "[", ":", "]", ":", "if", "type", "(", "obj", ")", "is", "FakeInfo", ":", "# Processing these would cause a crash.", "continue", "if", "type", "(", "obj", ")", "in", "(", "dict", ",", "defaultdict", ")", ":", "for", "key", ",", "val", "in", "cast", "(", "Dict", "[", "object", ",", "object", "]", ",", "obj", ")", ".", "items", "(", ")", ":", "visit", "(", "key", ")", "visit", "(", "val", ")", "if", "type", "(", "obj", ")", "in", "(", "list", ",", "tuple", ",", "set", ")", ":", "for", "x", "in", "cast", "(", "Iterable", "[", "object", "]", ",", "obj", ")", ":", "visit", "(", "x", ")", "if", "hasattr", "(", "obj", ",", "'__slots__'", ")", ":", "for", "base", "in", "type", ".", "mro", "(", "type", "(", "obj", ")", ")", ":", "for", "slot", "in", "getattr", "(", "base", ",", "'__slots__'", ",", "(", ")", ")", ":", "if", "hasattr", "(", "obj", ",", "slot", ")", ":", "visit", "(", "getattr", "(", "obj", ",", "slot", ")", ")" ]
https://github.com/python/mypy/blob/17850b3bd77ae9efb5d21f656c4e4e05ac48d894/mypy/memprofile.py#L91-L119
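The motivation, that gc.get_objects() skips atomic objects such as strings, is easy to confirm in CPython, and is why the helper walks containers and __slots__ itself:

import gc

class Holder:
    __slots__ = ("name",)
    def __init__(self, name):
        self.name = name

h = Holder("only reachable through a slot")
tracked = {id(o) for o in gc.get_objects()}
print(id(h) in tracked)       # True: class instances are gc-tracked
print(id(h.name) in tracked)  # False (CPython): plain strings are not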
openedx/edx-platform
68dd185a0ab45862a2a61e0f803d7e03d2be71b5
common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py
python
Tagger.tag
(self, **kwargs)
Add tags to the timer. Arguments: **kwargs: Each keyword is treated as a tag name, and the value of the argument is the tag value.
Add tags to the timer.
[ "Add", "tags", "to", "the", "timer", "." ]
def tag(self, **kwargs): """ Add tags to the timer. Arguments: **kwargs: Each keyword is treated as a tag name, and the value of the argument is the tag value. """ self.added_tags.extend(list(kwargs.items()))
[ "def", "tag", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "added_tags", ".", "extend", "(", "list", "(", "kwargs", ".", "items", "(", ")", ")", ")" ]
https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py#L74-L82
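added_tags ends up as a flat list of (name, value) pairs across calls; a minimal stand-in shows the accumulation (a sketch, not the full Tagger class from the record's module):

class Tagger:
    def __init__(self):
        self.added_tags = []

    def tag(self, **kwargs):
        self.added_tags.extend(list(kwargs.items()))

t = Tagger()
t.tag(course="demo", size=42)
t.tag(result="hit")
print(t.added_tags)  # [('course', 'demo'), ('size', 42), ('result', 'hit')]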
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/customtreectrl.py
python
GenericTreeItem.SetHeight
(self, h)
Sets the item's height. Used internally. :param integer `h`: an integer specifying the item's height, in pixels.
Sets the item's height. Used internally.
[ "Sets", "the", "item", "s", "height", ".", "Used", "internally", "." ]
def SetHeight(self, h): """ Sets the item's height. Used internally. :param integer `h`: an integer specifying the item's height, in pixels. """ self._height = h
[ "def", "SetHeight", "(", "self", ",", "h", ")", ":", "self", ".", "_height", "=", "h" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/customtreectrl.py#L1972-L1979
privacyidea/privacyidea
9490c12ddbf77a34ac935b082d09eb583dfafa2c
privacyidea/lib/eventhandler/base.py
python
BaseEventHandler.check_condition
(self, options)
return True
Check if all conditions are met and if the action should be executed. If the conditions are met, we return "True" :return: True
Check if all conditions are met and if the action should be executed. If the conditions are met, we return "True" :return: True
[ "Check", "if", "all", "conditions", "are", "met", "and", "if", "the", "action", "should", "be", "executed", ".", "If", "the", "conditions", "are", "met", "we", "return", "True", ":", "return", ":", "True" ]
def check_condition(self, options): """ Check if all conditions are met and if the action should be executed. If the conditions are met, we return "True" :return: True """ g = options.get("g") request = options.get("request") response = options.get("response") e_handler_def = options.get("handler_def") if not e_handler_def: # options is the handler definition return True # conditions can be corresponding to the property conditions conditions = e_handler_def.get("conditions") content = self._get_response_content(response) user = self._get_tokenowner(request) serial = request.all_data.get("serial") or content.get("detail", {}).get("serial") tokenrealms = [] tokenresolvers = [] tokentype = None token_obj = None if serial: # We have determined the serial number from the request. token_obj_list = get_tokens(serial=serial) elif user: # We have to determine the token via the user object. But only if # the user has only one token token_obj_list = get_tokens(user=user) else: token_obj_list = [] if len(token_obj_list) == 1: # There is a token involved, so we determine it's resolvers and realms token_obj = token_obj_list[0] tokenrealms = token_obj.get_realms() tokentype = token_obj.get_tokentype() all_realms = get_realms() for tokenrealm in tokenrealms: resolvers = all_realms.get(tokenrealm, {}).get("resolver", {}) tokenresolvers.extend([r.get("name") for r in resolvers]) tokenresolvers = list(set(tokenresolvers)) if CONDITION.CLIENT_IP in conditions: if g and g.client_ip: ip_policy = [ip.strip() for ip in conditions.get(CONDITION.CLIENT_IP).split(",")] found, excluded = check_ip_in_policy(g.client_ip, ip_policy) if not found or excluded: return False if CONDITION.REALM in conditions: if user.realm != conditions.get(CONDITION.REALM): return False if CONDITION.RESOLVER in conditions: if user.resolver != conditions.get(CONDITION.RESOLVER): return False if "logged_in_user" in conditions: # Determine the role of the user try: logged_in_user = g.logged_in_user user_role = logged_in_user.get("role") except Exception: # A non-logged-in-user is a User, not an admin user_role = ROLE.USER if user_role != conditions.get("logged_in_user"): return False if CONDITION.RESULT_VALUE in conditions: condition_value = conditions.get(CONDITION.RESULT_VALUE) result_value = content.get("result", {}).get("value") if is_true(condition_value) != is_true(result_value): return False if CONDITION.RESULT_STATUS in conditions: condition_value = conditions.get(CONDITION.RESULT_STATUS) result_status = content.get("result", {}).get("status") if is_true(condition_value) != is_true(result_status): return False # checking of max-failcounter state of the token if "token_locked" in conditions: if token_obj: locked = token_obj.get_failcount() >= \ token_obj.get_max_failcount() if (conditions.get("token_locked") in ["True", True]) != \ locked: return False else: # check all tokens of the user, if any token is maxfail token_objects = get_tokens(user=user, maxfail=True) if not ','.join([tok.get_serial() for tok in token_objects]): return False if CONDITION.TOKENREALM in conditions and tokenrealms: res = False for trealm in tokenrealms: if trealm in conditions.get(CONDITION.TOKENREALM).split(","): res = True break if not res: return False if CONDITION.TOKENRESOLVER in conditions and tokenresolvers: res = False for tres in tokenresolvers: if tres in conditions.get(CONDITION.TOKENRESOLVER).split(","): res = True break if not res: return False if "serial" in conditions and serial: serial_match = conditions.get("serial") if not bool(re.match(serial_match, serial)): return False if CONDITION.USER_TOKEN_NUMBER in conditions and user: num_tokens = get_tokens(user=user, count=True) if num_tokens != int(conditions.get( CONDITION.USER_TOKEN_NUMBER)): return False if CONDITION.DETAIL_ERROR_MESSAGE in conditions: message = content.get("detail", {}).get("error", {}).get("message", "") search_exp = conditions.get(CONDITION.DETAIL_ERROR_MESSAGE) m = re.search(search_exp, message) if not bool(m): return False if CONDITION.DETAIL_MESSAGE in conditions: message = content.get("detail", {}).get("message", "") search_exp = conditions.get(CONDITION.DETAIL_MESSAGE) m = re.search(search_exp, message) if not bool(m): return False if CONDITION.COUNTER in conditions: # Can be counter==1000 if not compare_generic_condition(conditions.get(CONDITION.COUNTER), lambda x: counter_read(x) or 0, "Misconfiguration in your counter " "condition: {0!s}" ): return False # Token specific conditions if token_obj: if CONDITION.TOKENTYPE in conditions: if tokentype not in conditions.get(CONDITION.TOKENTYPE).split( ","): return False if CONDITION.TOKEN_HAS_OWNER in conditions: uid = token_obj.get_user_id() check = conditions.get(CONDITION.TOKEN_HAS_OWNER) if uid and check in ["True", True]: res = True elif not uid and check in ["False", False]: res = True else: log.debug("Condition token_has_owner for token {0!r} " "not fulfilled.".format(token_obj)) return False if CONDITION.TOKEN_IS_ORPHANED in conditions: orphaned = token_obj.is_orphaned() check = conditions.get(CONDITION.TOKEN_IS_ORPHANED) if orphaned and check in ["True", True]: res = True elif not orphaned and check in ["False", False]: res = True else: log.debug("Condition token_is_orphaned for token {0!r} not " "fulfilled.".format(token_obj)) return False if CONDITION.TOKEN_VALIDITY_PERIOD in conditions: valid = token_obj.check_validity_period() if (conditions.get(CONDITION.TOKEN_VALIDITY_PERIOD) in ["True", True]) != valid: return False if CONDITION.OTP_COUNTER in conditions: cond = conditions.get(CONDITION.OTP_COUNTER) if not compare_condition(cond, token_obj.token.count): return False if CONDITION.LAST_AUTH in conditions: if token_obj.check_last_auth_newer(conditions.get( CONDITION.LAST_AUTH)): return False if CONDITION.COUNT_AUTH in conditions: count = token_obj.get_count_auth() cond = conditions.get(CONDITION.COUNT_AUTH) if not compare_condition(cond, count): return False if CONDITION.COUNT_AUTH_SUCCESS in conditions: count = token_obj.get_count_auth_success() cond = conditions.get(CONDITION.COUNT_AUTH_SUCCESS) if not compare_condition(cond, count): return False if CONDITION.COUNT_AUTH_FAIL in conditions: count = token_obj.get_count_auth() c_success = token_obj.get_count_auth_success() c_fail = count - c_success cond = conditions.get(CONDITION.COUNT_AUTH_FAIL) if not compare_condition(cond, c_fail): return False if CONDITION.FAILCOUNTER in conditions: failcount = token_obj.get_failcount() cond = conditions.get(CONDITION.FAILCOUNTER) if not compare_condition(cond, failcount): return False if CONDITION.TOKENINFO in conditions: cond = conditions.get(CONDITION.TOKENINFO) # replace {now} in condition cond, td = parse_time_offset_from_now(cond) s_now = (datetime.datetime.now(tzlocal()) + td).strftime( DATE_FORMAT) cond = cond.format(now=s_now) if not compare_generic_condition(cond, token_obj.get_tokeninfo, "Misconfiguration in your tokeninfo " "condition: {0!s}"): return False if CONDITION.ROLLOUT_STATE in conditions: cond = conditions.get(CONDITION.ROLLOUT_STATE) if not cond == token_obj.token.rollout_state: return False return True
[ "def", "check_condition", "(", "self", ",", "options", ")", ":", "g", "=", "options", ".", "get", "(", "\"g\"", ")", "request", "=", "options", ".", "get", "(", "\"request\"", ")", "response", "=", "options", ".", "get", "(", "\"response\"", ")", "e_handler_def", "=", "options", ".", "get", "(", "\"handler_def\"", ")", "if", "not", "e_handler_def", ":", "# options is the handler definition", "return", "True", "# conditions can be corresponding to the property conditions", "conditions", "=", "e_handler_def", ".", "get", "(", "\"conditions\"", ")", "content", "=", "self", ".", "_get_response_content", "(", "response", ")", "user", "=", "self", ".", "_get_tokenowner", "(", "request", ")", "serial", "=", "request", ".", "all_data", ".", "get", "(", "\"serial\"", ")", "or", "content", ".", "get", "(", "\"detail\"", ",", "{", "}", ")", ".", "get", "(", "\"serial\"", ")", "tokenrealms", "=", "[", "]", "tokenresolvers", "=", "[", "]", "tokentype", "=", "None", "token_obj", "=", "None", "if", "serial", ":", "# We have determined the serial number from the request.", "token_obj_list", "=", "get_tokens", "(", "serial", "=", "serial", ")", "elif", "user", ":", "# We have to determine the token via the user object. But only if", "# the user has only one token", "token_obj_list", "=", "get_tokens", "(", "user", "=", "user", ")", "else", ":", "token_obj_list", "=", "[", "]", "if", "len", "(", "token_obj_list", ")", "==", "1", ":", "# There is a token involved, so we determine it's resolvers and realms", "token_obj", "=", "token_obj_list", "[", "0", "]", "tokenrealms", "=", "token_obj", ".", "get_realms", "(", ")", "tokentype", "=", "token_obj", ".", "get_tokentype", "(", ")", "all_realms", "=", "get_realms", "(", ")", "for", "tokenrealm", "in", "tokenrealms", ":", "resolvers", "=", "all_realms", ".", "get", "(", "tokenrealm", ",", "{", "}", ")", ".", "get", "(", "\"resolver\"", ",", "{", "}", ")", "tokenresolvers", ".", "extend", "(", "[", "r", ".", "get", "(", "\"name\"", ")", "for", "r", "in", "resolvers", "]", ")", "tokenresolvers", "=", "list", "(", "set", "(", "tokenresolvers", ")", ")", "if", "CONDITION", ".", "CLIENT_IP", "in", "conditions", ":", "if", "g", "and", "g", ".", "client_ip", ":", "ip_policy", "=", "[", "ip", ".", "strip", "(", ")", "for", "ip", "in", "conditions", ".", "get", "(", "CONDITION", ".", "CLIENT_IP", ")", ".", "split", "(", "\",\"", ")", "]", "found", ",", "excluded", "=", "check_ip_in_policy", "(", "g", ".", "client_ip", ",", "ip_policy", ")", "if", "not", "found", "or", "excluded", ":", "return", "False", "if", "CONDITION", ".", "REALM", "in", "conditions", ":", "if", "user", ".", "realm", "!=", "conditions", ".", "get", "(", "CONDITION", ".", "REALM", ")", ":", "return", "False", "if", "CONDITION", ".", "RESOLVER", "in", "conditions", ":", "if", "user", ".", "resolver", "!=", "conditions", ".", "get", "(", "CONDITION", ".", "RESOLVER", ")", ":", "return", "False", "if", "\"logged_in_user\"", "in", "conditions", ":", "# Determine the role of the user", "try", ":", "logged_in_user", "=", "g", ".", "logged_in_user", "user_role", "=", "logged_in_user", ".", "get", "(", "\"role\"", ")", "except", "Exception", ":", "# A non-logged-in-user is a User, not an admin", "user_role", "=", "ROLE", ".", "USER", "if", "user_role", "!=", "conditions", ".", "get", "(", "\"logged_in_user\"", ")", ":", "return", "False", "if", "CONDITION", ".", "RESULT_VALUE", "in", "conditions", ":", "condition_value", "=", "conditions", ".", "get", "(", "CONDITION", ".", "RESULT_VALUE", ")", 
"result_value", "=", "content", ".", "get", "(", "\"result\"", ",", "{", "}", ")", ".", "get", "(", "\"value\"", ")", "if", "is_true", "(", "condition_value", ")", "!=", "is_true", "(", "result_value", ")", ":", "return", "False", "if", "CONDITION", ".", "RESULT_STATUS", "in", "conditions", ":", "condition_value", "=", "conditions", ".", "get", "(", "CONDITION", ".", "RESULT_STATUS", ")", "result_status", "=", "content", ".", "get", "(", "\"result\"", ",", "{", "}", ")", ".", "get", "(", "\"status\"", ")", "if", "is_true", "(", "condition_value", ")", "!=", "is_true", "(", "result_status", ")", ":", "return", "False", "# checking of max-failcounter state of the token", "if", "\"token_locked\"", "in", "conditions", ":", "if", "token_obj", ":", "locked", "=", "token_obj", ".", "get_failcount", "(", ")", ">=", "token_obj", ".", "get_max_failcount", "(", ")", "if", "(", "conditions", ".", "get", "(", "\"token_locked\"", ")", "in", "[", "\"True\"", ",", "True", "]", ")", "!=", "locked", ":", "return", "False", "else", ":", "# check all tokens of the user, if any token is maxfail", "token_objects", "=", "get_tokens", "(", "user", "=", "user", ",", "maxfail", "=", "True", ")", "if", "not", "','", ".", "join", "(", "[", "tok", ".", "get_serial", "(", ")", "for", "tok", "in", "token_objects", "]", ")", ":", "return", "False", "if", "CONDITION", ".", "TOKENREALM", "in", "conditions", "and", "tokenrealms", ":", "res", "=", "False", "for", "trealm", "in", "tokenrealms", ":", "if", "trealm", "in", "conditions", ".", "get", "(", "CONDITION", ".", "TOKENREALM", ")", ".", "split", "(", "\",\"", ")", ":", "res", "=", "True", "break", "if", "not", "res", ":", "return", "False", "if", "CONDITION", ".", "TOKENRESOLVER", "in", "conditions", "and", "tokenresolvers", ":", "res", "=", "False", "for", "tres", "in", "tokenresolvers", ":", "if", "tres", "in", "conditions", ".", "get", "(", "CONDITION", ".", "TOKENRESOLVER", ")", ".", "split", "(", "\",\"", ")", ":", "res", "=", "True", "break", "if", "not", "res", ":", "return", "False", "if", "\"serial\"", "in", "conditions", "and", "serial", ":", "serial_match", "=", "conditions", ".", "get", "(", "\"serial\"", ")", "if", "not", "bool", "(", "re", ".", "match", "(", "serial_match", ",", "serial", ")", ")", ":", "return", "False", "if", "CONDITION", ".", "USER_TOKEN_NUMBER", "in", "conditions", "and", "user", ":", "num_tokens", "=", "get_tokens", "(", "user", "=", "user", ",", "count", "=", "True", ")", "if", "num_tokens", "!=", "int", "(", "conditions", ".", "get", "(", "CONDITION", ".", "USER_TOKEN_NUMBER", ")", ")", ":", "return", "False", "if", "CONDITION", ".", "DETAIL_ERROR_MESSAGE", "in", "conditions", ":", "message", "=", "content", ".", "get", "(", "\"detail\"", ",", "{", "}", ")", ".", "get", "(", "\"error\"", ",", "{", "}", ")", ".", "get", "(", "\"message\"", ",", "\"\"", ")", "search_exp", "=", "conditions", ".", "get", "(", "CONDITION", ".", "DETAIL_ERROR_MESSAGE", ")", "m", "=", "re", ".", "search", "(", "search_exp", ",", "message", ")", "if", "not", "bool", "(", "m", ")", ":", "return", "False", "if", "CONDITION", ".", "DETAIL_MESSAGE", "in", "conditions", ":", "message", "=", "content", ".", "get", "(", "\"detail\"", ",", "{", "}", ")", ".", "get", "(", "\"message\"", ",", "\"\"", ")", "search_exp", "=", "conditions", ".", "get", "(", "CONDITION", ".", "DETAIL_MESSAGE", ")", "m", "=", "re", ".", "search", "(", "search_exp", ",", "message", ")", "if", "not", "bool", "(", "m", ")", ":", "return", "False", "if", "CONDITION", ".", "COUNTER", "in", 
"conditions", ":", "# Can be counter==1000", "if", "not", "compare_generic_condition", "(", "conditions", ".", "get", "(", "CONDITION", ".", "COUNTER", ")", ",", "lambda", "x", ":", "counter_read", "(", "x", ")", "or", "0", ",", "\"Misconfiguration in your counter \"", "\"condition: {0!s}\"", ")", ":", "return", "False", "# Token specific conditions", "if", "token_obj", ":", "if", "CONDITION", ".", "TOKENTYPE", "in", "conditions", ":", "if", "tokentype", "not", "in", "conditions", ".", "get", "(", "CONDITION", ".", "TOKENTYPE", ")", ".", "split", "(", "\",\"", ")", ":", "return", "False", "if", "CONDITION", ".", "TOKEN_HAS_OWNER", "in", "conditions", ":", "uid", "=", "token_obj", ".", "get_user_id", "(", ")", "check", "=", "conditions", ".", "get", "(", "CONDITION", ".", "TOKEN_HAS_OWNER", ")", "if", "uid", "and", "check", "in", "[", "\"True\"", ",", "True", "]", ":", "res", "=", "True", "elif", "not", "uid", "and", "check", "in", "[", "\"False\"", ",", "False", "]", ":", "res", "=", "True", "else", ":", "log", ".", "debug", "(", "\"Condition token_has_owner for token {0!r} \"", "\"not fulfilled.\"", ".", "format", "(", "token_obj", ")", ")", "return", "False", "if", "CONDITION", ".", "TOKEN_IS_ORPHANED", "in", "conditions", ":", "orphaned", "=", "token_obj", ".", "is_orphaned", "(", ")", "check", "=", "conditions", ".", "get", "(", "CONDITION", ".", "TOKEN_IS_ORPHANED", ")", "if", "orphaned", "and", "check", "in", "[", "\"True\"", ",", "True", "]", ":", "res", "=", "True", "elif", "not", "orphaned", "and", "check", "in", "[", "\"False\"", ",", "False", "]", ":", "res", "=", "True", "else", ":", "log", ".", "debug", "(", "\"Condition token_is_orphaned for token {0!r} not \"", "\"fulfilled.\"", ".", "format", "(", "token_obj", ")", ")", "return", "False", "if", "CONDITION", ".", "TOKEN_VALIDITY_PERIOD", "in", "conditions", ":", "valid", "=", "token_obj", ".", "check_validity_period", "(", ")", "if", "(", "conditions", ".", "get", "(", "CONDITION", ".", "TOKEN_VALIDITY_PERIOD", ")", "in", "[", "\"True\"", ",", "True", "]", ")", "!=", "valid", ":", "return", "False", "if", "CONDITION", ".", "OTP_COUNTER", "in", "conditions", ":", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "OTP_COUNTER", ")", "if", "not", "compare_condition", "(", "cond", ",", "token_obj", ".", "token", ".", "count", ")", ":", "return", "False", "if", "CONDITION", ".", "LAST_AUTH", "in", "conditions", ":", "if", "token_obj", ".", "check_last_auth_newer", "(", "conditions", ".", "get", "(", "CONDITION", ".", "LAST_AUTH", ")", ")", ":", "return", "False", "if", "CONDITION", ".", "COUNT_AUTH", "in", "conditions", ":", "count", "=", "token_obj", ".", "get_count_auth", "(", ")", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "COUNT_AUTH", ")", "if", "not", "compare_condition", "(", "cond", ",", "count", ")", ":", "return", "False", "if", "CONDITION", ".", "COUNT_AUTH_SUCCESS", "in", "conditions", ":", "count", "=", "token_obj", ".", "get_count_auth_success", "(", ")", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "COUNT_AUTH_SUCCESS", ")", "if", "not", "compare_condition", "(", "cond", ",", "count", ")", ":", "return", "False", "if", "CONDITION", ".", "COUNT_AUTH_FAIL", "in", "conditions", ":", "count", "=", "token_obj", ".", "get_count_auth", "(", ")", "c_success", "=", "token_obj", ".", "get_count_auth_success", "(", ")", "c_fail", "=", "count", "-", "c_success", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "COUNT_AUTH_FAIL", ")", "if", "not", "compare_condition", 
"(", "cond", ",", "c_fail", ")", ":", "return", "False", "if", "CONDITION", ".", "FAILCOUNTER", "in", "conditions", ":", "failcount", "=", "token_obj", ".", "get_failcount", "(", ")", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "FAILCOUNTER", ")", "if", "not", "compare_condition", "(", "cond", ",", "failcount", ")", ":", "return", "False", "if", "CONDITION", ".", "TOKENINFO", "in", "conditions", ":", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "TOKENINFO", ")", "# replace {now} in condition", "cond", ",", "td", "=", "parse_time_offset_from_now", "(", "cond", ")", "s_now", "=", "(", "datetime", ".", "datetime", ".", "now", "(", "tzlocal", "(", ")", ")", "+", "td", ")", ".", "strftime", "(", "DATE_FORMAT", ")", "cond", "=", "cond", ".", "format", "(", "now", "=", "s_now", ")", "if", "not", "compare_generic_condition", "(", "cond", ",", "token_obj", ".", "get_tokeninfo", ",", "\"Misconfiguration in your tokeninfo \"", "\"condition: {0!s}\"", ")", ":", "return", "False", "if", "CONDITION", ".", "ROLLOUT_STATE", "in", "conditions", ":", "cond", "=", "conditions", ".", "get", "(", "CONDITION", ".", "ROLLOUT_STATE", ")", "if", "not", "cond", "==", "token_obj", ".", "token", ".", "rollout_state", ":", "return", "False", "return", "True" ]
https://github.com/privacyidea/privacyidea/blob/9490c12ddbf77a34ac935b082d09eb583dfafa2c/privacyidea/lib/eventhandler/base.py#L364-L604
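The check_condition record above leans on a compare_condition(condition, value) helper for its numeric checks (otp_counter, count_auth, failcounter), and the inline comment notes conditions can look like counter==1000. A minimal sketch of what such a helper might do — the operator set and parsing grammar here are assumptions, not privacyidea's actual implementation:

import re

def compare_condition(condition, value):
    # Hypothetical parser for conditions such as "==100", ">100" or "<100";
    # privacyidea's real helper may accept a different grammar.
    match = re.search(r"(==|>|<)\s*(\d+)\s*$", condition)
    if not match:
        return False
    op, threshold = match.group(1), int(match.group(2))
    if op == "==":
        return value == threshold
    if op == ">":
        return value > threshold
    return value < threshold

# e.g. a failcounter condition: fire only after more than 3 failures
print(compare_condition(">3", 5))  # True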
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/core/common.py
python
_lcd_dtypes
(a_dtype, b_dtype)
return np.object
return the lcd dtype to hold these types
return the lcd dtype to hold these types
[ "return", "the", "lcd", "dtype", "to", "hold", "these", "types" ]
def _lcd_dtypes(a_dtype, b_dtype): """ return the lcd dtype to hold these types """ if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype): return _NS_DTYPE elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype): return _TD_DTYPE elif is_complex_dtype(a_dtype): if is_complex_dtype(b_dtype): return a_dtype return np.float64 elif is_integer_dtype(a_dtype): if is_integer_dtype(b_dtype): if a_dtype.itemsize == b_dtype.itemsize: return a_dtype return np.int64 return np.float64 elif is_float_dtype(a_dtype): if is_float_dtype(b_dtype): if a_dtype.itemsize == b_dtype.itemsize: return a_dtype else: return np.float64 elif is_integer(b_dtype): return np.float64 return np.object
[ "def", "_lcd_dtypes", "(", "a_dtype", ",", "b_dtype", ")", ":", "if", "is_datetime64_dtype", "(", "a_dtype", ")", "or", "is_datetime64_dtype", "(", "b_dtype", ")", ":", "return", "_NS_DTYPE", "elif", "is_timedelta64_dtype", "(", "a_dtype", ")", "or", "is_timedelta64_dtype", "(", "b_dtype", ")", ":", "return", "_TD_DTYPE", "elif", "is_complex_dtype", "(", "a_dtype", ")", ":", "if", "is_complex_dtype", "(", "b_dtype", ")", ":", "return", "a_dtype", "return", "np", ".", "float64", "elif", "is_integer_dtype", "(", "a_dtype", ")", ":", "if", "is_integer_dtype", "(", "b_dtype", ")", ":", "if", "a_dtype", ".", "itemsize", "==", "b_dtype", ".", "itemsize", ":", "return", "a_dtype", "return", "np", ".", "int64", "return", "np", ".", "float64", "elif", "is_float_dtype", "(", "a_dtype", ")", ":", "if", "is_float_dtype", "(", "b_dtype", ")", ":", "if", "a_dtype", ".", "itemsize", "==", "b_dtype", ".", "itemsize", ":", "return", "a_dtype", "else", ":", "return", "np", ".", "float64", "elif", "is_integer", "(", "b_dtype", ")", ":", "return", "np", ".", "float64", "return", "np", ".", "object" ]
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pandas-0.17.0-py3.3-win-amd64.egg/pandas/core/common.py#L1328-L1353
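A quick usage sketch of the promotion rules documented above, assuming _lcd_dtypes from the snippet is in scope (in pandas 0.17 it lived in pandas.core.common as an internal helper):

import numpy as np

# Assuming _lcd_dtypes from the snippet above is in scope.
print(_lcd_dtypes(np.dtype('int32'), np.dtype('int64')))          # int64: same kind, differing sizes
print(_lcd_dtypes(np.dtype('int64'), np.dtype('float32')))        # float64: int vs float promotes
print(_lcd_dtypes(np.dtype('datetime64[ns]'), np.dtype('int64'))) # datetime64[ns] (_NS_DTYPE) wins
print(_lcd_dtypes(np.dtype('float64'), np.dtype('object')))       # object: the catch-all fallback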
alibaba/iOSSecAudit
f94ed3254263f3382f374e3f05afae8a1fe79f20
lib/taskutil.py
python
TaskUtil.resign_ipa
(self, ipa_path, entitlements_path, mobileprovision_path, identity, sign_file=None)
[]
def resign_ipa(self, ipa_path, entitlements_path, mobileprovision_path, identity, sign_file=None): """""" G.log(G.INFO, 'Starting resign ipa file') new_ipa_path = LocalUtils().resign_ipa(os.path.abspath(os.path.expanduser(ipa_path)), os.path.abspath(os.path.expanduser(entitlements_path)), os.path.abspath(os.path.expanduser(mobileprovision_path)), identity, sign_file=None) if new_ipa_path is not None: G.log(G.INFO, 'Resign success, new ipa file: %s' % new_ipa_path) G.log(G.INFO, 'Try cmd: \'iipa\' to install new ipa file') else: G.log(G.INFO, 'Resign failed')
[ "def", "resign_ipa", "(", "self", ",", "ipa_path", ",", "entitlements_path", ",", "mobileprovision_path", ",", "identity", ",", "sign_file", "=", "None", ")", ":", "G", ".", "log", "(", "G", ".", "INFO", ",", "'Starting resign ipa file'", ")", "new_ipa_path", "=", "LocalUtils", "(", ")", ".", "resign_ipa", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "ipa_path", ")", ")", ",", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "entitlements_path", ")", ")", ",", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "mobileprovision_path", ")", ")", ",", "identity", ",", "sign_file", "=", "None", ")", "if", "new_ipa_path", "is", "not", "None", ":", "G", ".", "log", "(", "G", ".", "INFO", ",", "'Resign success, new ipa file: %s'", "%", "new_ipa_path", ")", "G", ".", "log", "(", "G", ".", "INFO", ",", "'Try cmd: \\'iipa\\' to install new ipa file'", ")", "else", ":", "G", ".", "log", "(", "G", ".", "INFO", ",", "'Resign failed'", ")" ]
https://github.com/alibaba/iOSSecAudit/blob/f94ed3254263f3382f374e3f05afae8a1fe79f20/lib/taskutil.py#L1152-L1166
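A hedged invocation sketch for the wrapper above; the paths and signing identity are placeholders. Note that, as written, the snippet forwards sign_file=None to LocalUtils().resign_ipa() regardless of the sign_file argument it receives, so that keyword is effectively ignored:

# Hypothetical usage; the TaskUtil construction and identity string are placeholders.
task = TaskUtil()
task.resign_ipa(
    ipa_path='~/apps/target.ipa',
    entitlements_path='~/apps/entitlements.plist',
    mobileprovision_path='~/apps/dev.mobileprovision',
    identity='iPhone Developer: Example Name (ABCDE12345)',
)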
briis/unifiprotect
578427a79974ddfec397d3d9e4c2a4a425e965b5
custom_components/unifiprotect/entity.py
python
ProtectDeviceEntity.async_update
(self)
Update the entity. Only used by the generic entity update service.
Update the entity.
[ "Update", "the", "entity", "." ]
async def async_update(self) -> None: """Update the entity. Only used by the generic entity update service. """ await self.data.async_refresh()
[ "async", "def", "async_update", "(", "self", ")", "->", "None", ":", "await", "self", ".", "data", ".", "async_refresh", "(", ")" ]
https://github.com/briis/unifiprotect/blob/578427a79974ddfec397d3d9e4c2a4a425e965b5/custom_components/unifiprotect/entity.py#L130-L135
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/data/transforms/presets/rcnn.py
python
transform_test
(imgs, short=600, max_size=1000, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
return tensors, origs
A util function to transform all images to tensors as network input by applying normalizations. This function support 1 NDArray or iterable of NDArrays. Parameters ---------- imgs : NDArray or iterable of NDArray Image(s) to be transformed. short : int, optional, default is 600 Resize image short side to this `short` and keep aspect ratio. max_size : int, optional, default is 1000 Maximum longer side length to fit image. This is to limit the input image shape, avoid processing too large image. mean : iterable of float Mean pixel values. std : iterable of float Standard deviations of pixel values. Returns ------- (mxnet.NDArray, numpy.ndarray) or list of such tuple A (1, 3, H, W) mxnet NDArray as input to network, and a numpy ndarray as original un-normalized color image for display. If multiple image names are supplied, return two lists. You can use `zip()`` to collapse it.
A util function to transform all images to tensors as network input by applying normalizations. This function support 1 NDArray or iterable of NDArrays.
[ "A", "util", "function", "to", "transform", "all", "images", "to", "tensors", "as", "network", "input", "by", "applying", "normalizations", ".", "This", "function", "support", "1", "NDArray", "or", "iterable", "of", "NDArrays", "." ]
def transform_test(imgs, short=600, max_size=1000, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): """A util function to transform all images to tensors as network input by applying normalizations. This function support 1 NDArray or iterable of NDArrays. Parameters ---------- imgs : NDArray or iterable of NDArray Image(s) to be transformed. short : int, optional, default is 600 Resize image short side to this `short` and keep aspect ratio. max_size : int, optional, default is 1000 Maximum longer side length to fit image. This is to limit the input image shape, avoid processing too large image. mean : iterable of float Mean pixel values. std : iterable of float Standard deviations of pixel values. Returns ------- (mxnet.NDArray, numpy.ndarray) or list of such tuple A (1, 3, H, W) mxnet NDArray as input to network, and a numpy ndarray as original un-normalized color image for display. If multiple image names are supplied, return two lists. You can use `zip()`` to collapse it. """ if isinstance(imgs, mx.nd.NDArray): imgs = [imgs] for im in imgs: assert isinstance(im, mx.nd.NDArray), "Expect NDArray, got {}".format(type(im)) tensors = [] origs = [] for img in imgs: img = timage.resize_short_within(img, short, max_size) orig_img = img.asnumpy().astype('uint8') img = mx.nd.image.to_tensor(img) img = mx.nd.image.normalize(img, mean=mean, std=std) tensors.append(img.expand_dims(0)) origs.append(orig_img) if len(tensors) == 1: return tensors[0], origs[0] return tensors, origs
[ "def", "transform_test", "(", "imgs", ",", "short", "=", "600", ",", "max_size", "=", "1000", ",", "mean", "=", "(", "0.485", ",", "0.456", ",", "0.406", ")", ",", "std", "=", "(", "0.229", ",", "0.224", ",", "0.225", ")", ")", ":", "if", "isinstance", "(", "imgs", ",", "mx", ".", "nd", ".", "NDArray", ")", ":", "imgs", "=", "[", "imgs", "]", "for", "im", "in", "imgs", ":", "assert", "isinstance", "(", "im", ",", "mx", ".", "nd", ".", "NDArray", ")", ",", "\"Expect NDArray, got {}\"", ".", "format", "(", "type", "(", "im", ")", ")", "tensors", "=", "[", "]", "origs", "=", "[", "]", "for", "img", "in", "imgs", ":", "img", "=", "timage", ".", "resize_short_within", "(", "img", ",", "short", ",", "max_size", ")", "orig_img", "=", "img", ".", "asnumpy", "(", ")", ".", "astype", "(", "'uint8'", ")", "img", "=", "mx", ".", "nd", ".", "image", ".", "to_tensor", "(", "img", ")", "img", "=", "mx", ".", "nd", ".", "image", ".", "normalize", "(", "img", ",", "mean", "=", "mean", ",", "std", "=", "std", ")", "tensors", ".", "append", "(", "img", ".", "expand_dims", "(", "0", ")", ")", "origs", ".", "append", "(", "orig_img", ")", "if", "len", "(", "tensors", ")", "==", "1", ":", "return", "tensors", "[", "0", "]", ",", "origs", "[", "0", "]", "return", "tensors", ",", "origs" ]
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/data/transforms/presets/rcnn.py#L19-L63
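A short usage sketch for the Faster R-CNN preset above; mx.image.imread is the standard MXNet image loader, and the file name is hypothetical:

import mxnet as mx
from gluoncv.data.transforms.presets.rcnn import transform_test

img = mx.image.imread('street.jpg')  # hypothetical local image
x, orig_img = transform_test(img, short=600, max_size=1000)
print(x.shape)         # (1, 3, H, W) normalized network input
print(orig_img.shape)  # (H, W, 3) uint8 image kept for display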
suavecode/SUAVE
4f83c467c5662b6cc611ce2ab6c0bdd25fd5c0a5
trunk/SUAVE/Components/Energy/Cooling/Cryocooler.py
python
Cryocooler.energy_calc
(self, cooling_power, cryo_temp, amb_temp)
return [input_power, mass]
Calculate the power required by the cryocooler based on the cryocooler type, the required cooling power, and the temperature conditions. Assumptions: Based on mass data for Cryomech cryocoolers as per the datasheets for ground based non-massreduced coolers available via the cryomech website: https://www.cryomech.com/cryocoolers/. The mass is calculated for the requested power level, the cryocooler should be sized for the maximum power level required as its mass will not change during the flight. The efficiency scales with required cooling power and temperature only. The temperature difference and efficiency are taken not to scale with ambient temperature. This should not matter in the narrow range of temperatures in which aircraft operate, i.e. for ambient temperatures between -50 and 50 C. Source: https://www.cryomech.com/cryocoolers/ Inputs: cooling_power - cooling power required of the cryocooler [watts] cryo_temp - cryogenic output temperature required [kelvin] amb_temp - ambient temperature the cooler will reject heat to, defaults to 19C [kelvin] cooler_type - cryocooler type used Outputs: input_power - electrical input power required by the cryocooler [watts] mass - mass of the cryocooler and supporting components [kilogram] Properties Used: N/A
Calculate the power required by the cryocooler based on the cryocooler type, the required cooling power, and the temperature conditions. Assumptions: Based on mass data for Cryomech cryocoolers as per the datasheets for ground based non-massreduced coolers available via the cryomech website: https://www.cryomech.com/cryocoolers/. The mass is calculated for the requested power level, the cryocooler should be sized for the maximum power level required as its mass will not change during the flight. The efficiency scales with required cooling power and temperature only. The temperature difference and efficiency are taken not to scale with ambient temperature. This should not matter in the narrow range of temperatures in which aircraft operate, i.e. for ambient temperatures between -50 and 50 C. Source: https://www.cryomech.com/cryocoolers/ Inputs:
[ "Calculate", "the", "power", "required", "by", "the", "cryocooler", "based", "on", "the", "cryocooler", "type", "the", "required", "cooling", "power", "and", "the", "temperature", "conditions", ".", "Assumptions", ":", "Based", "on", "mass", "data", "for", "Cryomech", "cryocoolers", "as", "per", "the", "datasheets", "for", "ground", "based", "non", "-", "massreduced", "coolers", "available", "via", "the", "cryomech", "website", ":", "https", ":", "//", "www", ".", "cryomech", ".", "com", "/", "cryocoolers", "/", ".", "The", "mass", "is", "calculated", "for", "the", "requested", "power", "level", "the", "cryocooler", "should", "be", "sized", "for", "the", "maximum", "power", "level", "required", "as", "its", "mass", "will", "not", "change", "during", "the", "flight", ".", "The", "efficiency", "scales", "with", "required", "cooling", "power", "and", "temperature", "only", ".", "The", "temperature", "difference", "and", "efficiency", "are", "taken", "not", "to", "scale", "with", "ambient", "temperature", ".", "This", "should", "not", "matter", "in", "the", "narrow", "range", "of", "temperatures", "in", "which", "aircraft", "operate", "i", ".", "e", ".", "for", "ambient", "temperatures", "between", "-", "50", "and", "50", "C", ".", "Source", ":", "https", ":", "//", "www", ".", "cryomech", ".", "com", "/", "cryocoolers", "/", "Inputs", ":" ]
def energy_calc(self, cooling_power, cryo_temp, amb_temp): """ Calculate the power required by the cryocooler based on the cryocooler type, the required cooling power, and the temperature conditions. Assumptions: Based on mass data for Cryomech cryocoolers as per the datasheets for ground based non-massreduced coolers available via the cryomech website: https://www.cryomech.com/cryocoolers/. The mass is calculated for the requested power level, the cryocooler should be sized for the maximum power level required as its mass will not change during the flight. The efficiency scales with required cooling power and temperature only. The temperature difference and efficiency are taken not to scale with ambient temperature. This should not matter in the narrow range of temperatures in which aircraft operate, i.e. for ambient temperatures between -50 and 50 C. Source: https://www.cryomech.com/cryocoolers/ Inputs: cooling_power - cooling power required of the cryocooler [watts] cryo_temp - cryogenic output temperature required [kelvin] amb_temp - ambient temperature the cooler will reject heat to, defaults to 19C [kelvin] cooler_type - cryocooler type used Outputs: input_power - electrical input power required by the cryocooler [watts] mass - mass of the cryocooler and supporting components [kilogram] Properties Used: N/A """ # Prevent unrealistic temperature changes. if np.amin(cryo_temp) < 1.: cryo_temp = np.maximum(cryo_temp, 5.) print("Warning: Less than zero kelvin not possible, setting cryogenic temperature target to 5K.") # Warn if ambient temperature is very low. if np.amin(amb_temp) < 200.: print("Warning: Suprisingly low ambient temperature, check altitude.") # Calculate the shift in achievable minimum temperature based on the the ambient temperature (temp_amb) and the datasheet operating temperature (19C, 292.15K) temp_offset = 292.15 - amb_temp # Calculate the required temperature difference the cryocooler must produce. temp_diff = amb_temp-cryo_temp # Disable if the target temperature is greater than the ambient temp. Technically cooling like this is possible, however there are better cooling technologies to use if this is the required scenario. if np.amin(temp_diff) < 0.: temp_diff = np.maximum(temp_diff, 0.) print("Warning: Temperature conditions are not well suited to cryocooler use. Cryocooler disabled.") # Set the parameters of the cooler based on the cooler type and the operating conditions. The default ambient operating temperature (19C) is used as a base. if self.cooler_type == 'fps': #Free Piston Stirling temp_minRT = 35.0 # Minimum temperature achievable by this type of cooler when rejecting to an ambient temperature of 19C (K) temp_min = temp_minRT - temp_offset # Updated minimum achievable temperature based on the supplied ambient temperature (K) eff = 0.0014*(cryo_temp-temp_min) # Efficiency function. This is a line fit from a survey of Cryomech coolers in November 2019 input_power = cooling_power/eff # Electrical input power (W) mass = 0.0098*input_power+1.0769 # Total cooler mass function. Fit from November 2019 Cryomech data. (kg) elif self.cooler_type == 'GM': #Gifford McMahon temp_minRT = 5.4 temp_min = temp_minRT - temp_offset eff = 0.0005*(cryo_temp-temp_min) input_power = cooling_power/eff mass = 0.0129*input_power+63.08 elif self.cooler_type == 'sPT': #Single Pulsetube temp_minRT = 16.0 temp_min = temp_minRT - temp_offset eff = 0.0002*(cryo_temp-temp_min) input_power = cooling_power/eff mass = 0.0079*input_power+51.124 elif self.cooler_type == 'dPT': #Double Pulsetube temp_minRT = 8.0 temp_min = temp_minRT - temp_offset eff = 0.00001*(cryo_temp-temp_min) input_power = cooling_power/eff mass = 0.0111*input_power+73.809 else: print("Warning: Unknown Cryocooler type") return[0.0,0.0] # Warn if the cryogenic temperature is unachievable diff = cryo_temp - temp_min if np.amin(diff) < 0.0: eff = 0.0 input_power = None mass = None print("Warning: The required cryogenic temperature of " + str(cryo_temp) + " is not achievable using a " + self.cooler_type + " cryocooler at an ambient temperature of " + str(amb_temp) + ". The minimum temperature achievable is " + str(temp_min)) self.mass_properties.mass = mass self.rated_power = input_power return [input_power, mass]
[ "def", "energy_calc", "(", "self", ",", "cooling_power", ",", "cryo_temp", ",", "amb_temp", ")", ":", "# Prevent unrealistic temperature changes.", "if", "np", ".", "amin", "(", "cryo_temp", ")", "<", "1.", ":", "cryo_temp", "=", "np", ".", "maximum", "(", "cryo_temp", ",", "5.", ")", "print", "(", "\"Warning: Less than zero kelvin not possible, setting cryogenic temperature target to 5K.\"", ")", "# Warn if ambient temperature is very low.", "if", "np", ".", "amin", "(", "amb_temp", ")", "<", "200.", ":", "print", "(", "\"Warning: Suprisingly low ambient temperature, check altitude.\"", ")", "# Calculate the shift in achievable minimum temperature based on the the ambient temperature (temp_amb) and the datasheet operating temperature (19C, 292.15K)", "temp_offset", "=", "292.15", "-", "amb_temp", "# Calculate the required temperature difference the cryocooler must produce.", "temp_diff", "=", "amb_temp", "-", "cryo_temp", "# Disable if the target temperature is greater than the ambient temp. Technically cooling like this is possible, however there are better cooling technologies to use if this is the required scenario.", "if", "np", ".", "amin", "(", "temp_diff", ")", "<", "0.", ":", "temp_diff", "=", "np", ".", "maximum", "(", "temp_diff", ",", "0.", ")", "print", "(", "\"Warning: Temperature conditions are not well suited to cryocooler use. Cryocooler disabled.\"", ")", "# Set the parameters of the cooler based on the cooler type and the operating conditions. The default ambient operating temperature (19C) is used as a base.", "if", "self", ".", "cooler_type", "==", "'fps'", ":", "#Free Piston Stirling", "temp_minRT", "=", "35.0", "# Minimum temperature achievable by this type of cooler when rejecting to an ambient temperature of 19C (K)", "temp_min", "=", "temp_minRT", "-", "temp_offset", "# Updated minimum achievable temperature based on the supplied ambient temperature (K)", "eff", "=", "0.0014", "*", "(", "cryo_temp", "-", "temp_min", ")", "# Efficiency function. This is a line fit from a survey of Cryomech coolers in November 2019 ", "input_power", "=", "cooling_power", "/", "eff", "# Electrical input power (W)", "mass", "=", "0.0098", "*", "input_power", "+", "1.0769", "# Total cooler mass function. Fit from November 2019 Cryomech data. 
(kg)", "elif", "self", ".", "cooler_type", "==", "'GM'", ":", "#Gifford McMahon", "temp_minRT", "=", "5.4", "temp_min", "=", "temp_minRT", "-", "temp_offset", "eff", "=", "0.0005", "*", "(", "cryo_temp", "-", "temp_min", ")", "input_power", "=", "cooling_power", "/", "eff", "mass", "=", "0.0129", "*", "input_power", "+", "63.08", "elif", "self", ".", "cooler_type", "==", "'sPT'", ":", "#Single Pulsetube", "temp_minRT", "=", "16.0", "temp_min", "=", "temp_minRT", "-", "temp_offset", "eff", "=", "0.0002", "*", "(", "cryo_temp", "-", "temp_min", ")", "input_power", "=", "cooling_power", "/", "eff", "mass", "=", "0.0079", "*", "input_power", "+", "51.124", "elif", "self", ".", "cooler_type", "==", "'dPT'", ":", "#Double Pulsetube", "temp_minRT", "=", "8.0", "temp_min", "=", "temp_minRT", "-", "temp_offset", "eff", "=", "0.00001", "*", "(", "cryo_temp", "-", "temp_min", ")", "input_power", "=", "cooling_power", "/", "eff", "mass", "=", "0.0111", "*", "input_power", "+", "73.809", "else", ":", "print", "(", "\"Warning: Unknown Cryocooler type\"", ")", "return", "[", "0.0", ",", "0.0", "]", "# Warn if the cryogenic temperature is unachievable", "diff", "=", "cryo_temp", "-", "temp_min", "if", "np", ".", "amin", "(", "diff", ")", "<", "0.0", ":", "eff", "=", "0.0", "input_power", "=", "None", "mass", "=", "None", "print", "(", "\"Warning: The required cryogenic temperature of \"", "+", "str", "(", "cryo_temp", ")", "+", "\" is not achievable using a \"", "+", "self", ".", "cooler_type", "+", "\" cryocooler at an ambient temperature of \"", "+", "str", "(", "amb_temp", ")", "+", "\". The minimum temperature achievable is \"", "+", "str", "(", "temp_min", ")", ")", "self", ".", "mass_properties", ".", "mass", "=", "mass", "self", ".", "rated_power", "=", "input_power", "return", "[", "input_power", ",", "mass", "]" ]
https://github.com/suavecode/SUAVE/blob/4f83c467c5662b6cc611ce2ab6c0bdd25fd5c0a5/trunk/SUAVE/Components/Energy/Cooling/Cryocooler.py#L37-L141
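A minimal calling sketch for energy_calc above. The record does not show the constructor, so instantiating Cryocooler() with defaults and setting cooler_type as a plain attribute are assumptions:

import numpy as np

cooler = Cryocooler()          # assumed default constructor
cooler.cooler_type = 'GM'      # Gifford-McMahon
input_power, mass = cooler.energy_calc(
    cooling_power=np.array([50.0]),   # [W] cooling required
    cryo_temp=np.array([70.0]),       # [K] target temperature
    amb_temp=np.array([292.15]),      # [K] ambient, the 19 C datasheet point
)
print(input_power, mass)  # electrical draw [W] and cooler mass [kg]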
VITA-Group/FasterSeg
478b0265eb9ab626cfbe503ad16d2452878b38cc
search/loss.py
python
FocalLoss.forward
(self, input_, target)
[]
def forward(self, input_, target): cross_entropy = super().forward(input_, target) # Temporarily mask out ignore index to '0' for valid gather-indices input. # This won't contribute final loss as the cross_entropy contribution # for these would be zero. target = target * (target != self.ignore_index).long() input_prob = torch.gather(F.softmax(input_, 1), 1, target.unsqueeze(1)) loss = torch.pow(1 - input_prob, self.gamma) * cross_entropy if self.reduction == 'mean': return torch.mean(loss) elif self.reduction == 'sum': return torch.sum(loss) else: return loss
[ "def", "forward", "(", "self", ",", "input_", ",", "target", ")", ":", "cross_entropy", "=", "super", "(", ")", ".", "forward", "(", "input_", ",", "target", ")", "# Temporarily mask out ignore index to '0' for valid gather-indices input.", "# This won't contribute final loss as the cross_entropy contribution", "# for these would be zero.", "target", "=", "target", "*", "(", "target", "!=", "self", ".", "ignore_index", ")", ".", "long", "(", ")", "input_prob", "=", "torch", ".", "gather", "(", "F", ".", "softmax", "(", "input_", ",", "1", ")", ",", "1", ",", "target", ".", "unsqueeze", "(", "1", ")", ")", "loss", "=", "torch", ".", "pow", "(", "1", "-", "input_prob", ",", "self", ".", "gamma", ")", "*", "cross_entropy", "if", "self", ".", "reduction", "==", "'mean'", ":", "return", "torch", ".", "mean", "(", "loss", ")", "elif", "self", ".", "reduction", "==", "'sum'", ":", "return", "torch", ".", "sum", "(", "loss", ")", "else", ":", "return", "loss" ]
https://github.com/VITA-Group/FasterSeg/blob/478b0265eb9ab626cfbe503ad16d2452878b38cc/search/loss.py#L40-L50
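A usage sketch for the focal loss above. The constructor is not shown in this record, so the gamma/ignore_index/reduction arguments are assumptions inferred from the attributes the method reads; for the elementwise (1 - p)^gamma weighting to line up, the parent cross-entropy presumably produces per-element (non-reduced) output internally:

import torch

# Hypothetical construction; the real __init__ signature is not shown.
criterion = FocalLoss(gamma=2.0, ignore_index=255, reduction='mean')

logits = torch.randn(2, 19, 64, 64, requires_grad=True)  # (N, C, H, W) segmentation logits
target = torch.randint(0, 19, (2, 64, 64))               # (N, H, W) class indices
loss = criterion(logits, target)
loss.backward()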
Jenyay/outwiker
50530cf7b3f71480bb075b2829bc0669773b835b
src/outwiker/gui/controls/ultimatelistctrl.py
python
UltimateListMainWindow.OnChildFocus
(self, event)
Handles the ``wx.EVT_CHILD_FOCUS`` event for :class:`UltimateListMainWindow`. :param `event`: a :class:`ChildFocusEvent` event to be processed. .. note:: This method is intentionally empty to prevent the default handler in :class:`ScrolledWindow` from needlessly scrolling the window when the edit control is dismissed.
Handles the ``wx.EVT_CHILD_FOCUS`` event for :class:`UltimateListMainWindow`.
[ "Handles", "the", "wx", ".", "EVT_CHILD_FOCUS", "event", "for", ":", "class", ":", "UltimateListMainWindow", "." ]
def OnChildFocus(self, event): """ Handles the ``wx.EVT_CHILD_FOCUS`` event for :class:`UltimateListMainWindow`. :param `event`: a :class:`ChildFocusEvent` event to be processed. .. note:: This method is intentionally empty to prevent the default handler in :class:`ScrolledWindow` from needlessly scrolling the window when the edit control is dismissed. """ # Do nothing here. This prevents the default handler in wx.ScrolledWindow # from needlessly scrolling the window when the edit control is # dismissed. See ticket #9563. pass
[ "def", "OnChildFocus", "(", "self", ",", "event", ")", ":", "# Do nothing here. This prevents the default handler in wx.ScrolledWindow", "# from needlessly scrolling the window when the edit control is", "# dismissed. See ticket #9563.", "pass" ]
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/src/outwiker/gui/controls/ultimatelistctrl.py#L7277-L7295
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/plat-mac/lib-scriptpackages/Terminal/Standard_Suite.py
python
Standard_Suite_Events.quit
(self, _object, _attributes={}, **_arguments)
quit: Quit an application. Required argument: the object for the command Keyword argument saving: Specifies whether changes should be saved before quitting. Keyword argument _attributes: AppleEvent attribute dictionary
quit: Quit an application. Required argument: the object for the command Keyword argument saving: Specifies whether changes should be saved before quitting. Keyword argument _attributes: AppleEvent attribute dictionary
[ "quit", ":", "Quit", "an", "application", ".", "Required", "argument", ":", "the", "object", "for", "the", "command", "Keyword", "argument", "saving", ":", "Specifies", "whether", "changes", "should", "be", "saved", "before", "quitting", ".", "Keyword", "argument", "_attributes", ":", "AppleEvent", "attribute", "dictionary" ]
def quit(self, _object, _attributes={}, **_arguments): """quit: Quit an application. Required argument: the object for the command Keyword argument saving: Specifies whether changes should be saved before quitting. Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'quit' aetools.keysubst(_arguments, self._argmap_quit) _arguments['----'] = _object aetools.enumsubst(_arguments, 'savo', _Enum_savo) _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----']
[ "def", "quit", "(", "self", ",", "_object", ",", "_attributes", "=", "{", "}", ",", "*", "*", "_arguments", ")", ":", "_code", "=", "'aevt'", "_subcode", "=", "'quit'", "aetools", ".", "keysubst", "(", "_arguments", ",", "self", ".", "_argmap_quit", ")", "_arguments", "[", "'----'", "]", "=", "_object", "aetools", ".", "enumsubst", "(", "_arguments", ",", "'savo'", ",", "_Enum_savo", ")", "_reply", ",", "_arguments", ",", "_attributes", "=", "self", ".", "send", "(", "_code", ",", "_subcode", ",", "_arguments", ",", "_attributes", ")", "if", "_arguments", ".", "get", "(", "'errn'", ",", "0", ")", ":", "raise", "aetools", ".", "Error", ",", "aetools", ".", "decodeerror", "(", "_arguments", ")", "# XXXX Optionally decode result", "if", "_arguments", ".", "has_key", "(", "'----'", ")", ":", "return", "_arguments", "[", "'----'", "]" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/plat-mac/lib-scriptpackages/Terminal/Standard_Suite.py#L258-L278
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3cfg.py
python
S3Config.get_L10n_pootle_password
(self)
return self.L10n.get("pootle_password", False)
Password for Pootle server
Password for Pootle server
[ "Password", "for", "Pootle", "server" ]
def get_L10n_pootle_password(self): """ Password for Pootle server """ return self.L10n.get("pootle_password", False)
[ "def", "get_L10n_pootle_password", "(", "self", ")", ":", "return", "self", ".", "L10n", ".", "get", "(", "\"pootle_password\"", ",", "False", ")" ]
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3cfg.py#L2058-L2060
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/shutil.py
python
get_archive_formats
()
return formats
Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description)
Returns a list of supported formats for archiving and unarchiving.
[ "Returns", "a", "list", "of", "supported", "formats", "for", "archiving", "and", "unarchiving", "." ]
def get_archive_formats(): """Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) """ formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats
[ "def", "get_archive_formats", "(", ")", ":", "formats", "=", "[", "(", "name", ",", "registry", "[", "2", "]", ")", "for", "name", ",", "registry", "in", "_ARCHIVE_FORMATS", ".", "items", "(", ")", "]", "formats", ".", "sort", "(", ")", "return", "formats" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/shutil.py#L469-L477
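This one is easy to demonstrate directly, since get_archive_formats is part of the standard library's public shutil API:

import shutil

for name, description in shutil.get_archive_formats():
    print(name, '-', description)
# Typical entries include ('gztar', "gzip'ed tar-file") and ('zip', 'ZIP file').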
pydoit/doit
cf7edfbe73fafebd1b2a6f1d3be8b69fde41383d
doit/cmd_base.py
python
DoitCmdBase.get_options
(self)
return [CmdOption(opt) for opt in opt_list]
from base class - merge base_options, loader_options and cmd_options
from base class - merge base_options, loader_options and cmd_options
[ "from", "base", "class", "-", "merge", "base_options", "loader_options", "and", "cmd_options" ]
def get_options(self): """from base class - merge base_options, loader_options and cmd_options """ opt_list = (self.base_options + self.loader.cmd_options + self.cmd_options) return [CmdOption(opt) for opt in opt_list]
[ "def", "get_options", "(", "self", ")", ":", "opt_list", "=", "(", "self", ".", "base_options", "+", "self", ".", "loader", ".", "cmd_options", "+", "self", ".", "cmd_options", ")", "return", "[", "CmdOption", "(", "opt", ")", "for", "opt", "in", "opt_list", "]" ]
https://github.com/pydoit/doit/blob/cf7edfbe73fafebd1b2a6f1d3be8b69fde41383d/doit/cmd_base.py#L477-L482
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/stringold.py
python
rindex
(s, *args)
return _apply(s.rindex, args)
rindex(s, sub [,start [,end]]) -> int Like rfind but raises ValueError when the substring is not found.
rindex(s, sub [,start [,end]]) -> int
[ "rindex", "(", "s", "sub", "[", "start", "[", "end", "]]", ")", "-", ">", "int" ]
def rindex(s, *args): """rindex(s, sub [,start [,end]]) -> int Like rfind but raises ValueError when the substring is not found. """ return _apply(s.rindex, args)
[ "def", "rindex", "(", "s", ",", "*", "args", ")", ":", "return", "_apply", "(", "s", ".", "rindex", ",", "args", ")" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/stringold.py#L145-L151
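Since this is a thin wrapper over str.rindex, the documented contrast with rfind is easy to show:

s = 'hello world'
print(s.rindex('o'))   # 7: offset of the last occurrence
print(s.rfind('x'))    # -1: rfind signals a miss with -1 ...
try:
    s.rindex('x')      # ... while rindex raises instead
except ValueError as exc:
    print('ValueError:', exc)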
openstack/oslo.messaging
5d165cc713a98dbd650e9e6295d7966ce2919935
oslo_messaging/_drivers/base.py
python
BaseDriver.listen
(self, target, batch_size, batch_timeout)
Construct a listener for the given target. The listener may be either a :py:class:`Listener` or :py:class:`PollStyleListener` depending on the driver's preference. This method is used by the RPC server. The driver must create subscriptions to the address provided in *target*. These subscriptions must then be associated with a :py:class:`Listener` or :py:class:`PollStyleListener` which is returned by this method. See :py:meth:`BaseDriver.send` for more detail regarding message addressing. The driver must support receiving messages sent to the following addresses derived from the values in *target*: * all messages sent to the exchange and topic given in the target. This includes messages sent using a fanout pattern. * if the server attribute of the target is set then the driver must also subscribe to messages sent to the exchange, topic, and server For example, given a target with exchange 'my-exchange', topic 'my-topic', and server 'my-server', the driver would create subscriptions for: * all messages sent to my-exchange and my-topic (including fanout) * all messages sent to my-exchange, my-topic, and my-server The driver must pass messages arriving from these subscriptions to the listener. For :py:class:`PollStyleListener` the driver should trigger the :py:meth:`PollStyleListener.poll` method to unblock and return the incoming messages. For :py:class:`Listener` the driver should invoke the callback with the incoming messages. This method only blocks long enough to establish the subscription(s) and construct the listener. In the case of failover, the driver must restore the subscription(s). Subscriptions should remain active until the listener is stopped. :param target: The address(es) to subscribe to. :type target: Target :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: None :raises: :py:exc:`MessagingException`
Construct a listener for the given target. The listener may be either a :py:class:`Listener` or :py:class:`PollStyleListener` depending on the driver's preference. This method is used by the RPC server.
[ "Construct", "a", "listener", "for", "the", "given", "target", ".", "The", "listener", "may", "be", "either", "a", ":", "py", ":", "class", ":", "Listener", "or", ":", "py", ":", "class", ":", "PollStyleListener", "depending", "on", "the", "driver", "s", "preference", ".", "This", "method", "is", "used", "by", "the", "RPC", "server", "." ]
def listen(self, target, batch_size, batch_timeout): """Construct a listener for the given target. The listener may be either a :py:class:`Listener` or :py:class:`PollStyleListener` depending on the driver's preference. This method is used by the RPC server. The driver must create subscriptions to the address provided in *target*. These subscriptions must then be associated with a :py:class:`Listener` or :py:class:`PollStyleListener` which is returned by this method. See :py:meth:`BaseDriver.send` for more detail regarding message addressing. The driver must support receiving messages sent to the following addresses derived from the values in *target*: * all messages sent to the exchange and topic given in the target. This includes messages sent using a fanout pattern. * if the server attribute of the target is set then the driver must also subscribe to messages sent to the exchange, topic, and server For example, given a target with exchange 'my-exchange', topic 'my-topic', and server 'my-server', the driver would create subscriptions for: * all messages sent to my-exchange and my-topic (including fanout) * all messages sent to my-exchange, my-topic, and my-server The driver must pass messages arriving from these subscriptions to the listener. For :py:class:`PollStyleListener` the driver should trigger the :py:meth:`PollStyleListener.poll` method to unblock and return the incoming messages. For :py:class:`Listener` the driver should invoke the callback with the incoming messages. This method only blocks long enough to establish the subscription(s) and construct the listener. In the case of failover, the driver must restore the subscription(s). Subscriptions should remain active until the listener is stopped. :param target: The address(es) to subscribe to. :type target: Target :param batch_size: passed to the listener :type batch_size: int :param batch_timeout: passed to the listener :type batch_timeout: float :returns: None :raises: :py:exc:`MessagingException` """
[ "def", "listen", "(", "self", ",", "target", ",", "batch_size", ",", "batch_timeout", ")", ":" ]
https://github.com/openstack/oslo.messaging/blob/5d165cc713a98dbd650e9e6295d7966ce2919935/oslo_messaging/_drivers/base.py#L497-L543
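The contract above maps naturally onto a small skeleton. This is only a sketch of the shape a driver's listen() could take — the queue-backed listener and the _subscribe() transport hook are assumptions, not oslo.messaging internals:

import queue

class _QueuePollListener:
    """Stand-in for a base.PollStyleListener implementation."""
    def __init__(self):
        self._incoming = queue.Queue()

    def deliver(self, message):
        self._incoming.put(message)

    def poll(self, timeout=None):
        try:
            return [self._incoming.get(timeout=timeout)]
        except queue.Empty:
            return []

class SketchDriver:
    """Shape of a BaseDriver subclass; _subscribe is an assumed transport hook."""
    def _subscribe(self, exchange, address, callback):
        raise NotImplementedError  # a real driver wires this to its broker

    def listen(self, target, batch_size, batch_timeout):
        listener = _QueuePollListener()
        # One subscription for the exchange/topic address (covers fanout) ...
        self._subscribe(target.exchange, target.topic, listener.deliver)
        # ... and one for the server-specific address when target.server is set.
        if target.server:
            self._subscribe(target.exchange,
                            '%s.%s' % (target.topic, target.server),
                            listener.deliver)
        return listener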
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv/lib/python2.7/site-packages/pip/index.py
python
PackageFinder._find_url_name
(self, index_url, url_name, req)
return None
Finds the true URL name of a package, when the given name isn't quite correct. This is usually used to implement case-insensitivity.
Finds the true URL name of a package, when the given name isn't quite correct. This is usually used to implement case-insensitivity.
[ "Finds", "the", "true", "URL", "name", "of", "a", "package", "when", "the", "given", "name", "isn", "t", "quite", "correct", ".", "This", "is", "usually", "used", "to", "implement", "case", "-", "insensitivity", "." ]
def _find_url_name(self, index_url, url_name, req): """Finds the true URL name of a package, when the given name isn't quite correct. This is usually used to implement case-insensitivity.""" if not index_url.url.endswith('/'): # Vaguely part of the PyPI API... weird but true. ## FIXME: bad to modify this? index_url.url += '/' page = self._get_page(index_url, req) if page is None: logger.fatal('Cannot fetch index base URL %s' % index_url) return norm_name = normalize_name(req.url_name) for link in page.links: base = posixpath.basename(link.path.rstrip('/')) if norm_name == normalize_name(base): logger.notify('Real name of requirement %s is %s' % (url_name, base)) return base return None
[ "def", "_find_url_name", "(", "self", ",", "index_url", ",", "url_name", ",", "req", ")", ":", "if", "not", "index_url", ".", "url", ".", "endswith", "(", "'/'", ")", ":", "# Vaguely part of the PyPI API... weird but true.", "## FIXME: bad to modify this?", "index_url", ".", "url", "+=", "'/'", "page", "=", "self", ".", "_get_page", "(", "index_url", ",", "req", ")", "if", "page", "is", "None", ":", "logger", ".", "fatal", "(", "'Cannot fetch index base URL %s'", "%", "index_url", ")", "return", "norm_name", "=", "normalize_name", "(", "req", ".", "url_name", ")", "for", "link", "in", "page", ".", "links", ":", "base", "=", "posixpath", ".", "basename", "(", "link", ".", "path", ".", "rstrip", "(", "'/'", ")", ")", "if", "norm_name", "==", "normalize_name", "(", "base", ")", ":", "logger", ".", "notify", "(", "'Real name of requirement %s is %s'", "%", "(", "url_name", ",", "base", ")", ")", "return", "base", "return", "None" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv/lib/python2.7/site-packages/pip/index.py#L354-L371
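The case-insensitive match above hinges on normalize_name; a self-contained sketch of the comparison idea, using a PEP 503-style normalizer as a stand-in (old pip's exact helper may differ):

import re

def normalize_name(name):
    # PEP 503-style normalization, used here only as a stand-in.
    return re.sub(r'[-_.]+', '-', name).lower()

links = ['Django/', 'SQLAlchemy/', 'requests/']  # basenames scraped from an index page
requested = 'django'
real_name = next((b.rstrip('/') for b in links
                  if normalize_name(b.rstrip('/')) == normalize_name(requested)), None)
print(real_name)  # 'Django'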
kensho-technologies/graphql-compiler
4318443b7b2512a059f3616112bfc40bbf8eec06
graphql_compiler/schema_generation/graphql_schema.py
python
_get_fields_for_class
( schema_graph, graphql_types, field_type_overrides, hidden_classes, cls_name )
return result
Return a dict from field name to GraphQL field type, for the specified graph class.
Return a dict from field name to GraphQL field type, for the specified graph class.
[ "Return", "a", "dict", "from", "field", "name", "to", "GraphQL", "field", "type", "for", "the", "specified", "graph", "class", "." ]
def _get_fields_for_class( schema_graph, graphql_types, field_type_overrides, hidden_classes, cls_name ): """Return a dict from field name to GraphQL field type, for the specified graph class.""" properties = schema_graph.get_element_by_class_name(cls_name).properties # Add leaf GraphQL fields (class properties). all_properties = {} for property_name, property_obj in six.iteritems(properties): if re_name.match(property_name): all_properties[property_name] = property_obj.type else: warnings.warn( "Ignoring property {} of class {} with invalid name. " "Property names must match /{}/.".format(property_name, cls_name, re_name) ) collections_of_non_graphql_scalars = { property_name for property_name, graphql_type in six.iteritems(all_properties) if ( isinstance(strip_non_null_from_type(graphql_type), GraphQLList) and not isinstance(strip_non_null_from_type(graphql_type.of_type), GraphQLScalarType) ) } if len(collections_of_non_graphql_scalars) > 0: warnings.warn( "The fields {} of class {} were ignored since they are GraphQLLists of " "non-GraphQLScalarTypes. GraphQLLists of non-GraphQLScalarTypes are not " "currently supported in the GraphQLSchema.".format( collections_of_non_graphql_scalars, cls_name ) ) # Filter collections of non-GraphQLScalarTypes. They are currently not supported. result = { property_name: graphql_type for property_name, graphql_type in six.iteritems(all_properties) if property_name not in collections_of_non_graphql_scalars } # Add edge GraphQL fields. schema_element = schema_graph.get_element_by_class_name(cls_name) outbound_edges = ( ( "out_{}".format(out_edge_name), schema_graph.get_element_by_class_name(out_edge_name).base_out_connection, ) for out_edge_name in schema_element.out_connections ) inbound_edges = ( ( "in_{}".format(in_edge_name), schema_graph.get_element_by_class_name(in_edge_name).base_in_connection, ) for in_edge_name in schema_element.in_connections ) for field_name, to_type_name in chain(outbound_edges, inbound_edges): edge_endpoint_type_name = None subclasses = schema_graph.get_subclass_set(to_type_name) to_type_abstract = schema_graph.get_element_by_class_name(to_type_name).abstract if not to_type_abstract and len(subclasses) > 1: # If the edge endpoint type has no subclasses, it can't be coerced into any other # type. If the edge endpoint type is abstract (an interface type), we can already # coerce it to the proper type with a GraphQL fragment. However, if the endpoint # type is non-abstract and has subclasses, we need to return its subclasses as an # union type. This is because GraphQL fragments cannot be applied on concrete # types, and GraphQL does not support inheritance of concrete types. type_names_to_union = [ subclass for subclass in subclasses if subclass not in hidden_classes ] if type_names_to_union: edge_endpoint_type_name = _get_union_type_name(type_names_to_union) else: if to_type_name not in hidden_classes: edge_endpoint_type_name = to_type_name if edge_endpoint_type_name is not None: # If we decided to not hide this edge due to its endpoint type being # non-representable, represent the edge field as the GraphQL type # List(edge_endpoint_type_name). result[field_name] = GraphQLList(graphql_types[edge_endpoint_type_name]) for field_name, field_type in six.iteritems(field_type_overrides): if field_name not in result: raise AssertionError( 'Attempting to override field "{}" from class "{}", but the ' "class does not contain said field".format(field_name, cls_name) ) else: result[field_name] = field_type return result
[ "def", "_get_fields_for_class", "(", "schema_graph", ",", "graphql_types", ",", "field_type_overrides", ",", "hidden_classes", ",", "cls_name", ")", ":", "properties", "=", "schema_graph", ".", "get_element_by_class_name", "(", "cls_name", ")", ".", "properties", "# Add leaf GraphQL fields (class properties).", "all_properties", "=", "{", "}", "for", "property_name", ",", "property_obj", "in", "six", ".", "iteritems", "(", "properties", ")", ":", "if", "re_name", ".", "match", "(", "property_name", ")", ":", "all_properties", "[", "property_name", "]", "=", "property_obj", ".", "type", "else", ":", "warnings", ".", "warn", "(", "\"Ignoring property {} of class {} with invalid name. \"", "\"Property names must match /{}/.\"", ".", "format", "(", "property_name", ",", "cls_name", ",", "re_name", ")", ")", "collections_of_non_graphql_scalars", "=", "{", "property_name", "for", "property_name", ",", "graphql_type", "in", "six", ".", "iteritems", "(", "all_properties", ")", "if", "(", "isinstance", "(", "strip_non_null_from_type", "(", "graphql_type", ")", ",", "GraphQLList", ")", "and", "not", "isinstance", "(", "strip_non_null_from_type", "(", "graphql_type", ".", "of_type", ")", ",", "GraphQLScalarType", ")", ")", "}", "if", "len", "(", "collections_of_non_graphql_scalars", ")", ">", "0", ":", "warnings", ".", "warn", "(", "\"The fields {} of class {} were ignored since they are GraphQLLists of \"", "\"non-GraphQLScalarTypes. GraphQLLists of non-GraphQLScalarTypes are not \"", "\"currently supported in the GraphQLSchema.\"", ".", "format", "(", "collections_of_non_graphql_scalars", ",", "cls_name", ")", ")", "# Filter collections of non-GraphQLScalarTypes. They are currently not supported.", "result", "=", "{", "property_name", ":", "graphql_type", "for", "property_name", ",", "graphql_type", "in", "six", ".", "iteritems", "(", "all_properties", ")", "if", "property_name", "not", "in", "collections_of_non_graphql_scalars", "}", "# Add edge GraphQL fields.", "schema_element", "=", "schema_graph", ".", "get_element_by_class_name", "(", "cls_name", ")", "outbound_edges", "=", "(", "(", "\"out_{}\"", ".", "format", "(", "out_edge_name", ")", ",", "schema_graph", ".", "get_element_by_class_name", "(", "out_edge_name", ")", ".", "base_out_connection", ",", ")", "for", "out_edge_name", "in", "schema_element", ".", "out_connections", ")", "inbound_edges", "=", "(", "(", "\"in_{}\"", ".", "format", "(", "in_edge_name", ")", ",", "schema_graph", ".", "get_element_by_class_name", "(", "in_edge_name", ")", ".", "base_in_connection", ",", ")", "for", "in_edge_name", "in", "schema_element", ".", "in_connections", ")", "for", "field_name", ",", "to_type_name", "in", "chain", "(", "outbound_edges", ",", "inbound_edges", ")", ":", "edge_endpoint_type_name", "=", "None", "subclasses", "=", "schema_graph", ".", "get_subclass_set", "(", "to_type_name", ")", "to_type_abstract", "=", "schema_graph", ".", "get_element_by_class_name", "(", "to_type_name", ")", ".", "abstract", "if", "not", "to_type_abstract", "and", "len", "(", "subclasses", ")", ">", "1", ":", "# If the edge endpoint type has no subclasses, it can't be coerced into any other", "# type. If the edge endpoint type is abstract (an interface type), we can already", "# coerce it to the proper type with a GraphQL fragment. However, if the endpoint", "# type is non-abstract and has subclasses, we need to return its subclasses as an", "# union type. 
This is because GraphQL fragments cannot be applied on concrete", "# types, and GraphQL does not support inheritance of concrete types.", "type_names_to_union", "=", "[", "subclass", "for", "subclass", "in", "subclasses", "if", "subclass", "not", "in", "hidden_classes", "]", "if", "type_names_to_union", ":", "edge_endpoint_type_name", "=", "_get_union_type_name", "(", "type_names_to_union", ")", "else", ":", "if", "to_type_name", "not", "in", "hidden_classes", ":", "edge_endpoint_type_name", "=", "to_type_name", "if", "edge_endpoint_type_name", "is", "not", "None", ":", "# If we decided to not hide this edge due to its endpoint type being", "# non-representable, represent the edge field as the GraphQL type", "# List(edge_endpoint_type_name).", "result", "[", "field_name", "]", "=", "GraphQLList", "(", "graphql_types", "[", "edge_endpoint_type_name", "]", ")", "for", "field_name", ",", "field_type", "in", "six", ".", "iteritems", "(", "field_type_overrides", ")", ":", "if", "field_name", "not", "in", "result", ":", "raise", "AssertionError", "(", "'Attempting to override field \"{}\" from class \"{}\", but the '", "\"class does not contain said field\"", ".", "format", "(", "field_name", ",", "cls_name", ")", ")", "else", ":", "result", "[", "field_name", "]", "=", "field_type", "return", "result" ]
https://github.com/kensho-technologies/graphql-compiler/blob/4318443b7b2512a059f3616112bfc40bbf8eec06/graphql_compiler/schema_generation/graphql_schema.py#L91-L185
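The edge-field naming convention in the function above is easy to isolate: every edge class E contributes an out_E field on origin types and an in_E field on destination types, each typed as a list of the endpoint type. A toy sketch of just that naming step, detached from the real schema graph (the class names are hypothetical):

out_connections = ['Entity_Related']
in_connections = ['Entity_Related', 'Entity_ParentOf']

edge_fields = (['out_{}'.format(name) for name in out_connections] +
               ['in_{}'.format(name) for name in in_connections])
print(edge_fields)
# ['out_Entity_Related', 'in_Entity_Related', 'in_Entity_ParentOf']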
santi-pdp/pase
2a41e63e54fa8673efd12c16cdcdd5ad4f0f125e
pase/transforms.py
python
Additive.apply_IRS
(self, data, srate, nbits)
return data_filtered
Apply telephone handset BW [300, 3200] Hz
Apply telephone handset BW [300, 3200] Hz
[ "Apply", "telephone", "handset", "BW", "[", "300", "3200", "]", "Hz" ]
def apply_IRS(self, data, srate, nbits): """ Apply telephone handset BW [300, 3200] Hz """ raise NotImplementedError('Under construction!') from pyfftw.interfaces import scipy_fftpack as fftw n = data.shape[0] # find next pow of 2 which is greater or eq to n pow_of_2 = 2 ** (np.ceil(np.log2(n))) align_filter_dB = np.array([[0, -200], [50, -40], [100, -20], [125, -12], [160, -6], [200, 0], [250, 4], [300, 6], [350, 8], [400, 10], [500, 11], [600, 12], [700, 12], [800, 12], [1000, 12], [1300, 12], [1600, 12], [2000, 12], [2500, 12], [3000, 12], [3250, 12], [3500, 4], [4000, -200], [5000, -200], [6300, -200], [8000, -200]]) print('align filter dB shape: ', align_filter_dB.shape) num_of_points, trivial = align_filter_dB.shape overallGainFilter = interp1d(align_filter_dB[:, 0], align_filter[:, 1], 1000) x = np.zeros((pow_of_2)) x[:data.shape[0]] = data x_fft = fftw.fft(x, pow_of_2) freq_resolution = srate / pow_of_2 factorDb = interp1d(align_filter_dB[:, 0], align_filter_dB[:, 1], list(range(0, (pow_of_2 / 2) + 1) * \ freq_resolution)) - \ overallGainFilter factor = 10 ** (factorDb / 20) factor = [factor, np.fliplr(factor[1:(pow_of_2 / 2 + 1)])] x_fft = x_fft * factor y = fftw.ifft(x_fft, pow_of_2) data_filtered = y[:n] return data_filtered
[ "def", "apply_IRS", "(", "self", ",", "data", ",", "srate", ",", "nbits", ")", ":", "raise", "NotImplementedError", "(", "'Under construction!'", ")", "from", "pyfftw", ".", "interfaces", "import", "scipy_fftpack", "as", "fftw", "n", "=", "data", ".", "shape", "[", "0", "]", "# find next pow of 2 which is greater or eq to n", "pow_of_2", "=", "2", "**", "(", "np", ".", "ceil", "(", "np", ".", "log2", "(", "n", ")", ")", ")", "align_filter_dB", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "200", "]", ",", "[", "50", ",", "-", "40", "]", ",", "[", "100", ",", "-", "20", "]", ",", "[", "125", ",", "-", "12", "]", ",", "[", "160", ",", "-", "6", "]", ",", "[", "200", ",", "0", "]", ",", "[", "250", ",", "4", "]", ",", "[", "300", ",", "6", "]", ",", "[", "350", ",", "8", "]", ",", "[", "400", ",", "10", "]", ",", "[", "500", ",", "11", "]", ",", "[", "600", ",", "12", "]", ",", "[", "700", ",", "12", "]", ",", "[", "800", ",", "12", "]", ",", "[", "1000", ",", "12", "]", ",", "[", "1300", ",", "12", "]", ",", "[", "1600", ",", "12", "]", ",", "[", "2000", ",", "12", "]", ",", "[", "2500", ",", "12", "]", ",", "[", "3000", ",", "12", "]", ",", "[", "3250", ",", "12", "]", ",", "[", "3500", ",", "4", "]", ",", "[", "4000", ",", "-", "200", "]", ",", "[", "5000", ",", "-", "200", "]", ",", "[", "6300", ",", "-", "200", "]", ",", "[", "8000", ",", "-", "200", "]", "]", ")", "print", "(", "'align filter dB shape: '", ",", "align_filter_dB", ".", "shape", ")", "num_of_points", ",", "trivial", "=", "align_filter_dB", ".", "shape", "overallGainFilter", "=", "interp1d", "(", "align_filter_dB", "[", ":", ",", "0", "]", ",", "align_filter", "[", ":", ",", "1", "]", ",", "1000", ")", "x", "=", "np", ".", "zeros", "(", "(", "pow_of_2", ")", ")", "x", "[", ":", "data", ".", "shape", "[", "0", "]", "]", "=", "data", "x_fft", "=", "fftw", ".", "fft", "(", "x", ",", "pow_of_2", ")", "freq_resolution", "=", "srate", "/", "pow_of_2", "factorDb", "=", "interp1d", "(", "align_filter_dB", "[", ":", ",", "0", "]", ",", "align_filter_dB", "[", ":", ",", "1", "]", ",", "list", "(", "range", "(", "0", ",", "(", "pow_of_2", "/", "2", ")", "+", "1", ")", "*", "freq_resolution", ")", ")", "-", "overallGainFilter", "factor", "=", "10", "**", "(", "factorDb", "/", "20", ")", "factor", "=", "[", "factor", ",", "np", ".", "fliplr", "(", "factor", "[", "1", ":", "(", "pow_of_2", "/", "2", "+", "1", ")", "]", ")", "]", "x_fft", "=", "x_fft", "*", "factor", "y", "=", "fftw", ".", "ifft", "(", "x_fft", ",", "pow_of_2", ")", "data_filtered", "=", "y", "[", ":", "n", "]", "return", "data_filtered" ]
https://github.com/santi-pdp/pase/blob/2a41e63e54fa8673efd12c16cdcdd5ad4f0f125e/pase/transforms.py#L1869-L1910
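Note on the apply_IRS record: the function is explicitly flagged 'Under construction!' upstream. For reference, the frequency-domain weighting it sketches (interpolate a dB gain curve over the FFT bins, scale, inverse-transform) can be written as a minimal standalone sketch; the helper name and the plain numpy/scipy calls below are illustrative assumptions, not pase's API:

import numpy as np
from scipy.interpolate import interp1d

def weight_spectrum(data, srate, curve_hz, curve_db):
    # Hypothetical helper (not part of pase): weight a real signal's
    # spectrum by a gain curve given in dB at the listed frequencies.
    n = data.shape[0]
    nfft = int(2 ** np.ceil(np.log2(n)))          # next power of two >= n
    spec = np.fft.rfft(data, nfft)                # positive-frequency bins only
    freqs = np.fft.rfftfreq(nfft, d=1.0 / srate)
    gain_db = interp1d(curve_hz, curve_db,
                       bounds_error=False, fill_value=-200.0)(freqs)
    spec *= 10.0 ** (gain_db / 20.0)              # dB -> linear amplitude
    return np.fft.irfft(spec, nfft)[:n]

Using rfft/irfft sidesteps the mirrored negative-frequency bookkeeping that the original sketch handles by hand.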
pyeve/eve-sqlalchemy
d5c8081457a32a35a1abd6cee4bfa9420f4e05af
eve_sqlalchemy/parser.py
python
parse
(expression, model)
return v.sqla_query
Given a python-like conditional statement, returns the equivalent SQLAlchemy-like query expression. Conditional and boolean operators (==, <=, >=, !=, >, <) are supported.
Given a python-like conditional statement, returns the equivalent SQLAlchemy-like query expression. Conditional and boolean operators (==, <=, >=, !=, >, <) are supported.
[ "Given", "a", "python", "-", "like", "conditional", "statement", "returns", "the", "equivalent", "SQLAlchemy", "-", "like", "query", "expression", ".", "Conditional", "and", "boolean", "operators", "(", "==", "<", "=", ">", "=", "!", "=", ">", "<", ")", "are", "supported", "." ]
def parse(expression, model):
    """
    Given a python-like conditional statement, returns the equivalent
    SQLAlchemy-like query expression. Conditional and boolean operators
    (==, <=, >=, !=, >, <) are supported.
    """
    v = SQLAVisitor(model)
    try:
        parsed_expr = ast.parse(expression)
    except SyntaxError:
        raise ParseError("Can't parse expression '{0}'".format(expression))

    v.visit(parsed_expr)
    return v.sqla_query
[ "def", "parse", "(", "expression", ",", "model", ")", ":", "v", "=", "SQLAVisitor", "(", "model", ")", "try", ":", "parsed_expr", "=", "ast", ".", "parse", "(", "expression", ")", "except", "SyntaxError", ":", "raise", "ParseError", "(", "\"Can't parse expression '{0}'\"", ".", "format", "(", "expression", ")", ")", "v", ".", "visit", "(", "parsed_expr", ")", "return", "v", ".", "sqla_query" ]
https://github.com/pyeve/eve-sqlalchemy/blob/d5c8081457a32a35a1abd6cee4bfa9420f4e05af/eve_sqlalchemy/parser.py#L128-L141
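Usage sketch for parse: the People model and the filter string below are hypothetical, and ParseError is assumed importable from the same module the raise statement points at:

from eve_sqlalchemy.parser import parse, ParseError
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class People(Base):  # hypothetical model, only here to make the demo run
    __tablename__ = 'people'
    id = Column(Integer, primary_key=True)
    prename = Column(String)
    age = Column(Integer)

try:
    # Returns the SQLAlchemy filter expressions collected by SQLAVisitor,
    # roughly equivalent to [People.prename == 'john', People.age > 30].
    filters = parse("prename == 'john' and age > 30", People)
except ParseError as exc:
    # Raised when the expression is not valid Python syntax.
    print('cannot parse filter:', exc)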
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/clipboard.py
python
Provider.text_available
(self)
return False
Check if clipboard with text is available.
Check if clipboard with text is available.
[ "Check", "if", "clipboard", "with", "text", "is", "available", "." ]
def text_available(self):
    """Check if clipboard with text is available."""
    return False
[ "def", "text_available", "(", "self", ")", ":", "return", "False" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/clipboard.py#L100-L102
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/lib2to3/fixes/fix_urllib.py
python
build_pattern
()
[]
def build_pattern():
    bare = set()
    for old_module, changes in MAPPING.items():
        for change in changes:
            new_module, members = change
            members = alternates(members)
            yield """import_name< 'import' (module=%r
                                  | dotted_as_names< any* module=%r any* >) >
                  """ % (old_module, old_module)
            yield """import_from< 'from' mod_member=%r 'import'
                       ( member=%s | import_as_name< member=%s 'as' any > |
                         import_as_names< members=any* >) >
                  """ % (old_module, members, members)
            yield """import_from< 'from' module_star=%r 'import' star='*' >
                  """ % old_module
            yield """import_name< 'import'
                                  dotted_as_name< module_as=%r 'as' any > >
                  """ % old_module
            # bare_with_attr has a special significance for FixImports.match().
            yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
                  """ % (old_module, members)
[ "def", "build_pattern", "(", ")", ":", "bare", "=", "set", "(", ")", "for", "old_module", ",", "changes", "in", "MAPPING", ".", "items", "(", ")", ":", "for", "change", "in", "changes", ":", "new_module", ",", "members", "=", "change", "members", "=", "alternates", "(", "members", ")", "yield", "\"\"\"import_name< 'import' (module=%r\n | dotted_as_names< any* module=%r any* >) >\n \"\"\"", "%", "(", "old_module", ",", "old_module", ")", "yield", "\"\"\"import_from< 'from' mod_member=%r 'import'\n ( member=%s | import_as_name< member=%s 'as' any > |\n import_as_names< members=any* >) >\n \"\"\"", "%", "(", "old_module", ",", "members", ",", "members", ")", "yield", "\"\"\"import_from< 'from' module_star=%r 'import' star='*' >\n \"\"\"", "%", "old_module", "yield", "\"\"\"import_name< 'import'\n dotted_as_name< module_as=%r 'as' any > >\n \"\"\"", "%", "old_module", "# bare_with_attr has a special significance for FixImports.match().", "yield", "\"\"\"power< bare_with_attr=%r trailer< '.' member=%s > any* >\n \"\"\"", "%", "(", "old_module", ",", "members", ")" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/lib2to3/fixes/fix_urllib.py#L48-L68
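build_pattern is a generator over the module-level MAPPING, so the simplest way to inspect the lib2to3 match patterns it produces is to print them; this driver uses only the public import path shown in the record:

from lib2to3.fixes.fix_urllib import build_pattern

# Each yielded string is one lib2to3 pattern: plain import, from-import,
# star import, aliased import, or bare attribute access (bare_with_attr).
for pattern in build_pattern():
    print(pattern)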
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/plat-mac/Carbon/Dragconst.py
python
FOUR_CHAR_CODE
(x)
return x
[]
def FOUR_CHAR_CODE(x): return x
[ "def", "FOUR_CHAR_CODE", "(", "x", ")", ":", "return", "x" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/plat-mac/Carbon/Dragconst.py#L3-L3
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
windows/native_exec/simple_x64.py
python
BitArray.from_int
(cls, size, x)
return cls(size, bin(x)[2:])
[]
def from_int(cls, size, x):
    if x < 0:
        x = x & ((2 ** size) - 1)
    return cls(size, bin(x)[2:])
[ "def", "from_int", "(", "cls", ",", "size", ",", "x", ")", ":", "if", "x", "<", "0", ":", "x", "=", "x", "&", "(", "(", "2", "**", "size", ")", "-", "1", ")", "return", "cls", "(", "size", ",", "bin", "(", "x", ")", "[", "2", ":", "]", ")" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/windows/native_exec/simple_x64.py#L79-L82
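The if x < 0 branch folds negative inputs into the unsigned two's-complement range before bin() runs; the arithmetic checks out standalone (the BitArray import path is inferred from the record's path field):

# from windows.native_exec.simple_x64 import BitArray  # inferred, unverified

size, x = 8, -1
masked = x & ((2 ** size) - 1)        # -1 -> 255 in 8-bit two's complement
assert masked == 255
assert bin(masked)[2:] == '11111111'  # the bit string handed to cls(size, ...)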
KhronosGroup/OpenXR-SDK-Source
76756e2e7849b15466d29bee7d80cada92865550
external/python/jinja2/filters.py
python
ignore_case
(value)
return value.lower() if isinstance(value, string_types) else value
For use as a postprocessor for :func:`make_attrgetter`. Converts strings to lowercase and returns other types as-is.
For use as a postprocessor for :func:`make_attrgetter`. Converts strings to lowercase and returns other types as-is.
[ "For", "use", "as", "a", "postprocessor", "for", ":", "func", ":", "make_attrgetter", ".", "Converts", "strings", "to", "lowercase", "and", "returns", "other", "types", "as", "-", "is", "." ]
def ignore_case(value):
    """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
    to lowercase and returns other types as-is."""
    return value.lower() if isinstance(value, string_types) else value
[ "def", "ignore_case", "(", "value", ")", ":", "return", "value", ".", "lower", "(", ")", "if", "isinstance", "(", "value", ",", "string_types", ")", "else", "value" ]
https://github.com/KhronosGroup/OpenXR-SDK-Source/blob/76756e2e7849b15466d29bee7d80cada92865550/external/python/jinja2/filters.py#L56-L59
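Behavior check for ignore_case (the import assumes the vendored copy resolves as plain jinja2):

from jinja2.filters import ignore_case

assert ignore_case('FOO') == 'foo'  # strings are lowercased
assert ignore_case(42) == 42        # non-strings pass through unchanged

Handed to make_attrgetter as a postprocessor, this is what lets filters such as sort(attribute='name') compare case-insensitively.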