Dataset schema:

| Column | Type | Lengths / values |
| --- | --- | --- |
| repo | string | 7 to 54 chars |
| path | string | 4 to 192 chars |
| url | string | 87 to 284 chars |
| code | string | 78 to 104k chars |
| code_tokens | sequence | tokenized form of `code` |
| docstring | string | 1 to 46.9k chars |
| docstring_tokens | sequence | tokenized form of `docstring` |
| language | string | 1 class |
| partition | string | 3 values |

Each record below gives, in order: repo, path, url, code, language, and partition.
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/commands/DeleteInstance.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/DeleteInstance.py#L47-L64
```python
def DeleteInstance(self, si, logger, session, vcenter_data_model, vm_uuid, vm_name):
    """
    :param logger:
    :param CloudShellAPISession session:
    :param str vm_name: This is the resource name
    :return:
    """
    # find vm
    vm = self.pv_service.find_by_uuid(si, vm_uuid)
    if vm is not None:
        # destroy vm
        result = self.pv_service.destroy_vm(vm=vm, logger=logger)
    else:
        resource___format = "Could not find the VM {0}, will remove the resource.".format(vm_name)
        logger.info(resource___format)
        result = resource___format
    return result
```
python
train
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L362-L367
```python
def tag(self, layer):
    """Tag the annotations of given layer. It can automatically tag any built-in layer type."""
    mapping = self.layer_tagger_mapping
    if layer in mapping:
        mapping[layer]()
    return self
```
python
train
senaite/senaite.core
bika/lims/vocabularies/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/vocabularies/__init__.py#L466-L504
```python
def getStickerTemplates(filter_by_type=False):
    """ Returns an array with the sticker templates available. Retrieves the
    TAL templates saved in templates/stickers folder.

    Each array item is a dictionary with the following structure:
        {'id': <template_id>, 'title': <template_title>}

    If the template lives outside the bika.lims add-on, both the template_id
    and template_title include a prefix that matches with the add-on
    identifier. template_title is the same name as the id, but with
    whitespaces and without extension.

    As an example, for a template from the my.product add-on located in
    templates/stickers, and with a filename "EAN128_default_small.pt", the
    dictionary will look like:
        {'id': 'my.product:EAN128_default_small.pt',
         'title': 'my.product: EAN128 default small'}

    If filter by type is given in the request, only the templates under the
    path with the type name will be rendered given as vocabulary.
    Example: If filter_by_type=='worksheet', only *.tp files under a folder
    with this name will be displayed.

    :param filter_by_type:
    :type filter_by_type: string/bool.
    :returns: an array with the sticker templates available
    """
    # Retrieve the templates from bika.lims add-on
    # resdirname
    resdirname = 'stickers'
    if filter_by_type:
        bikalims_path = os.path.join(
            "browser", "templates", resdirname, filter_by_type)
    else:
        bikalims_path = os.path.join("browser", "templates", resdirname)

    # getTemplates needs two parameters, the first one is the bikalims path
    # where the stickers will be found. The second one is the resource
    # directory type. This allows us to filter stickers by the type we want.
    return getTemplates(bikalims_path, resdirname, filter_by_type)
```
python
train
jamieleshaw/lurklib
lurklib/channel.py
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/channel.py#L359-L378
```python
def invite(self, channel, nick):
    """
    Invite someone to a channel.
    Required arguments:
    * channel - Channel to invite them to.
    * nick - Nick to invite.
    """
    with self.lock:
        self.is_in_channel(channel)

        self.send('INVITE %s %s' % (nick, channel))
        while self.readable():
            msg = self._recv(expected_replies=('341', '301'))
            if msg[0] == '341':
                pass
            elif msg[0] == '301':
                away_msg = msg[2].split()[1].replace(':', '', 1)
                return 'AWAY', away_msg
```
python
train
ThreatConnect-Inc/tcex
tcex/tcex_playbook.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_playbook.py#L877-L907
```python
def read_string(self, key, embedded=True):
    """Read method of CRUD operation for string data.

    Args:
        key (string): The variable to read from the DB.
        embedded (boolean): Resolve embedded variables.

    Returns:
        (string): Results retrieved from DB.
    """
    data = None
    if key is not None:
        key_type = self.variable_type(key)
        data = self.db.read(key.strip())
        if data is not None:
            # handle improperly saved string
            try:
                data = json.loads(data)
                if embedded:
                    data = self.read_embedded(data, key_type)
                if data is not None:
                    # reverted the previous change where data was encoded due to issues where
                    # it broke the operator method in py3 (e.g. b'1' ne '1').
                    # data = str(data)
                    data = u'{}'.format(data)
            except ValueError as e:
                err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)
                self.tcex.log.error(err)
    else:
        self.tcex.log.warning(u'The key field was None.')
    return data
```
python
train
chimera0/accel-brain-code
Algorithmic-Composition/pycomposer/noisesampler/bar_noise_sampler.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Algorithmic-Composition/pycomposer/noisesampler/bar_noise_sampler.py#L54-L82
```python
def generate(self):
    '''
    Generate noise samples.

    Returns:
        `np.ndarray` of samples.
    '''
    sampled_arr = np.zeros((self.__batch_size, self.__channel, self.__seq_len, self.__dim))
    for batch in range(self.__batch_size):
        for i in range(len(self.__program_list)):
            program_key = self.__program_list[i]
            key = np.random.randint(low=0, high=len(self.__midi_df_list))
            midi_df = self.__midi_df_list[key]
            midi_df = midi_df[midi_df.program == program_key]
            if midi_df.shape[0] < self.__seq_len:
                continue
            row = np.random.uniform(
                low=midi_df.start.min(),
                high=midi_df.end.max() - (self.__seq_len * self.__time_fraction)
            )
            for seq in range(self.__seq_len):
                start = row + (seq * self.__time_fraction)
                end = row + ((seq + 1) * self.__time_fraction)
                df = midi_df[(start <= midi_df.start) & (midi_df.start <= end)]
                sampled_arr[batch, i, seq] = self.__convert_into_feature(df)
    return sampled_arr
```
python
train
pytroll/posttroll
posttroll/subscriber.py
https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/subscriber.py#L233-L242
```python
def close(self):
    """Close the subscriber: stop it and close the local subscribers.
    """
    self.stop()
    for sub in list(self.subscribers) + self._hooks:
        try:
            sub.setsockopt(LINGER, 1)
            sub.close()
        except ZMQError:
            pass
```
python
train
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L203-L221
```python
def set_input_score_start_range(self, score):
    """Sets the input score start range.

    arg:    score (decimal): the new start range
    raise:  InvalidArgument - ``score`` is invalid
    raise:  NoAccess - ``range`` cannot be modified
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
    if self.get_input_score_start_range_metadata().is_read_only():
        raise errors.NoAccess()
    try:
        score = float(score)
    except ValueError:
        raise errors.InvalidArgument()
    if not self._is_valid_decimal(score, self.get_input_score_start_range_metadata()):
        raise errors.InvalidArgument()
    self._my_map['inputScoreStartRange'] = score
```
python
train
serge-sans-paille/pythran
pythran/passmanager.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/passmanager.py#L80-L91
```python
def prepare(self, node):
    '''Gather analysis result required by this analysis'''
    if isinstance(node, ast.Module):
        self.ctx.module = node
    elif isinstance(node, ast.FunctionDef):
        self.ctx.function = node

    for D in self.deps:
        d = D()
        d.attach(self.passmanager, self.ctx)
        result = d.run(node)
        setattr(self, uncamel(D.__name__), result)
```
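The pattern here — instantiate each declared dependency, run it on the node, and expose its result as an attribute named after the pass — can be sketched in isolation. A minimal sketch: the `CountCalls` and `NeedsCalls` classes and this `uncamel` helper are illustrative stand-ins, not pythran's own implementations.

```python
import ast
import re


def uncamel(name):
    # CamelCase -> snake_case, e.g. 'CountCalls' -> 'count_calls'
    return re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', name).lower()


class CountCalls(ast.NodeVisitor):
    """Toy analysis: count Call nodes in a tree."""

    def run(self, node):
        self.count = 0
        self.visit(node)
        return self.count

    def visit_Call(self, node):
        self.count += 1
        self.generic_visit(node)


class NeedsCalls:
    deps = (CountCalls,)

    def prepare(self, node):
        # Same flow as above: run each dependency, store its result
        # under the pass's snake_case name.
        for D in self.deps:
            result = D().run(node)
            setattr(self, uncamel(D.__name__), result)


tree = ast.parse("print(len('abc'))")
analysis = NeedsCalls()
analysis.prepare(tree)
print(analysis.count_calls)  # 2 (the print call and the len call)
```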
python
train
fhs/pyhdf
pyhdf/SD.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2344-L2426
```python
def getrange(self):
    """Retrieve the dataset min and max values.

    Args::

      no argument

    Returns::

      (min, max) tuple (attribute 'valid_range')

      Note that those are the values as stored by the 'setrange'
      method. 'getrange' does *NOT* compute the min and max from
      the current dataset contents.

    An exception is raised if the range is not set.

    The range returned by 'getrange' is part of the so-called
    "standard" SDS attributes. It corresponds to the following
    attribute::

      valid_range

    C library equivalent: SDgetrange
    """
    # Obtain SDS data type.
    try:
        sds_name, rank, dim_sizes, data_type, n_attrs = \
            self.info()
    except HDF4Error:
        raise HDF4Error('getrange : invalid SDS identifier')
    n_values = 1
    convert = _array_to_ret
    if data_type == SDC.CHAR8:
        buf1 = _C.array_byte(n_values)
        buf2 = _C.array_byte(n_values)
        convert = _array_to_str
    elif data_type in [SDC.UCHAR8, SDC.UINT8]:
        buf1 = _C.array_byte(n_values)
        buf2 = _C.array_byte(n_values)
    elif data_type == SDC.INT8:
        buf1 = _C.array_int8(n_values)
        buf2 = _C.array_int8(n_values)
    elif data_type == SDC.INT16:
        buf1 = _C.array_int16(n_values)
        buf2 = _C.array_int16(n_values)
    elif data_type == SDC.UINT16:
        buf1 = _C.array_uint16(n_values)
        buf2 = _C.array_uint16(n_values)
    elif data_type == SDC.INT32:
        buf1 = _C.array_int32(n_values)
        buf2 = _C.array_int32(n_values)
    elif data_type == SDC.UINT32:
        buf1 = _C.array_uint32(n_values)
        buf2 = _C.array_uint32(n_values)
    elif data_type == SDC.FLOAT32:
        buf1 = _C.array_float32(n_values)
        buf2 = _C.array_float32(n_values)
    elif data_type == SDC.FLOAT64:
        buf1 = _C.array_float64(n_values)
        buf2 = _C.array_float64(n_values)
    else:
        # 'data_type' is the value to report here ('data' was undefined)
        raise HDF4Error("getrange: SDS has an illegal or "
                        "unsupported type %d" % data_type)

    # Note: The C routine returns the max in buf1 and the min
    # in buf2. We swap the values returned by the Python
    # interface, since it is more natural to return
    # min first, then max.
    status = _C.SDgetrange(self._id, buf1, buf2)
    _checkErr('getrange', status, 'range not set')
    return convert(buf2, n_values), convert(buf1, n_values)
```
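A hedged usage sketch: it assumes an HDF4 file `sample.hdf` with a dataset named `temperature` whose `valid_range` attribute has been set; both names are placeholders.

```python
from pyhdf.SD import SD, SDC
from pyhdf.error import HDF4Error

sd = SD('sample.hdf', SDC.READ)   # hypothetical file
sds = sd.select('temperature')    # hypothetical dataset name
try:
    lo, hi = sds.getrange()       # (min, max), i.e. the valid_range attribute
    print('valid_range:', lo, hi)
except HDF4Error:
    print('valid_range attribute not set on this dataset')
sds.endaccess()
sd.end()
```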
python
train
Unidata/siphon
siphon/cdmr/ncstream.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/cdmr/ncstream.py#L373-L397
```python
def unpack_attribute(att):
    """Unpack an embedded attribute into a python or numpy object."""
    if att.unsigned:
        log.warning('Unsupported unsigned attribute!')

    # TDS 5.0 now has a dataType attribute that takes precedence
    if att.len == 0:  # Empty
        val = None
    elif att.dataType == stream.STRING:  # Then look for new datatype string
        val = att.sdata
    elif att.dataType:  # Then a non-zero new data type
        val = np.frombuffer(att.data,
                            dtype='>' + _dtypeLookup[att.dataType], count=att.len)
    elif att.type:  # Then non-zero old-data type0
        val = np.frombuffer(att.data,
                            dtype=_attrConverters[att.type], count=att.len)
    elif att.sdata:  # This leaves both 0, try old string
        val = att.sdata
    else:  # Assume new datatype is Char (0)
        val = np.array(att.data, dtype=_dtypeLookup[att.dataType])

    if att.len == 1:
        val = val[0]

    return att.name, val
```
python
train
Kortemme-Lab/klab
klab/bio/fragments/hpc/SGE.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fragments/hpc/SGE.py#L150-L210
```python
def query(logfile, jobID=None):
    """If jobID is an integer then return False if the job has finished and
    True if it is still running. Otherwise, returns a table of jobs run by
    the user."""
    joblist = logfile.readFromLogfile()
    if jobID and type(jobID) == type(1):
        command = ['qstat', '-j', str(jobID)]
    else:
        command = ['qstat']
    processoutput = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    output = processoutput[0]
    serror = processoutput[1]

    # Form command
    jobs = {}

    if type(jobID) == type(1):
        if serror.find("Following jobs do not exist") != -1:
            return False
        else:
            return True

    if not output.strip():
        colorprinter.message("No jobs running at present.")
    output = output.strip().split("\n")
    if len(output) > 2:
        for line in output[2:]:
            # We assume that our script names contain no spaces for the parsing below to work
            tokens = line.split()
            jid = int(tokens[0])
            jobstate = tokens[4]

            details = {
                "jobid": jid,
                "prior": tokens[1],
                "name": tokens[2],
                "user": tokens[3],
                "state": jobstate,
                "submit/start at": "%s %s" % (tokens[5], tokens[6]),
            }
            jataskID = 0
            if jobstate == "r":
                details["queue"] = tokens[7]
                details["slots"] = tokens[8]
            elif jobstate == "qw":
                details["slots"] = tokens[7]
                if len(tokens) >= 9:
                    jataskID = tokens[8]
                    details["ja-task-ID"] = jataskID

            if len(tokens) > 9:
                jataskID = tokens[9]
                details["ja-task-ID"] = jataskID

            jobs[jid] = jobs.get(jid) or {}
            jobs[jid][jataskID] = details

            if joblist.get(jid):
                jobdir = joblist[jid]["Directory"]
                jobtime = joblist[jid]["TimeInSeconds"]
                colorprinter.message("Job %d submitted %d minutes ago. Status: '%s'. Destination directory: %s." % (jid, jobtime / 60, jobstate, jobdir))
            else:
                colorprinter.message("Job %d submitted at %s %s. Status: '%s'. Destination directory unknown." % (jid, tokens[5], tokens[6], jobstate))
    return True
```
python
train
saltstack/salt
salt/modules/bsd_shadow.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bsd_shadow.py#L178-L216
```python
def set_password(name, password):
    '''
    Set the password for a named user. The password must be a properly defined
    hash. The password hash can be generated with this command:

    ``python -c "import crypt; print crypt.crypt('password', ciphersalt)"``

    .. note::
        When constructing the ``ciphersalt`` string, you must escape any
        dollar signs, to avoid them being interpolated by the shell.

    ``'password'`` is, of course, the password for which you want to generate
    a hash.

    ``ciphersalt`` is a combination of a cipher identifier, an optional number
    of rounds, and the cryptographic salt. The arrangement and format of these
    fields depends on the cipher and which flavor of BSD you are using. For
    more information on this, see the manpage for ``crypt(3)``. On NetBSD,
    additional information is available in ``passwd.conf(5)``.

    It is important to make sure that a supported cipher is used.

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.set_password someuser '$1$UYCIxa628.9qXjpQCjM4a..'
    '''
    if __grains__.get('os', '') == 'FreeBSD':
        cmd = ['pw', 'user', 'mod', name, '-H', '0']
        stdin = password
    else:
        cmd = ['usermod', '-p', password, name]
        stdin = None
    __salt__['cmd.run'](cmd,
                        stdin=stdin,
                        output_loglevel='quiet',
                        python_shell=False)
    return info(name)['passwd'] == password
```
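For generating the hash on Python 3 (the docstring's one-liner is Python 2), a sketch using the stdlib to build the salt. Note the `crypt` module is deprecated since Python 3.11 and removed in 3.13, and supported methods vary by platform.

```python
import crypt

# Build a SHA-512 ciphersalt ('$6$...') and hash the password with it.
# On BSDs the supported methods differ; inspect crypt.methods first.
salt = crypt.mksalt(crypt.METHOD_SHA512)
hashed = crypt.crypt('hunter2', salt)
print(hashed)  # e.g. '$6$<salt>$<hash>' - the value to pass to shadow.set_password
```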
python
train
wylee/runcommands
runcommands/util/string.py
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/util/string.py#L4-L42
```python
def camel_to_underscore(name):
    """Convert camel case name to underscore name.

    Examples::

        >>> camel_to_underscore('HttpRequest')
        'http_request'
        >>> camel_to_underscore('httpRequest')
        'http_request'
        >>> camel_to_underscore('HTTPRequest')
        'http_request'
        >>> camel_to_underscore('myHTTPRequest')
        'my_http_request'
        >>> camel_to_underscore('MyHTTPRequest')
        'my_http_request'
        >>> camel_to_underscore('my_http_request')
        'my_http_request'
        >>> camel_to_underscore('MyHTTPRequestXYZ')
        'my_http_request_xyz'
        >>> camel_to_underscore('_HTTPRequest')
        '_http_request'
        >>> camel_to_underscore('Request')
        'request'
        >>> camel_to_underscore('REQUEST')
        'request'
        >>> camel_to_underscore('_Request')
        '_request'
        >>> camel_to_underscore('__Request')
        '__request'
        >>> camel_to_underscore('_request')
        '_request'
        >>> camel_to_underscore('Request_')
        'request_'

    """
    name = re.sub(r'(?<!\b)(?<!_)([A-Z][a-z])', r'_\1', name)
    name = re.sub(r'(?<!\b)(?<!_)([a-z])([A-Z])', r'\1_\2', name)
    name = name.lower()
    return name
```
python
train
sods/ods
pods/assesser.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/assesser.py#L211-L228
```python
def marksheet(self):
    """Returns an empty pandas DataFrame containing rows and columns for
    marking. This can then be passed to a google doc that is distributed
    to markers for editing with the mark for each section."""
    columns = ['Number', 'Question', 'Correct (a fraction)', 'Max Mark', 'Comments']
    mark_sheet = pd.DataFrame()
    for qu_number, question in enumerate(self.answers):
        part_no = 0
        for number, part in enumerate(question):
            if number > 0:
                if part[2] > 0:
                    part_no += 1
                    index = str(qu_number + 1) + '_' + str(part_no)
                    frame = pd.DataFrame(columns=columns, index=[index])
                    frame.loc[index]['Number'] = index
                    frame.loc[index]['Question'] = part[0]
                    frame.loc[index]['Max Mark'] = part[2]
                    # DataFrame.append and DataFrame.sort are gone from
                    # current pandas; concat and sort_values replace them.
                    mark_sheet = pd.concat([mark_sheet, frame])
    return mark_sheet.sort_values(by='Number')
```
python
train
ASMfreaK/yandex_weather_api
yandex_weather_api/types.py
https://github.com/ASMfreaK/yandex_weather_api/blob/d58ad80f7389dc3b58c721bb42c2441e9ff3e351/yandex_weather_api/types.py#L60-L66
```python
def validate(cls, cnd):
    "Check that the given object is one of the allowed `VALUES`."
    if cnd not in cls.VALUES:
        raise ValueError("Value {} cannot be used in {}".format(
            cnd, cls
        ))
    return cls(cnd)
```
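The validate-against-`VALUES` pattern in isolation, as a minimal sketch: the `Condition` class and its values are made up for illustration and are not yandex_weather_api's own types.

```python
class Condition(str):
    VALUES = ('clear', 'cloudy', 'overcast')

    @classmethod
    def validate(cls, cnd):
        "Check that the given object is one of the allowed `VALUES`."
        if cnd not in cls.VALUES:
            raise ValueError("Value {} cannot be used in {}".format(cnd, cls))
        return cls(cnd)  # subclassing str makes cls(cnd) a cheap wrapper


print(Condition.validate('cloudy'))  # 'cloudy'
# Condition.validate('foggy') would raise ValueError
```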
python
train
timkpaine/pyEX
pyEX/alternative.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/alternative.py#L38-L58
```python
def sentiment(symbol, type='daily', date=None, token='', version=''):
    '''This endpoint provides social sentiment data from StockTwits. Data can be viewed as a daily value, or by minute for a given date.

    https://iexcloud.io/docs/api/#social-sentiment
    Continuous

    Args:
        symbol (string); Ticker to request
        type (string); 'daily' or 'minute'
        date (string); date in YYYYMMDD or datetime
        token (string); Access token
        version (string); API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    if date:
        date = _strOrDate(date)
        return _getJson('stock/{symbol}/sentiment/{type}/{date}'.format(symbol=symbol, type=type, date=date), token, version)
    return _getJson('stock/{symbol}/sentiment/{type}/'.format(symbol=symbol, type=type), token, version)
```
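A hedged call sketch: it assumes a pyEX install, network access, and a valid IEX Cloud token (the token string below is a placeholder), and imports the function from the module path shown in this record.

```python
from pyEX.alternative import sentiment

TOKEN = 'pk_your_token_here'  # placeholder, not a real token

# Daily StockTwits sentiment for a ticker.
daily = sentiment('AAPL', type='daily', token=TOKEN)

# Minute-level sentiment needs a date (YYYYMMDD string or datetime).
by_minute = sentiment('AAPL', type='minute', date='20190617', token=TOKEN)
```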
python
valid
IvanMalison/okcupyd
okcupyd/util/__init__.py
https://github.com/IvanMalison/okcupyd/blob/46f4eaa9419098f6c299738ce148af55c64deb64/okcupyd/util/__init__.py#L105-L111
```python
def from_string_pairs(cls, string_value_pairs, **kwargs):
    """Build an :class:`~.REMap` from str, value pairs by applying
    `re.compile` to each string and calling the __init__ of :class:`~.REMap`
    """
    return cls(re_value_pairs=[(re.compile(s), v)
                               for s, v in string_value_pairs],
               **kwargs)
```
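The core transformation is just string patterns to compiled-regex keys. A minimal stand-in sketch: the `REMap` below is a toy with a first-match `lookup`, not okcupyd's implementation.

```python
import re


class REMap(object):
    """Toy regex->value map: first matching pattern wins."""

    def __init__(self, re_value_pairs=()):
        self.pairs = list(re_value_pairs)

    @classmethod
    def from_string_pairs(cls, string_value_pairs, **kwargs):
        # Same construction as above: compile each string into a pattern.
        return cls(re_value_pairs=[(re.compile(s), v)
                                   for s, v in string_value_pairs],
                   **kwargs)

    def lookup(self, key):
        for pattern, value in self.pairs:
            if pattern.search(key):
                return value
        raise KeyError(key)


remap = REMap.from_string_pairs([(r'^\d+$', 'number'), (r'^[a-z]+$', 'word')])
print(remap.lookup('123'))    # 'number'
print(remap.lookup('hello'))  # 'word'
```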
python
train
InspectorMustache/base16-builder-python
pybase16_builder/shared.py
https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/shared.py#L12-L20
```python
def get_yaml_dict(yaml_file):
    """Return a yaml_dict from reading yaml_file. If yaml_file is empty or
    doesn't exist, return an empty dict instead."""
    try:
        with open(yaml_file, 'r') as file_:
            yaml_dict = yaml.safe_load(file_.read()) or {}
        return yaml_dict
    except FileNotFoundError:
        return {}
```
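A usage sketch, assuming PyYAML and the package are installed; the scheme file paths are placeholders. The point of the helper is that missing and empty files both come back as `{}`, so callers can merge results without special-casing.

```python
from pybase16_builder.shared import get_yaml_dict

scheme = get_yaml_dict('schemes/default.yaml')   # placeholder path
overrides = get_yaml_dict('no-such-file.yaml')   # missing file -> {}
scheme.update(overrides)                         # safe even when empty
```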
python
train
nickoala/telepot
telepot/helper.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/helper.py#L1002-L1008
```python
def map(self, msg):
    """
    Apply key function to ``msg`` to obtain a key. Return the routing table entry.
    """
    k = self.key_function(msg)
    key = k[0] if isinstance(k, (tuple, list)) else k
    return self.routing_table[key]
```
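How a key function plus routing table plays out, in a toy router: the message shape and handler names are invented, and telepot's real Router adds default handling and argument passing on top of this lookup.

```python
class Router(object):
    def __init__(self, key_function, routing_table):
        self.key_function = key_function
        self.routing_table = routing_table

    def map(self, msg):
        # Tuples/lists are allowed; only the first element is the key.
        k = self.key_function(msg)
        key = k[0] if isinstance(k, (tuple, list)) else k
        return self.routing_table[key]


def content_type(msg):
    # Key function returning a tuple: (key, extra payload).
    return (msg['type'], msg)


router = Router(content_type, {
    'text': lambda m: 'handled text: %s' % m['body'],
    'photo': lambda m: 'handled a photo',
})

msg = {'type': 'text', 'body': 'hi'}
handler = router.map(msg)
print(handler(msg))  # handled text: hi
```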
python
train
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L4019-L4058
```python
def nvmlDeviceClearEccErrorCounts(handle, counterType):
    r"""
    /**
     * Clear the ECC error and other memory error counts for the device.
     *
     * For Kepler &tm; or newer fully supported devices.
     * Only applicable to devices with ECC.
     * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts.
     * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts.
     * Requires root/admin permissions.
     * Requires ECC Mode to be enabled.
     *
     * Sets all of the specified ECC counters to 0, including both detailed and total counts.
     *
     * This operation takes effect immediately.
     *
     * See \ref nvmlMemoryErrorType_t for details on available counter types.
     *
     * @param device      The identifier of the target device
     * @param counterType Flag that indicates which type of errors should be cleared.
     *
     * @return
     *   - \ref NVML_SUCCESS                if the error counts were cleared
     *   - \ref NVML_ERROR_UNINITIALIZED    if the library has not been successfully initialized
     *   - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid
     *   - \ref NVML_ERROR_NOT_SUPPORTED    if the device does not support this feature
     *   - \ref NVML_ERROR_NO_PERMISSION    if the user doesn't have permission to perform this operation
     *   - \ref NVML_ERROR_GPU_IS_LOST      if the target GPU has fallen off the bus or is otherwise inaccessible
     *   - \ref NVML_ERROR_UNKNOWN          on any unexpected error
     *
     * @see
     *   - nvmlDeviceGetDetailedEccErrors()
     *   - nvmlDeviceGetTotalEccErrors()
     */
    nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceClearEccErrorCounts")
    ret = fn(handle, _nvmlEccCounterType_t(counterType))
    _nvmlCheckReturn(ret)
    return None
```
python
train
ActivisionGameScience/assertpy
assertpy/assertpy.py
https://github.com/ActivisionGameScience/assertpy/blob/08d799cdb01f9a25d3e20672efac991c7bc26d79/assertpy/assertpy.py#L856-L864
```python
def is_before(self, other):
    """Asserts that val is a date and is before other date."""
    if type(self.val) is not datetime.datetime:
        raise TypeError('val must be datetime, but was type <%s>' % type(self.val).__name__)
    if type(other) is not datetime.datetime:
        raise TypeError('given arg must be datetime, but was type <%s>' % type(other).__name__)
    if self.val >= other:
        self._err('Expected <%s> to be before <%s>, but was not.' % (
            self.val.strftime('%Y-%m-%d %H:%M:%S'),
            other.strftime('%Y-%m-%d %H:%M:%S')))
    return self
```
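Typical usage through assertpy's fluent entry point, `assert_that`, which constructs the assertion object this method lives on:

```python
import datetime

from assertpy import assert_that

d1 = datetime.datetime(2024, 1, 1, 9, 0, 0)
d2 = datetime.datetime(2024, 6, 1, 9, 0, 0)

assert_that(d1).is_before(d2)    # passes; returns the assertion for chaining
# assert_that(d2).is_before(d1)  # would fail with the formatted message above
```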
python
valid
Pegase745/sqlalchemy-datatables
examples/pyramid_tut/pyramid_tut/views.py
https://github.com/Pegase745/sqlalchemy-datatables/blob/049ab5f98f20ad37926fe86d5528da0c91cd462d/examples/pyramid_tut/pyramid_tut/views.py#L83-L98
```python
def data_advanced(request):
    """Return server side data."""
    columns = [
        ColumnDT(User.id, search_method="numeric"),
        ColumnDT(User.name),
        ColumnDT(Address.description),
        ColumnDT(User.birthday, search_method="date"),
        ColumnDT(User.age, search_method="numeric")
    ]

    query = DBSession.query().select_from(User).join(Address).filter(
        Address.id > 4)

    rowTable = DataTables(request.GET, query, columns)

    return rowTable.output_result()
```
python
train
tensorflow/tensor2tensor
tensor2tensor/models/revnet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/revnet.py#L208-L258
```python
def unit(x1, x2, block_num, depth, num_layers, dim='2d', bottleneck=True,
         first_batch_norm=True, stride=1, training=True):
  """Implements bottleneck RevNet unit from authors' RevNet architecture.

  Args:
    x1: [N, H, W, C] tensor of network activations.
    x2: [N, H, W, C] tensor of network activations.
    block_num: integer ID of block
    depth: First depth in bottleneck residual unit.
    num_layers: Number of layers in the RevNet block.
    dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
    bottleneck: Should a bottleneck layer be used.
    first_batch_norm: Whether to keep the first batch norm layer or not.
      Typically used in the first RevNet block.
    stride: Stride for the residual function.
    training: True for train phase, False for eval phase.

  Returns:
    Two [N, H, W, C] output activation tensors.
  """
  scope_name = 'unit_%d' % block_num
  if bottleneck:
    depth1 = depth
    depth2 = depth * 4
  else:
    depth1 = depth2 = depth

  residual = wrapped_partial(f, depth1=depth1, depth2=depth2, dim=dim,
                             training=training, bottleneck=bottleneck)

  with tf.variable_scope(scope_name):
    downsample = downsample_bottleneck if bottleneck else downsample_residual
    # Manual implementation of downsampling
    with tf.variable_scope('downsampling'):
      with tf.variable_scope('x1'):
        hx1 = downsample(x1, depth2, dim=dim, stride=stride)
        fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm)
        x1 = hx1 + fx2
      with tf.variable_scope('x2'):
        hx2 = downsample(x2, depth2, dim=dim, stride=stride)
        fx1 = residual(x1)
        x2 = hx2 + fx1

    # Full block using memory-efficient rev_block implementation.
    with tf.variable_scope('full_block'):
      x1, x2 = tf.contrib.layers.rev_block(x1, x2, residual, residual,
                                           num_layers=num_layers)
    return x1, x2
```
python
train
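The reversible coupling that unit() builds on lets the backward pass reconstruct activations instead of storing them. A minimal, framework-free sketch of that idea with stand-in residual functions (an illustration of the coupling, not the authors' TensorFlow implementation above):

import numpy as np

def rev_couple(x1, x2, f, g):
    # Forward pass of one reversible block: y1 = x1 + f(x2), y2 = x2 + g(y1).
    y1 = x1 + f(x2)
    y2 = x2 + g(y1)
    return y1, y2

def rev_uncouple(y1, y2, f, g):
    # Inverse pass: recover both inputs from the outputs alone.
    x2 = y2 - g(y1)
    x1 = y1 - f(x2)
    return x1, x2

f = lambda x: np.tanh(x)   # stand-ins for the residual branches
g = lambda x: 0.5 * x
x1, x2 = np.random.randn(4), np.random.randn(4)
y1, y2 = rev_couple(x1, x2, f, g)
r1, r2 = rev_uncouple(y1, y2, f, g)
assert np.allclose(x1, r1) and np.allclose(x2, r2)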
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/quaternion.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/quaternion.py#L87-L103
def exp(self): """ Returns the exponent of the quaternion. (not tested) """ # Init vecNorm = self.x**2 + self.y**2 + self.z**2 wPart = np.exp(self.w) q = Quaternion() # Calculate q.w = wPart * np.cos(vecNorm) q.x = wPart * self.x * np.sin(vecNorm) / vecNorm q.y = wPart * self.y * np.sin(vecNorm) / vecNorm q.z = wPart * self.z * np.sin(vecNorm) / vecNorm return q
[ "def", "exp", "(", "self", ")", ":", "# Init", "vecNorm", "=", "self", ".", "x", "**", "2", "+", "self", ".", "y", "**", "2", "+", "self", ".", "z", "**", "2", "wPart", "=", "np", ".", "exp", "(", "self", ".", "w", ")", "q", "=", "Quaternion", "(", ")", "# Calculate", "q", ".", "w", "=", "wPart", "*", "np", ".", "cos", "(", "vecNorm", ")", "q", ".", "x", "=", "wPart", "*", "self", ".", "x", "*", "np", ".", "sin", "(", "vecNorm", ")", "/", "vecNorm", "q", ".", "y", "=", "wPart", "*", "self", ".", "y", "*", "np", ".", "sin", "(", "vecNorm", ")", "/", "vecNorm", "q", ".", "z", "=", "wPart", "*", "self", ".", "z", "*", "np", ".", "sin", "(", "vecNorm", ")", "/", "vecNorm", "return", "q" ]
Returns the exponent of the quaternion. (not tested)
[ "Returns", "the", "exponent", "of", "the", "quaternion", ".", "(", "not", "tested", ")" ]
python
train
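The docstring above flags exp() as untested, and the method uses the squared length of the vector part (vecNorm has no square root) where the textbook quaternion exponential uses the length itself. For comparison, a minimal NumPy sketch of the usual definition (an illustration, not a patch for the class above):

import numpy as np

def quat_exp(w, x, y, z):
    # exp(q) = e^w * (cos|v|, sin|v| * v/|v|), with vector part v = (x, y, z).
    n = np.sqrt(x * x + y * y + z * z)
    ew = np.exp(w)
    if n < 1e-12:              # vector part ~ 0: result is purely real
        return ew, 0.0, 0.0, 0.0
    s = ew * np.sin(n) / n
    return ew * np.cos(n), s * x, s * y, s * z

print(quat_exp(0.0, 0.1, 0.2, 0.3))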
davgeo/clear
clear/database.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/database.py#L230-L252
def SetConfigValue(self, fieldName, value):
    """
    Set value in Config table.

    If an entry already exists it is updated with the new value, otherwise
    a new entry is added.

    Parameters
    ----------
    fieldName : string
      String to be inserted or matched against Name column in Config table.

    value : string
      Entry to be inserted or updated in Value column of Config table.
    """
    currentConfigValue = self.GetConfigValue(fieldName)

    if currentConfigValue is None:
      goodlogging.Log.Info("DB", "Adding {0}={1} to database config table".format(fieldName, value), verbosity=self.logVerbosity)
      self._ActionDatabase("INSERT INTO Config VALUES (?,?)", (fieldName, value))
    else:
      goodlogging.Log.Info("DB", "Updating {0} in database config table from {1} to {2}".format(fieldName, currentConfigValue, value), verbosity=self.logVerbosity)
      self._ActionDatabase("UPDATE Config SET Value=? WHERE Name=?", (value, fieldName))
[ "def", "SetConfigValue", "(", "self", ",", "fieldName", ",", "value", ")", ":", "currentConfigValue", "=", "self", ".", "GetConfigValue", "(", "fieldName", ")", "if", "currentConfigValue", "is", "None", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"DB\"", ",", "\"Adding {0}={1} to database config table\"", ".", "format", "(", "fieldName", ",", "value", ")", ",", "verbosity", "=", "self", ".", "logVerbosity", ")", "self", ".", "_ActionDatabase", "(", "\"INSERT INTO Config VALUES (?,?)\"", ",", "(", "fieldName", ",", "value", ")", ")", "else", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"DB\"", ",", "\"Updating {0} in database config table from {1} to {2}\"", ".", "format", "(", "fieldName", ",", "currentConfigValue", ",", "value", ")", ",", "verbosity", "=", "self", ".", "logVerbosity", ")", "self", ".", "_ActionDatabase", "(", "\"UPDATE Config SET Value=? WHERE Name=?\"", ",", "(", "value", ",", "fieldName", ")", ")" ]
Set value in Config table.

If an entry already exists it is updated with the new value, otherwise
a new entry is added.

Parameters
----------
fieldName : string
  String to be inserted or matched against Name column in Config table.

value : string
  Entry to be inserted or updated in Value column of Config table.
[ "Set", "value", "in", "Config", "table", "." ]
python
train
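The method above is a check-then-write upsert against a two-column Name/Value table. A self-contained SQLite sketch of the same pattern, with a hypothetical schema matching the queries in the code:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE Config (Name TEXT PRIMARY KEY, Value TEXT)")

def set_config_value(conn, name, value):
    # Insert on first sight of the key, update on every later call.
    row = conn.execute("SELECT Value FROM Config WHERE Name=?", (name,)).fetchone()
    if row is None:
        conn.execute("INSERT INTO Config VALUES (?,?)", (name, value))
    else:
        conn.execute("UPDATE Config SET Value=? WHERE Name=?", (value, name))
    conn.commit()

set_config_value(conn, "Source", "/tmp/downloads")
set_config_value(conn, "Source", "/srv/downloads")   # second call updates in place
print(conn.execute("SELECT * FROM Config").fetchall())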
econ-ark/HARK
HARK/ConsumptionSaving/ConsIndShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1383-L1446
def solveConsKinkedR(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rboro,Rsave, PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool): ''' Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks), and different interest factors on borrowing and saving. Restriction: Rboro >= Rsave. Currently cannot construct a cubic spline consumption function, only linear. Can gen- erate a value function if requested. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rboro: float Interest factor on assets between this period and the succeeding period when assets are negative. Rsave: float Interest factor on assets between this period and the succeeding period when assets are positive. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc. ''' solver = ConsKinkedRsolver(solution_next,IncomeDstn,LivPrb, DiscFac,CRRA,Rboro,Rsave,PermGroFac,BoroCnstArt, aXtraGrid,vFuncBool,CubicBool) solver.prepareToSolve() solution = solver.solve() return solution
[ "def", "solveConsKinkedR", "(", "solution_next", ",", "IncomeDstn", ",", "LivPrb", ",", "DiscFac", ",", "CRRA", ",", "Rboro", ",", "Rsave", ",", "PermGroFac", ",", "BoroCnstArt", ",", "aXtraGrid", ",", "vFuncBool", ",", "CubicBool", ")", ":", "solver", "=", "ConsKinkedRsolver", "(", "solution_next", ",", "IncomeDstn", ",", "LivPrb", ",", "DiscFac", ",", "CRRA", ",", "Rboro", ",", "Rsave", ",", "PermGroFac", ",", "BoroCnstArt", ",", "aXtraGrid", ",", "vFuncBool", ",", "CubicBool", ")", "solver", ".", "prepareToSolve", "(", ")", "solution", "=", "solver", ".", "solve", "(", ")", "return", "solution" ]
Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks), and different interest factors on borrowing and saving. Restriction: Rboro >= Rsave. Currently cannot construct a cubic spline consumption function, only linear. Can gen- erate a value function if requested. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rboro: float Interest factor on assets between this period and the succeeding period when assets are negative. Rsave: float Interest factor on assets between this period and the succeeding period when assets are positive. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc.
[ "Solves", "a", "single", "period", "consumption", "-", "saving", "problem", "with", "CRRA", "utility", "and", "risky", "income", "(", "subject", "to", "permanent", "and", "transitory", "shocks", ")", "and", "different", "interest", "factors", "on", "borrowing", "and", "saving", ".", "Restriction", ":", "Rboro", ">", "=", "Rsave", ".", "Currently", "cannot", "construct", "a", "cubic", "spline", "consumption", "function", "only", "linear", ".", "Can", "gen", "-", "erate", "a", "value", "function", "if", "requested", "." ]
python
train
fastai/fastai
fastai/widgets/image_downloader.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/widgets/image_downloader.py#L78-L91
def download_google_images(path:PathOrStr, search_term:str, size:str='>400*300', n_images:int=10, format:str='jpg', max_workers:int=defaults.cpus, timeout:int=4) -> FilePathList: """ Search for `n_images` images on Google, matching `search_term` and `size` requirements, download them into `path`/`search_term` and verify them, using `max_workers` threads. """ label_path = Path(path)/search_term search_url = _search_url(search_term, size=size, format=format) if n_images <= 100: img_tuples = _fetch_img_tuples(search_url, format=format, n_images=n_images) else: img_tuples = _fetch_img_tuples_webdriver(search_url, format=format, n_images=n_images) downloaded_images = _download_images(label_path, img_tuples, max_workers=max_workers, timeout=timeout) if len(downloaded_images) == 0: raise RuntimeError(f"Couldn't download any images.") verify_images(label_path, max_workers=max_workers) return get_image_files(label_path)
[ "def", "download_google_images", "(", "path", ":", "PathOrStr", ",", "search_term", ":", "str", ",", "size", ":", "str", "=", "'>400*300'", ",", "n_images", ":", "int", "=", "10", ",", "format", ":", "str", "=", "'jpg'", ",", "max_workers", ":", "int", "=", "defaults", ".", "cpus", ",", "timeout", ":", "int", "=", "4", ")", "->", "FilePathList", ":", "label_path", "=", "Path", "(", "path", ")", "/", "search_term", "search_url", "=", "_search_url", "(", "search_term", ",", "size", "=", "size", ",", "format", "=", "format", ")", "if", "n_images", "<=", "100", ":", "img_tuples", "=", "_fetch_img_tuples", "(", "search_url", ",", "format", "=", "format", ",", "n_images", "=", "n_images", ")", "else", ":", "img_tuples", "=", "_fetch_img_tuples_webdriver", "(", "search_url", ",", "format", "=", "format", ",", "n_images", "=", "n_images", ")", "downloaded_images", "=", "_download_images", "(", "label_path", ",", "img_tuples", ",", "max_workers", "=", "max_workers", ",", "timeout", "=", "timeout", ")", "if", "len", "(", "downloaded_images", ")", "==", "0", ":", "raise", "RuntimeError", "(", "f\"Couldn't download any images.\"", ")", "verify_images", "(", "label_path", ",", "max_workers", "=", "max_workers", ")", "return", "get_image_files", "(", "label_path", ")" ]
Search for `n_images` images on Google, matching `search_term` and `size` requirements, download them into `path`/`search_term` and verify them, using `max_workers` threads.
[ "Search", "for", "n_images", "images", "on", "Google", "matching", "search_term", "and", "size", "requirements", "download", "them", "into", "path", "/", "search_term", "and", "verify", "them", "using", "max_workers", "threads", "." ]
python
train
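A usage sketch for the function above; the directory, search term, and counts are illustrative placeholders, and a real run needs network access (and a webdriver once n_images exceeds 100):

# Download up to 50 verified JPEGs of "red panda" into data/red panda.
files = download_google_images("data", "red panda", size=">400*300",
                               n_images=50, format="jpg")
print(len(files), "images ready")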
mozilla/elasticutils
elasticutils/__init__.py
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/__init__.py#L694-L759
def query(self, *queries, **kw): """ Return a new S instance with query args combined with existing set in a must boolean query. :arg queries: instances of Q :arg kw: queries in the form of ``field__action=value`` There are three special flags you can use: * ``must=True``: Specifies that the queries and kw queries **must match** in order for a document to be in the result. If you don't specify a special flag, this is the default. * ``should=True``: Specifies that the queries and kw queries **should match** in order for a document to be in the result. * ``must_not=True``: Specifies the queries and kw queries **must not match** in order for a document to be in the result. These flags work by putting those queries in the appropriate clause of an Elasticsearch boolean query. Examples: >>> s = S().query(foo='bar') >>> s = S().query(Q(foo='bar')) >>> s = S().query(foo='bar', bat__match='baz') >>> s = S().query(foo='bar', should=True) >>> s = S().query(foo='bar', should=True).query(baz='bat', must=True) Notes: 1. Don't specify multiple special flags, but if you did, `should` takes precedence. 2. If you don't specify any, it defaults to `must`. 3. You can specify special flags in the :py:class:`elasticutils.Q`, too. If you're building your query incrementally, using :py:class:`elasticutils.Q` helps a lot. See the documentation on :py:class:`elasticutils.Q` for more details on composing queries with Q. See the documentation on :py:class:`elasticutils.S` for more details on adding support for more query types. """ q = Q() for query in queries: q += query if 'or_' in kw: # Backwards compatibile with pre-0.7 version. or_query = kw.pop('or_') # or_query here is a dict of key/val pairs. or_ indicates # they're in a should clause, so we generate the # equivalent Q and then add it in. or_query['should'] = True q += Q(**or_query) q += Q(**kw) return self._clone(next_step=('query', q))
[ "def", "query", "(", "self", ",", "*", "queries", ",", "*", "*", "kw", ")", ":", "q", "=", "Q", "(", ")", "for", "query", "in", "queries", ":", "q", "+=", "query", "if", "'or_'", "in", "kw", ":", "# Backwards compatibile with pre-0.7 version.", "or_query", "=", "kw", ".", "pop", "(", "'or_'", ")", "# or_query here is a dict of key/val pairs. or_ indicates", "# they're in a should clause, so we generate the", "# equivalent Q and then add it in.", "or_query", "[", "'should'", "]", "=", "True", "q", "+=", "Q", "(", "*", "*", "or_query", ")", "q", "+=", "Q", "(", "*", "*", "kw", ")", "return", "self", ".", "_clone", "(", "next_step", "=", "(", "'query'", ",", "q", ")", ")" ]
Return a new S instance with query args combined with existing set in a must boolean query. :arg queries: instances of Q :arg kw: queries in the form of ``field__action=value`` There are three special flags you can use: * ``must=True``: Specifies that the queries and kw queries **must match** in order for a document to be in the result. If you don't specify a special flag, this is the default. * ``should=True``: Specifies that the queries and kw queries **should match** in order for a document to be in the result. * ``must_not=True``: Specifies the queries and kw queries **must not match** in order for a document to be in the result. These flags work by putting those queries in the appropriate clause of an Elasticsearch boolean query. Examples: >>> s = S().query(foo='bar') >>> s = S().query(Q(foo='bar')) >>> s = S().query(foo='bar', bat__match='baz') >>> s = S().query(foo='bar', should=True) >>> s = S().query(foo='bar', should=True).query(baz='bat', must=True) Notes: 1. Don't specify multiple special flags, but if you did, `should` takes precedence. 2. If you don't specify any, it defaults to `must`. 3. You can specify special flags in the :py:class:`elasticutils.Q`, too. If you're building your query incrementally, using :py:class:`elasticutils.Q` helps a lot. See the documentation on :py:class:`elasticutils.Q` for more details on composing queries with Q. See the documentation on :py:class:`elasticutils.S` for more details on adding support for more query types.
[ "Return", "a", "new", "S", "instance", "with", "query", "args", "combined", "with", "existing", "set", "in", "a", "must", "boolean", "query", "." ]
python
train
spyder-ide/spyder
spyder/config/gui.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/gui.py#L169-L173
def is_dark_font_color(color_scheme): """Check if the font color used in the color scheme is dark.""" color_scheme = get_color_scheme(color_scheme) font_color, fon_fw, fon_fs = color_scheme['normal'] return dark_color(font_color)
[ "def", "is_dark_font_color", "(", "color_scheme", ")", ":", "color_scheme", "=", "get_color_scheme", "(", "color_scheme", ")", "font_color", ",", "fon_fw", ",", "fon_fs", "=", "color_scheme", "[", "'normal'", "]", "return", "dark_color", "(", "font_color", ")" ]
Check if the font color used in the color scheme is dark.
[ "Check", "if", "the", "font", "color", "used", "in", "the", "color", "scheme", "is", "dark", "." ]
python
train
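The dark_color() helper is not shown in this record. One common way such a check is implemented is via sRGB relative luminance; a hypothetical stand-in, not Spyder's actual code:

def dark_color(hex_color):
    # Treat a colour as dark when its relative luminance is below 0.5.
    h = hex_color.lstrip("#")
    r, g, b = (int(h[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
    return 0.2126 * r + 0.7152 * g + 0.0722 * b < 0.5

print(dark_color("#202020"), dark_color("#fafafa"))   # True False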
ratt-ru/PyMORESANE
pymoresane/iuwt_convolution.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt_convolution.py#L242-L265
def scale_fft(in1): """ This function performs in-place scaling after the IFFT without recompilation. INPUTS: in1 (no default): Array containing data which is to be scaled. """ ker = SourceModule(""" __global__ void scale_fft_ker(float *in1) { const int len = gridDim.x*blockDim.x; const int col = (blockDim.x * blockIdx.x + threadIdx.x); const int row = (blockDim.y * blockIdx.y + threadIdx.y); const int tid2 = col + len*row; const int szin1 = gridDim.x*blockDim.x*gridDim.y*blockDim.y; in1[tid2] = in1[tid2]/szin1; } """, keep=True) scale_fft_ker = ker.get_function("scale_fft_ker") scale_fft_ker(in1, block=(32,32,1), grid=(int(in1.shape[1]//32), int(in1.shape[0]//32)))
[ "def", "scale_fft", "(", "in1", ")", ":", "ker", "=", "SourceModule", "(", "\"\"\"\n __global__ void scale_fft_ker(float *in1)\n {\n const int len = gridDim.x*blockDim.x;\n const int col = (blockDim.x * blockIdx.x + threadIdx.x);\n const int row = (blockDim.y * blockIdx.y + threadIdx.y);\n const int tid2 = col + len*row;\n const int szin1 = gridDim.x*blockDim.x*gridDim.y*blockDim.y;\n\n\t\t\t\t\t\t\tin1[tid2] = in1[tid2]/szin1; \n }\n \"\"\"", ",", "keep", "=", "True", ")", "scale_fft_ker", "=", "ker", ".", "get_function", "(", "\"scale_fft_ker\"", ")", "scale_fft_ker", "(", "in1", ",", "block", "=", "(", "32", ",", "32", ",", "1", ")", ",", "grid", "=", "(", "int", "(", "in1", ".", "shape", "[", "1", "]", "//", "32", ")", ",", "int", "(", "in1", ".", "shape", "[", "0", "]", "//", "32", ")", ")", ")" ]
This function performs in-place scaling after the IFFT without recompilation. INPUTS: in1 (no default): Array containing data which is to be scaled.
[ "This", "function", "performs", "in", "-", "place", "scaling", "after", "the", "IFFT", "without", "recompilation", "." ]
python
train
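The kernel divides every element by the total element count, i.e. the 1/N normalisation that an unnormalised inverse FFT leaves out. The same step on the CPU with NumPy (np.fft.ifft2 normalises already, so the scaling is undone and reapplied here just to mirror the GPU path):

import numpy as np

a = np.random.rand(64, 64).astype(np.float32)
spec = np.fft.fft2(a)
unscaled = np.fft.ifft2(spec) * spec.size   # emulate a backend that skips 1/N
unscaled /= unscaled.size                   # the scale_fft step
assert np.allclose(unscaled.real, a, atol=1e-5)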
Gandi/gandi.cli
gandi/cli/modules/domain.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/domain.py#L122-L126
def from_fqdn(cls, fqdn):
    """Retrieve domain id associated with an FQDN."""
    result = cls.list({'fqdn': fqdn})
    if len(result) > 0:
        return result[0]['id']
[ "def", "from_fqdn", "(", "cls", ",", "fqdn", ")", ":", "result", "=", "cls", ".", "list", "(", "{", "'fqdn'", ":", "fqdn", "}", ")", "if", "len", "(", "result", ")", ">", "0", ":", "return", "result", "[", "0", "]", "[", "'id'", "]" ]
Retrieve domain id associated with an FQDN.
[ "Retrieve", "domain", "id", "associated", "to", "a", "FQDN", "." ]
python
train
emory-libraries/eulfedora
eulfedora/models.py
https://github.com/emory-libraries/eulfedora/blob/161826f3fdcdab4007f6fa7dfd9f1ecabc4bcbe4/eulfedora/models.py#L2155-L2191
def for_class(digobj, repo): '''Generate a ContentModel object for the specified :class:`DigitalObject` class. Content model object is saved in the specified repository if it doesn't already exist.''' full_name = '%s.%s' % (digobj.__module__, digobj.__name__) cmodels = getattr(digobj, 'CONTENT_MODELS', None) if not cmodels: logger.debug('%s has no content models', full_name) return None if len(cmodels) > 1: logger.debug('%s has %d content models', full_name, len(cmodels)) raise ValueError(('Cannot construct ContentModel object for ' + '%s, which has %d CONTENT_MODELS (only 1 is ' + 'supported)') % (full_name, len(cmodels))) cmodel_uri = cmodels[0] logger.debug('cmodel for %s is %s', full_name, cmodel_uri) cmodel_obj = repo.get_object(cmodel_uri, type=ContentModel, create=False) if cmodel_obj.exists: logger.debug('%s already exists', cmodel_uri) return cmodel_obj # otherwise the cmodel doesn't exist. let's create it. logger.debug('creating %s from %s', cmodel_uri, full_name) cmodel_obj = repo.get_object(cmodel_uri, type=ContentModel, create=True) # XXX: should this use _defined_datastreams instead? for ds in digobj._local_datastreams.values(): ds_composite_model = cmodel_obj.ds_composite_model.content type_model = ds_composite_model.get_type_model(ds.id, create=True) type_model.mimetype = ds.default_mimetype if ds.default_format_uri: type_model.format_uri = ds.default_format_uri cmodel_obj.save() return cmodel_obj
[ "def", "for_class", "(", "digobj", ",", "repo", ")", ":", "full_name", "=", "'%s.%s'", "%", "(", "digobj", ".", "__module__", ",", "digobj", ".", "__name__", ")", "cmodels", "=", "getattr", "(", "digobj", ",", "'CONTENT_MODELS'", ",", "None", ")", "if", "not", "cmodels", ":", "logger", ".", "debug", "(", "'%s has no content models'", ",", "full_name", ")", "return", "None", "if", "len", "(", "cmodels", ")", ">", "1", ":", "logger", ".", "debug", "(", "'%s has %d content models'", ",", "full_name", ",", "len", "(", "cmodels", ")", ")", "raise", "ValueError", "(", "(", "'Cannot construct ContentModel object for '", "+", "'%s, which has %d CONTENT_MODELS (only 1 is '", "+", "'supported)'", ")", "%", "(", "full_name", ",", "len", "(", "cmodels", ")", ")", ")", "cmodel_uri", "=", "cmodels", "[", "0", "]", "logger", ".", "debug", "(", "'cmodel for %s is %s'", ",", "full_name", ",", "cmodel_uri", ")", "cmodel_obj", "=", "repo", ".", "get_object", "(", "cmodel_uri", ",", "type", "=", "ContentModel", ",", "create", "=", "False", ")", "if", "cmodel_obj", ".", "exists", ":", "logger", ".", "debug", "(", "'%s already exists'", ",", "cmodel_uri", ")", "return", "cmodel_obj", "# otherwise the cmodel doesn't exist. let's create it.", "logger", ".", "debug", "(", "'creating %s from %s'", ",", "cmodel_uri", ",", "full_name", ")", "cmodel_obj", "=", "repo", ".", "get_object", "(", "cmodel_uri", ",", "type", "=", "ContentModel", ",", "create", "=", "True", ")", "# XXX: should this use _defined_datastreams instead?", "for", "ds", "in", "digobj", ".", "_local_datastreams", ".", "values", "(", ")", ":", "ds_composite_model", "=", "cmodel_obj", ".", "ds_composite_model", ".", "content", "type_model", "=", "ds_composite_model", ".", "get_type_model", "(", "ds", ".", "id", ",", "create", "=", "True", ")", "type_model", ".", "mimetype", "=", "ds", ".", "default_mimetype", "if", "ds", ".", "default_format_uri", ":", "type_model", ".", "format_uri", "=", "ds", ".", "default_format_uri", "cmodel_obj", ".", "save", "(", ")", "return", "cmodel_obj" ]
Generate a ContentModel object for the specified :class:`DigitalObject` class. Content model object is saved in the specified repository if it doesn't already exist.
[ "Generate", "a", "ContentModel", "object", "for", "the", "specified", ":", "class", ":", "DigitalObject", "class", ".", "Content", "model", "object", "is", "saved", "in", "the", "specified", "repository", "if", "it", "doesn", "t", "already", "exist", "." ]
python
train
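A usage sketch for for_class(); the class name and content-model URI are hypothetical, and repo is assumed to be an existing Repository connection:

# Exactly one CONTENT_MODELS entry is required; more than one raises ValueError.
class Article(DigitalObject):
    CONTENT_MODELS = ['info:fedora/example:article-1.0']

cmodel = for_class(Article, repo)
if cmodel is not None:
    print(cmodel.exists)   # the content model now exists in the repository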
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/mp_tile.py#L216-L225
def coord_to_tile(self, lat, lon, zoom): '''convert lat/lon/zoom to a TileInfo''' world_tiles = 1<<zoom x = world_tiles / 360.0 * (lon + 180.0) tiles_pre_radian = world_tiles / (2 * math.pi) e = math.sin(lat * (1/180.*math.pi)) y = world_tiles/2 + 0.5*math.log((1+e)/(1-e)) * (-tiles_pre_radian) offsetx = int((x - int(x)) * TILES_WIDTH) offsety = int((y - int(y)) * TILES_HEIGHT) return TileInfo((int(x) % world_tiles, int(y) % world_tiles), zoom, self.service, offset=(offsetx, offsety))
[ "def", "coord_to_tile", "(", "self", ",", "lat", ",", "lon", ",", "zoom", ")", ":", "world_tiles", "=", "1", "<<", "zoom", "x", "=", "world_tiles", "/", "360.0", "*", "(", "lon", "+", "180.0", ")", "tiles_pre_radian", "=", "world_tiles", "/", "(", "2", "*", "math", ".", "pi", ")", "e", "=", "math", ".", "sin", "(", "lat", "*", "(", "1", "/", "180.", "*", "math", ".", "pi", ")", ")", "y", "=", "world_tiles", "/", "2", "+", "0.5", "*", "math", ".", "log", "(", "(", "1", "+", "e", ")", "/", "(", "1", "-", "e", ")", ")", "*", "(", "-", "tiles_pre_radian", ")", "offsetx", "=", "int", "(", "(", "x", "-", "int", "(", "x", ")", ")", "*", "TILES_WIDTH", ")", "offsety", "=", "int", "(", "(", "y", "-", "int", "(", "y", ")", ")", "*", "TILES_HEIGHT", ")", "return", "TileInfo", "(", "(", "int", "(", "x", ")", "%", "world_tiles", ",", "int", "(", "y", ")", "%", "world_tiles", ")", ",", "zoom", ",", "self", ".", "service", ",", "offset", "=", "(", "offsetx", ",", "offsety", ")", ")" ]
convert lat/lon/zoom to a TileInfo
[ "convert", "lat", "/", "lon", "/", "zoom", "to", "a", "TileInfo" ]
python
train
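coord_to_tile() implements the standard Web-Mercator ("slippy map") tiling; an equivalent standalone sketch of the same mapping in its more common logarithmic form:

import math

def latlon_to_tile(lat, lon, zoom):
    # Tile indices at the given zoom level (n x n tiles cover the world).
    n = 1 << zoom
    x = int((lon + 180.0) / 360.0 * n)
    lat_rad = math.radians(lat)
    y = int((1.0 - math.log(math.tan(lat_rad) + 1.0 / math.cos(lat_rad)) / math.pi) / 2.0 * n)
    return x % n, y % n

print(latlon_to_tile(52.52, 13.405, 12))   # Berlin at zoom 12 -> (2200, 1343)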
aws/aws-xray-sdk-python
aws_xray_sdk/core/sampling/reservoir.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/sampling/reservoir.py#L23-L30
def borrow_or_take(self, now, can_borrow): """ Decide whether to borrow or take one quota from the reservoir. Return ``False`` if it can neither borrow nor take. This method is thread-safe. """ with self._lock: return self._borrow_or_take(now, can_borrow)
[ "def", "borrow_or_take", "(", "self", ",", "now", ",", "can_borrow", ")", ":", "with", "self", ".", "_lock", ":", "return", "self", ".", "_borrow_or_take", "(", "now", ",", "can_borrow", ")" ]
Decide whether to borrow or take one quota from the reservoir. Return ``False`` if it can neither borrow nor take. This method is thread-safe.
[ "Decide", "whether", "to", "borrow", "or", "take", "one", "quota", "from", "the", "reservoir", ".", "Return", "False", "if", "it", "can", "neither", "borrow", "nor", "take", ".", "This", "method", "is", "thread", "-", "safe", "." ]
python
train
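The record shows only the lock-guarded entry point. A toy reservoir illustrating the pattern end to end; the refill policy here (fixed per-second quota, at most one borrow per second) is an illustrative guess, not the SDK's actual rule:

import threading, time

class ToyReservoir:
    def __init__(self, quota=2):
        self._lock = threading.Lock()
        self._quota = quota
        self._this_sec = 0
        self._taken = 0
        self._borrowed = False

    def borrow_or_take(self, now, can_borrow):
        with self._lock:                 # same thread-safety guard as above
            return self._borrow_or_take(now, can_borrow)

    def _borrow_or_take(self, now, can_borrow):
        sec = int(now)
        if sec != self._this_sec:        # new second: reset the window
            self._this_sec, self._taken, self._borrowed = sec, 0, False
        if self._taken < self._quota:
            self._taken += 1
            return True
        if can_borrow and not self._borrowed:
            self._borrowed = True        # allow a single borrowed item
            return True
        return False

r = ToyReservoir(quota=2)
now = time.time()
print([r.borrow_or_take(now, False) for _ in range(3)])   # [True, True, False]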
hydraplatform/hydra-base
hydra_base/util/hydra_dateutil.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/hydra_dateutil.py#L296-L352
def reindex_timeseries(ts_string, new_timestamps): """ get data for timesamp :param a JSON string, in pandas-friendly format :param a timestamp or list of timestamps (datetimes) :returns a pandas data frame, reindexed with the supplied timestamos or None if no data is found """ #If a single timestamp is passed in, turn it into a list #Reindexing can't work if it's not a list if not isinstance(new_timestamps, list): new_timestamps = [new_timestamps] #Convert the incoming timestamps to datetimes #if they are not datetimes. new_timestamps_converted = [] for t in new_timestamps: new_timestamps_converted.append(get_datetime(t)) new_timestamps = new_timestamps_converted seasonal_year = config.get('DEFAULT','seasonal_year', '1678') seasonal_key = config.get('DEFAULT', 'seasonal_key', '9999') ts = ts_string.replace(seasonal_key, seasonal_year) timeseries = pd.read_json(ts) idx = timeseries.index ts_timestamps = new_timestamps #'Fix' the incoming timestamp in case it's a seasonal value if type(idx) == pd.DatetimeIndex: if set(idx.year) == set([int(seasonal_year)]): if isinstance(new_timestamps, list): seasonal_timestamp = [] for t in ts_timestamps: t_1900 = t.replace(year=int(seasonal_year)) seasonal_timestamp.append(t_1900) ts_timestamps = seasonal_timestamp #Reindex the timeseries to reflect the requested timestamps reindexed_ts = timeseries.reindex(ts_timestamps, method='ffill') i = reindexed_ts.index reindexed_ts.index = pd.Index(new_timestamps, names=i.names) #If there are no values at all, just return None if len(reindexed_ts.dropna()) == 0: return None #Replace all numpy NAN values with None pandas_ts = reindexed_ts.where(reindexed_ts.notnull(), None) return pandas_ts
[ "def", "reindex_timeseries", "(", "ts_string", ",", "new_timestamps", ")", ":", "#If a single timestamp is passed in, turn it into a list", "#Reindexing can't work if it's not a list", "if", "not", "isinstance", "(", "new_timestamps", ",", "list", ")", ":", "new_timestamps", "=", "[", "new_timestamps", "]", "#Convert the incoming timestamps to datetimes", "#if they are not datetimes.", "new_timestamps_converted", "=", "[", "]", "for", "t", "in", "new_timestamps", ":", "new_timestamps_converted", ".", "append", "(", "get_datetime", "(", "t", ")", ")", "new_timestamps", "=", "new_timestamps_converted", "seasonal_year", "=", "config", ".", "get", "(", "'DEFAULT'", ",", "'seasonal_year'", ",", "'1678'", ")", "seasonal_key", "=", "config", ".", "get", "(", "'DEFAULT'", ",", "'seasonal_key'", ",", "'9999'", ")", "ts", "=", "ts_string", ".", "replace", "(", "seasonal_key", ",", "seasonal_year", ")", "timeseries", "=", "pd", ".", "read_json", "(", "ts", ")", "idx", "=", "timeseries", ".", "index", "ts_timestamps", "=", "new_timestamps", "#'Fix' the incoming timestamp in case it's a seasonal value", "if", "type", "(", "idx", ")", "==", "pd", ".", "DatetimeIndex", ":", "if", "set", "(", "idx", ".", "year", ")", "==", "set", "(", "[", "int", "(", "seasonal_year", ")", "]", ")", ":", "if", "isinstance", "(", "new_timestamps", ",", "list", ")", ":", "seasonal_timestamp", "=", "[", "]", "for", "t", "in", "ts_timestamps", ":", "t_1900", "=", "t", ".", "replace", "(", "year", "=", "int", "(", "seasonal_year", ")", ")", "seasonal_timestamp", ".", "append", "(", "t_1900", ")", "ts_timestamps", "=", "seasonal_timestamp", "#Reindex the timeseries to reflect the requested timestamps", "reindexed_ts", "=", "timeseries", ".", "reindex", "(", "ts_timestamps", ",", "method", "=", "'ffill'", ")", "i", "=", "reindexed_ts", ".", "index", "reindexed_ts", ".", "index", "=", "pd", ".", "Index", "(", "new_timestamps", ",", "names", "=", "i", ".", "names", ")", "#If there are no values at all, just return None", "if", "len", "(", "reindexed_ts", ".", "dropna", "(", ")", ")", "==", "0", ":", "return", "None", "#Replace all numpy NAN values with None", "pandas_ts", "=", "reindexed_ts", ".", "where", "(", "reindexed_ts", ".", "notnull", "(", ")", ",", "None", ")", "return", "pandas_ts" ]
get data for timestamp

:param ts_string: a JSON string, in pandas-friendly format
:param new_timestamps: a timestamp or list of timestamps (datetimes)

:returns: a pandas data frame, reindexed with the supplied timestamps or None if no data is found
[ "get", "data", "for", "timesamp" ]
python
train
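The heart of the function is pandas' forward-fill reindex: every requested timestamp picks up the most recent known value. A compact sketch with made-up data:

import pandas as pd

ts = pd.Series([1.0, 2.0, 3.0],
               index=pd.to_datetime(["2020-01-01", "2020-01-03", "2020-01-05"]))
wanted = pd.to_datetime(["2020-01-02", "2020-01-04"])

print(ts.reindex(wanted, method="ffill"))
# 2020-01-02    1.0
# 2020-01-04    2.0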
pymc-devs/pymc
pymc/diagnostics.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L94-L228
def validate(sampler, replicates=20, iterations=10000, burn=5000, thin=1,
             deterministic=False, db='ram', plot=True, verbose=0):
    """
    Model validation method, following Cook et al. (Journal of Computational
    and Graphical Statistics, 2006, DOI: 10.1198/106186006X136976).

    Generates posterior samples based on 'true' parameter values and data
    simulated from the priors. The quantiles of the parameter values are
    calculated, based on the samples. If the model is valid, the quantiles
    should be uniformly distributed over [0,1].

    Since this relies on the generation of simulated data, all data
    stochastics must have a valid random() method for validation to proceed.

    Parameters
    ----------
    sampler : Sampler
        An MCMC sampler object.
    replicates (optional) : int
        The number of validation replicates (i.e. number of quantiles to be
        simulated). Defaults to 100.
    iterations (optional) : int
        The number of MCMC iterations to be run per replicate. Defaults to
        2000.
    burn (optional) : int
        The number of burn-in iterations to be run per replicate. Defaults
        to 1000.
    thin (optional) : int
        The thinning factor to be applied to posterior sample. Defaults to 1
        (no thinning)
    deterministic (optional) : bool
        Flag for inclusion of deterministic nodes in validation procedure.
        Defaults to False.
    db (optional) : string
        The database backend to use for the validation runs. Defaults to
        'ram'.
    plot (optional) : bool
        Flag for validation plots. Defaults to True.

    Returns
    -------
    stats : dict
        Return a dictionary containing tuples with the chi-square statistic
        and associated p-value for each data stochastic.

    Notes
    -----
    This function requires SciPy.
    """
    import scipy as sp

    # Set verbosity for models to zero
    sampler.verbose = 0

    # Specify parameters to be evaluated
    parameters = sampler.stochastics
    if deterministic:
        # Add deterministics to the mix, if requested
        parameters = parameters | sampler.deterministics

    # Assign database backend
    original_backend = sampler.db.__name__
    sampler._assign_database_backend(db)

    # Empty lists for quantiles
    quantiles = {}

    if verbose:
        print_("\nExecuting Cook et al. (2006) validation procedure ...\n")

    # Loop over replicates
    for i in range(replicates):

        # Sample from priors
        for p in sampler.stochastics:
            if not p.extended_parents:
                p.random()

        # Sample "true" data values
        for o in sampler.observed_stochastics:
            # Generate simuated data for data stochastic
            o.set_value(o.random(), force=True)
            if verbose:
                print_("Data for %s is %s" % (o.__name__, o.value))

        param_values = {}
        # Record data-generating parameter values
        for s in parameters:
            param_values[s] = s.value

        try:
            # Fit models given parameter values
            sampler.sample(iterations, burn=burn, thin=thin)

            for s in param_values:
                if not i:
                    # Initialize dict
                    quantiles[s.__name__] = []
                trace = s.trace()
                q = sum(trace < param_values[s], 0) / float(len(trace))
                quantiles[s.__name__].append(open01(q))

            # Replace data values
            for o in sampler.observed_stochastics:
                o.revert()

        finally:
            # Replace data values
            for o in sampler.observed_stochastics:
                o.revert()

            # Replace backend
            sampler._assign_database_backend(original_backend)

        if not i % 10 and i and verbose:
            print_("\tCompleted validation replicate", i)

    # Replace backend
    sampler._assign_database_backend(original_backend)

    stats = {}
    # Calculate chi-square statistics
    for param in quantiles:
        q = quantiles[param]
        # Calculate chi-square statistics
        X2 = sum(sp.special.ndtri(q) ** 2)
        # Calculate p-value
        p = sp.special.chdtrc(replicates, X2)
        stats[param] = (X2, p)

    if plot:
        # Convert p-values to z-scores
        p = copy(stats)
        for i in p:
            p[i] = p[i][1]
        Matplot.zplot(p, verbose=verbose)

    return stats
[ "def", "validate", "(", "sampler", ",", "replicates", "=", "20", ",", "iterations", "=", "10000", ",", "burn", "=", "5000", ",", "thin", "=", "1", ",", "deterministic", "=", "False", ",", "db", "=", "'ram'", ",", "plot", "=", "True", ",", "verbose", "=", "0", ")", ":", "import", "scipy", "as", "sp", "# Set verbosity for models to zero", "sampler", ".", "verbose", "=", "0", "# Specify parameters to be evaluated", "parameters", "=", "sampler", ".", "stochastics", "if", "deterministic", ":", "# Add deterministics to the mix, if requested", "parameters", "=", "parameters", "|", "sampler", ".", "deterministics", "# Assign database backend", "original_backend", "=", "sampler", ".", "db", ".", "__name__", "sampler", ".", "_assign_database_backend", "(", "db", ")", "# Empty lists for quantiles", "quantiles", "=", "{", "}", "if", "verbose", ":", "print_", "(", "\"\\nExecuting Cook et al. (2006) validation procedure ...\\n\"", ")", "# Loop over replicates", "for", "i", "in", "range", "(", "replicates", ")", ":", "# Sample from priors", "for", "p", "in", "sampler", ".", "stochastics", ":", "if", "not", "p", ".", "extended_parents", ":", "p", ".", "random", "(", ")", "# Sample \"true\" data values", "for", "o", "in", "sampler", ".", "observed_stochastics", ":", "# Generate simuated data for data stochastic", "o", ".", "set_value", "(", "o", ".", "random", "(", ")", ",", "force", "=", "True", ")", "if", "verbose", ":", "print_", "(", "\"Data for %s is %s\"", "%", "(", "o", ".", "__name__", ",", "o", ".", "value", ")", ")", "param_values", "=", "{", "}", "# Record data-generating parameter values", "for", "s", "in", "parameters", ":", "param_values", "[", "s", "]", "=", "s", ".", "value", "try", ":", "# Fit models given parameter values", "sampler", ".", "sample", "(", "iterations", ",", "burn", "=", "burn", ",", "thin", "=", "thin", ")", "for", "s", "in", "param_values", ":", "if", "not", "i", ":", "# Initialize dict", "quantiles", "[", "s", ".", "__name__", "]", "=", "[", "]", "trace", "=", "s", ".", "trace", "(", ")", "q", "=", "sum", "(", "trace", "<", "param_values", "[", "s", "]", ",", "0", ")", "/", "float", "(", "len", "(", "trace", ")", ")", "quantiles", "[", "s", ".", "__name__", "]", ".", "append", "(", "open01", "(", "q", ")", ")", "# Replace data values", "for", "o", "in", "sampler", ".", "observed_stochastics", ":", "o", ".", "revert", "(", ")", "finally", ":", "# Replace data values", "for", "o", "in", "sampler", ".", "observed_stochastics", ":", "o", ".", "revert", "(", ")", "# Replace backend", "sampler", ".", "_assign_database_backend", "(", "original_backend", ")", "if", "not", "i", "%", "10", "and", "i", "and", "verbose", ":", "print_", "(", "\"\\tCompleted validation replicate\"", ",", "i", ")", "# Replace backend", "sampler", ".", "_assign_database_backend", "(", "original_backend", ")", "stats", "=", "{", "}", "# Calculate chi-square statistics", "for", "param", "in", "quantiles", ":", "q", "=", "quantiles", "[", "param", "]", "# Calculate chi-square statistics", "X2", "=", "sum", "(", "sp", ".", "special", ".", "ndtri", "(", "q", ")", "**", "2", ")", "# Calculate p-value", "p", "=", "sp", ".", "special", ".", "chdtrc", "(", "replicates", ",", "X2", ")", "stats", "[", "param", "]", "=", "(", "X2", ",", "p", ")", "if", "plot", ":", "# Convert p-values to z-scores", "p", "=", "copy", "(", "stats", ")", "for", "i", "in", "p", ":", "p", "[", "i", "]", "=", "p", "[", "i", "]", "[", "1", "]", "Matplot", ".", "zplot", "(", "p", ",", "verbose", "=", "verbose", ")", "return", "stats" ]
Model validation method, following Cook et al. (Journal of Computational and Graphical Statistics, 2006, DOI: 10.1198/106186006X136976). Generates posterior samples based on 'true' parameter values and data simulated from the priors. The quantiles of the parameter values are calculated, based on the samples. If the model is valid, the quantiles should be uniformly distributed over [0,1]. Since this relies on the generation of simulated data, all data stochastics must have a valid random() method for validation to proceed. Parameters ---------- sampler : Sampler An MCMC sampler object. replicates (optional) : int The number of validation replicates (i.e. number of quantiles to be simulated). Defaults to 100. iterations (optional) : int The number of MCMC iterations to be run per replicate. Defaults to 2000. burn (optional) : int The number of burn-in iterations to be run per replicate. Defaults to 1000. thin (optional) : int The thinning factor to be applied to posterior sample. Defaults to 1 (no thinning) deterministic (optional) : bool Flag for inclusion of deterministic nodes in validation procedure. Defaults to False. db (optional) : string The database backend to use for the validation runs. Defaults to 'ram'. plot (optional) : bool Flag for validation plots. Defaults to True. Returns ------- stats : dict Return a dictionary containing tuples with the chi-square statistic and associated p-value for each data stochastic. Notes ----- This function requires SciPy.
[ "Model", "validation", "method", "following", "Cook", "et", "al", ".", "(", "Journal", "of", "Computational", "and", "Graphical", "Statistics", "2006", "DOI", ":", "10", ".", "1198", "/", "106186006X136976", ")", "." ]
python
train
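The validation statistic at the end converts each parameter's replicate quantiles to z-scores and sums their squares; under a valid model the quantiles are uniform and the sum is chi-square with `replicates` degrees of freedom. A standalone sketch of just that step with SciPy:

import numpy as np
from scipy import special

rng = np.random.default_rng(0)
q = rng.uniform(size=20)              # stand-in for one parameter's quantiles

X2 = np.sum(special.ndtri(q) ** 2)    # inverse-normal transform, squared and summed
p = special.chdtrc(len(q), X2)        # upper-tail chi-square p-value
print(X2, p)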
Hackerfleet/hfos
modules/chat/hfos/chat/bot.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/chat/hfos/chat/bot.py#L67-L81
def say(self, event): """Chat event handler for incoming events :param event: say-event with incoming chat message """ try: userid = event.user.uuid recipient = self._get_recipient(event) content = self._get_content(event) if self.config.name in content: self.log('I think, someone mentioned me:', content) except Exception as e: self.log("Error: '%s' %s" % (e, type(e)), exc=True, lvl=error)
[ "def", "say", "(", "self", ",", "event", ")", ":", "try", ":", "userid", "=", "event", ".", "user", ".", "uuid", "recipient", "=", "self", ".", "_get_recipient", "(", "event", ")", "content", "=", "self", ".", "_get_content", "(", "event", ")", "if", "self", ".", "config", ".", "name", "in", "content", ":", "self", ".", "log", "(", "'I think, someone mentioned me:'", ",", "content", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", "(", "\"Error: '%s' %s\"", "%", "(", "e", ",", "type", "(", "e", ")", ")", ",", "exc", "=", "True", ",", "lvl", "=", "error", ")" ]
Chat event handler for incoming events :param event: say-event with incoming chat message
[ "Chat", "event", "handler", "for", "incoming", "events", ":", "param", "event", ":", "say", "-", "event", "with", "incoming", "chat", "message" ]
python
train
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L846-L884
def no_more_a_problem(self, hosts, services, timeperiods, bi_modulations): """Remove this objects as an impact for other schedulingitem. :param hosts: hosts objects, used to get impacts :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get impacts :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used for update_business_impact_value :type timeperiods: alignak.objects.timeperiod.Timeperiods :param bi_modulations: business impact modulation are used when setting myself as problem :type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations :return: None TODO: SchedulingItem object should not handle other schedulingitem obj. We should call obj.register* on both obj. This is 'Java' style """ was_pb = self.is_problem if self.is_problem: self.is_problem = False # we warn impacts that we are no more a problem for impact_id in self.impacts: if impact_id in hosts: impact = hosts[impact_id] else: impact = services[impact_id] impact.unregister_a_problem(self) # we can just drop our impacts list self.impacts = [] # We update our business_impact value, it's not a huge thing :) self.update_business_impact_value(hosts, services, timeperiods, bi_modulations) # If we were a problem, we say to everyone # our new status, with good business_impact value if was_pb: # And we register a new broks for update status self.broks.append(self.get_update_status_brok())
[ "def", "no_more_a_problem", "(", "self", ",", "hosts", ",", "services", ",", "timeperiods", ",", "bi_modulations", ")", ":", "was_pb", "=", "self", ".", "is_problem", "if", "self", ".", "is_problem", ":", "self", ".", "is_problem", "=", "False", "# we warn impacts that we are no more a problem", "for", "impact_id", "in", "self", ".", "impacts", ":", "if", "impact_id", "in", "hosts", ":", "impact", "=", "hosts", "[", "impact_id", "]", "else", ":", "impact", "=", "services", "[", "impact_id", "]", "impact", ".", "unregister_a_problem", "(", "self", ")", "# we can just drop our impacts list", "self", ".", "impacts", "=", "[", "]", "# We update our business_impact value, it's not a huge thing :)", "self", ".", "update_business_impact_value", "(", "hosts", ",", "services", ",", "timeperiods", ",", "bi_modulations", ")", "# If we were a problem, we say to everyone", "# our new status, with good business_impact value", "if", "was_pb", ":", "# And we register a new broks for update status", "self", ".", "broks", ".", "append", "(", "self", ".", "get_update_status_brok", "(", ")", ")" ]
Remove this object as an impact for other schedulingitem.

:param hosts: hosts objects, used to get impacts
:type hosts: alignak.objects.host.Hosts
:param services: services objects, used to get impacts
:type services: alignak.objects.service.Services
:param timeperiods: Timeperiods objects, used for update_business_impact_value
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:param bi_modulations: business impact modulation are used when setting myself as problem
:type bi_modulations: alignak.object.businessimpactmodulation.Businessimpactmodulations
:return: None
TODO: SchedulingItem object should not handle other schedulingitem obj.
      We should call obj.register* on both obj.
      This is 'Java' style
[ "Remove", "this", "objects", "as", "an", "impact", "for", "other", "schedulingitem", "." ]
python
train
swisscom/cleanerversion
versions/deletion.py
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/deletion.py#L185-L194
def versionable_delete(self, instance, timestamp):
    """
    Soft-deletes the instance, setting its version_end_date to timestamp.

    Override this method to implement custom behaviour.

    :param Versionable instance:
    :param datetime timestamp:
    """
    instance._delete_at(timestamp, using=self.using)
[ "def", "versionable_delete", "(", "self", ",", "instance", ",", "timestamp", ")", ":", "instance", ".", "_delete_at", "(", "timestamp", ",", "using", "=", "self", ".", "using", ")" ]
Soft-deletes the instance, setting its version_end_date to timestamp.

Override this method to implement custom behaviour.

:param Versionable instance:
:param datetime timestamp:
[ "Soft", "-", "deletes", "the", "instance", "setting", "it", "s", "version_end_date", "to", "timestamp", "." ]
python
train
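A generic soft-delete sketch of the idea behind _delete_at: close the record's validity interval instead of removing the row. The class here is a toy stand-in, not CleanerVersion's model:

from datetime import datetime, timezone

class ToyVersionable:
    def __init__(self):
        self.version_end_date = None   # None means 'current version'

    def _delete_at(self, timestamp, using=None):
        self.version_end_date = timestamp   # row survives, marked as ended

item = ToyVersionable()
item._delete_at(datetime.now(timezone.utc))
print(item.version_end_date is not None)   # True: soft-deleted, still queryable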
wolfhong/formic
formic/formic.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/formic/formic.py#L801-L811
def _find_parent(self, path_elements): """Recurse up the tree of FileSetStates until we find a parent, i.e. one whose path_elements member is the start of the path_element argument""" if not self.path_elements: # Automatically terminate on root return self elif self.path_elements == path_elements[0:len(self.path_elements)]: return self else: return self.parent._find_parent(path_elements)
[ "def", "_find_parent", "(", "self", ",", "path_elements", ")", ":", "if", "not", "self", ".", "path_elements", ":", "# Automatically terminate on root", "return", "self", "elif", "self", ".", "path_elements", "==", "path_elements", "[", "0", ":", "len", "(", "self", ".", "path_elements", ")", "]", ":", "return", "self", "else", ":", "return", "self", ".", "parent", ".", "_find_parent", "(", "path_elements", ")" ]
Recurse up the tree of FileSetStates until we find a parent, i.e. one whose path_elements member is the start of the path_element argument
[ "Recurse", "up", "the", "tree", "of", "FileSetStates", "until", "we", "find", "a", "parent", "i", ".", "e", ".", "one", "whose", "path_elements", "member", "is", "the", "start", "of", "the", "path_element", "argument" ]
python
train
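The recursion walks toward the root until the current node's path is a prefix of the target path (the root's empty path always matches). A standalone sketch with a toy node class:

class Node:
    def __init__(self, path_elements, parent=None):
        self.path_elements = path_elements
        self.parent = parent

    def find_parent(self, path_elements):
        if not self.path_elements:      # root: terminates the recursion
            return self
        if self.path_elements == path_elements[:len(self.path_elements)]:
            return self                 # our path prefixes the target
        return self.parent.find_parent(path_elements)

root = Node([])
src = Node(["src"], root)
main = Node(["src", "main"], src)
print(main.find_parent(["src", "test"]).path_elements)   # ['src']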
PythonCharmers/python-future
src/future/backports/email/utils.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/utils.py#L267-L279
def encode_rfc2231(s, charset=None, language=None): """Encode string according to RFC 2231. If neither charset nor language is given, then s is returned as-is. If charset is given but not language, the string is encoded using the empty string for language. """ s = url_quote(s, safe='', encoding=charset or 'ascii') if charset is None and language is None: return s if language is None: language = '' return "%s'%s'%s" % (charset, language, s)
[ "def", "encode_rfc2231", "(", "s", ",", "charset", "=", "None", ",", "language", "=", "None", ")", ":", "s", "=", "url_quote", "(", "s", ",", "safe", "=", "''", ",", "encoding", "=", "charset", "or", "'ascii'", ")", "if", "charset", "is", "None", "and", "language", "is", "None", ":", "return", "s", "if", "language", "is", "None", ":", "language", "=", "''", "return", "\"%s'%s'%s\"", "%", "(", "charset", ",", "language", ",", "s", ")" ]
Encode string according to RFC 2231. If neither charset nor language is given, then s is returned as-is. If charset is given but not language, the string is encoded using the empty string for language.
[ "Encode", "string", "according", "to", "RFC", "2231", "." ]
python
train
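A quick usage check for the function above (the filenames are arbitrary examples):

print(encode_rfc2231("résumé draft.txt", charset="utf-8"))
# utf-8''r%C3%A9sum%C3%A9%20draft.txt
print(encode_rfc2231("plain.txt"))   # no charset/language: no RFC 2231 wrapper
# plain.txt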
gnosis/gnosis-py
gnosis/safe/safe_tx.py
https://github.com/gnosis/gnosis-py/blob/2a9a5d75a375fc9813ac04df133e6910c82f9d49/gnosis/safe/safe_tx.py#L189-L223
def execute(self, tx_sender_private_key: str, tx_gas: Optional[int] = None, tx_gas_price: Optional[int] = None, tx_nonce: Optional[int] = None, block_identifier='pending') -> Tuple[bytes, Dict[str, any]]: """ Send multisig tx to the Safe :param tx_sender_private_key: Sender private key :param tx_gas: Gas for the external tx. If not, `(safe_tx_gas + data_gas) * 2` will be used :param tx_gas_price: Gas price of the external tx. If not, `gas_price` will be used :param tx_nonce: Force nonce for `tx_sender` :param block_identifier: `latest` or `pending` :return: Tuple(tx_hash, tx) :raises: InvalidMultisigTx: If user tx cannot go through the Safe """ tx_gas_price = tx_gas_price or self.gas_price # Use wrapped tx gas_price if not provided tx_gas = tx_gas or (self.safe_tx_gas + self.data_gas) * 2 tx_sender_address = Account.privateKeyToAccount(tx_sender_private_key).address tx_parameters = { 'from': tx_sender_address, 'gas': tx_gas, 'gasPrice': tx_gas_price, } if tx_nonce is not None: tx_parameters['nonce'] = tx_nonce self.tx = self.w3_tx.buildTransaction(tx_parameters) self.tx_hash = self.ethereum_client.send_unsigned_transaction(self.tx, private_key=tx_sender_private_key, retry=True, block_identifier=block_identifier) return self.tx_hash, self.tx
[ "def", "execute", "(", "self", ",", "tx_sender_private_key", ":", "str", ",", "tx_gas", ":", "Optional", "[", "int", "]", "=", "None", ",", "tx_gas_price", ":", "Optional", "[", "int", "]", "=", "None", ",", "tx_nonce", ":", "Optional", "[", "int", "]", "=", "None", ",", "block_identifier", "=", "'pending'", ")", "->", "Tuple", "[", "bytes", ",", "Dict", "[", "str", ",", "any", "]", "]", ":", "tx_gas_price", "=", "tx_gas_price", "or", "self", ".", "gas_price", "# Use wrapped tx gas_price if not provided", "tx_gas", "=", "tx_gas", "or", "(", "self", ".", "safe_tx_gas", "+", "self", ".", "data_gas", ")", "*", "2", "tx_sender_address", "=", "Account", ".", "privateKeyToAccount", "(", "tx_sender_private_key", ")", ".", "address", "tx_parameters", "=", "{", "'from'", ":", "tx_sender_address", ",", "'gas'", ":", "tx_gas", ",", "'gasPrice'", ":", "tx_gas_price", ",", "}", "if", "tx_nonce", "is", "not", "None", ":", "tx_parameters", "[", "'nonce'", "]", "=", "tx_nonce", "self", ".", "tx", "=", "self", ".", "w3_tx", ".", "buildTransaction", "(", "tx_parameters", ")", "self", ".", "tx_hash", "=", "self", ".", "ethereum_client", ".", "send_unsigned_transaction", "(", "self", ".", "tx", ",", "private_key", "=", "tx_sender_private_key", ",", "retry", "=", "True", ",", "block_identifier", "=", "block_identifier", ")", "return", "self", ".", "tx_hash", ",", "self", ".", "tx" ]
Send multisig tx to the Safe :param tx_sender_private_key: Sender private key :param tx_gas: Gas for the external tx. If not, `(safe_tx_gas + data_gas) * 2` will be used :param tx_gas_price: Gas price of the external tx. If not, `gas_price` will be used :param tx_nonce: Force nonce for `tx_sender` :param block_identifier: `latest` or `pending` :return: Tuple(tx_hash, tx) :raises: InvalidMultisigTx: If user tx cannot go through the Safe
[ "Send", "multisig", "tx", "to", "the", "Safe", ":", "param", "tx_sender_private_key", ":", "Sender", "private", "key", ":", "param", "tx_gas", ":", "Gas", "for", "the", "external", "tx", ".", "If", "not", "(", "safe_tx_gas", "+", "data_gas", ")", "*", "2", "will", "be", "used", ":", "param", "tx_gas_price", ":", "Gas", "price", "of", "the", "external", "tx", ".", "If", "not", "gas_price", "will", "be", "used", ":", "param", "tx_nonce", ":", "Force", "nonce", "for", "tx_sender", ":", "param", "block_identifier", ":", "latest", "or", "pending", ":", "return", ":", "Tuple", "(", "tx_hash", "tx", ")", ":", "raises", ":", "InvalidMultisigTx", ":", "If", "user", "tx", "cannot", "go", "through", "the", "Safe" ]
python
test
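A call sketch for execute(); the key is a placeholder (never use a known key), and a real run needs a funded sender and a deployed Safe behind this SafeTx:

PLACEHOLDER_KEY = "0x" + "00" * 31 + "01"   # hypothetical, for illustration only
tx_hash, tx = safe_tx.execute(PLACEHOLDER_KEY, tx_gas=400_000,
                              block_identifier="pending")
print(tx_hash.hex(), tx["gasPrice"])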
pyviz/holoviews
holoviews/plotting/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L985-L998
def _get_min_distance_numpy(element): """ NumPy based implementation of get_min_distance """ xys = element.array([0, 1]) with warnings.catch_warnings(): warnings.filterwarnings('ignore', r'invalid value encountered in') xys = xys.astype('float32').view(np.complex64) distances = np.abs(xys.T-xys) np.fill_diagonal(distances, np.inf) distances = distances[distances>0] if len(distances): return distances.min() return 0
[ "def", "_get_min_distance_numpy", "(", "element", ")", ":", "xys", "=", "element", ".", "array", "(", "[", "0", ",", "1", "]", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "filterwarnings", "(", "'ignore'", ",", "r'invalid value encountered in'", ")", "xys", "=", "xys", ".", "astype", "(", "'float32'", ")", ".", "view", "(", "np", ".", "complex64", ")", "distances", "=", "np", ".", "abs", "(", "xys", ".", "T", "-", "xys", ")", "np", ".", "fill_diagonal", "(", "distances", ",", "np", ".", "inf", ")", "distances", "=", "distances", "[", "distances", ">", "0", "]", "if", "len", "(", "distances", ")", ":", "return", "distances", ".", "min", "(", ")", "return", "0" ]
NumPy based implementation of get_min_distance
[ "NumPy", "based", "implementation", "of", "get_min_distance" ]
python
train
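Viewing an (N, 2) float32 array as complex64 turns each (x, y) row into x + yj, so one vectorised abs() of the outer difference yields every pairwise Euclidean distance. A standalone sketch of the same trick:

import numpy as np

pts = np.array([[0.0, 0.0], [3.0, 4.0], [1.0, 1.0]], dtype="float32")

xy = pts.view(np.complex64).ravel()     # rows (x, y) -> x + yj
d = np.abs(xy[:, None] - xy[None, :])   # |z_i - z_j| is the 2-D distance
np.fill_diagonal(d, np.inf)             # mask self-distances
print(d.min())                          # ~1.4142135, between (0,0) and (1,1)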
oemof/demandlib
demandlib/bdew.py
https://github.com/oemof/demandlib/blob/4b62d60e05cb06eb2590f9c5655c2cdebf494080/demandlib/bdew.py#L178-L219
def weighted_temperature(self, how='geometric_series'): r""" A new temperature vector is generated containing a multi-day average temperature as needed in the load profile function. Parameters ---------- how : string string which type to return ("geometric_series" or "mean") Notes ----- Equation for the mathematical series of the average tempaerature [1]_: .. math:: T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+ 0.125\cdot T_{D-3}}{1+0.5+0.25+0.125} with :math:`T_D` = Average temperature on the present day :math:`T_{D-i}` = Average temperature on the day - i References ---------- .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_, BDEW Documentation for heat profiles. """ # calculate daily mean temperature temperature = self.df['temperature'].resample('D').mean().reindex( self.df.index).fillna(method='ffill').fillna(method='bfill') if how == 'geometric_series': temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) + 0.25 * np.roll(temperature, 48) + 0.125 * np.roll(temperature, 72)) / 1.875 elif how == 'mean': temperature_mean = temperature else: temperature_mean = None return temperature_mean
[ "def", "weighted_temperature", "(", "self", ",", "how", "=", "'geometric_series'", ")", ":", "# calculate daily mean temperature", "temperature", "=", "self", ".", "df", "[", "'temperature'", "]", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ".", "reindex", "(", "self", ".", "df", ".", "index", ")", ".", "fillna", "(", "method", "=", "'ffill'", ")", ".", "fillna", "(", "method", "=", "'bfill'", ")", "if", "how", "==", "'geometric_series'", ":", "temperature_mean", "=", "(", "temperature", "+", "0.5", "*", "np", ".", "roll", "(", "temperature", ",", "24", ")", "+", "0.25", "*", "np", ".", "roll", "(", "temperature", ",", "48", ")", "+", "0.125", "*", "np", ".", "roll", "(", "temperature", ",", "72", ")", ")", "/", "1.875", "elif", "how", "==", "'mean'", ":", "temperature_mean", "=", "temperature", "else", ":", "temperature_mean", "=", "None", "return", "temperature_mean" ]
r""" A new temperature vector is generated containing a multi-day average temperature as needed in the load profile function. Parameters ---------- how : string string which type to return ("geometric_series" or "mean") Notes ----- Equation for the mathematical series of the average tempaerature [1]_: .. math:: T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+ 0.125\cdot T_{D-3}}{1+0.5+0.25+0.125} with :math:`T_D` = Average temperature on the present day :math:`T_{D-i}` = Average temperature on the day - i References ---------- .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_, BDEW Documentation for heat profiles.
[ "r", "A", "new", "temperature", "vector", "is", "generated", "containing", "a", "multi", "-", "day", "average", "temperature", "as", "needed", "in", "the", "load", "profile", "function", "." ]
python
train
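A worked sketch (made-up daily means, editor's addition) of the BDEW geometric series above: today's mean temperature is weighted fully, the three preceding days by 0.5, 0.25 and 0.125, and the sum is normalised by 1.875.

import numpy as np

t = np.array([10.0, 12.0, 8.0, 9.0])   # daily mean temperatures, day 0..3
weighted = (t[3] + 0.5 * t[2] + 0.25 * t[1] + 0.125 * t[0]) / 1.875
print(weighted)                         # (9 + 4 + 3 + 1.25) / 1.875 = 9.2

In the method itself the one-, two- and three-day shifts are np.roll by 24, 48 and 72, because the daily means have been re-expanded onto an hourly index first.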
KelSolaar/Foundations
foundations/parsers.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/parsers.py#L672-L798
def parse(self, raw_sections=None, namespaces=True, strip_comments=True, strip_whitespaces=True, strip_quotation_markers=True, raise_parsing_errors=True): """ Process the file content and extracts the sections / attributes as nested :class:`collections.OrderedDict` dictionaries or dictionaries. Usage:: >>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"] >>> sections_file_parser = SectionsFileParser() >>> sections_file_parser.content = content >>> sections_file_parser.parse(strip_comments=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections.keys() [u'_defaults'] >>> sections_file_parser.sections["_defaults"].values() [u'Value A', u'Value B'] >>> sections_file_parser.parse(strip_comments=False, strip_quotation_markers=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"].values() [u'"Value A"', u'"Value B"'] >>> sections_file_parser.comments OrderedDict([(u'_defaults|#0', {u'content': u'Comment.', u'id': 0})]) >>> sections_file_parser.parse() <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"] OrderedDict([(u'_defaults|Attribute 1', u'Value A'), (u'_defaults|Attribute 2', u'Value B')]) >>> sections_file_parser.parse(namespaces=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"] OrderedDict([(u'Attribute 1', u'Value A'), (u'Attribute 2', u'Value B')]) :param raw_sections: Ignored raw sections. :type raw_sections: tuple or list :param namespaces: Attributes and comments are namespaced. :type namespaces: bool :param strip_comments: Comments are stripped. :type strip_comments: bool :param strip_whitespaces: Whitespaces are stripped. :type strip_whitespaces: bool :param strip_quotation_markers: Attributes values quotation markers are stripped. :type strip_quotation_markers: bool :param raise_parsing_errors: Raise parsing errors. :type raise_parsing_errors: bool :return: SectionFileParser instance. :rtype: SectionFileParser """ LOGGER.debug("> Reading sections from: '{0}'.".format(self.path)) if not self.content: self.read() attributes = {} if not self.__preserve_order else OrderedDict() section = self.__defaults_section raw_sections = raw_sections or [] commentId = 0 for i, line in enumerate(self.content): # Comments matching. search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__comment_limiters)), line) if search: if not strip_comments: comment = namespaces and foundations.namespace.set_namespace(section, "{0}{1}".format( self.__comment_marker, commentId), self.__namespace_splitter) or \ "{0}{1}".format(self.__comment_marker, commentId) self.__comments[comment] = {"id": commentId, "content": strip_whitespaces and search.group( "comment").strip() or search.group( "comment")} commentId += 1 continue # Sections matching. search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line) if search: section = strip_whitespaces and search.group("section").strip() or search.group("section") if not self.__preserve_order: attributes = {} else: attributes = OrderedDict() rawContent = [] continue if section in raw_sections: rawContent.append(line) attributes[self.__raw_section_content_identifier] = rawContent else: # Empty line matching. search = re.search(r"^\s*$", line) if search: continue # Attributes matching. 
search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line) \ or re.search(r"^(?P<attribute>.+?)[{0}]\s*$".format("".join(self.__splitters)), line) if search: attribute = search.group("attribute").strip() if strip_whitespaces else search.group("attribute") attribute = foundations.namespace.set_namespace(section, attribute, self.__namespace_splitter) \ if namespaces else attribute if len(search.groups()) == 2: value = search.group("value").strip() if strip_whitespaces else search.group("value") attributes[attribute] = value.strip("".join(self.__quotation_markers)) \ if strip_quotation_markers else value else: attributes[attribute] = None else: self.__parsing_errors.append(foundations.exceptions.AttributeStructureParsingError( "Attribute structure is invalid: {0}".format(line), i + 1)) self.__sections[section] = attributes LOGGER.debug("> Sections: '{0}'.".format(self.__sections)) LOGGER.debug("> '{0}' file parsing done!".format(self.path)) if self.__parsing_errors and raise_parsing_errors: raise foundations.exceptions.FileStructureParsingError( "{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__, self.path)) return self
[ "def", "parse", "(", "self", ",", "raw_sections", "=", "None", ",", "namespaces", "=", "True", ",", "strip_comments", "=", "True", ",", "strip_whitespaces", "=", "True", ",", "strip_quotation_markers", "=", "True", ",", "raise_parsing_errors", "=", "True", ")", ":", "LOGGER", ".", "debug", "(", "\"> Reading sections from: '{0}'.\"", ".", "format", "(", "self", ".", "path", ")", ")", "if", "not", "self", ".", "content", ":", "self", ".", "read", "(", ")", "attributes", "=", "{", "}", "if", "not", "self", ".", "__preserve_order", "else", "OrderedDict", "(", ")", "section", "=", "self", ".", "__defaults_section", "raw_sections", "=", "raw_sections", "or", "[", "]", "commentId", "=", "0", "for", "i", ",", "line", "in", "enumerate", "(", "self", ".", "content", ")", ":", "# Comments matching.", "search", "=", "re", ".", "search", "(", "r\"^\\s*[{0}](?P<comment>.+)$\"", ".", "format", "(", "\"\"", ".", "join", "(", "self", ".", "__comment_limiters", ")", ")", ",", "line", ")", "if", "search", ":", "if", "not", "strip_comments", ":", "comment", "=", "namespaces", "and", "foundations", ".", "namespace", ".", "set_namespace", "(", "section", ",", "\"{0}{1}\"", ".", "format", "(", "self", ".", "__comment_marker", ",", "commentId", ")", ",", "self", ".", "__namespace_splitter", ")", "or", "\"{0}{1}\"", ".", "format", "(", "self", ".", "__comment_marker", ",", "commentId", ")", "self", ".", "__comments", "[", "comment", "]", "=", "{", "\"id\"", ":", "commentId", ",", "\"content\"", ":", "strip_whitespaces", "and", "search", ".", "group", "(", "\"comment\"", ")", ".", "strip", "(", ")", "or", "search", ".", "group", "(", "\"comment\"", ")", "}", "commentId", "+=", "1", "continue", "# Sections matching.", "search", "=", "re", ".", "search", "(", "r\"^\\s*\\[(?P<section>.+)\\]\\s*$\"", ",", "line", ")", "if", "search", ":", "section", "=", "strip_whitespaces", "and", "search", ".", "group", "(", "\"section\"", ")", ".", "strip", "(", ")", "or", "search", ".", "group", "(", "\"section\"", ")", "if", "not", "self", ".", "__preserve_order", ":", "attributes", "=", "{", "}", "else", ":", "attributes", "=", "OrderedDict", "(", ")", "rawContent", "=", "[", "]", "continue", "if", "section", "in", "raw_sections", ":", "rawContent", ".", "append", "(", "line", ")", "attributes", "[", "self", ".", "__raw_section_content_identifier", "]", "=", "rawContent", "else", ":", "# Empty line matching.", "search", "=", "re", ".", "search", "(", "r\"^\\s*$\"", ",", "line", ")", "if", "search", ":", "continue", "# Attributes matching.", "search", "=", "re", ".", "search", "(", "r\"^(?P<attribute>.+?)[{0}](?P<value>.+)$\"", ".", "format", "(", "\"\"", ".", "join", "(", "self", ".", "__splitters", ")", ")", ",", "line", ")", "or", "re", ".", "search", "(", "r\"^(?P<attribute>.+?)[{0}]\\s*$\"", ".", "format", "(", "\"\"", ".", "join", "(", "self", ".", "__splitters", ")", ")", ",", "line", ")", "if", "search", ":", "attribute", "=", "search", ".", "group", "(", "\"attribute\"", ")", ".", "strip", "(", ")", "if", "strip_whitespaces", "else", "search", ".", "group", "(", "\"attribute\"", ")", "attribute", "=", "foundations", ".", "namespace", ".", "set_namespace", "(", "section", ",", "attribute", ",", "self", ".", "__namespace_splitter", ")", "if", "namespaces", "else", "attribute", "if", "len", "(", "search", ".", "groups", "(", ")", ")", "==", "2", ":", "value", "=", "search", ".", "group", "(", "\"value\"", ")", ".", "strip", "(", ")", "if", "strip_whitespaces", "else", "search", ".", "group", "(", "\"value\"", ")", 
"attributes", "[", "attribute", "]", "=", "value", ".", "strip", "(", "\"\"", ".", "join", "(", "self", ".", "__quotation_markers", ")", ")", "if", "strip_quotation_markers", "else", "value", "else", ":", "attributes", "[", "attribute", "]", "=", "None", "else", ":", "self", ".", "__parsing_errors", ".", "append", "(", "foundations", ".", "exceptions", ".", "AttributeStructureParsingError", "(", "\"Attribute structure is invalid: {0}\"", ".", "format", "(", "line", ")", ",", "i", "+", "1", ")", ")", "self", ".", "__sections", "[", "section", "]", "=", "attributes", "LOGGER", ".", "debug", "(", "\"> Sections: '{0}'.\"", ".", "format", "(", "self", ".", "__sections", ")", ")", "LOGGER", ".", "debug", "(", "\"> '{0}' file parsing done!\"", ".", "format", "(", "self", ".", "path", ")", ")", "if", "self", ".", "__parsing_errors", "and", "raise_parsing_errors", ":", "raise", "foundations", ".", "exceptions", ".", "FileStructureParsingError", "(", "\"{0} | '{1}' structure is invalid, parsing exceptions occured!\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "path", ")", ")", "return", "self" ]
Process the file content and extracts the sections / attributes as nested :class:`collections.OrderedDict` dictionaries or dictionaries. Usage:: >>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"] >>> sections_file_parser = SectionsFileParser() >>> sections_file_parser.content = content >>> sections_file_parser.parse(strip_comments=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections.keys() [u'_defaults'] >>> sections_file_parser.sections["_defaults"].values() [u'Value A', u'Value B'] >>> sections_file_parser.parse(strip_comments=False, strip_quotation_markers=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"].values() [u'"Value A"', u'"Value B"'] >>> sections_file_parser.comments OrderedDict([(u'_defaults|#0', {u'content': u'Comment.', u'id': 0})]) >>> sections_file_parser.parse() <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"] OrderedDict([(u'_defaults|Attribute 1', u'Value A'), (u'_defaults|Attribute 2', u'Value B')]) >>> sections_file_parser.parse(namespaces=False) <foundations.parsers.SectionsFileParser object at 0x860323123> >>> sections_file_parser.sections["_defaults"] OrderedDict([(u'Attribute 1', u'Value A'), (u'Attribute 2', u'Value B')]) :param raw_sections: Ignored raw sections. :type raw_sections: tuple or list :param namespaces: Attributes and comments are namespaced. :type namespaces: bool :param strip_comments: Comments are stripped. :type strip_comments: bool :param strip_whitespaces: Whitespaces are stripped. :type strip_whitespaces: bool :param strip_quotation_markers: Attributes values quotation markers are stripped. :type strip_quotation_markers: bool :param raise_parsing_errors: Raise parsing errors. :type raise_parsing_errors: bool :return: SectionFileParser instance. :rtype: SectionFileParser
[ "Process", "the", "file", "content", "and", "extracts", "the", "sections", "/", "attributes", "as", "nested", ":", "class", ":", "collections", ".", "OrderedDict", "dictionaries", "or", "dictionaries", "." ]
python
train
ekzhu/datasketch
datasketch/lean_minhash.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lean_minhash.py#L51-L60
def _initialize_slots(self, seed, hashvalues): '''Initialize the slots of the LeanMinHash. Args: seed (int): The random seed controls the set of random permutation functions generated for this LeanMinHash. hashvalues: The hash values are the internal state of the LeanMinHash. ''' self.seed = seed self.hashvalues = self._parse_hashvalues(hashvalues)
[ "def", "_initialize_slots", "(", "self", ",", "seed", ",", "hashvalues", ")", ":", "self", ".", "seed", "=", "seed", "self", ".", "hashvalues", "=", "self", ".", "_parse_hashvalues", "(", "hashvalues", ")" ]
Initialize the slots of the LeanMinHash. Args: seed (int): The random seed controls the set of random permutation functions generated for this LeanMinHash. hashvalues: The hash values are the internal state of the LeanMinHash.
[ "Initialize", "the", "slots", "of", "the", "LeanMinHash", "." ]
python
test
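A hedged usage sketch (editor's addition): a LeanMinHash is normally built from a full MinHash, and _initialize_slots keeps only the two pieces of state named in its signature.

from datasketch import MinHash, LeanMinHash

mh = MinHash(num_perm=128)
for token in [b"cat", b"dog", b"fish"]:
    mh.update(token)

lean = LeanMinHash(mh)                 # copies just seed + hashvalues via _initialize_slots
print(lean.seed == mh.seed)            # True
print(lean.jaccard(LeanMinHash(mh)))   # 1.0, identical sets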
tensorflow/tensor2tensor
tensor2tensor/models/vanilla_gan.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L37-L48
def deconv2d( input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"): """Deconvolution layer.""" with tf.variable_scope(name): w = tf.get_variable( "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose( input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable( "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
[ "def", "deconv2d", "(", "input_", ",", "output_shape", ",", "k_h", ",", "k_w", ",", "d_h", ",", "d_w", ",", "stddev", "=", "0.02", ",", "name", "=", "\"deconv2d\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "w", "=", "tf", ".", "get_variable", "(", "\"w\"", ",", "[", "k_h", ",", "k_w", ",", "output_shape", "[", "-", "1", "]", ",", "input_", ".", "get_shape", "(", ")", "[", "-", "1", "]", "]", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "stddev", "=", "stddev", ")", ")", "deconv", "=", "tf", ".", "nn", ".", "conv2d_transpose", "(", "input_", ",", "w", ",", "output_shape", "=", "output_shape", ",", "strides", "=", "[", "1", ",", "d_h", ",", "d_w", ",", "1", "]", ")", "biases", "=", "tf", ".", "get_variable", "(", "\"biases\"", ",", "[", "output_shape", "[", "-", "1", "]", "]", ",", "initializer", "=", "tf", ".", "constant_initializer", "(", "0.0", ")", ")", "return", "tf", ".", "reshape", "(", "tf", ".", "nn", ".", "bias_add", "(", "deconv", ",", "biases", ")", ",", "deconv", ".", "get_shape", "(", ")", ")" ]
Deconvolution layer.
[ "Deconvolution", "layer", "." ]
python
train
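A hedged usage sketch (editor's addition; TF1 graph mode and the shapes are assumed): deconv2d above needs the full target output_shape, and its weight tensor is laid out as [k_h, k_w, out_channels, in_channels] as conv2d_transpose expects, so a stride-2 call doubles the spatial size.

import tensorflow as tf

x = tf.placeholder(tf.float32, [16, 4, 4, 64])           # NHWC batch of feature maps
y = deconv2d(x, output_shape=[16, 8, 8, 32],             # upsample 4x4 -> 8x8
             k_h=5, k_w=5, d_h=2, d_w=2, name="up1")
print(y.shape)                                           # (16, 8, 8, 32)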
icgood/pymap
pymap/parsing/specials/sequenceset.py
https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/parsing/specials/sequenceset.py#L61-L72
def is_all(self) -> bool: """True if the sequence set starts at ``1`` and ends at the maximum value. This may be used to optimize cases of checking for a value in the set, avoiding the need to provide ``max_value`` in :meth:`.flatten` or :meth:`.iter`. """ first = self.sequences[0] return isinstance(first, tuple) \ and first[0] == 1 and isinstance(first[1], MaxValue)
[ "def", "is_all", "(", "self", ")", "->", "bool", ":", "first", "=", "self", ".", "sequences", "[", "0", "]", "return", "isinstance", "(", "first", ",", "tuple", ")", "and", "first", "[", "0", "]", "==", "1", "and", "isinstance", "(", "first", "[", "1", "]", ",", "MaxValue", ")" ]
True if the sequence set starts at ``1`` and ends at the maximum value. This may be used to optimize cases of checking for a value in the set, avoiding the need to provide ``max_value`` in :meth:`.flatten` or :meth:`.iter`.
[ "True", "if", "the", "sequence", "set", "starts", "at", "1", "and", "ends", "at", "the", "maximum", "value", "." ]
python
train
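An illustrative sketch (editor's addition with stand-in types, not pymap's real classes) of the is_all check: the first entry must be a range whose start is 1 and whose end is the symbolic maximum, i.e. the IMAP sequence set ``1:*``.

class MaxValue:
    """Stand-in for pymap's '*' (maximum message) marker."""

sequences = [(1, MaxValue())]     # the IMAP sequence set "1:*"
first = sequences[0]
print(isinstance(first, tuple)
      and first[0] == 1 and isinstance(first[1], MaxValue))   # True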
tkem/cachetools
cachetools/__init__.py
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/__init__.py#L71-L112
def cachedmethod(cache, key=keys.hashkey, lock=None): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a cache. """ def decorator(method): if lock is None: def wrapper(self, *args, **kwargs): c = cache(self) if c is None: return method(self, *args, **kwargs) k = key(*args, **kwargs) try: return c[k] except KeyError: pass # key not found v = method(self, *args, **kwargs) try: c[k] = v except ValueError: pass # value too large return v else: def wrapper(self, *args, **kwargs): c = cache(self) if c is None: return method(self, *args, **kwargs) k = key(*args, **kwargs) try: with lock(self): return c[k] except KeyError: pass # key not found v = method(self, *args, **kwargs) try: with lock(self): c[k] = v except ValueError: pass # value too large return v return _update_wrapper(wrapper, method) return decorator
[ "def", "cachedmethod", "(", "cache", ",", "key", "=", "keys", ".", "hashkey", ",", "lock", "=", "None", ")", ":", "def", "decorator", "(", "method", ")", ":", "if", "lock", "is", "None", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c", "=", "cache", "(", "self", ")", "if", "c", "is", "None", ":", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "k", "=", "key", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "return", "c", "[", "k", "]", "except", "KeyError", ":", "pass", "# key not found", "v", "=", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "c", "[", "k", "]", "=", "v", "except", "ValueError", ":", "pass", "# value too large", "return", "v", "else", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "c", "=", "cache", "(", "self", ")", "if", "c", "is", "None", ":", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "k", "=", "key", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "with", "lock", "(", "self", ")", ":", "return", "c", "[", "k", "]", "except", "KeyError", ":", "pass", "# key not found", "v", "=", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "with", "lock", "(", "self", ")", ":", "c", "[", "k", "]", "=", "v", "except", "ValueError", ":", "pass", "# value too large", "return", "v", "return", "_update_wrapper", "(", "wrapper", ",", "method", ")", "return", "decorator" ]
Decorator to wrap a class or instance method with a memoizing callable that saves results in a cache.
[ "Decorator", "to", "wrap", "a", "class", "or", "instance", "method", "with", "a", "memoizing", "callable", "that", "saves", "results", "in", "a", "cache", "." ]
python
train
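A hedged usage sketch, close to the cachetools documentation: the decorator's first argument is a callable that pulls the cache off the instance, so each object can carry its own cache.

import operator
from cachetools import LRUCache, cachedmethod

class PEPClient:
    def __init__(self):
        self.cache = LRUCache(maxsize=32)

    @cachedmethod(operator.attrgetter('cache'))
    def get_pep(self, num):
        print('fetching', num)      # runs only on a cache miss
        return 'PEP %d' % num

c = PEPClient()
c.get_pep(8)    # miss: fetches and stores
c.get_pep(8)    # hit: served from c.cache, nothing printed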
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5676-L5684
def libvlc_video_get_aspect_ratio(p_mi): '''Get current video aspect ratio. @param p_mi: the media player. @return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()). ''' f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \ _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result, ctypes.c_void_p, MediaPlayer) return f(p_mi)
[ "def", "libvlc_video_get_aspect_ratio", "(", "p_mi", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_get_aspect_ratio'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_get_aspect_ratio'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "string_result", ",", "ctypes", ".", "c_void_p", ",", "MediaPlayer", ")", "return", "f", "(", "p_mi", ")" ]
Get current video aspect ratio. @param p_mi: the media player. @return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()).
[ "Get", "current", "video", "aspect", "ratio", "." ]
python
train
dariosky/wfcli
wfcli/tossl.py
https://github.com/dariosky/wfcli/blob/87a9ed30dbd456f801135a55099f0541b0614ccb/wfcli/tossl.py#L184-L198
def website_exists_as_secure(self, website): """ Return the secure equivalent of the website if one exists, or the website itself if it is already secured; None otherwise. We will have 2 websites with the same name: one insecure (that will contain just the redirect and the identity-verification) and one secured. """ if website['https']: logger.info("website %s is already secured, skip" % website['name']) return website # changes in these fields are ignored for other in self._websites: if other['id'] == website['id']: continue if other['name'] == website['name'] and other['https']: return other return None
[ "def", "website_exists_as_secure", "(", "self", ",", "website", ")", ":", "if", "website", "[", "'https'", "]", ":", "logger", ".", "info", "(", "\"website %s is already secured, skip\"", "%", "website", "[", "'name'", "]", ")", "return", "website", "# changes in these fields are ignored", "for", "other", "in", "self", ".", "_websites", ":", "if", "other", "[", "'id'", "]", "==", "website", "[", "'id'", "]", ":", "continue", "if", "other", "[", "'name'", "]", "==", "website", "[", "'name'", "]", "and", "other", "[", "'https'", "]", ":", "return", "other", "return", "None" ]
Return the secure equivalent of the website if one exists, or the website itself if it is already secured; None otherwise. We will have 2 websites with the same name: one insecure (that will contain just the redirect and the identity-verification) and one secured.
[ "Return", "true", "if", "the", "website", "has", "an", "equivalent", "that", "is", "secure", "we", "will", "have", "2", "websites", "with", "the", "same", "name", "one", "insecure", "(", "that", "will", "contain", "just", "the", "redirect", "and", "the", "identity", "-", "verification", ")", "and", "one", "secured" ]
python
train
pandas-dev/pandas
pandas/io/parsers.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1951-L2003
def _set_noconvert_columns(self): """ Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions. """ names = self.orig_names if self.usecols_dtype == 'integer': # A set of integers will be converted to a list in # the correct order every single time. usecols = list(self.usecols) usecols.sort() elif (callable(self.usecols) or self.usecols_dtype not in ('empty', None)): # The names attribute should have the correct columns # in the proper order for indexing with parse_dates. usecols = self.names[:] else: # Usecols is empty. usecols = None def _set(x): if usecols is not None and is_integer(x): x = usecols[x] if not is_integer(x): x = names.index(x) self._reader.set_noconvert(x) if isinstance(self.parse_dates, list): for val in self.parse_dates: if isinstance(val, list): for k in val: _set(k) else: _set(val) elif isinstance(self.parse_dates, dict): for val in self.parse_dates.values(): if isinstance(val, list): for k in val: _set(k) else: _set(val) elif self.parse_dates: if isinstance(self.index_col, list): for k in self.index_col: _set(k) elif self.index_col is not None: _set(self.index_col)
[ "def", "_set_noconvert_columns", "(", "self", ")", ":", "names", "=", "self", ".", "orig_names", "if", "self", ".", "usecols_dtype", "==", "'integer'", ":", "# A set of integers will be converted to a list in", "# the correct order every single time.", "usecols", "=", "list", "(", "self", ".", "usecols", ")", "usecols", ".", "sort", "(", ")", "elif", "(", "callable", "(", "self", ".", "usecols", ")", "or", "self", ".", "usecols_dtype", "not", "in", "(", "'empty'", ",", "None", ")", ")", ":", "# The names attribute should have the correct columns", "# in the proper order for indexing with parse_dates.", "usecols", "=", "self", ".", "names", "[", ":", "]", "else", ":", "# Usecols is empty.", "usecols", "=", "None", "def", "_set", "(", "x", ")", ":", "if", "usecols", "is", "not", "None", "and", "is_integer", "(", "x", ")", ":", "x", "=", "usecols", "[", "x", "]", "if", "not", "is_integer", "(", "x", ")", ":", "x", "=", "names", ".", "index", "(", "x", ")", "self", ".", "_reader", ".", "set_noconvert", "(", "x", ")", "if", "isinstance", "(", "self", ".", "parse_dates", ",", "list", ")", ":", "for", "val", "in", "self", ".", "parse_dates", ":", "if", "isinstance", "(", "val", ",", "list", ")", ":", "for", "k", "in", "val", ":", "_set", "(", "k", ")", "else", ":", "_set", "(", "val", ")", "elif", "isinstance", "(", "self", ".", "parse_dates", ",", "dict", ")", ":", "for", "val", "in", "self", ".", "parse_dates", ".", "values", "(", ")", ":", "if", "isinstance", "(", "val", ",", "list", ")", ":", "for", "k", "in", "val", ":", "_set", "(", "k", ")", "else", ":", "_set", "(", "val", ")", "elif", "self", ".", "parse_dates", ":", "if", "isinstance", "(", "self", ".", "index_col", ",", "list", ")", ":", "for", "k", "in", "self", ".", "index_col", ":", "_set", "(", "k", ")", "elif", "self", ".", "index_col", "is", "not", "None", ":", "_set", "(", "self", ".", "index_col", ")" ]
Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions.
[ "Set", "the", "columns", "that", "should", "not", "undergo", "dtype", "conversions", "." ]
python
train
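For orientation, a hedged illustration (editor's addition; column names are made up) of the parse_dates shapes the method above walks over — these mirror the public pandas.read_csv option:

parse_dates = ['date']                    # plain columns: each is set no-convert
parse_dates = [['year', 'month', 'day']]  # nested list: columns combined into one date
parse_dates = {'when': ['date', 'time']}  # dict: combined column named 'when'
parse_dates = True                        # parse index_col instead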
andycasey/sick
sick/models/cannon.py
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/cannon.py#L82-L114
def train_local(self, closest_point, label_vector_description=None, N=None, pivot=True, **kwargs): """ Train the model in a Cannon-like fashion using the grid points as labels and the intensities as normalised rest-frame fluxes within some local regime. """ lv = self._cannon_label_vector if label_vector_description is None else\ self._interpret_label_vector(label_vector_description) # By default we will train to the nearest 10% of the grid. # If grid subset is a fraction, scale it to real numbers. if N is None: N = self._configuration.get("settings", {}).get("grid_subset", 0.10) if 1 >= N > 0: N = int(np.round(N * self.grid_points.size)) logger.debug("Using {} nearest points for local Cannon model".format(N)) # Use closest N points. dtype = [(name, '<f8') for name in self.grid_points.dtype.names] grid_points \ = self.grid_points.astype(dtype).view(float).reshape(-1, len(dtype)) distance = np.sum(np.abs(grid_points - np.array(closest_point))/ np.ptp(grid_points, axis=0), axis=1) grid_indices = np.argsort(distance)[:N] lv_array, _, offsets = _build_label_vector_array( self.grid_points[grid_indices], lv, pivot=pivot) return self._train(lv_array, grid_indices, offsets, lv, **kwargs)
[ "def", "train_local", "(", "self", ",", "closest_point", ",", "label_vector_description", "=", "None", ",", "N", "=", "None", ",", "pivot", "=", "True", ",", "*", "*", "kwargs", ")", ":", "lv", "=", "self", ".", "_cannon_label_vector", "if", "label_vector_description", "is", "None", "else", "self", ".", "_interpret_label_vector", "(", "label_vector_description", ")", "# By default we will train to the nearest 10% of the grid.", "# If grid subset is a fraction, scale it to real numbers.", "if", "N", "is", "None", ":", "N", "=", "self", ".", "_configuration", ".", "get", "(", "\"settings\"", ",", "{", "}", ")", ".", "get", "(", "\"grid_subset\"", ",", "0.10", ")", "if", "1", ">=", "N", ">", "0", ":", "N", "=", "int", "(", "np", ".", "round", "(", "N", "*", "self", ".", "grid_points", ".", "size", ")", ")", "logger", ".", "debug", "(", "\"Using {} nearest points for local Cannon model\"", ".", "format", "(", "N", ")", ")", "# Use closest N points.", "dtype", "=", "[", "(", "name", ",", "'<f8'", ")", "for", "name", "in", "self", ".", "grid_points", ".", "dtype", ".", "names", "]", "grid_points", "=", "self", ".", "grid_points", ".", "astype", "(", "dtype", ")", ".", "view", "(", "float", ")", ".", "reshape", "(", "-", "1", ",", "len", "(", "dtype", ")", ")", "distance", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "grid_points", "-", "np", ".", "array", "(", "closest_point", ")", ")", "/", "np", ".", "ptp", "(", "grid_points", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", "grid_indices", "=", "np", ".", "argsort", "(", "distance", ")", "[", ":", "N", "]", "lv_array", ",", "_", ",", "offsets", "=", "_build_label_vector_array", "(", "self", ".", "grid_points", "[", "grid_indices", "]", ",", "lv", ",", "pivot", "=", "pivot", ")", "return", "self", ".", "_train", "(", "lv_array", ",", "grid_indices", ",", "offsets", ",", "lv", ",", "*", "*", "kwargs", ")" ]
Train the model in a Cannon-like fashion using the grid points as labels and the intensities as normalised rest-frame fluxes within some local regime.
[ "Train", "the", "model", "in", "a", "Cannon", "-", "like", "fashion", "using", "the", "grid", "points", "as", "labels", "and", "the", "intensities", "as", "normalsied", "rest", "-", "frame", "fluxes", "within", "some", "local", "regime", "." ]
python
train
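A minimal sketch (editor's addition with a made-up two-parameter grid) of the neighbour selection above: each axis is scaled by its peak-to-peak range before the absolute differences are summed, so parameters with very different units contribute comparably.

import numpy as np

grid_points = np.array([[4000., 0.0],     # e.g. (Teff, [Fe/H]) grid nodes
                        [5000., 0.5],
                        [6000., 1.0]])
closest_point = np.array([5100., 0.4])
distance = np.sum(np.abs(grid_points - closest_point)
                  / np.ptp(grid_points, axis=0), axis=1)
print(np.argsort(distance)[:2])           # -> [1 0], the two nearest nodes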
mushkevych/scheduler
synergy/scheduler/timetable.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/scheduler/timetable.py#L262-L267
def is_job_record_finalizable(self, job_record): """ :return: True, if the node and all its children are in [STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP] """ assert isinstance(job_record, Job) tree = self.get_tree(job_record.process_name) node = tree.get_node(job_record.process_name, job_record.timeperiod) return node.is_finalizable()
[ "def", "is_job_record_finalizable", "(", "self", ",", "job_record", ")", ":", "assert", "isinstance", "(", "job_record", ",", "Job", ")", "tree", "=", "self", ".", "get_tree", "(", "job_record", ".", "process_name", ")", "node", "=", "tree", ".", "get_node", "(", "job_record", ".", "process_name", ",", "job_record", ".", "timeperiod", ")", "return", "node", ".", "is_finalizable", "(", ")" ]
:return: True, if the node and all its children are in [STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP]
[ ":", "return", ":", "True", "if", "the", "node", "and", "all", "its", "children", "are", "in", "[", "STATE_PROCESSED", "STATE_SKIPPED", "STATE_NOOP", "]" ]
python
train
jalanb/pysyte
pysyte/getch.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/getch.py#L155-L167
def get_key(): """Get a key from the keyboard as a string A 'key' will be a single char, or the name of an extended key """ character_name = chr codes = _get_keycodes() if len(codes) == 1: code = codes[0] if code >= 32: return character_name(code) return control_key_name(code) return get_extended_key_name(codes)
[ "def", "get_key", "(", ")", ":", "character_name", "=", "chr", "codes", "=", "_get_keycodes", "(", ")", "if", "len", "(", "codes", ")", "==", "1", ":", "code", "=", "codes", "[", "0", "]", "if", "code", ">=", "32", ":", "return", "character_name", "(", "code", ")", "return", "control_key_name", "(", "code", ")", "return", "get_extended_key_name", "(", "codes", ")" ]
Get a key from the keyboard as a string A 'key' will be a single char, or the name of an extended key
[ "Get", "a", "key", "from", "the", "keyboard", "as", "a", "string" ]
python
train
potash/drain
drain/util.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/util.py#L273-L291
def dict_diff(dicts): """ Subset dictionaries to keys which map to multiple values """ diff_keys = set() for k in union(set(d.keys()) for d in dicts): values = [] for d in dicts: if k not in d: diff_keys.add(k) break else: values.append(d[k]) if nunique(values) > 1: diff_keys.add(k) break return [dict_subset(d, diff_keys) for d in dicts]
[ "def", "dict_diff", "(", "dicts", ")", ":", "diff_keys", "=", "set", "(", ")", "for", "k", "in", "union", "(", "set", "(", "d", ".", "keys", "(", ")", ")", "for", "d", "in", "dicts", ")", ":", "values", "=", "[", "]", "for", "d", "in", "dicts", ":", "if", "k", "not", "in", "d", ":", "diff_keys", ".", "add", "(", "k", ")", "break", "else", ":", "values", ".", "append", "(", "d", "[", "k", "]", ")", "if", "nunique", "(", "values", ")", ">", "1", ":", "diff_keys", ".", "add", "(", "k", ")", "break", "return", "[", "dict_subset", "(", "d", ",", "diff_keys", ")", "for", "d", "in", "dicts", "]" ]
Subset dictionaries to keys which map to multiple values
[ "Subset", "dictionaries", "to", "keys", "which", "map", "to", "multiple", "values" ]
python
train
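A hedged usage sketch (editor's addition; union, nunique and dict_subset are helpers from the same drain.util module): only keys whose values differ, or which are missing from some dictionary, survive in the output.

dicts = [{'model': 'rf', 'depth': 5},
         {'model': 'rf', 'depth': 10}]
print(dict_diff(dicts))   # -> [{'depth': 5}, {'depth': 10}]; 'model' agrees everywhere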
gem/oq-engine
openquake/calculators/export/hazard.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/export/hazard.py#L59-L77
def export_ruptures_xml(ekey, dstore): """ :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object """ fmt = ekey[-1] oq = dstore['oqparam'] num_ses = oq.ses_per_logic_tree_path mesh = get_mesh(dstore['sitecol']) ruptures_by_grp = {} for rgetter in gen_rupture_getters(dstore): ebrs = [ebr.export(mesh, rgetter.rlzs_by_gsim, num_ses) for ebr in rgetter.get_ruptures()] if ebrs: ruptures_by_grp[rgetter.grp_id] = ebrs dest = dstore.export_path('ses.' + fmt) writer = hazard_writers.SESXMLWriter(dest) writer.serialize(ruptures_by_grp, oq.investigation_time) return [dest]
[ "def", "export_ruptures_xml", "(", "ekey", ",", "dstore", ")", ":", "fmt", "=", "ekey", "[", "-", "1", "]", "oq", "=", "dstore", "[", "'oqparam'", "]", "num_ses", "=", "oq", ".", "ses_per_logic_tree_path", "mesh", "=", "get_mesh", "(", "dstore", "[", "'sitecol'", "]", ")", "ruptures_by_grp", "=", "{", "}", "for", "rgetter", "in", "gen_rupture_getters", "(", "dstore", ")", ":", "ebrs", "=", "[", "ebr", ".", "export", "(", "mesh", ",", "rgetter", ".", "rlzs_by_gsim", ",", "num_ses", ")", "for", "ebr", "in", "rgetter", ".", "get_ruptures", "(", ")", "]", "if", "ebrs", ":", "ruptures_by_grp", "[", "rgetter", ".", "grp_id", "]", "=", "ebrs", "dest", "=", "dstore", ".", "export_path", "(", "'ses.'", "+", "fmt", ")", "writer", "=", "hazard_writers", ".", "SESXMLWriter", "(", "dest", ")", "writer", ".", "serialize", "(", "ruptures_by_grp", ",", "oq", ".", "investigation_time", ")", "return", "[", "dest", "]" ]
:param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object
[ ":", "param", "ekey", ":", "export", "key", "i", ".", "e", ".", "a", "pair", "(", "datastore", "key", "fmt", ")", ":", "param", "dstore", ":", "datastore", "object" ]
python
train
akfullfo/taskforce
taskforce/task.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/task.py#L1355-L1372
def file_del(self, key, paths=None): """ Deregister a task for file event changes. If paths is None, all paths associated with the task will be deregistered. """ if paths is None: paths = [] for path in self._file_event_map: if key in self._file_event_map[path]: paths.append(path) elif not isinstance(paths, list): paths = [paths] for path in paths: if key in self._file_event_map[path]: del self._file_event_map[path][key] if path in self._file_event_map and not self._file_event_map[path]: self._watch_files.remove(path) del self._file_event_map[path]
[ "def", "file_del", "(", "self", ",", "key", ",", "paths", "=", "None", ")", ":", "if", "paths", "is", "None", ":", "paths", "=", "[", "]", "for", "path", "in", "self", ".", "_file_event_map", ":", "if", "key", "in", "self", ".", "_file_event_map", "[", "path", "]", ":", "paths", ".", "append", "(", "path", ")", "elif", "not", "isinstance", "(", "paths", ",", "list", ")", ":", "paths", "=", "[", "paths", "]", "for", "path", "in", "paths", ":", "if", "key", "in", "self", ".", "_file_event_map", "[", "path", "]", ":", "del", "self", ".", "_file_event_map", "[", "path", "]", "[", "key", "]", "if", "path", "in", "self", ".", "_file_event_map", "and", "not", "self", ".", "_file_event_map", "[", "path", "]", ":", "self", ".", "_watch_files", ".", "remove", "(", "path", ")", "del", "self", ".", "_file_event_map", "[", "path", "]" ]
Deregister a task for file event changes. If paths is None, all paths associated with the task will be deregistered.
[ "Deregister", "a", "task", "for", "file", "event", "changes", ".", "If", "paths", "is", "None", "all", "paths", "assoicated", "with", "the", "task", "will", "be", "deregistered", "." ]
python
train
inspirehep/inspire-utils
inspire_utils/name.py
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L146-L200
def dumps(self): """Dump the name to string, after normalizing it.""" def _is_initial(author_name): return len(author_name) == 1 or u'.' in author_name def _ensure_dotted_initials(author_name): if _is_initial(author_name) \ and u'.' not in author_name: seq = (author_name, u'.') author_name = u''.join(seq) return author_name def _ensure_dotted_suffixes(author_suffix): if u'.' not in author_suffix: seq = (author_suffix, u'.') author_suffix = u''.join(seq) return author_suffix def _is_roman_numeral(suffix): """Check that the user's input contains only valid Roman numerals""" valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X', u'V', u'I', u'(', u')'] return all(letters in valid_roman_numerals for letters in suffix.upper()) first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list) try: prev = next(first_and_middle_names) except StopIteration: LOGGER.warning(u"Cannot process %s properly", self._parsed_name.original) prev = self._parsed_name.original names_with_spaces = [prev] for name in first_and_middle_names: if not _is_initial(name) or not _is_initial(prev): names_with_spaces.append(' ') prev = name names_with_spaces.append(prev) normalized_names = u''.join(names_with_spaces) if _is_roman_numeral(self.suffix): suffix = self.suffix.upper() else: suffix = _ensure_dotted_suffixes(self.suffix) final_name = u', '.join( part for part in (self.last, normalized_names.strip(), suffix) if part) # Replace unicode curly apostrophe with a normal apostrophe. final_name = final_name.replace(u'’', '\'') return final_name
[ "def", "dumps", "(", "self", ")", ":", "def", "_is_initial", "(", "author_name", ")", ":", "return", "len", "(", "author_name", ")", "==", "1", "or", "u'.'", "in", "author_name", "def", "_ensure_dotted_initials", "(", "author_name", ")", ":", "if", "_is_initial", "(", "author_name", ")", "and", "u'.'", "not", "in", "author_name", ":", "seq", "=", "(", "author_name", ",", "u'.'", ")", "author_name", "=", "u''", ".", "join", "(", "seq", ")", "return", "author_name", "def", "_ensure_dotted_suffixes", "(", "author_suffix", ")", ":", "if", "u'.'", "not", "in", "author_suffix", ":", "seq", "=", "(", "author_suffix", ",", "u'.'", ")", "author_suffix", "=", "u''", ".", "join", "(", "seq", ")", "return", "author_suffix", "def", "_is_roman_numeral", "(", "suffix", ")", ":", "\"\"\"Controls that the user's input only contains valid roman numerals\"\"\"", "valid_roman_numerals", "=", "[", "u'M'", ",", "u'D'", ",", "u'C'", ",", "u'L'", ",", "u'X'", ",", "u'V'", ",", "u'I'", ",", "u'('", ",", "u')'", "]", "return", "all", "(", "letters", "in", "valid_roman_numerals", "for", "letters", "in", "suffix", ".", "upper", "(", ")", ")", "first_and_middle_names", "=", "iter", "(", "_ensure_dotted_initials", "(", "name", ")", "for", "name", "in", "self", ".", "first_list", ")", "try", ":", "prev", "=", "next", "(", "first_and_middle_names", ")", "except", "StopIteration", ":", "LOGGER", ".", "warning", "(", "u\"Cannot process %s properly\"", ",", "self", ".", "_parsed_name", ".", "original", ")", "prev", "=", "self", ".", "_parsed_name", ".", "original", "names_with_spaces", "=", "[", "prev", "]", "for", "name", "in", "first_and_middle_names", ":", "if", "not", "_is_initial", "(", "name", ")", "or", "not", "_is_initial", "(", "prev", ")", ":", "names_with_spaces", ".", "append", "(", "' '", ")", "prev", "=", "name", "names_with_spaces", ".", "append", "(", "prev", ")", "normalized_names", "=", "u''", ".", "join", "(", "names_with_spaces", ")", "if", "_is_roman_numeral", "(", "self", ".", "suffix", ")", ":", "suffix", "=", "self", ".", "suffix", ".", "upper", "(", ")", "else", ":", "suffix", "=", "_ensure_dotted_suffixes", "(", "self", ".", "suffix", ")", "final_name", "=", "u', '", ".", "join", "(", "part", "for", "part", "in", "(", "self", ".", "last", ",", "normalized_names", ".", "strip", "(", ")", ",", "suffix", ")", "if", "part", ")", "# Replace unicode curly apostrophe to normal apostrophe.", "final_name", "=", "final_name", ".", "replace", "(", "u'’', ", "'", "'')", "", "return", "final_name" ]
Dump the name to string, after normalizing it.
[ "Dump", "the", "name", "to", "string", "after", "normalizing", "it", "." ]
python
train
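A hedged behaviour sketch (editor's addition; inspire_utils exposes this method through its normalize_name helper): adjacent initials get dots and no separating space, and non-Roman suffixes get a trailing dot.

from inspire_utils.name import normalize_name

print(normalize_name('Smith, J R'))       # -> 'Smith, J.R.'
print(normalize_name('Smith, John, Jr'))  # -> 'Smith, John, Jr.'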
googledatalab/pydatalab
google/datalab/bigquery/commands/_bigquery.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L1074-L1186
def _table_viewer(table, rows_per_page=25, fields=None): """ Return a table viewer. This includes a static rendering of the first page of the table, that gets replaced by the charting code in environments where Javascript is executable and BQ is available. Args: table: the table to view. rows_per_page: how many rows to display at one time. fields: an array of field names to display; default is None which uses the full schema. Returns: A string containing the HTML for the table viewer. """ # TODO(gram): rework this to use google.datalab.utils.commands.chart_html if not table.exists(): raise Exception('Table %s does not exist' % table.full_name) if not table.is_listable(): return "Done" _HTML_TEMPLATE = u""" <div class="bqtv" id="{div_id}">{static_table}</div> <br />{meta_data}<br /> <script src="/static/components/requirejs/require.js"></script> <script> require.config({{ paths: {{ base: '/static/base', d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3', plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext', jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min' }}, map: {{ '*': {{ datalab: 'nbextensions/gcpdatalab' }} }}, shim: {{ plotly: {{ deps: ['d3', 'jquery'], exports: 'plotly' }} }} }}); require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events', 'datalab/style!/nbextensions/gcpdatalab/charting.css'], function(charts, dom, events) {{ charts.render('gcharts', dom, events, '{chart_style}', [], {data}, {{ pageSize: {rows_per_page}, cssClassNames: {{ tableRow: 'gchart-table-row', headerRow: 'gchart-table-headerrow', oddTableRow: 'gchart-table-oddrow', selectedTableRow: 'gchart-table-selectedrow', hoverTableRow: 'gchart-table-hoverrow', tableCell: 'gchart-table-cell', headerCell: 'gchart-table-headercell', rowNumberCell: 'gchart-table-rownumcell' }} }}, {{source_index: {source_index}, fields: '{fields}'}}, 0, {total_rows}); }} ); </script> """ if fields is None: fields = google.datalab.utils.commands.get_field_list(fields, table.schema) div_id = google.datalab.utils.commands.Html.next_id() meta_count = ('rows: %d' % table.length) if table.length >= 0 else '' meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id) if table.job: if table.job.cache_hit: meta_cost = 'cached' else: bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed) meta_cost = '%s processed' % bytes meta_time = 'time: %.1fs' % table.job.total_time else: meta_cost = '' meta_time = '' data, total_count = google.datalab.utils.commands.get_data(table, fields, first_row=0, count=rows_per_page) if total_count < 0: # The table doesn't have a length metadata property but may still be small if we fetched less # rows than we asked for. fetched_count = len(data['rows']) if fetched_count < rows_per_page: total_count = fetched_count chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table' meta_entries = [meta_count, meta_time, meta_cost, meta_name] meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)])) return _HTML_TEMPLATE.format(div_id=div_id, static_table=google.datalab.utils.commands.HtmlBuilder .render_chart_data(data), meta_data=meta_data, chart_style=chart, source_index=google.datalab.utils.commands .get_data_source_index(table.full_name), fields=','.join(fields), total_rows=total_count, rows_per_page=rows_per_page, data=json.dumps(data, cls=google.datalab.utils.JSONEncoder))
[ "def", "_table_viewer", "(", "table", ",", "rows_per_page", "=", "25", ",", "fields", "=", "None", ")", ":", "# TODO(gram): rework this to use google.datalab.utils.commands.chart_html", "if", "not", "table", ".", "exists", "(", ")", ":", "raise", "Exception", "(", "'Table %s does not exist'", "%", "table", ".", "full_name", ")", "if", "not", "table", ".", "is_listable", "(", ")", ":", "return", "\"Done\"", "_HTML_TEMPLATE", "=", "u\"\"\"\n <div class=\"bqtv\" id=\"{div_id}\">{static_table}</div>\n <br />{meta_data}<br />\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'],\n function(charts, dom, events) {{\n charts.render('gcharts', dom, events, '{chart_style}', [], {data},\n {{\n pageSize: {rows_per_page},\n cssClassNames: {{\n tableRow: 'gchart-table-row',\n headerRow: 'gchart-table-headerrow',\n oddTableRow: 'gchart-table-oddrow',\n selectedTableRow: 'gchart-table-selectedrow',\n hoverTableRow: 'gchart-table-hoverrow',\n tableCell: 'gchart-table-cell',\n headerCell: 'gchart-table-headercell',\n rowNumberCell: 'gchart-table-rownumcell'\n }}\n }},\n {{source_index: {source_index}, fields: '{fields}'}},\n 0,\n {total_rows});\n }}\n );\n </script>\n \"\"\"", "if", "fields", "is", "None", ":", "fields", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "get_field_list", "(", "fields", ",", "table", ".", "schema", ")", "div_id", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "Html", ".", "next_id", "(", ")", "meta_count", "=", "(", "'rows: %d'", "%", "table", ".", "length", ")", "if", "table", ".", "length", ">=", "0", "else", "''", "meta_name", "=", "table", ".", "full_name", "if", "table", ".", "job", "is", "None", "else", "(", "'job: %s'", "%", "table", ".", "job", ".", "id", ")", "if", "table", ".", "job", ":", "if", "table", ".", "job", ".", "cache_hit", ":", "meta_cost", "=", "'cached'", "else", ":", "bytes", "=", "bigquery", ".", "_query_stats", ".", "QueryStats", ".", "_size_formatter", "(", "table", ".", "job", ".", "bytes_processed", ")", "meta_cost", "=", "'%s processed'", "%", "bytes", "meta_time", "=", "'time: %.1fs'", "%", "table", ".", "job", ".", "total_time", "else", ":", "meta_cost", "=", "''", "meta_time", "=", "''", "data", ",", "total_count", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "get_data", "(", "table", ",", "fields", ",", "first_row", "=", "0", ",", "count", "=", "rows_per_page", ")", "if", "total_count", "<", "0", ":", "# The table doesn't have a length metadata property but may still be small if we fetched less", "# rows than we asked for.", "fetched_count", "=", "len", "(", "data", "[", "'rows'", "]", ")", "if", "fetched_count", "<", "rows_per_page", ":", "total_count", "=", "fetched_count", "chart", "=", "'table'", "if", "0", "<=", "total_count", "<=", "rows_per_page", "else", "'paged_table'", "meta_entries", "=", "[", "meta_count", ",", "meta_time", ",", "meta_cost", ",", "meta_name", "]", "meta_data", "=", "'(%s)'", "%", "(", "', '", ".", 
"join", "(", "[", "entry", "for", "entry", "in", "meta_entries", "if", "len", "(", "entry", ")", "]", ")", ")", "return", "_HTML_TEMPLATE", ".", "format", "(", "div_id", "=", "div_id", ",", "static_table", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "HtmlBuilder", ".", "render_chart_data", "(", "data", ")", ",", "meta_data", "=", "meta_data", ",", "chart_style", "=", "chart", ",", "source_index", "=", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "get_data_source_index", "(", "table", ".", "full_name", ")", ",", "fields", "=", "','", ".", "join", "(", "fields", ")", ",", "total_rows", "=", "total_count", ",", "rows_per_page", "=", "rows_per_page", ",", "data", "=", "json", ".", "dumps", "(", "data", ",", "cls", "=", "google", ".", "datalab", ".", "utils", ".", "JSONEncoder", ")", ")" ]
Return a table viewer. This includes a static rendering of the first page of the table, that gets replaced by the charting code in environments where Javascript is executable and BQ is available. Args: table: the table to view. rows_per_page: how many rows to display at one time. fields: an array of field names to display; default is None which uses the full schema. Returns: A string containing the HTML for the table viewer.
[ "Return", "a", "table", "viewer", "." ]
python
train
chaoss/grimoirelab-elk
grimoire_elk/elk.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/elk.py#L524-L668
def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name, ocean_index=None, ocean_index_enrich=None, db_projects_map=None, json_projects_map=None, db_sortinghat=None, no_incremental=False, only_identities=False, github_token=None, studies=False, only_studies=False, url_enrich=None, events_enrich=False, db_user=None, db_password=None, db_host=None, do_refresh_projects=False, do_refresh_identities=False, author_id=None, author_uuid=None, filter_raw=None, filters_raw_prefix=None, jenkins_rename_file=None, unaffiliated_group=None, pair_programming=False, node_regex=False, studies_args=None, es_enrich_aliases=None, last_enrich_date=None, projects_json_repo=None): """ Enrich Ocean index """ backend = None enrich_index = None if ocean_index or ocean_index_enrich: clean = False # don't remove index, it could be shared if do_refresh_projects or do_refresh_identities: clean = False # refresh works over the existing enriched items if not get_connector_from_name(backend_name): raise RuntimeError("Unknown backend %s" % backend_name) connector = get_connector_from_name(backend_name) klass = connector[3] # BackendCmd for the connector try: backend = None backend_cmd = None if klass: # Data is retrieved from Perceval backend_cmd = init_backend(klass(*backend_params)) backend = backend_cmd.backend if ocean_index_enrich: enrich_index = ocean_index_enrich else: if not ocean_index: ocean_index = backend_name + "_" + backend.origin enrich_index = ocean_index + "_enrich" if events_enrich: enrich_index += "_events" enrich_backend = connector[2](db_sortinghat, db_projects_map, json_projects_map, db_user, db_password, db_host) enrich_backend.set_params(backend_params) # store the cfg section name in the enrich backend to recover the corresponding project name in projects.json enrich_backend.set_cfg_section_name(cfg_section_name) enrich_backend.set_from_date(last_enrich_date) if url_enrich: elastic_enrich = get_elastic(url_enrich, enrich_index, clean, enrich_backend, es_enrich_aliases) else: elastic_enrich = get_elastic(url, enrich_index, clean, enrich_backend, es_enrich_aliases) enrich_backend.set_elastic(elastic_enrich) if github_token and backend_name == "git": enrich_backend.set_github_token(github_token) if jenkins_rename_file and backend_name == "jenkins": enrich_backend.set_jenkins_rename_file(jenkins_rename_file) if unaffiliated_group: enrich_backend.unaffiliated_group = unaffiliated_group if pair_programming: enrich_backend.pair_programming = pair_programming if node_regex: enrich_backend.node_regex = node_regex # The filter raw is needed to be able to assign the project value to an enriched item # see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw) if filter_raw: enrich_backend.set_filter_raw(filter_raw) elif filters_raw_prefix: enrich_backend.set_filter_raw_should(filters_raw_prefix) enrich_backend.set_projects_json_repo(projects_json_repo) ocean_backend = get_ocean_backend(backend_cmd, enrich_backend, no_incremental, filter_raw, filters_raw_prefix) if only_studies: logger.info("Running only studies (no SH and no enrichment)") do_studies(ocean_backend, enrich_backend, studies_args) elif do_refresh_projects: logger.info("Refreshing project field in %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) field_id = enrich_backend.get_field_unique_id() eitems = refresh_projects(enrich_backend) enrich_backend.elastic.bulk_upload(eitems, field_id) elif do_refresh_identities: author_attr = None author_values = 
None if author_id: author_attr = 'author_id' author_values = [author_id] elif author_uuid: author_attr = 'author_uuid' author_values = [author_uuid] logger.info("Refreshing identities fields in %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) field_id = enrich_backend.get_field_unique_id() eitems = refresh_identities(enrich_backend, author_attr, author_values) enrich_backend.elastic.bulk_upload(eitems, field_id) else: clean = False # Don't remove ocean index when enrich elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend) ocean_backend.set_elastic(elastic_ocean) logger.info("Adding enrichment data to %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) if db_sortinghat and enrich_backend.has_identities(): # FIXME: This step won't be done from enrich in the future total_ids = load_identities(ocean_backend, enrich_backend) logger.info("Total identities loaded %i ", total_ids) if only_identities: logger.info("Only SH identities added. Enrich not done!") else: # Enrichment for the new items once SH update is finished if not events_enrich: enrich_count = enrich_items(ocean_backend, enrich_backend) if enrich_count is not None: logger.info("Total items enriched %i ", enrich_count) else: enrich_count = enrich_items(ocean_backend, enrich_backend, events=True) if enrich_count is not None: logger.info("Total events enriched %i ", enrich_count) if studies: do_studies(ocean_backend, enrich_backend, studies_args) except Exception as ex: if backend: logger.error("Error enriching ocean from %s (%s): %s", backend_name, backend.origin, ex, exc_info=True) else: logger.error("Error enriching ocean %s", ex, exc_info=True) logger.info("Done %s ", backend_name)
[ "def", "enrich_backend", "(", "url", ",", "clean", ",", "backend_name", ",", "backend_params", ",", "cfg_section_name", ",", "ocean_index", "=", "None", ",", "ocean_index_enrich", "=", "None", ",", "db_projects_map", "=", "None", ",", "json_projects_map", "=", "None", ",", "db_sortinghat", "=", "None", ",", "no_incremental", "=", "False", ",", "only_identities", "=", "False", ",", "github_token", "=", "None", ",", "studies", "=", "False", ",", "only_studies", "=", "False", ",", "url_enrich", "=", "None", ",", "events_enrich", "=", "False", ",", "db_user", "=", "None", ",", "db_password", "=", "None", ",", "db_host", "=", "None", ",", "do_refresh_projects", "=", "False", ",", "do_refresh_identities", "=", "False", ",", "author_id", "=", "None", ",", "author_uuid", "=", "None", ",", "filter_raw", "=", "None", ",", "filters_raw_prefix", "=", "None", ",", "jenkins_rename_file", "=", "None", ",", "unaffiliated_group", "=", "None", ",", "pair_programming", "=", "False", ",", "node_regex", "=", "False", ",", "studies_args", "=", "None", ",", "es_enrich_aliases", "=", "None", ",", "last_enrich_date", "=", "None", ",", "projects_json_repo", "=", "None", ")", ":", "backend", "=", "None", "enrich_index", "=", "None", "if", "ocean_index", "or", "ocean_index_enrich", ":", "clean", "=", "False", "# don't remove index, it could be shared", "if", "do_refresh_projects", "or", "do_refresh_identities", ":", "clean", "=", "False", "# refresh works over the existing enriched items", "if", "not", "get_connector_from_name", "(", "backend_name", ")", ":", "raise", "RuntimeError", "(", "\"Unknown backend %s\"", "%", "backend_name", ")", "connector", "=", "get_connector_from_name", "(", "backend_name", ")", "klass", "=", "connector", "[", "3", "]", "# BackendCmd for the connector", "try", ":", "backend", "=", "None", "backend_cmd", "=", "None", "if", "klass", ":", "# Data is retrieved from Perceval", "backend_cmd", "=", "init_backend", "(", "klass", "(", "*", "backend_params", ")", ")", "backend", "=", "backend_cmd", ".", "backend", "if", "ocean_index_enrich", ":", "enrich_index", "=", "ocean_index_enrich", "else", ":", "if", "not", "ocean_index", ":", "ocean_index", "=", "backend_name", "+", "\"_\"", "+", "backend", ".", "origin", "enrich_index", "=", "ocean_index", "+", "\"_enrich\"", "if", "events_enrich", ":", "enrich_index", "+=", "\"_events\"", "enrich_backend", "=", "connector", "[", "2", "]", "(", "db_sortinghat", ",", "db_projects_map", ",", "json_projects_map", ",", "db_user", ",", "db_password", ",", "db_host", ")", "enrich_backend", ".", "set_params", "(", "backend_params", ")", "# store the cfg section name in the enrich backend to recover the corresponding project name in projects.json", "enrich_backend", ".", "set_cfg_section_name", "(", "cfg_section_name", ")", "enrich_backend", ".", "set_from_date", "(", "last_enrich_date", ")", "if", "url_enrich", ":", "elastic_enrich", "=", "get_elastic", "(", "url_enrich", ",", "enrich_index", ",", "clean", ",", "enrich_backend", ",", "es_enrich_aliases", ")", "else", ":", "elastic_enrich", "=", "get_elastic", "(", "url", ",", "enrich_index", ",", "clean", ",", "enrich_backend", ",", "es_enrich_aliases", ")", "enrich_backend", ".", "set_elastic", "(", "elastic_enrich", ")", "if", "github_token", "and", "backend_name", "==", "\"git\"", ":", "enrich_backend", ".", "set_github_token", "(", "github_token", ")", "if", "jenkins_rename_file", "and", "backend_name", "==", "\"jenkins\"", ":", "enrich_backend", ".", "set_jenkins_rename_file", "(", "jenkins_rename_file", 
")", "if", "unaffiliated_group", ":", "enrich_backend", ".", "unaffiliated_group", "=", "unaffiliated_group", "if", "pair_programming", ":", "enrich_backend", ".", "pair_programming", "=", "pair_programming", "if", "node_regex", ":", "enrich_backend", ".", "node_regex", "=", "node_regex", "# The filter raw is needed to be able to assign the project value to an enriched item", "# see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw)", "if", "filter_raw", ":", "enrich_backend", ".", "set_filter_raw", "(", "filter_raw", ")", "elif", "filters_raw_prefix", ":", "enrich_backend", ".", "set_filter_raw_should", "(", "filters_raw_prefix", ")", "enrich_backend", ".", "set_projects_json_repo", "(", "projects_json_repo", ")", "ocean_backend", "=", "get_ocean_backend", "(", "backend_cmd", ",", "enrich_backend", ",", "no_incremental", ",", "filter_raw", ",", "filters_raw_prefix", ")", "if", "only_studies", ":", "logger", ".", "info", "(", "\"Running only studies (no SH and no enrichment)\"", ")", "do_studies", "(", "ocean_backend", ",", "enrich_backend", ",", "studies_args", ")", "elif", "do_refresh_projects", ":", "logger", ".", "info", "(", "\"Refreshing project field in %s\"", ",", "enrich_backend", ".", "elastic", ".", "anonymize_url", "(", "enrich_backend", ".", "elastic", ".", "index_url", ")", ")", "field_id", "=", "enrich_backend", ".", "get_field_unique_id", "(", ")", "eitems", "=", "refresh_projects", "(", "enrich_backend", ")", "enrich_backend", ".", "elastic", ".", "bulk_upload", "(", "eitems", ",", "field_id", ")", "elif", "do_refresh_identities", ":", "author_attr", "=", "None", "author_values", "=", "None", "if", "author_id", ":", "author_attr", "=", "'author_id'", "author_values", "=", "[", "author_id", "]", "elif", "author_uuid", ":", "author_attr", "=", "'author_uuid'", "author_values", "=", "[", "author_uuid", "]", "logger", ".", "info", "(", "\"Refreshing identities fields in %s\"", ",", "enrich_backend", ".", "elastic", ".", "anonymize_url", "(", "enrich_backend", ".", "elastic", ".", "index_url", ")", ")", "field_id", "=", "enrich_backend", ".", "get_field_unique_id", "(", ")", "eitems", "=", "refresh_identities", "(", "enrich_backend", ",", "author_attr", ",", "author_values", ")", "enrich_backend", ".", "elastic", ".", "bulk_upload", "(", "eitems", ",", "field_id", ")", "else", ":", "clean", "=", "False", "# Don't remove ocean index when enrich", "elastic_ocean", "=", "get_elastic", "(", "url", ",", "ocean_index", ",", "clean", ",", "ocean_backend", ")", "ocean_backend", ".", "set_elastic", "(", "elastic_ocean", ")", "logger", ".", "info", "(", "\"Adding enrichment data to %s\"", ",", "enrich_backend", ".", "elastic", ".", "anonymize_url", "(", "enrich_backend", ".", "elastic", ".", "index_url", ")", ")", "if", "db_sortinghat", "and", "enrich_backend", ".", "has_identities", "(", ")", ":", "# FIXME: This step won't be done from enrich in the future", "total_ids", "=", "load_identities", "(", "ocean_backend", ",", "enrich_backend", ")", "logger", ".", "info", "(", "\"Total identities loaded %i \"", ",", "total_ids", ")", "if", "only_identities", ":", "logger", ".", "info", "(", "\"Only SH identities added. 
Enrich not done!\"", ")", "else", ":", "# Enrichment for the new items once SH update is finished", "if", "not", "events_enrich", ":", "enrich_count", "=", "enrich_items", "(", "ocean_backend", ",", "enrich_backend", ")", "if", "enrich_count", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Total items enriched %i \"", ",", "enrich_count", ")", "else", ":", "enrich_count", "=", "enrich_items", "(", "ocean_backend", ",", "enrich_backend", ",", "events", "=", "True", ")", "if", "enrich_count", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Total events enriched %i \"", ",", "enrich_count", ")", "if", "studies", ":", "do_studies", "(", "ocean_backend", ",", "enrich_backend", ",", "studies_args", ")", "except", "Exception", "as", "ex", ":", "if", "backend", ":", "logger", ".", "error", "(", "\"Error enriching ocean from %s (%s): %s\"", ",", "backend_name", ",", "backend", ".", "origin", ",", "ex", ",", "exc_info", "=", "True", ")", "else", ":", "logger", ".", "error", "(", "\"Error enriching ocean %s\"", ",", "ex", ",", "exc_info", "=", "True", ")", "logger", ".", "info", "(", "\"Done %s \"", ",", "backend_name", ")" ]
Enrich Ocean index
[ "Enrich", "Ocean", "index" ]
python
train
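A hedged invocation sketch for the enrich_backend record above. The import path, index names, and endpoints are assumptions for illustration; only the positional arguments mirror the signature recorded in the tokens.

from grimoire_elk.elk import enrich_backend  # assumed import path

enrich_backend(
    url='http://localhost:9200',   # Elasticsearch holding the raw index (placeholder)
    clean=False,                   # never drop a possibly shared index
    backend_name='git',
    backend_params=['https://github.com/chaoss/grimoirelab-elk.git'],  # placeholder repo
    cfg_section_name='git',
    ocean_index='git_raw',         # illustrative index names
    ocean_index_enrich='git_enriched',
)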
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L295-L328
def _point_scalar(self, name=None):
    """
    Returns point scalars of a vtk object

    Parameters
    ----------
    name : str
        Name of point scalars to retrieve.

    Returns
    -------
    scalars : np.ndarray
        Numpy array of scalars

    """
    if name is None:
        # use active scalar array
        field, name = self.active_scalar_info
        if field != POINT_DATA_FIELD:
            raise RuntimeError('Must specify an array to fetch.')
    vtkarr = self.GetPointData().GetArray(name)
    if vtkarr is None:
        raise AssertionError('({}) is not a point scalar'.format(name))

    # numpy does not support bit array data types
    if isinstance(vtkarr, vtk.vtkBitArray):
        vtkarr = vtk_bit_array_to_char(vtkarr)
        if name not in self._point_bool_array_names:
            self._point_bool_array_names.append(name)

    array = vtk_to_numpy(vtkarr)
    if array.dtype == np.uint8 and name in self._point_bool_array_names:
        array = array.view(np.bool)
    return array
[ "def", "_point_scalar", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "# use active scalar array", "field", ",", "name", "=", "self", ".", "active_scalar_info", "if", "field", "!=", "POINT_DATA_FIELD", ":", "raise", "RuntimeError", "(", "'Must specify an array to fetch.'", ")", "vtkarr", "=", "self", ".", "GetPointData", "(", ")", ".", "GetArray", "(", "name", ")", "if", "vtkarr", "is", "None", ":", "raise", "AssertionError", "(", "'({}) is not a point scalar'", ".", "format", "(", "name", ")", ")", "# numpy does not support bit array data types", "if", "isinstance", "(", "vtkarr", ",", "vtk", ".", "vtkBitArray", ")", ":", "vtkarr", "=", "vtk_bit_array_to_char", "(", "vtkarr", ")", "if", "name", "not", "in", "self", ".", "_point_bool_array_names", ":", "self", ".", "_point_bool_array_names", ".", "append", "(", "name", ")", "array", "=", "vtk_to_numpy", "(", "vtkarr", ")", "if", "array", ".", "dtype", "==", "np", ".", "uint8", "and", "name", "in", "self", ".", "_point_bool_array_names", ":", "array", "=", "array", ".", "view", "(", "np", ".", "bool", ")", "return", "array" ]
Returns point scalars of a vtk object

Parameters
----------
name : str
    Name of point scalars to retrieve.

Returns
-------
scalars : np.ndarray
    Numpy array of scalars
[ "Returns", "point", "scalars", "of", "a", "vtk", "object" ]
python
train
mozilla/treeherder
treeherder/etl/jobs.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/jobs.py#L75-L328
def _load_job(repository, job_datum, push_id):
    """
    Load a job into the treeherder database

    If the job is a ``retry`` the ``job_guid`` will have a special
    suffix on it.  But the matching ``pending``/``running`` job will not.
    So we append the suffixed ``job_guid`` to ``retry_job_guids``
    so that we can update the job_id_lookup later with the non-suffixed
    ``job_guid`` (root ``job_guid``). Then we can find the right
    ``pending``/``running`` job and update it with this ``retry`` job.
    """
    build_platform, _ = BuildPlatform.objects.get_or_create(
        os_name=job_datum.get('build_platform', {}).get('os_name', 'unknown'),
        platform=job_datum.get('build_platform', {}).get('platform', 'unknown'),
        architecture=job_datum.get('build_platform', {}).get('architecture', 'unknown'))

    machine_platform, _ = MachinePlatform.objects.get_or_create(
        os_name=job_datum.get('machine_platform', {}).get('os_name', 'unknown'),
        platform=job_datum.get('machine_platform', {}).get('platform', 'unknown'),
        architecture=job_datum.get('machine_platform', {}).get('architecture', 'unknown'))

    option_names = job_datum.get('option_collection', [])
    option_collection_hash = OptionCollection.calculate_hash(option_names)
    if not OptionCollection.objects.filter(
            option_collection_hash=option_collection_hash).exists():
        # in the unlikely event that we haven't seen this set of options
        # before, add the appropriate database rows
        options = []
        for option_name in option_names:
            option, _ = Option.objects.get_or_create(name=option_name)
            options.append(option)
        for option in options:
            OptionCollection.objects.create(
                option_collection_hash=option_collection_hash,
                option=option)

    machine, _ = Machine.objects.get_or_create(
        name=job_datum.get('machine', 'unknown'))

    job_type, _ = JobType.objects.get_or_create(
        symbol=job_datum.get('job_symbol') or 'unknown',
        name=job_datum.get('name') or 'unknown')

    job_group, _ = JobGroup.objects.get_or_create(
        name=job_datum.get('group_name') or 'unknown',
        symbol=job_datum.get('group_symbol') or 'unknown')

    product_name = job_datum.get('product_name', 'unknown')
    if not product_name.strip():
        product_name = 'unknown'
    product, _ = Product.objects.get_or_create(name=product_name)

    job_guid = job_datum['job_guid']
    job_guid = job_guid[0:50]

    who = job_datum.get('who') or 'unknown'
    who = who[0:50]

    reason = job_datum.get('reason') or 'unknown'
    reason = reason[0:125]

    state = job_datum.get('state') or 'unknown'
    state = state[0:25]

    build_system_type = job_datum.get('build_system_type', 'buildbot')

    reference_data_name = job_datum.get('reference_data_name', None)

    default_failure_classification = FailureClassification.objects.get(
        name='not classified')

    sh = sha1()
    sh.update(''.join(
        map(str,
            [build_system_type, repository.name, build_platform.os_name,
             build_platform.platform, build_platform.architecture,
             machine_platform.os_name, machine_platform.platform,
             machine_platform.architecture, job_group.name,
             job_group.symbol, job_type.name, job_type.symbol,
             option_collection_hash, reference_data_name])).encode('utf-8'))
    signature_hash = sh.hexdigest()

    # Should be the buildername in the case of buildbot (if not provided
    # default to using the signature hash)
    if not reference_data_name:
        reference_data_name = signature_hash

    signature, _ = ReferenceDataSignatures.objects.get_or_create(
        name=reference_data_name,
        signature=signature_hash,
        build_system_type=build_system_type,
        repository=repository.name,
        defaults={
            'first_submission_timestamp': time.time(),
            'build_os_name': build_platform.os_name,
            'build_platform': build_platform.platform,
            'build_architecture': build_platform.architecture,
            'machine_os_name': machine_platform.os_name,
            'machine_platform': machine_platform.platform,
            'machine_architecture': machine_platform.architecture,
            'job_group_name': job_group.name,
            'job_group_symbol': job_group.symbol,
            'job_type_name': job_type.name,
            'job_type_symbol': job_type.symbol,
            'option_collection_hash': option_collection_hash
        })

    tier = job_datum.get('tier') or 1

    result = job_datum.get('result', 'unknown')

    submit_time = datetime.fromtimestamp(
        _get_number(job_datum.get('submit_timestamp')))
    start_time = datetime.fromtimestamp(
        _get_number(job_datum.get('start_timestamp')))
    end_time = datetime.fromtimestamp(
        _get_number(job_datum.get('end_timestamp')))

    # first, try to create the job with the given guid (if it doesn't
    # exist yet)
    job_guid_root = get_guid_root(job_guid)
    if not Job.objects.filter(guid__in=[job_guid, job_guid_root]).exists():
        # This could theoretically already have been created by another process
        # that is running updates simultaneously.  So just attempt to create
        # it, but allow it to skip if it's the same guid.  The odds are
        # extremely high that this is a pending and running job that came in
        # quick succession and are being processed by two different workers.
        Job.objects.get_or_create(
            guid=job_guid,
            defaults={
                "repository": repository,
                "signature": signature,
                "build_platform": build_platform,
                "machine_platform": machine_platform,
                "machine": machine,
                "option_collection_hash": option_collection_hash,
                "job_type": job_type,
                "job_group": job_group,
                "product": product,
                "failure_classification": default_failure_classification,
                "who": who,
                "reason": reason,
                "result": result,
                "state": state,
                "tier": tier,
                "submit_time": submit_time,
                "start_time": start_time,
                "end_time": end_time,
                "last_modified": datetime.now(),
                "push_id": push_id
            }
        )

    # Can't just use the ``job`` we would get from the ``get_or_create``
    # because we need to try the job_guid_root instance first for update,
    # rather than a possible retry job instance.
    try:
        job = Job.objects.get(guid=job_guid_root)
    except ObjectDoesNotExist:
        job = Job.objects.get(guid=job_guid)

    # add taskcluster metadata if applicable
    if all([k in job_datum for k in ['taskcluster_task_id', 'taskcluster_retry_id']]):
        try:
            TaskclusterMetadata.objects.create(
                job=job,
                task_id=job_datum['taskcluster_task_id'],
                retry_id=job_datum['taskcluster_retry_id'])
        except IntegrityError:
            pass

    # Update job with any data that would have changed
    Job.objects.filter(id=job.id).update(
        guid=job_guid,
        signature=signature,
        build_platform=build_platform,
        machine_platform=machine_platform,
        machine=machine,
        option_collection_hash=option_collection_hash,
        job_type=job_type,
        job_group=job_group,
        product=product,
        failure_classification=default_failure_classification,
        who=who,
        reason=reason,
        result=result,
        state=state,
        tier=tier,
        submit_time=submit_time,
        start_time=start_time,
        end_time=end_time,
        last_modified=datetime.now(),
        push_id=push_id)

    artifacts = job_datum.get('artifacts', [])

    has_text_log_summary = any(x for x in artifacts
                               if x['name'] == 'text_log_summary')
    if artifacts:
        artifacts = serialize_artifact_json_blobs(artifacts)

        # need to add job guid to artifacts, since they likely weren't
        # present in the beginning
        for artifact in artifacts:
            if not all(k in artifact for k in ("name", "type", "blob")):
                raise ValueError(
                    "Artifact missing properties: {}".format(artifact))
            # Ensure every artifact has a ``job_guid`` value.
            # It is legal to submit an artifact that doesn't have a
            # ``job_guid`` value.  But, if missing, it should inherit that
            # value from the job itself.
            if "job_guid" not in artifact:
                artifact["job_guid"] = job_guid

        store_job_artifacts(artifacts)

    log_refs = job_datum.get('log_references', [])
    job_logs = []
    if log_refs:
        for log in log_refs:
            name = log.get('name') or 'unknown'
            name = name[0:50]

            url = log.get('url') or 'unknown'
            url = url[0:255]

            # this indicates that a summary artifact was submitted with
            # this job that corresponds to the buildbot_text log url.
            # Therefore, the log does not need parsing.  So we should
            # ensure that it's marked as already parsed.
            if has_text_log_summary and name == 'buildbot_text':
                parse_status = JobLog.PARSED
            else:
                parse_status_map = dict([(k, v) for (v, k) in
                                         JobLog.STATUSES])
                mapped_status = parse_status_map.get(
                    log.get('parse_status'))
                if mapped_status:
                    parse_status = mapped_status
                else:
                    parse_status = JobLog.PENDING

            jl, _ = JobLog.objects.get_or_create(
                job=job, name=name, url=url, defaults={
                    'status': parse_status
                })

            job_logs.append(jl)

    _schedule_log_parsing(job, job_logs, result)

    return job_guid
[ "def", "_load_job", "(", "repository", ",", "job_datum", ",", "push_id", ")", ":", "build_platform", ",", "_", "=", "BuildPlatform", ".", "objects", ".", "get_or_create", "(", "os_name", "=", "job_datum", ".", "get", "(", "'build_platform'", ",", "{", "}", ")", ".", "get", "(", "'os_name'", ",", "'unknown'", ")", ",", "platform", "=", "job_datum", ".", "get", "(", "'build_platform'", ",", "{", "}", ")", ".", "get", "(", "'platform'", ",", "'unknown'", ")", ",", "architecture", "=", "job_datum", ".", "get", "(", "'build_platform'", ",", "{", "}", ")", ".", "get", "(", "'architecture'", ",", "'unknown'", ")", ")", "machine_platform", ",", "_", "=", "MachinePlatform", ".", "objects", ".", "get_or_create", "(", "os_name", "=", "job_datum", ".", "get", "(", "'machine_platform'", ",", "{", "}", ")", ".", "get", "(", "'os_name'", ",", "'unknown'", ")", ",", "platform", "=", "job_datum", ".", "get", "(", "'machine_platform'", ",", "{", "}", ")", ".", "get", "(", "'platform'", ",", "'unknown'", ")", ",", "architecture", "=", "job_datum", ".", "get", "(", "'machine_platform'", ",", "{", "}", ")", ".", "get", "(", "'architecture'", ",", "'unknown'", ")", ")", "option_names", "=", "job_datum", ".", "get", "(", "'option_collection'", ",", "[", "]", ")", "option_collection_hash", "=", "OptionCollection", ".", "calculate_hash", "(", "option_names", ")", "if", "not", "OptionCollection", ".", "objects", ".", "filter", "(", "option_collection_hash", "=", "option_collection_hash", ")", ".", "exists", "(", ")", ":", "# in the unlikely event that we haven't seen this set of options", "# before, add the appropriate database rows", "options", "=", "[", "]", "for", "option_name", "in", "option_names", ":", "option", ",", "_", "=", "Option", ".", "objects", ".", "get_or_create", "(", "name", "=", "option_name", ")", "options", ".", "append", "(", "option", ")", "for", "option", "in", "options", ":", "OptionCollection", ".", "objects", ".", "create", "(", "option_collection_hash", "=", "option_collection_hash", ",", "option", "=", "option", ")", "machine", ",", "_", "=", "Machine", ".", "objects", ".", "get_or_create", "(", "name", "=", "job_datum", ".", "get", "(", "'machine'", ",", "'unknown'", ")", ")", "job_type", ",", "_", "=", "JobType", ".", "objects", ".", "get_or_create", "(", "symbol", "=", "job_datum", ".", "get", "(", "'job_symbol'", ")", "or", "'unknown'", ",", "name", "=", "job_datum", ".", "get", "(", "'name'", ")", "or", "'unknown'", ")", "job_group", ",", "_", "=", "JobGroup", ".", "objects", ".", "get_or_create", "(", "name", "=", "job_datum", ".", "get", "(", "'group_name'", ")", "or", "'unknown'", ",", "symbol", "=", "job_datum", ".", "get", "(", "'group_symbol'", ")", "or", "'unknown'", ")", "product_name", "=", "job_datum", ".", "get", "(", "'product_name'", ",", "'unknown'", ")", "if", "not", "product_name", ".", "strip", "(", ")", ":", "product_name", "=", "'unknown'", "product", ",", "_", "=", "Product", ".", "objects", ".", "get_or_create", "(", "name", "=", "product_name", ")", "job_guid", "=", "job_datum", "[", "'job_guid'", "]", "job_guid", "=", "job_guid", "[", "0", ":", "50", "]", "who", "=", "job_datum", ".", "get", "(", "'who'", ")", "or", "'unknown'", "who", "=", "who", "[", "0", ":", "50", "]", "reason", "=", "job_datum", ".", "get", "(", "'reason'", ")", "or", "'unknown'", "reason", "=", "reason", "[", "0", ":", "125", "]", "state", "=", "job_datum", ".", "get", "(", "'state'", ")", "or", "'unknown'", "state", "=", "state", "[", "0", ":", "25", "]", "build_system_type", "=", 
"job_datum", ".", "get", "(", "'build_system_type'", ",", "'buildbot'", ")", "reference_data_name", "=", "job_datum", ".", "get", "(", "'reference_data_name'", ",", "None", ")", "default_failure_classification", "=", "FailureClassification", ".", "objects", ".", "get", "(", "name", "=", "'not classified'", ")", "sh", "=", "sha1", "(", ")", "sh", ".", "update", "(", "''", ".", "join", "(", "map", "(", "str", ",", "[", "build_system_type", ",", "repository", ".", "name", ",", "build_platform", ".", "os_name", ",", "build_platform", ".", "platform", ",", "build_platform", ".", "architecture", ",", "machine_platform", ".", "os_name", ",", "machine_platform", ".", "platform", ",", "machine_platform", ".", "architecture", ",", "job_group", ".", "name", ",", "job_group", ".", "symbol", ",", "job_type", ".", "name", ",", "job_type", ".", "symbol", ",", "option_collection_hash", ",", "reference_data_name", "]", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", "signature_hash", "=", "sh", ".", "hexdigest", "(", ")", "# Should be the buildername in the case of buildbot (if not provided", "# default to using the signature hash)", "if", "not", "reference_data_name", ":", "reference_data_name", "=", "signature_hash", "signature", ",", "_", "=", "ReferenceDataSignatures", ".", "objects", ".", "get_or_create", "(", "name", "=", "reference_data_name", ",", "signature", "=", "signature_hash", ",", "build_system_type", "=", "build_system_type", ",", "repository", "=", "repository", ".", "name", ",", "defaults", "=", "{", "'first_submission_timestamp'", ":", "time", ".", "time", "(", ")", ",", "'build_os_name'", ":", "build_platform", ".", "os_name", ",", "'build_platform'", ":", "build_platform", ".", "platform", ",", "'build_architecture'", ":", "build_platform", ".", "architecture", ",", "'machine_os_name'", ":", "machine_platform", ".", "os_name", ",", "'machine_platform'", ":", "machine_platform", ".", "platform", ",", "'machine_architecture'", ":", "machine_platform", ".", "architecture", ",", "'job_group_name'", ":", "job_group", ".", "name", ",", "'job_group_symbol'", ":", "job_group", ".", "symbol", ",", "'job_type_name'", ":", "job_type", ".", "name", ",", "'job_type_symbol'", ":", "job_type", ".", "symbol", ",", "'option_collection_hash'", ":", "option_collection_hash", "}", ")", "tier", "=", "job_datum", ".", "get", "(", "'tier'", ")", "or", "1", "result", "=", "job_datum", ".", "get", "(", "'result'", ",", "'unknown'", ")", "submit_time", "=", "datetime", ".", "fromtimestamp", "(", "_get_number", "(", "job_datum", ".", "get", "(", "'submit_timestamp'", ")", ")", ")", "start_time", "=", "datetime", ".", "fromtimestamp", "(", "_get_number", "(", "job_datum", ".", "get", "(", "'start_timestamp'", ")", ")", ")", "end_time", "=", "datetime", ".", "fromtimestamp", "(", "_get_number", "(", "job_datum", ".", "get", "(", "'end_timestamp'", ")", ")", ")", "# first, try to create the job with the given guid (if it doesn't", "# exist yet)", "job_guid_root", "=", "get_guid_root", "(", "job_guid", ")", "if", "not", "Job", ".", "objects", ".", "filter", "(", "guid__in", "=", "[", "job_guid", ",", "job_guid_root", "]", ")", ".", "exists", "(", ")", ":", "# This could theoretically already have been created by another process", "# that is running updates simultaneously. So just attempt to create", "# it, but allow it to skip if it's the same guid. 
The odds are", "# extremely high that this is a pending and running job that came in", "# quick succession and are being processed by two different workers.", "Job", ".", "objects", ".", "get_or_create", "(", "guid", "=", "job_guid", ",", "defaults", "=", "{", "\"repository\"", ":", "repository", ",", "\"signature\"", ":", "signature", ",", "\"build_platform\"", ":", "build_platform", ",", "\"machine_platform\"", ":", "machine_platform", ",", "\"machine\"", ":", "machine", ",", "\"option_collection_hash\"", ":", "option_collection_hash", ",", "\"job_type\"", ":", "job_type", ",", "\"job_group\"", ":", "job_group", ",", "\"product\"", ":", "product", ",", "\"failure_classification\"", ":", "default_failure_classification", ",", "\"who\"", ":", "who", ",", "\"reason\"", ":", "reason", ",", "\"result\"", ":", "result", ",", "\"state\"", ":", "state", ",", "\"tier\"", ":", "tier", ",", "\"submit_time\"", ":", "submit_time", ",", "\"start_time\"", ":", "start_time", ",", "\"end_time\"", ":", "end_time", ",", "\"last_modified\"", ":", "datetime", ".", "now", "(", ")", ",", "\"push_id\"", ":", "push_id", "}", ")", "# Can't just use the ``job`` we would get from the ``get_or_create``", "# because we need to try the job_guid_root instance first for update,", "# rather than a possible retry job instance.", "try", ":", "job", "=", "Job", ".", "objects", ".", "get", "(", "guid", "=", "job_guid_root", ")", "except", "ObjectDoesNotExist", ":", "job", "=", "Job", ".", "objects", ".", "get", "(", "guid", "=", "job_guid", ")", "# add taskcluster metadata if applicable", "if", "all", "(", "[", "k", "in", "job_datum", "for", "k", "in", "[", "'taskcluster_task_id'", ",", "'taskcluster_retry_id'", "]", "]", ")", ":", "try", ":", "TaskclusterMetadata", ".", "objects", ".", "create", "(", "job", "=", "job", ",", "task_id", "=", "job_datum", "[", "'taskcluster_task_id'", "]", ",", "retry_id", "=", "job_datum", "[", "'taskcluster_retry_id'", "]", ")", "except", "IntegrityError", ":", "pass", "# Update job with any data that would have changed", "Job", ".", "objects", ".", "filter", "(", "id", "=", "job", ".", "id", ")", ".", "update", "(", "guid", "=", "job_guid", ",", "signature", "=", "signature", ",", "build_platform", "=", "build_platform", ",", "machine_platform", "=", "machine_platform", ",", "machine", "=", "machine", ",", "option_collection_hash", "=", "option_collection_hash", ",", "job_type", "=", "job_type", ",", "job_group", "=", "job_group", ",", "product", "=", "product", ",", "failure_classification", "=", "default_failure_classification", ",", "who", "=", "who", ",", "reason", "=", "reason", ",", "result", "=", "result", ",", "state", "=", "state", ",", "tier", "=", "tier", ",", "submit_time", "=", "submit_time", ",", "start_time", "=", "start_time", ",", "end_time", "=", "end_time", ",", "last_modified", "=", "datetime", ".", "now", "(", ")", ",", "push_id", "=", "push_id", ")", "artifacts", "=", "job_datum", ".", "get", "(", "'artifacts'", ",", "[", "]", ")", "has_text_log_summary", "=", "any", "(", "x", "for", "x", "in", "artifacts", "if", "x", "[", "'name'", "]", "==", "'text_log_summary'", ")", "if", "artifacts", ":", "artifacts", "=", "serialize_artifact_json_blobs", "(", "artifacts", ")", "# need to add job guid to artifacts, since they likely weren't", "# present in the beginning", "for", "artifact", "in", "artifacts", ":", "if", "not", "all", "(", "k", "in", "artifact", "for", "k", "in", "(", "\"name\"", ",", "\"type\"", ",", "\"blob\"", ")", ")", ":", "raise", "ValueError", "(", 
"\"Artifact missing properties: {}\"", ".", "format", "(", "artifact", ")", ")", "# Ensure every artifact has a ``job_guid`` value.", "# It is legal to submit an artifact that doesn't have a", "# ``job_guid`` value. But, if missing, it should inherit that", "# value from the job itself.", "if", "\"job_guid\"", "not", "in", "artifact", ":", "artifact", "[", "\"job_guid\"", "]", "=", "job_guid", "store_job_artifacts", "(", "artifacts", ")", "log_refs", "=", "job_datum", ".", "get", "(", "'log_references'", ",", "[", "]", ")", "job_logs", "=", "[", "]", "if", "log_refs", ":", "for", "log", "in", "log_refs", ":", "name", "=", "log", ".", "get", "(", "'name'", ")", "or", "'unknown'", "name", "=", "name", "[", "0", ":", "50", "]", "url", "=", "log", ".", "get", "(", "'url'", ")", "or", "'unknown'", "url", "=", "url", "[", "0", ":", "255", "]", "# this indicates that a summary artifact was submitted with", "# this job that corresponds to the buildbot_text log url.", "# Therefore, the log does not need parsing. So we should", "# ensure that it's marked as already parsed.", "if", "has_text_log_summary", "and", "name", "==", "'buildbot_text'", ":", "parse_status", "=", "JobLog", ".", "PARSED", "else", ":", "parse_status_map", "=", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "(", "v", ",", "k", ")", "in", "JobLog", ".", "STATUSES", "]", ")", "mapped_status", "=", "parse_status_map", ".", "get", "(", "log", ".", "get", "(", "'parse_status'", ")", ")", "if", "mapped_status", ":", "parse_status", "=", "mapped_status", "else", ":", "parse_status", "=", "JobLog", ".", "PENDING", "jl", ",", "_", "=", "JobLog", ".", "objects", ".", "get_or_create", "(", "job", "=", "job", ",", "name", "=", "name", ",", "url", "=", "url", ",", "defaults", "=", "{", "'status'", ":", "parse_status", "}", ")", "job_logs", ".", "append", "(", "jl", ")", "_schedule_log_parsing", "(", "job", ",", "job_logs", ",", "result", ")", "return", "job_guid" ]
Load a job into the treeherder database If the job is a ``retry`` the ``job_guid`` will have a special suffix on it. But the matching ``pending``/``running`` job will not. So we append the suffixed ``job_guid`` to ``retry_job_guids`` so that we can update the job_id_lookup later with the non-suffixed ``job_guid`` (root ``job_guid``). Then we can find the right ``pending``/``running`` job and update it with this ``retry`` job.
[ "Load", "a", "job", "into", "the", "treeherder", "database" ]
python
train
tdryer/hangups
examples/lookup_entities.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/examples/lookup_entities.py#L8-L20
async def lookup_entities(client, args):
    """Search for entities by phone number, email, or gaia_id."""
    lookup_spec = _get_lookup_spec(args.entity_identifier)
    request = hangups.hangouts_pb2.GetEntityByIdRequest(
        request_header=client.get_request_header(),
        batch_lookup_spec=[lookup_spec],
    )
    res = await client.get_entity_by_id(request)

    # Print the list of entities in the response.
    for entity_result in res.entity_result:
        for entity in entity_result.entity:
            print(entity)
[ "async", "def", "lookup_entities", "(", "client", ",", "args", ")", ":", "lookup_spec", "=", "_get_lookup_spec", "(", "args", ".", "entity_identifier", ")", "request", "=", "hangups", ".", "hangouts_pb2", ".", "GetEntityByIdRequest", "(", "request_header", "=", "client", ".", "get_request_header", "(", ")", ",", "batch_lookup_spec", "=", "[", "lookup_spec", "]", ",", ")", "res", "=", "await", "client", ".", "get_entity_by_id", "(", "request", ")", "# Print the list of entities in the response.", "for", "entity_result", "in", "res", ".", "entity_result", ":", "for", "entity", "in", "entity_result", ".", "entity", ":", "print", "(", "entity", ")" ]
Search for entities by phone number, email, or gaia_id.
[ "Search", "for", "entities", "by", "phone", "number", "email", "or", "gaia_id", "." ]
python
valid
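hangups' bundled examples are normally launched through a shared helper module that sits beside this file; the wiring below assumes that helper's name and flag convention, so treat it as a sketch rather than the project's documented entry point.

from common import run_example  # helper assumed to live beside this example

if __name__ == '__main__':
    run_example(lookup_entities, '--entity-identifier')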
odlgroup/odl
odl/solvers/functional/default_functionals.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L2635-L2651
def _call(self, x):
    """Return ``self(x)``."""
    if isinstance(self.domain, ProductSpace):
        norm = PointwiseNorm(self.domain, 2)(x)
    else:
        norm = x.ufuncs.absolute()

    if self.gamma > 0:
        tmp = norm.ufuncs.square()
        tmp *= 1 / (2 * self.gamma)

        index = norm.ufuncs.greater_equal(self.gamma)
        tmp[index] = norm[index] - self.gamma / 2
    else:
        tmp = norm

    return tmp.inner(tmp.space.one())
[ "def", "_call", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "self", ".", "domain", ",", "ProductSpace", ")", ":", "norm", "=", "PointwiseNorm", "(", "self", ".", "domain", ",", "2", ")", "(", "x", ")", "else", ":", "norm", "=", "x", ".", "ufuncs", ".", "absolute", "(", ")", "if", "self", ".", "gamma", ">", "0", ":", "tmp", "=", "norm", ".", "ufuncs", ".", "square", "(", ")", "tmp", "*=", "1", "/", "(", "2", "*", "self", ".", "gamma", ")", "index", "=", "norm", ".", "ufuncs", ".", "greater_equal", "(", "self", ".", "gamma", ")", "tmp", "[", "index", "]", "=", "norm", "[", "index", "]", "-", "self", ".", "gamma", "/", "2", "else", ":", "tmp", "=", "norm", "return", "tmp", ".", "inner", "(", "tmp", ".", "space", ".", "one", "(", ")", ")" ]
Return ``self(x)``.
[ "Return", "self", "(", "x", ")", "." ]
python
train
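Written out, the gamma > 0 branch above evaluates a Huber-type smoothing of the (pointwise) norm; in informal notation (assumed here, not quoted from odl's docs):

H_\gamma(t) =
\begin{cases}
  t^2 / (2\gamma), & t < \gamma, \\
  t - \gamma/2,    & t \ge \gamma,
\end{cases}
\qquad
\mathrm{self}(x) = \big\langle H_\gamma(\lVert x \rVert_2),\, \mathbf{1} \big\rangle,

with the pointwise 2-norm used on product spaces and |x| otherwise; for gamma == 0 the plain norm is integrated instead, and the inner product with the one-vector performs the integration.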
readbeyond/aeneas
aeneas/adjustboundaryalgorithm.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/adjustboundaryalgorithm.py#L541-L546
def _adjust_rate_aggressive(self, real_wave_mfcc, algo_parameters):
    """
    RATEAGGRESSIVE
    """
    self.log(u"Called _adjust_rate_aggressive")
    self._apply_rate(max_rate=algo_parameters[0], aggressive=True)
[ "def", "_adjust_rate_aggressive", "(", "self", ",", "real_wave_mfcc", ",", "algo_parameters", ")", ":", "self", ".", "log", "(", "u\"Called _adjust_rate_aggressive\"", ")", "self", ".", "_apply_rate", "(", "max_rate", "=", "algo_parameters", "[", "0", "]", ",", "aggressive", "=", "True", ")" ]
RATEAGGRESSIVE
[ "RATEAGGRESSIVE" ]
python
train
projectshift/shift-schema
shiftschema/translator.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/translator.py#L22-L34
def normalize_locale(locale):
    """
    Normalize locale
    Extracts language code from passed in locale string to be used later
    for dictionaries loading.

    :param locale: string, locale (en, en_US)
    :return: string, language code
    """
    import re
    match = re.match(r'^[a-z]+', locale.lower())
    if match:
        return match.group()
[ "def", "normalize_locale", "(", "locale", ")", ":", "import", "re", "match", "=", "re", ".", "match", "(", "r'^[a-z]+'", ",", "locale", ".", "lower", "(", ")", ")", "if", "match", ":", "return", "match", ".", "group", "(", ")" ]
Normalize locale
Extracts language code from passed in locale string to be used later
for dictionaries loading.

:param locale: string, locale (en, en_US)
:return: string, language code
[ "Normalize", "locale", "Extracts", "language", "code", "from", "passed", "in", "locale", "string", "to", "be", "used", "later", "for", "dictionaries", "loading", "." ]
python
train
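A standalone doctest-style sketch of the helper above; nothing beyond the function itself is needed.

>>> normalize_locale('en_US')
'en'
>>> normalize_locale('EN')
'en'
>>> normalize_locale('123') is None
True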
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L2104-L2125
def dafus(insum, nd, ni):
    """
    Unpack an array summary into its double precision and integer components.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafus_c.html

    :param insum: Array summary.
    :type insum: Array of floats
    :param nd: Number of double precision components.
    :type nd: int
    :param ni: Number of integer components.
    :type ni: int
    :return: Double precision components, Integer components.
    :rtype: tuple
    """
    insum = stypes.toDoubleVector(insum)
    dc = stypes.emptyDoubleVector(nd)
    ic = stypes.emptyIntVector(ni)
    nd = ctypes.c_int(nd)
    ni = ctypes.c_int(ni)
    libspice.dafus_c(insum, nd, ni, dc, ic)
    return stypes.cVectorToPython(dc), stypes.cVectorToPython(ic)
[ "def", "dafus", "(", "insum", ",", "nd", ",", "ni", ")", ":", "insum", "=", "stypes", ".", "toDoubleVector", "(", "insum", ")", "dc", "=", "stypes", ".", "emptyDoubleVector", "(", "nd", ")", "ic", "=", "stypes", ".", "emptyIntVector", "(", "ni", ")", "nd", "=", "ctypes", ".", "c_int", "(", "nd", ")", "ni", "=", "ctypes", ".", "c_int", "(", "ni", ")", "libspice", ".", "dafus_c", "(", "insum", ",", "nd", ",", "ni", ",", "dc", ",", "ic", ")", "return", "stypes", ".", "cVectorToPython", "(", "dc", ")", ",", "stypes", ".", "cVectorToPython", "(", "ic", ")" ]
Unpack an array summary into its double precision and integer components.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafus_c.html

:param insum: Array summary.
:type insum: Array of floats
:param nd: Number of double precision components.
:type nd: int
:param ni: Number of integer components.
:type ni: int
:return: Double precision components, Integer components.
:rtype: tuple
[ "Unpack", "an", "array", "summary", "into", "its", "double", "precision", "and", "integer", "components", "." ]
python
train
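Usage sketch for dafus. Real summaries come from traversing a DAF, so the packed array below is fabricated; nd=2, ni=6 matches the SPK summary layout, where six integers pack two per double after the two doubles.

import spiceypy as spice

packed = [1.0, 2.0, 3.0, 4.0, 5.0]  # fabricated 5-double packed summary
dc, ic = spice.dafus(packed, 2, 6)
print(dc)  # the 2 double precision components
print(ic)  # the 6 integer components (nonsense here, since the input is fake)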
tensorflow/tensor2tensor
tensor2tensor/models/research/attention_lm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm.py#L167-L182
def attention_lm_small():
  """Cheap model.

  on lm1b_32k:
     45M params
     2 steps/sec on [GeForce GTX TITAN X]

  Returns:
    an hparams object.
  """
  hparams = attention_lm_base()
  hparams.num_hidden_layers = 4
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.layer_prepostprocess_dropout = 0.5
  return hparams
[ "def", "attention_lm_small", "(", ")", ":", "hparams", "=", "attention_lm_base", "(", ")", "hparams", ".", "num_hidden_layers", "=", "4", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.5", "return", "hparams" ]
Cheap model.

on lm1b_32k:
   45M params
   2 steps/sec on [GeForce GTX TITAN X]

Returns:
  an hparams object.
[ "Cheap", "model", "." ]
python
train
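A usage sketch; the import path comes from the record's own path field, and the printed values restate the overrides set in the function.

from tensor2tensor.models.research import attention_lm

hparams = attention_lm.attention_lm_small()
print(hparams.hidden_size)        # 512
print(hparams.num_hidden_layers)  # 4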
blockstack/blockstack-core
blockstack/lib/subdomains.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/subdomains.py#L1250-L1269
def get_domain_resolver(self, domain_name, cur=None):
    """
    Get the last-known resolver entry for a domain name.
    Returns None if not found.
    """
    get_cmd = "SELECT resolver FROM {} WHERE domain=? AND resolver != '' AND accepted=1 ORDER BY sequence DESC, parent_zonefile_index DESC LIMIT 1;".format(self.subdomain_table)

    cursor = None
    if cur is None:
        cursor = self.conn.cursor()
    else:
        cursor = cur

    db_query_execute(cursor, get_cmd, (domain_name,))
    rowdata = cursor.fetchone()
    if not rowdata:
        return None

    return rowdata['resolver']
[ "def", "get_domain_resolver", "(", "self", ",", "domain_name", ",", "cur", "=", "None", ")", ":", "get_cmd", "=", "\"SELECT resolver FROM {} WHERE domain=? AND resolver != '' AND accepted=1 ORDER BY sequence DESC, parent_zonefile_index DESC LIMIT 1;\"", ".", "format", "(", "self", ".", "subdomain_table", ")", "cursor", "=", "None", "if", "cur", "is", "None", ":", "cursor", "=", "self", ".", "conn", ".", "cursor", "(", ")", "else", ":", "cursor", "=", "cur", "db_query_execute", "(", "cursor", ",", "get_cmd", ",", "(", "domain_name", ",", ")", ")", "rowdata", "=", "cursor", ".", "fetchone", "(", ")", "if", "not", "rowdata", ":", "return", "None", "return", "rowdata", "[", "'resolver'", "]" ]
Get the last-known resolver entry for a domain name.
Returns None if not found.
[ "Get", "the", "last", "-", "knwon", "resolver", "entry", "for", "a", "domain", "name", "Returns", "None", "if", "not", "found", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/address.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/address.py#L88-L93
def _checkReplyTo(self, value):
    '''WS-Address From value -- From server returned in wsa:To
    '''
    if value != self._replyTo:
        raise WSActionException, 'wrong WS-Address ReplyTo(%s), expecting %s'%(value,self._replyTo)
[ "def", "_checkReplyTo", "(", "self", ",", "value", ")", ":", "if", "value", "!=", "self", ".", "_replyTo", ":", "raise", "WSActionException", ",", "'wrong WS-Address ReplyTo(%s), expecting %s'", "%", "(", "value", ",", "self", ".", "_replyTo", ")" ]
WS-Address From value -- From server returned in wsa:To
[ "WS", "-", "Address", "From", "value", "--", "From", "server", "returned", "in", "wsa", ":", "To" ]
python
train
orbeckst/RecSQL
recsql/export.py
https://github.com/orbeckst/RecSQL/blob/6acbf821022361719391697c9c2f0822f9f8022a/recsql/export.py#L50-L54
def rec2latex(r, filename, empty=""):
    """Export a recarray *r* to a LaTeX table in *filename*"""
    with open(filename, "w") as latex:
        latex.write(s_rec2latex(r, empty=empty))
    return filename
[ "def", "rec2latex", "(", "r", ",", "filename", ",", "empty", "=", "\"\"", ")", ":", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "latex", ":", "latex", ".", "write", "(", "s_rec2latex", "(", "r", ",", "empty", "=", "empty", ")", ")", "return", "filename" ]
Export a recarray *r* to a LaTeX table in *filename*
[ "Export", "a", "recarray", "*", "r", "*", "to", "a", "LaTeX", "table", "in", "*", "filename", "*" ]
python
train
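Usage sketch, assuming a small numpy recarray; the LaTeX body itself is produced by s_rec2latex from the same module.

import numpy as np
from recsql.export import rec2latex  # module path taken from the record's path field

r = np.rec.fromrecords([(1, 2.0), (3, 4.0)], names='a,b')
rec2latex(r, 'table.tex')  # writes table.tex and returns the filename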
google/grumpy
third_party/stdlib/warnings.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/warnings.py#L95-L113
def simplefilter(action, category=Warning, lineno=0, append=0):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    item = (action, None, category, None, lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
[ "def", "simplefilter", "(", "action", ",", "category", "=", "Warning", ",", "lineno", "=", "0", ",", "append", "=", "0", ")", ":", "assert", "action", "in", "(", "\"error\"", ",", "\"ignore\"", ",", "\"always\"", ",", "\"default\"", ",", "\"module\"", ",", "\"once\"", ")", ",", "\"invalid action: %r\"", "%", "(", "action", ",", ")", "assert", "isinstance", "(", "lineno", ",", "int", ")", "and", "lineno", ">=", "0", ",", "\"lineno must be an int >= 0\"", "item", "=", "(", "action", ",", "None", ",", "category", ",", "None", ",", "lineno", ")", "if", "append", ":", "filters", ".", "append", "(", "item", ")", "else", ":", "filters", ".", "insert", "(", "0", ",", "item", ")" ]
Insert a simple entry into the list of warnings filters (at the front).

A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
            or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
[ "Insert", "a", "simple", "entry", "into", "the", "list", "of", "warnings", "filters", "(", "at", "the", "front", ")", "." ]
python
valid
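Because the function above mirrors the stdlib warnings API, the usual stdlib calls illustrate it:

import warnings

warnings.simplefilter('ignore', DeprecationWarning)  # silence one category
warnings.simplefilter('error')                       # escalate all warnings to exceptions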
croscon/fleaker
fleaker/exceptions.py
https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/exceptions.py#L349-L373
def post_create_app(cls, app, **settings):
    """Register the errorhandler for the AppException to the passed in
    App.

    Args:
        app (fleaker.base.BaseApplication): A Flask application that
            extends the Fleaker Base Application, such that the hooks are
            implemented.

    Kwargs:
        register_errorhandler (bool): A boolean indicating if we want to
            automatically register an errorhandler for the
            :class:`AppException` exception class after we create this App.
            Pass ``False`` to prevent registration. Default is ``True``.

    Returns:
        fleaker.base.BaseApplication: Returns the app it was given.
    """
    register_errorhandler = settings.pop('register_errorhandler', True)

    if register_errorhandler:
        AppException.register_errorhandler(app)

    return app
[ "def", "post_create_app", "(", "cls", ",", "app", ",", "*", "*", "settings", ")", ":", "register_errorhandler", "=", "settings", ".", "pop", "(", "'register_errorhandler'", ",", "True", ")", "if", "register_errorhandler", ":", "AppException", ".", "register_errorhandler", "(", "app", ")", "return", "app" ]
Register the errorhandler for the AppException to the passed in App.

Args:
    app (fleaker.base.BaseApplication): A Flask application that
        extends the Fleaker Base Application, such that the hooks are
        implemented.

Kwargs:
    register_errorhandler (bool): A boolean indicating if we want to
        automatically register an errorhandler for the
        :class:`AppException` exception class after we create this App.
        Pass ``False`` to prevent registration. Default is ``True``.

Returns:
    fleaker.base.BaseApplication: Returns the app it was given.
[ "Register", "the", "errorhandler", "for", "the", "AppException", "to", "the", "passed", "in", "App", "." ]
python
train
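A hedged sketch of the hook's effect; App.create_app is assumed from Fleaker's public factory interface and may not match every project layout.

from fleaker import App  # assumed public entry point

app = App.create_app(__name__)  # AppException errorhandler registered by default
quiet_app = App.create_app(__name__, register_errorhandler=False)  # opt out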
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L2134-L2173
def _add_item(self, item, indent_amt):
    """Add an item to the line.

    Reflow the line to get the best formatting after the item is
    inserted. The bracket depth indicates if the item is being
    inserted inside of a container or not.

    """
    if self._prev_item and self._prev_item.is_string and item.is_string:
        # Place consecutive string literals on separate lines.
        self._lines.append(self._LineBreak())
        self._lines.append(self._Indent(indent_amt))

    item_text = unicode(item)
    if self._lines and self._bracket_depth:
        # Adding the item into a container.
        self._prevent_default_initializer_splitting(item, indent_amt)

        if item_text in '.,)]}':
            self._split_after_delimiter(item, indent_amt)

    elif self._lines and not self.line_empty():
        # Adding the item outside of a container.
        if self.fits_on_current_line(len(item_text)):
            self._enforce_space(item)
        else:
            # Line break for the new item.
            self._lines.append(self._LineBreak())
            self._lines.append(self._Indent(indent_amt))

    self._lines.append(item)
    self._prev_item, self._prev_prev_item = item, self._prev_item

    if item_text in '([{':
        self._bracket_depth += 1
    elif item_text in '}])':
        self._bracket_depth -= 1

    assert self._bracket_depth >= 0
[ "def", "_add_item", "(", "self", ",", "item", ",", "indent_amt", ")", ":", "if", "self", ".", "_prev_item", "and", "self", ".", "_prev_item", ".", "is_string", "and", "item", ".", "is_string", ":", "# Place consecutive string literals on separate lines.", "self", ".", "_lines", ".", "append", "(", "self", ".", "_LineBreak", "(", ")", ")", "self", ".", "_lines", ".", "append", "(", "self", ".", "_Indent", "(", "indent_amt", ")", ")", "item_text", "=", "unicode", "(", "item", ")", "if", "self", ".", "_lines", "and", "self", ".", "_bracket_depth", ":", "# Adding the item into a container.", "self", ".", "_prevent_default_initializer_splitting", "(", "item", ",", "indent_amt", ")", "if", "item_text", "in", "'.,)]}'", ":", "self", ".", "_split_after_delimiter", "(", "item", ",", "indent_amt", ")", "elif", "self", ".", "_lines", "and", "not", "self", ".", "line_empty", "(", ")", ":", "# Adding the item outside of a container.", "if", "self", ".", "fits_on_current_line", "(", "len", "(", "item_text", ")", ")", ":", "self", ".", "_enforce_space", "(", "item", ")", "else", ":", "# Line break for the new item.", "self", ".", "_lines", ".", "append", "(", "self", ".", "_LineBreak", "(", ")", ")", "self", ".", "_lines", ".", "append", "(", "self", ".", "_Indent", "(", "indent_amt", ")", ")", "self", ".", "_lines", ".", "append", "(", "item", ")", "self", ".", "_prev_item", ",", "self", ".", "_prev_prev_item", "=", "item", ",", "self", ".", "_prev_item", "if", "item_text", "in", "'([{'", ":", "self", ".", "_bracket_depth", "+=", "1", "elif", "item_text", "in", "'}])'", ":", "self", ".", "_bracket_depth", "-=", "1", "assert", "self", ".", "_bracket_depth", ">=", "0" ]
Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not.
[ "Add", "an", "item", "to", "the", "line", "." ]
python
train
benley/butcher
butcher/targets/pkgfilegroup.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/targets/pkgfilegroup.py#L51-L59
def translate_path(self, dep_file, dep_rule):
    """Translate dep_file from dep_rule into this rule's output path."""
    dst_base = dep_file.split(
        os.path.join(dep_rule.address.repo, dep_rule.address.path), 1)[-1]
    if self.params['strip_prefix']:
        dst_base = dep_file.split(self.params['strip_prefix'], 1)[-1]
    return os.path.join(self.address.repo, self.address.path,
                        self.params['prefix'].lstrip('/'),
                        dst_base.lstrip('/'))
[ "def", "translate_path", "(", "self", ",", "dep_file", ",", "dep_rule", ")", ":", "dst_base", "=", "dep_file", ".", "split", "(", "os", ".", "path", ".", "join", "(", "dep_rule", ".", "address", ".", "repo", ",", "dep_rule", ".", "address", ".", "path", ")", ",", "1", ")", "[", "-", "1", "]", "if", "self", ".", "params", "[", "'strip_prefix'", "]", ":", "dst_base", "=", "dep_file", ".", "split", "(", "self", ".", "params", "[", "'strip_prefix'", "]", ",", "1", ")", "[", "-", "1", "]", "return", "os", ".", "path", ".", "join", "(", "self", ".", "address", ".", "repo", ",", "self", ".", "address", ".", "path", ",", "self", ".", "params", "[", "'prefix'", "]", ".", "lstrip", "(", "'/'", ")", ",", "dst_base", ".", "lstrip", "(", "'/'", ")", ")" ]
Translate dep_file from dep_rule into this rule's output path.
[ "Translate", "dep_file", "from", "dep_rule", "into", "this", "rule", "s", "output", "path", "." ]
python
train
pydata/pandas-gbq
pandas_gbq/gbq.py
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1185-L1195
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.

    This is a private method, but was used in external code to work around
    issues in the default schema generation. Now that individual columns can
    be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
    method can be removed after there is time to migrate away from this
    method.
    """
    from pandas_gbq import schema

    return schema.generate_bq_schema(df, default_type=default_type)
[ "def", "_generate_bq_schema", "(", "df", ",", "default_type", "=", "\"STRING\"", ")", ":", "from", "pandas_gbq", "import", "schema", "return", "schema", ".", "generate_bq_schema", "(", "df", ",", "default_type", "=", "default_type", ")" ]
DEPRECATED: Given a dataframe, generate a Google BigQuery schema. This is a private method, but was used in external code to work around issues in the default schema generation. Now that individual columns can be overridden: https://github.com/pydata/pandas-gbq/issues/218, this method can be removed after there is time to migrate away from this method.
[ "DEPRECATED", ":", "Given", "a", "dataframe", "generate", "a", "Google", "BigQuery", "schema", "." ]
python
train
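A sketch of the non-deprecated path the docstring points to; the schema shown in the comment is what int64/object columns typically map to, not guaranteed output.

import pandas as pd
from pandas_gbq import schema

df = pd.DataFrame({'name': ['a'], 'count': [1]})
print(schema.generate_bq_schema(df, default_type='STRING'))
# e.g. {'fields': [{'name': 'name', 'type': 'STRING'},
#                  {'name': 'count', 'type': 'INTEGER'}]}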
python-hyper/wsproto
example/synchronous_server.py
https://github.com/python-hyper/wsproto/blob/a7abcc5a9f7ad126668afb0cc9932da08c87f40f/example/synchronous_server.py#L18-L41
def main():
    ''' Run the server. '''
    try:
        ip = sys.argv[1]
        port = int(sys.argv[2])
    except (IndexError, ValueError):
        print('Usage: {} <BIND_IP> <PORT>'.format(sys.argv[0]))
        sys.exit(1)

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((ip, port))
    server.listen(0)

    try:
        while True:
            print('Waiting for connection...')
            (stream, addr) = server.accept()
            print('Client connected: {}:{}'.format(addr[0], addr[1]))
            handle_connection(stream)
            stream.shutdown(socket.SHUT_WR)
            stream.close()
    except KeyboardInterrupt:
        print('Received SIGINT: shutting down…')
[ "def", "main", "(", ")", ":", "try", ":", "ip", "=", "sys", ".", "argv", "[", "1", "]", "port", "=", "int", "(", "sys", ".", "argv", "[", "2", "]", ")", "except", "(", "IndexError", ",", "ValueError", ")", ":", "print", "(", "'Usage: {} <BIND_IP> <PORT>'", ".", "format", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "sys", ".", "exit", "(", "1", ")", "server", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "server", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "server", ".", "bind", "(", "(", "ip", ",", "port", ")", ")", "server", ".", "listen", "(", "0", ")", "try", ":", "while", "True", ":", "print", "(", "'Waiting for connection...'", ")", "(", "stream", ",", "addr", ")", "=", "server", ".", "accept", "(", ")", "print", "(", "'Client connected: {}:{}'", ".", "format", "(", "addr", "[", "0", "]", ",", "addr", "[", "1", "]", ")", ")", "handle_connection", "(", "stream", ")", "stream", ".", "shutdown", "(", "socket", ".", "SHUT_WR", ")", "stream", ".", "close", "(", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'Received SIGINT: shutting down…')", "" ]
Run the server.
[ "Run", "the", "server", "." ]
python
train
nickstenning/tagalog
tagalog/io.py
https://github.com/nickstenning/tagalog/blob/c6847a957dc4f96836a5cf13c4eb664fccafaac2/tagalog/io.py#L24-L68
def lines(fp):
    """
    Read lines of UTF-8 from the file-like object given in ``fp``, making
    sure that when reading from STDIN, reads are at most line-buffered.

    UTF-8 decoding errors are handled silently. Invalid characters are
    replaced by U+FFFD REPLACEMENT CHARACTER.

    Line endings are normalised to newlines by Python's universal newlines
    feature.

    Returns an iterator yielding lines.
    """
    if fp.fileno() == sys.stdin.fileno():
        close = True
        try:
            # Python 3
            fp = open(fp.fileno(), mode='r', buffering=BUF_LINEBUFFERED, errors='replace')
            decode = False
        except TypeError:
            fp = os.fdopen(fp.fileno(), 'rU', BUF_LINEBUFFERED)
            decode = True
    else:
        close = False
        try:
            # only decode if the fp doesn't already have an encoding
            decode = (fp.encoding != UTF8)
        except AttributeError:
            # fp has been opened in binary mode
            decode = True

    try:
        while 1:
            l = fp.readline()
            if l:
                if decode:
                    l = l.decode(UTF8, 'replace')
                yield l
            else:
                break
    finally:
        if close:
            fp.close()
[ "def", "lines", "(", "fp", ")", ":", "if", "fp", ".", "fileno", "(", ")", "==", "sys", ".", "stdin", ".", "fileno", "(", ")", ":", "close", "=", "True", "try", ":", "# Python 3", "fp", "=", "open", "(", "fp", ".", "fileno", "(", ")", ",", "mode", "=", "'r'", ",", "buffering", "=", "BUF_LINEBUFFERED", ",", "errors", "=", "'replace'", ")", "decode", "=", "False", "except", "TypeError", ":", "fp", "=", "os", ".", "fdopen", "(", "fp", ".", "fileno", "(", ")", ",", "'rU'", ",", "BUF_LINEBUFFERED", ")", "decode", "=", "True", "else", ":", "close", "=", "False", "try", ":", "# only decode if the fp doesn't already have an encoding", "decode", "=", "(", "fp", ".", "encoding", "!=", "UTF8", ")", "except", "AttributeError", ":", "# fp has been opened in binary mode", "decode", "=", "True", "try", ":", "while", "1", ":", "l", "=", "fp", ".", "readline", "(", ")", "if", "l", ":", "if", "decode", ":", "l", "=", "l", ".", "decode", "(", "UTF8", ",", "'replace'", ")", "yield", "l", "else", ":", "break", "finally", ":", "if", "close", ":", "fp", ".", "close", "(", ")" ]
Read lines of UTF-8 from the file-like object given in ``fp``, making sure that when reading from STDIN, reads are at most line-buffered. UTF-8 decoding errors are handled silently. Invalid characters are replaced by U+FFFD REPLACEMENT CHARACTER. Line endings are normalised to newlines by Python's universal newlines feature. Returns an iterator yielding lines.
[ "Read", "lines", "of", "UTF", "-", "8", "from", "the", "file", "-", "like", "object", "given", "in", "fp", "making", "sure", "that", "when", "reading", "from", "STDIN", "reads", "are", "at", "most", "line", "-", "buffered", "." ]
python
train
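Usage sketch wiring the reader to stdin, the case the buffering logic above is written for:

import sys
from tagalog import io  # module path taken from the record's path field

for line in io.lines(sys.stdin):
    print(line.rstrip('\n'))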
marcomusy/vtkplotter
vtkplotter/utils.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L100-L105
def versor(v):
    """Return the unit vector. Input can be a list of vectors."""
    if isinstance(v[0], np.ndarray):
        return np.divide(v, mag(v)[:, None])
    else:
        return v / mag(v)
[ "def", "versor", "(", "v", ")", ":", "if", "isinstance", "(", "v", "[", "0", "]", ",", "np", ".", "ndarray", ")", ":", "return", "np", ".", "divide", "(", "v", ",", "mag", "(", "v", ")", "[", ":", ",", "None", "]", ")", "else", ":", "return", "v", "/", "mag", "(", "v", ")" ]
Return the unit vector. Input can be a list of vectors.
[ "Return", "the", "unit", "vector", ".", "Input", "can", "be", "a", "list", "of", "vectors", "." ]
python
train
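A self-contained sketch of both branches, with versor from the record above in scope; mag is vtkplotter's Euclidean-norm helper, re-implemented here (same assumed semantics) so the snippet runs alone.

import numpy as np

def mag(v):
    # minimal stand-in for vtkplotter's magnitude helper
    if isinstance(v[0], np.ndarray):
        return np.linalg.norm(v, axis=1)
    return np.linalg.norm(v)

print(versor(np.array([3.0, 4.0, 0.0])))      # [0.6 0.8 0. ]
print(versor([np.array([3.0, 4.0, 0.0]),
              np.array([0.0, 0.0, 2.0])]))    # row-wise unit vectors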
arteria/django-openinghours
openinghours/utils.py
https://github.com/arteria/django-openinghours/blob/6bad47509a14d65a3a5a08777455f4cc8b4961fa/openinghours/utils.py#L69-L111
def is_open(location, now=None):
    """
    Is the company currently open? Pass "now" to test with a specific
    timestamp. Can be used stand-alone or as a helper.
    """
    if now is None:
        now = get_now()

    if has_closing_rule_for_now(location):
        return False

    now_time = datetime.time(now.hour, now.minute, now.second)

    if location:
        ohs = OpeningHours.objects.filter(company=location)
    else:
        ohs = Company.objects.first().openinghours_set.all()
    for oh in ohs:
        is_open = False
        # start and end is on the same day
        if (oh.weekday == now.isoweekday() and
                oh.from_hour <= now_time and
                now_time <= oh.to_hour):
            is_open = oh

        # start and end are not on the same day and we test on the start day
        if (oh.weekday == now.isoweekday() and
                oh.from_hour <= now_time and
                ((oh.to_hour < oh.from_hour) and
                 (now_time < datetime.time(23, 59, 59)))):
            is_open = oh

        # start and end are not on the same day and we test on the end day
        if (oh.weekday == (now.isoweekday() - 1) % 7 and
                oh.from_hour >= now_time and
                oh.to_hour >= now_time and
                oh.to_hour < oh.from_hour):
            is_open = oh
            # print " 'Special' case after midnight", oh

        if is_open is not False:
            return oh
    return False
[ "def", "is_open", "(", "location", ",", "now", "=", "None", ")", ":", "if", "now", "is", "None", ":", "now", "=", "get_now", "(", ")", "if", "has_closing_rule_for_now", "(", "location", ")", ":", "return", "False", "now_time", "=", "datetime", ".", "time", "(", "now", ".", "hour", ",", "now", ".", "minute", ",", "now", ".", "second", ")", "if", "location", ":", "ohs", "=", "OpeningHours", ".", "objects", ".", "filter", "(", "company", "=", "location", ")", "else", ":", "ohs", "=", "Company", ".", "objects", ".", "first", "(", ")", ".", "openinghours_set", ".", "all", "(", ")", "for", "oh", "in", "ohs", ":", "is_open", "=", "False", "# start and end is on the same day", "if", "(", "oh", ".", "weekday", "==", "now", ".", "isoweekday", "(", ")", "and", "oh", ".", "from_hour", "<=", "now_time", "and", "now_time", "<=", "oh", ".", "to_hour", ")", ":", "is_open", "=", "oh", "# start and end are not on the same day and we test on the start day", "if", "(", "oh", ".", "weekday", "==", "now", ".", "isoweekday", "(", ")", "and", "oh", ".", "from_hour", "<=", "now_time", "and", "(", "(", "oh", ".", "to_hour", "<", "oh", ".", "from_hour", ")", "and", "(", "now_time", "<", "datetime", ".", "time", "(", "23", ",", "59", ",", "59", ")", ")", ")", ")", ":", "is_open", "=", "oh", "# start and end are not on the same day and we test on the end day", "if", "(", "oh", ".", "weekday", "==", "(", "now", ".", "isoweekday", "(", ")", "-", "1", ")", "%", "7", "and", "oh", ".", "from_hour", ">=", "now_time", "and", "oh", ".", "to_hour", ">=", "now_time", "and", "oh", ".", "to_hour", "<", "oh", ".", "from_hour", ")", ":", "is_open", "=", "oh", "# print \" 'Special' case after midnight\", oh", "if", "is_open", "is", "not", "False", ":", "return", "oh", "return", "False" ]
Is the company currently open? Pass "now" to test with a specific timestamp. Can be used stand-alone or as a helper.
[ "Is", "the", "company", "currently", "open?", "Pass", "now", "to", "test", "with", "a", "specific", "timestamp", ".", "Can", "be", "used", "stand", "-", "alone", "or", "as", "a", "helper", "." ]
python
train
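The three weekday/hour branches above implement an opening interval that may wrap past midnight. A condensed, Django-free sketch of that core check (interval_contains is a hypothetical helper, not part of the package):

import datetime

def interval_contains(from_hour, to_hour, now_time):
    # Same-day interval: open while from_hour <= now <= to_hour.
    if from_hour <= to_hour:
        return from_hour <= now_time <= to_hour
    # Interval wraps past midnight (to_hour < from_hour): open either late
    # on the start day or early on the following day.
    return now_time >= from_hour or now_time <= to_hour

print(interval_contains(datetime.time(22, 0), datetime.time(2, 0), datetime.time(23, 30)))  # True
print(interval_contains(datetime.time(22, 0), datetime.time(2, 0), datetime.time(1, 0)))    # True
print(interval_contains(datetime.time(9, 0), datetime.time(17, 0), datetime.time(18, 0)))   # False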
klmitch/aversion
aversion.py
https://github.com/klmitch/aversion/blob/90ca68e7d6426a77db8a926171f8d3bbeb00ee4c/aversion.py#L137-L156
def _match_mask(mask, ctype):
    """
    Determine if a content type mask matches a given content type.

    :param mask: The content type mask, taken from the Accept header.
    :param ctype: The content type to match to the mask.
    """
    # Handle the simple cases first
    if '*' not in mask:
        return ctype == mask
    elif mask == '*/*':
        return True
    elif not mask.endswith('/*'):
        return False
    mask_major = mask[:-2]
    ctype_major = ctype.split('/', 1)[0]
    return ctype_major == mask_major
[ "def", "_match_mask", "(", "mask", ",", "ctype", ")", ":", "# Handle the simple cases first", "if", "'*'", "not", "in", "mask", ":", "return", "ctype", "==", "mask", "elif", "mask", "==", "'*/*'", ":", "return", "True", "elif", "not", "mask", ".", "endswith", "(", "'/*'", ")", ":", "return", "False", "mask_major", "=", "mask", "[", ":", "-", "2", "]", "ctype_major", "=", "ctype", ".", "split", "(", "'/'", ",", "1", ")", "[", "0", "]", "return", "ctype_major", "==", "mask_major" ]
Determine if a content type mask matches a given content type.

:param mask: The content type mask, taken from the Accept header.
:param ctype: The content type to match to the mask.
[ "Determine", "if", "a", "content", "type", "mask", "matches", "a", "given", "content", "type", "." ]
python
train
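Because _match_mask is a pure function, its contract is easy to pin down with a few checks (a sketch, assuming the function is in scope):

assert _match_mask('*/*', 'text/html')
assert _match_mask('text/html', 'text/html')
assert _match_mask('text/*', 'text/plain')
assert not _match_mask('text/*', 'application/json')
assert not _match_mask('*/json', 'application/json')  # only 'type/*' and '*/*' wildcards are honored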
kennethreitz/omnijson
omnijson/core.py
https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/core.py#L41-L51
def loads(s, **kwargs):
    """Loads JSON object."""
    try:
        return _engine[0](s)
    except _engine[2]:
        # except_clause: 'except' [test ['as' NAME]]          # grammar for py3x
        # except_clause: 'except' [test [('as' | ',') test]]  # grammar for py2x
        why = sys.exc_info()[1]
        raise JSONError(why)
[ "def", "loads", "(", "s", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "_engine", "[", "0", "]", "(", "s", ")", "except", "_engine", "[", "2", "]", ":", "# except_clause: 'except' [test ['as' NAME]] # grammar for py3x", "# except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x", "why", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "raise", "JSONError", "(", "why", ")" ]
Loads JSON object.
[ "Loads", "JSON", "object", "." ]
python
train
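A usage sketch, assuming omnijson re-exports loads and JSONError at the package level (the _engine tuple holds whichever JSON backend was detected at import time, plus its error class):

import omnijson as json

print(json.loads('{"name": "omnijson"}'))   # {'name': 'omnijson'}

try:
    json.loads('{not valid json')
except json.JSONError as err:
    print('parse failed:', err)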
aliyun/aliyun-odps-python-sdk
odps/models/instance.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/instance.py#L385-L414
def get_task_cost(self, task_name):
    """
    Get task cost

    :param task_name: name of the task
    :return: task cost
    :rtype: Instance.TaskCost

    :Example:

    >>> cost = instance.get_task_cost(instance.get_task_names()[0])
    >>> cost.cpu_cost
    200
    >>> cost.memory_cost
    4096
    >>> cost.input_size
    0
    """
    summary = self.get_task_summary(task_name)
    if summary is None:
        return None

    if 'Cost' in summary:
        task_cost = summary['Cost']
        cpu_cost = task_cost.get('CPU')
        memory = task_cost.get('Memory')
        input_size = task_cost.get('Input')
        return Instance.TaskCost(cpu_cost, memory, input_size)
[ "def", "get_task_cost", "(", "self", ",", "task_name", ")", ":", "summary", "=", "self", ".", "get_task_summary", "(", "task_name", ")", "if", "summary", "is", "None", ":", "return", "None", "if", "'Cost'", "in", "summary", ":", "task_cost", "=", "summary", "[", "'Cost'", "]", "cpu_cost", "=", "task_cost", ".", "get", "(", "'CPU'", ")", "memory", "=", "task_cost", ".", "get", "(", "'Memory'", ")", "input_size", "=", "task_cost", ".", "get", "(", "'Input'", ")", "return", "Instance", ".", "TaskCost", "(", "cpu_cost", ",", "memory", ",", "input_size", ")" ]
Get task cost

:param task_name: name of the task
:return: task cost
:rtype: Instance.TaskCost

:Example:

>>> cost = instance.get_task_cost(instance.get_task_names()[0])
>>> cost.cpu_cost
200
>>> cost.memory_cost
4096
>>> cost.input_size
0
[ "Get", "task", "cost" ]
python
train
annoviko/pyclustering
pyclustering/nnet/hysteresis.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/nnet/hysteresis.py#L192-L199
def outputs(self, values):
    """!
    @brief Sets outputs of neurons.

    """
    self._outputs = [val for val in values]
    self._outputs_buffer = [val for val in values]
[ "def", "outputs", "(", "self", ",", "values", ")", ":", "self", ".", "_outputs", "=", "[", "val", "for", "val", "in", "values", "]", "self", ".", "_outputs_buffer", "=", "[", "val", "for", "val", "in", "values", "]" ]
! @brief Sets outputs of neurons.
[ "!" ]
python
valid
NatLibFi/Skosify
skosify/check.py
https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/check.py#L37-L58
def hierarchy_cycles(rdf, fix=False):
    """Check if the graph contains skos:broader cycles and optionally break these.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing any skos:broader that overlaps
        with skos:broaderTransitive.
    """
    top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept))
    status = {}
    for cs, root in top_concepts:
        _hierarchy_cycles_visit(rdf, root, None, fix, status=status)
    # double check that all concepts were actually visited in the search,
    # and visit remaining ones if necessary
    recheck_top_concepts = False
    for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)):
        if conc not in status:
            recheck_top_concepts = True
            _hierarchy_cycles_visit(rdf, conc, None, fix, status=status)
    return recheck_top_concepts
[ "def", "hierarchy_cycles", "(", "rdf", ",", "fix", "=", "False", ")", ":", "top_concepts", "=", "sorted", "(", "rdf", ".", "subject_objects", "(", "SKOS", ".", "hasTopConcept", ")", ")", "status", "=", "{", "}", "for", "cs", ",", "root", "in", "top_concepts", ":", "_hierarchy_cycles_visit", "(", "rdf", ",", "root", ",", "None", ",", "fix", ",", "status", "=", "status", ")", "# double check that all concepts were actually visited in the search,", "# and visit remaining ones if necessary", "recheck_top_concepts", "=", "False", "for", "conc", "in", "sorted", "(", "rdf", ".", "subjects", "(", "RDF", ".", "type", ",", "SKOS", ".", "Concept", ")", ")", ":", "if", "conc", "not", "in", "status", ":", "recheck_top_concepts", "=", "True", "_hierarchy_cycles_visit", "(", "rdf", ",", "conc", ",", "None", ",", "fix", ",", "status", "=", "status", ")", "return", "recheck_top_concepts" ]
Check if the graph contains skos:broader cycles and optionally break these.

:param Graph rdf: An rdflib.graph.Graph object.
:param bool fix: Fix the problem by removing any skos:broader that overlaps
    with skos:broaderTransitive.
[ "Check", "if", "the", "graph", "contains", "skos", ":", "broader", "cycles", "and", "optionally", "break", "these", "." ]
python
train
pypa/pipenv
pipenv/vendor/attr/_make.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/attr/_make.py#L2067-L2086
def and_(*validators):
    """
    A validator that composes multiple validators into one.

    When called on a value, it runs all wrapped validators.

    :param validators: Arbitrary number of validators.
    :type validators: callables

    .. versionadded:: 17.1.0
    """
    vals = []
    for validator in validators:
        vals.extend(
            validator._validators
            if isinstance(validator, _AndValidator)
            else [validator]
        )

    return _AndValidator(tuple(vals))
[ "def", "and_", "(", "*", "validators", ")", ":", "vals", "=", "[", "]", "for", "validator", "in", "validators", ":", "vals", ".", "extend", "(", "validator", ".", "_validators", "if", "isinstance", "(", "validator", ",", "_AndValidator", ")", "else", "[", "validator", "]", ")", "return", "_AndValidator", "(", "tuple", "(", "vals", ")", ")" ]
A validator that composes multiple validators into one.

When called on a value, it runs all wrapped validators.

:param validators: Arbitrary number of validators.
:type validators: callables

.. versionadded:: 17.1.0
[ "A", "validator", "that", "composes", "multiple", "validators", "into", "one", "." ]
python
train
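A usage sketch with the public attrs API (attr.validators.and_ wraps this function; Point and positive are made-up names for the example):

import attr
from attr.validators import and_, instance_of

def positive(instance, attribute, value):
    if value <= 0:
        raise ValueError('%s must be positive' % attribute.name)

@attr.s
class Point(object):
    x = attr.ib(validator=and_(instance_of(int), positive))

Point(x=3)                      # passes both validators
try:
    Point(x=-1)
except ValueError as err:
    print(err)                  # x must be positive

Note the flattening step in the loop: nesting and_ inside and_ produces one flat _AndValidator rather than a tree, so the wrapped validators run in a single left-to-right pass.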
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L12-L21
def police_priority_map_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    police_priority_map = ET.SubElement(config, "police-priority-map",
                                        xmlns="urn:brocade.com:mgmt:brocade-policer")
    name = ET.SubElement(police_priority_map, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "police_priority_map_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "police_priority_map", "=", "ET", ".", "SubElement", "(", "config", ",", "\"police-priority-map\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-policer\"", ")", "name", "=", "ET", ".", "SubElement", "(", "police_priority_map", ",", "\"name\"", ")", "name", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
urbn/Caesium
caesium/document.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L298-L309
def __make_storeable_patch_patchable(self, patch):
    """Replace all pipes with dots, transforming keys back into namespace
    paths. This is done before the $set query is applied to the document.

    :param dict patch: The patch that is to be prepared to be applied
    """
    new_patch = {}

    for key in patch:
        new_patch[key.replace("|", ".")] = patch[key]

    return new_patch
[ "def", "__make_storeable_patch_patchable", "(", "self", ",", "patch", ")", ":", "new_patch", "=", "{", "}", "for", "key", "in", "patch", ":", "new_patch", "[", "key", ".", "replace", "(", "\"|\"", ",", "\".\"", ")", "]", "=", "patch", "[", "key", "]", "return", "new_patch" ]
Replace all pipes with dots, transforming keys back into namespace paths.
This is done before the $set query is applied to the document.

:param dict patch: The patch that is to be prepared to be applied
[ "Replace", "all", "pipes", "with", "dots", "transform", "back", "into", "the", "a", "namespace", "path", ".", "This", "is", "done", "before", "the", "$set", "query", "is", "applied", "to", "the", "document" ]
python
train
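A standalone restatement of the key transform, with made-up data (the pipes presumably exist because MongoDB forbids dots in stored field names):

def make_storeable_patch_patchable(patch):
    # '|' back to '.', restoring dotted namespace paths for a $set query
    return {key.replace('|', '.'): value for key, value in patch.items()}

stored = {'address|city': 'Portland', 'address|zip': '97201'}
print(make_storeable_patch_patchable(stored))
# {'address.city': 'Portland', 'address.zip': '97201'}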
dw/mitogen
mitogen/minify.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/minify.py#L46-L61
def minimize_source(source):
    """Remove comments and docstrings from Python `source`, preserving line
    numbers and syntax of empty blocks.

    :param str source:
        The source to minimize.

    :returns str:
        The minimized source.
    """
    source = mitogen.core.to_text(source)
    tokens = tokenize.generate_tokens(StringIO(source).readline)
    tokens = strip_comments(tokens)
    tokens = strip_docstrings(tokens)
    tokens = reindent(tokens)
    return tokenize.untokenize(tokens)
[ "def", "minimize_source", "(", "source", ")", ":", "source", "=", "mitogen", ".", "core", ".", "to_text", "(", "source", ")", "tokens", "=", "tokenize", ".", "generate_tokens", "(", "StringIO", "(", "source", ")", ".", "readline", ")", "tokens", "=", "strip_comments", "(", "tokens", ")", "tokens", "=", "strip_docstrings", "(", "tokens", ")", "tokens", "=", "reindent", "(", "tokens", ")", "return", "tokenize", ".", "untokenize", "(", "tokens", ")" ]
Remove comments and docstrings from Python `source`, preserving line
numbers and syntax of empty blocks.

:param str source:
    The source to minimize.

:returns str:
    The minimized source.
[ "Remove", "comments", "and", "docstrings", "from", "Python", "source", "preserving", "line", "numbers", "and", "syntax", "of", "empty", "blocks", "." ]
python
train
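A usage sketch, assuming the function is importable from mitogen.minify as the path suggests:

import mitogen.minify

SOURCE = '''
def greet(name):
    """Say hello."""
    # stripped, but line numbers are preserved
    return 'Hello, %s' % name
'''

print(mitogen.minify.minimize_source(SOURCE))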
GeoffAtHome/lightwave
lightwave/lightwave.py
https://github.com/GeoffAtHome/lightwave/blob/2fab4ee8c9f14dd97dffd4b8cd70b217e884e581/lightwave/lightwave.py#L65-L68
def turn_off(self, device_id, name):
    """Create the message to turn light or switch off."""
    msg = "!%sF0|Turn Off|%s" % (device_id, name)
    self._send_message(msg)
[ "def", "turn_off", "(", "self", ",", "device_id", ",", "name", ")", ":", "msg", "=", "\"!%sF0|Turn Off|%s\"", "%", "(", "device_id", ",", "name", ")", "self", ".", "_send_message", "(", "msg", ")" ]
Create the message to turn light or switch off.
[ "Create", "the", "message", "to", "turn", "light", "or", "switch", "off", "." ]
python
test
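The method only formats and sends a pipe-delimited LightwaveRF command, so the wire format is easy to see in isolation (R1D2 is a made-up device id):

device_id, name = 'R1D2', 'Kitchen'
msg = '!%sF0|Turn Off|%s' % (device_id, name)
print(msg)   # !R1D2F0|Turn Off|Kitchen  -- 'F0' is the off function code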
kibitzr/kibitzr
kibitzr/conf.py
https://github.com/kibitzr/kibitzr/blob/749da312488f1dda1ed1093cf4c95aaac0a604f7/kibitzr/conf.py#L135-L154
def reread(self):
    """
    Read and parse credentials file.

    If something goes wrong, log exception and continue.
    """
    logger.debug("Loading credentials from %s",
                 os.path.abspath(self.creds_filename))
    creds = {}
    try:
        with self.open_creds() as fp:
            creds = yaml.safe_load(fp)
    except IOError:
        logger.info("No credentials file found at %s",
                    os.path.abspath(self.creds_filename))
    except Exception:
        logger.exception("Error loading credentials file")
    if creds != self.creds:
        self.creds = creds
        return True
    return False
[ "def", "reread", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Loading credentials from %s\"", ",", "os", ".", "path", ".", "abspath", "(", "self", ".", "creds_filename", ")", ")", "creds", "=", "{", "}", "try", ":", "with", "self", ".", "open_creds", "(", ")", "as", "fp", ":", "creds", "=", "yaml", ".", "safe_load", "(", "fp", ")", "except", "IOError", ":", "logger", ".", "info", "(", "\"No credentials file found at %s\"", ",", "os", ".", "path", ".", "abspath", "(", "self", ".", "creds_filename", ")", ")", "except", ":", "logger", ".", "exception", "(", "\"Error loading credentials file\"", ")", "if", "creds", "!=", "self", ".", "creds", ":", "self", ".", "creds", "=", "creds", "return", "True", "return", "False" ]
Read and parse credentials file. If something goes wrong, log exception and continue.
[ "Read", "and", "parse", "credentials", "file", ".", "If", "something", "goes", "wrong", "log", "exception", "and", "continue", "." ]
python
train
PythonOptimizers/cygenja
cygenja/treemap/treemap.py
https://github.com/PythonOptimizers/cygenja/blob/a9ef91cdfa8452beeeec4f050f928b830379f91c/cygenja/treemap/treemap.py#L111-L155
def _create_entry(self, location, element, unique=True, delete_element=False):
    """
    Create an entry located at ``location``.

    Args:
        location: String or :class:`LocationDescriptor` to describe a
            "separator location" (i.e. dir1/dir2/dir3 for instance).
        element: Element to store at the location.
        unique: ``True`` means that the element to store **must** be unique
            and that the corresponding node doesn't already exist.
        delete_element: In case the element must not be unique, delete or not
            the existing element at the ``location`` if it exists?

    Returns:
        The created node with the element.

    Raises:
        A ``RuntimeError`` is raised if the leaf node already exists and
        ``unique`` is set to ``True``.

    Note:
        Non existing linking nodes (i.e. non leaf nodes) are created on
        the fly.
    """
    loc_descriptor = self._get_location_descriptor(location)
    # find parent node
    parent_node = self._root_node
    if loc_descriptor.nbr_of_sub_locations() > 1:
        parent_node = self._get_node(loc_descriptor.get_sub_location_descriptor(),
                                     create_non_existing_nodes=True)
    # find child node if it exists
    last_location = loc_descriptor.last_sub_location()
    child_node = parent_node.get_child_node_or_default(last_location, None)
    if child_node is None:
        # create node
        child_node = TreeMapNode(element)
        parent_node.set_child_node(last_location, child_node)
        self._nbr_of_nodes += 1
    else:
        # child node exists
        if unique:
            raise RuntimeError("Node corresponding to the location '%s' already exists!"
                               % loc_descriptor.to_string())
        elif delete_element:
            child_node.delete_element()
        child_node.set_element(element)
    return child_node
[ "def", "_create_entry", "(", "self", ",", "location", ",", "element", ",", "unique", "=", "True", ",", "delete_element", "=", "False", ")", ":", "loc_descriptor", "=", "self", ".", "_get_location_descriptor", "(", "location", ")", "# find parent node", "parent_node", "=", "self", ".", "_root_node", "if", "loc_descriptor", ".", "nbr_of_sub_locations", "(", ")", ">", "1", ":", "parent_node", "=", "self", ".", "_get_node", "(", "loc_descriptor", ".", "get_sub_location_descriptor", "(", ")", ",", "create_non_existing_nodes", "=", "True", ")", "# find child node if it exist", "last_location", "=", "loc_descriptor", ".", "last_sub_location", "(", ")", "child_node", "=", "parent_node", ".", "get_child_node_or_default", "(", "last_location", ",", "None", ")", "if", "child_node", "is", "None", ":", "# create node", "child_node", "=", "TreeMapNode", "(", "element", ")", "parent_node", ".", "set_child_node", "(", "last_location", ",", "child_node", ")", "self", ".", "_nbr_of_nodes", "+=", "1", "else", ":", "# child node exist", "if", "unique", ":", "raise", "RuntimeError", "(", "\"Node corresponding to the location '%s' already exist!\"", "%", "loc_descriptor", ".", "to_string", "(", ")", ")", "elif", "delete_element", ":", "child_node", ".", "delete_element", "(", ")", "child_node", ".", "set_element", "(", "element", ")", "return", "child_node" ]
Create an entry located at ``location``.

Args:
    location: String or :class:`LocationDescriptor` to describe a
        "separator location" (i.e. dir1/dir2/dir3 for instance).
    element: Element to store at the location.
    unique: ``True`` means that the element to store **must** be unique and
        that the corresponding node doesn't already exist.
    delete_element: In case the element must not be unique, delete or not
        the existing element at the ``location`` if it exists?

Returns:
    The created node with the element.

Raises:
    A ``RuntimeError`` is raised if the leaf node already exists and
    ``unique`` is set to ``True``.

Note:
    Non existing linking nodes (i.e. non leaf nodes) are created on the fly.
[ "Create", "an", "entry", "located", "at", "location", ".", "Args", ":", "location", ":", "String", "or", ":", "class", ":", "LocationDescriptor", "to", "describe", "a", "separator", "location", "(", "i", ".", "e", ".", "dir1", "/", "dir2", "/", "dir3", "for", "instance", ")", ".", "element", ":", "Element", "to", "store", "at", "the", "location", ".", "unique", ":", "True", "means", "that", "the", "element", "to", "store", "**", "must", "**", "be", "unique", "and", "that", "the", "corresponding", "node", "doesn", "t", "already", "exist", ".", "delete_element", ":", "In", "case", "the", "element", "must", "not", "be", "unique", "delete", "or", "not", "the", "existing", "element", "at", "the", "location", "if", "it", "exist?", "Returns", ":", "The", "created", "node", "with", "the", "element", ".", "Raises", ":", "A", "RuntimeError", "is", "raised", "if", "leaf", "node", "already", "exists", "and", "unique", "is", "set", "to", "True", "." ]
python
train
rinocloud/rinocloud-python
rinocloud/config.py
https://github.com/rinocloud/rinocloud-python/blob/7c4bf994a518f961cffedb7260fc1e4fa1838b38/rinocloud/config.py#L6-L20
def set_local_path(directory, create_dir=False):
    """
    Sets the path for local saving of information.
    If create_dir is True we will create the folder even if it doesn't exist.
    """
    if not os.path.exists(directory) and create_dir is True:
        os.makedirs(directory)

    if not os.path.exists(directory) and create_dir is False:
        raise AttributeError(
            "Path '%s' does not exist, to make it pass create_dir=True "
            "to rinocloud.set_local_path" % directory)

    if os.path.isdir(directory):
        rinocloud.path = directory

    return directory
[ "def", "set_local_path", "(", "directory", ",", "create_dir", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", "and", "create_dir", "is", "True", ":", "os", ".", "makedirs", "(", "directory", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", "and", "create_dir", "is", "False", ":", "raise", "AttributeError", "(", "\"Path '%s' does not exist, to make it pass create_dir=True to rinocloud.set_local_path\"", "%", "directory", ")", "if", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "rinocloud", ".", "path", "=", "directory", "return", "directory" ]
Sets the path for local saving of information.
If create_dir is True we will create the folder even if it doesn't exist.
[ "sets", "path", "for", "local", "saving", "of", "information", "if", "create", "is", "true", "we", "will", "create", "the", "folder", "even", "if", "it", "doesnt", "exist" ]
python
train
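A usage sketch, assuming the function is exported at package level (the error message's own rinocloud.set_local_path wording suggests it is):

import rinocloud

# Missing directory + create_dir=True: the folder is created, then registered.
path = rinocloud.set_local_path('./rino_data', create_dir=True)

# Missing directory without create_dir raises AttributeError.
try:
    rinocloud.set_local_path('./does_not_exist')
except AttributeError as err:
    print(err)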
klahnakoski/pyLibrary
mo_math/vendor/strangman/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L427-L440
def itemfreq(inlist):
    """
    Returns a list of pairs.  Each pair consists of one of the scores in
    inlist and its frequency count.  Assumes a 1D list is passed.

    Usage:   litemfreq(inlist)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    freq = []
    for item in scores:
        freq.append(inlist.count(item))
    return zip(scores, freq)
[ "def", "itemfreq", "(", "inlist", ")", ":", "scores", "=", "pstat", ".", "unique", "(", "inlist", ")", "scores", ".", "sort", "(", ")", "freq", "=", "[", "]", "for", "item", "in", "scores", ":", "freq", ".", "append", "(", "inlist", ".", "count", "(", "item", ")", ")", "return", "zip", "(", "scores", ",", "freq", ")" ]
Returns a list of pairs.  Each pair consists of one of the scores in inlist
and its frequency count.  Assumes a 1D list is passed.

Usage:   litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
[ "Returns", "a", "list", "of", "pairs", ".", "Each", "pair", "consists", "of", "one", "of", "the", "scores", "in", "inlist", "and", "it", "s", "frequency", "count", ".", "Assumes", "a", "1D", "list", "is", "passed", "." ]
python
train
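A dependency-free restatement (sorted(set(...)) stands in for pstat.unique plus sort). Note the original returns zip(), which on Python 3 is a lazy iterator, so callers may need list(...) to materialize the 2D table:

def itemfreq(inlist):
    scores = sorted(set(inlist))
    return [(item, inlist.count(item)) for item in scores]

print(itemfreq([1, 2, 2, 3, 3, 3]))   # [(1, 1), (2, 2), (3, 3)]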
deepmipt/DeepPavlov
deeppavlov/core/models/keras_model.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/models/keras_model.py#L156-L165
def train_on_batch(self, *args) -> None:
    """Trains the model on a single batch.

    Args:
        *args: the list of network inputs. Last element of `args` is the batch
            of targets, all previous elements are training data batches
    """
    *data, labels = args
    self._net.train_on_batch(data, labels)
[ "def", "train_on_batch", "(", "self", ",", "*", "args", ")", "->", "None", ":", "*", "data", ",", "labels", "=", "args", "self", ".", "_net", ".", "train_on_batch", "(", "data", ",", "labels", ")" ]
Trains the model on a single batch.

Args:
    *args: the list of network inputs. Last element of `args` is the batch
        of targets, all previous elements are training data batches
[ "Trains", "the", "model", "on", "a", "single", "batch", "." ]
python
test
PyCQA/pydocstyle
src/pydocstyle/config.py
https://github.com/PyCQA/pydocstyle/blob/2549847f9efad225789f931e83dfe782418ca13e/src/pydocstyle/config.py#L218-L284
def _get_config(self, node):
    """Get and cache the run configuration for `node`.

    If no configuration exists (not local and not for the parent node),
    returns and caches a default configuration.

    The algorithm:
    -------------
    * If the current directory's configuration exists in `self._cache`
      - return it.
    * If a configuration file does not exist in this directory:
      * If the directory is not a root directory:
        * Cache its configuration as this directory's and return it.
      * Else:
        * Cache a default configuration and return it.
    * Else:
      * Read the configuration file.
      * If a parent directory exists AND the configuration file allows
        inheritance:
        * Read the parent configuration by calling this function with the
          parent directory as `node`.
        * Merge the parent configuration with the current one and cache it.
    * If the user has specified one of `BASE_ERROR_SELECTION_OPTIONS` in
      the CLI - return the CLI configuration with the configuration match
      clauses.
    * Set the `--add-select` and `--add-ignore` CLI configurations.
    """
    if self._run_conf.config is None:
        log.debug('No config file specified, discovering.')
        config = self._get_config_by_discovery(node)
    else:
        log.debug('Using config file %r', self._run_conf.config)
        if not os.path.exists(self._run_conf.config):
            raise IllegalConfiguration('Configuration file {!r} specified '
                                       'via --config was not found.'
                                       .format(self._run_conf.config))
        if None in self._cache:
            return self._cache[None]
        options, _ = self._read_configuration_file(self._run_conf.config)

        if options is None:
            log.warning('Configuration file does not contain a '
                        'pydocstyle section. Using default configuration.')
            config = self._create_check_config(self._options)
        else:
            config = self._create_check_config(options)

    # Make the CLI always win
    final_config = {}
    for attr in CheckConfiguration._fields:
        cli_val = getattr(self._override_by_cli, attr)
        conf_val = getattr(config, attr)
        final_config[attr] = cli_val if cli_val is not None else conf_val

    config = CheckConfiguration(**final_config)

    self._set_add_options(config.checked_codes, self._options)

    # Handle caching
    if self._run_conf.config is not None:
        self._cache[None] = config
    else:
        self._cache[self._get_node_dir(node)] = config
    return config
[ "def", "_get_config", "(", "self", ",", "node", ")", ":", "if", "self", ".", "_run_conf", ".", "config", "is", "None", ":", "log", ".", "debug", "(", "'No config file specified, discovering.'", ")", "config", "=", "self", ".", "_get_config_by_discovery", "(", "node", ")", "else", ":", "log", ".", "debug", "(", "'Using config file %r'", ",", "self", ".", "_run_conf", ".", "config", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_run_conf", ".", "config", ")", ":", "raise", "IllegalConfiguration", "(", "'Configuration file {!r} specified '", "'via --config was not found.'", ".", "format", "(", "self", ".", "_run_conf", ".", "config", ")", ")", "if", "None", "in", "self", ".", "_cache", ":", "return", "self", ".", "_cache", "[", "None", "]", "options", ",", "_", "=", "self", ".", "_read_configuration_file", "(", "self", ".", "_run_conf", ".", "config", ")", "if", "options", "is", "None", ":", "log", ".", "warning", "(", "'Configuration file does not contain a '", "'pydocstyle section. Using default configuration.'", ")", "config", "=", "self", ".", "_create_check_config", "(", "self", ".", "_options", ")", "else", ":", "config", "=", "self", ".", "_create_check_config", "(", "options", ")", "# Make the CLI always win", "final_config", "=", "{", "}", "for", "attr", "in", "CheckConfiguration", ".", "_fields", ":", "cli_val", "=", "getattr", "(", "self", ".", "_override_by_cli", ",", "attr", ")", "conf_val", "=", "getattr", "(", "config", ",", "attr", ")", "final_config", "[", "attr", "]", "=", "cli_val", "if", "cli_val", "is", "not", "None", "else", "conf_val", "config", "=", "CheckConfiguration", "(", "*", "*", "final_config", ")", "self", ".", "_set_add_options", "(", "config", ".", "checked_codes", ",", "self", ".", "_options", ")", "# Handle caching", "if", "self", ".", "_run_conf", ".", "config", "is", "not", "None", ":", "self", ".", "_cache", "[", "None", "]", "=", "config", "else", ":", "self", ".", "_cache", "[", "self", ".", "_get_node_dir", "(", "node", ")", "]", "=", "config", "return", "config" ]
Get and cache the run configuration for `node`.

If no configuration exists (not local and not for the parent node),
returns and caches a default configuration.

The algorithm:
-------------
* If the current directory's configuration exists in `self._cache`
  - return it.
* If a configuration file does not exist in this directory:
  * If the directory is not a root directory:
    * Cache its configuration as this directory's and return it.
  * Else:
    * Cache a default configuration and return it.
* Else:
  * Read the configuration file.
  * If a parent directory exists AND the configuration file allows
    inheritance:
    * Read the parent configuration by calling this function with the
      parent directory as `node`.
    * Merge the parent configuration with the current one and cache it.
* If the user has specified one of `BASE_ERROR_SELECTION_OPTIONS` in
  the CLI - return the CLI configuration with the configuration match
  clauses.
* Set the `--add-select` and `--add-ignore` CLI configurations.
[ "Get", "and", "cache", "the", "run", "configuration", "for", "node", "." ]
python
train
stevearc/dql
dql/models.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L574-L590
def schema(self):
    """ The DQL query that will construct this table's schema """
    attrs = self.attrs.copy()
    parts = ["CREATE", "TABLE", self.name, "(%s," % self.hash_key.schema]
    del attrs[self.hash_key.name]
    if self.range_key:
        parts.append(self.range_key.schema + ",")
        del attrs[self.range_key.name]
    if attrs:
        attr_def = ", ".join([attr.schema for attr in itervalues(attrs)])
        parts.append(attr_def + ",")

    parts.append(
        "THROUGHPUT (%d, %d))" % (self.read_throughput, self.write_throughput)
    )
    parts.extend([g.schema for g in itervalues(self.global_indexes)])
    return " ".join(parts) + ";"
[ "def", "schema", "(", "self", ")", ":", "attrs", "=", "self", ".", "attrs", ".", "copy", "(", ")", "parts", "=", "[", "\"CREATE\"", ",", "\"TABLE\"", ",", "self", ".", "name", ",", "\"(%s,\"", "%", "self", ".", "hash_key", ".", "schema", "]", "del", "attrs", "[", "self", ".", "hash_key", ".", "name", "]", "if", "self", ".", "range_key", ":", "parts", ".", "append", "(", "self", ".", "range_key", ".", "schema", "+", "\",\"", ")", "del", "attrs", "[", "self", ".", "range_key", ".", "name", "]", "if", "attrs", ":", "attr_def", "=", "\", \"", ".", "join", "(", "[", "attr", ".", "schema", "for", "attr", "in", "itervalues", "(", "attrs", ")", "]", ")", "parts", ".", "append", "(", "attr_def", "+", "\",\"", ")", "parts", ".", "append", "(", "\"THROUGHPUT (%d, %d))\"", "%", "(", "self", ".", "read_throughput", ",", "self", ".", "write_throughput", ")", ")", "parts", ".", "extend", "(", "[", "g", ".", "schema", "for", "g", "in", "itervalues", "(", "self", ".", "global_indexes", ")", "]", ")", "return", "\" \"", ".", "join", "(", "parts", ")", "+", "\";\"" ]
The DQL query that will construct this table's schema
[ "The", "DQL", "query", "that", "will", "construct", "this", "table", "s", "schema" ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/sip/domain/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/domain/__init__.py#L369-L382
def auth(self):
    """
    Access the auth

    :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
    :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
    """
    if self._auth is None:
        self._auth = AuthTypesList(
            self._version,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['sid'],
        )
    return self._auth
[ "def", "auth", "(", "self", ")", ":", "if", "self", ".", "_auth", "is", "None", ":", "self", ".", "_auth", "=", "AuthTypesList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "domain_sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_auth" ]
Access the auth

:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.AuthTypesList
[ "Access", "the", "auth" ]
python
train