Dataset schema (one value per field, repeated for each row below):

  repo              string, 7 to 54 chars
  path              string, 4 to 192 chars
  url               string, 87 to 284 chars
  code              string, 78 to 104k chars
  code_tokens       list
  docstring         string, 1 to 46.9k chars
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
gabstopper/smc-python
smc/api/session.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/api/session.py#L734-L742
def has_credentials(self):
    """
    Does this session have valid credentials

    :rtype: bool
    """
    return all([
        getattr(self, '_%s' % field, None) is not None
        for field in self.CredentialMap.get(self.provider_name)])
[ "def", "has_credentials", "(", "self", ")", ":", "return", "all", "(", "[", "getattr", "(", "self", ",", "'_%s'", "%", "field", ",", "None", ")", "is", "not", "None", "for", "field", "in", "self", ".", "CredentialMap", ".", "get", "(", "self", ".", "provider_name", ")", "]", ")" ]
Does this session have valid credentials :rtype: bool
[ "Does", "this", "session", "have", "valid", "credentials", ":", "rtype", ":", "bool" ]
python
train
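A minimal, self-contained sketch of the same getattr/all idiom (the class name and field list below are invented for illustration):

# A credential set is "complete" only when every expected underscore-prefixed
# attribute exists and is non-None -- the same check has_credentials performs.
class Creds:
    FIELDS = ['access_key', 'secret_key']

    def __init__(self, **kw):
        for f in self.FIELDS:
            setattr(self, '_%s' % f, kw.get(f))

    def has_credentials(self):
        return all(getattr(self, '_%s' % f, None) is not None
                   for f in self.FIELDS)

assert Creds(access_key='AK', secret_key='SK').has_credentials()
assert not Creds(access_key='AK').has_credentials()   # secret_key missing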
noahbenson/pimms
pimms/util.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/util.py#L794-L811
def collect(*args, **kwargs):
    '''
    collect(m1, m2, ...) yields a persistent map whose keys are the union of all keys in the
    given maps m1, m2, etc. and whose values are tuples containing each of the given maps
    (in provided order) that contain the given key. This function never evaluates the values
    in the maps so it implicitly supports laziness.

    The collect function first passes its arguments to flatten_maps, so it is fine to pass
    lists or nested lists of maps to this function; all will be collected.
    '''
    args = flatten_maps(args, **kwargs)
    if len(args) == 0: return ps.m()
    m = {}
    for arg in args:
        for k in six.iterkeys(arg):
            if k in m: m[k].append(arg)
            else: m[k] = [arg]
    return ps.pmap({k: tuple(v) for (k, v) in six.iteritems(m)})
[ "def", "collect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "flatten_maps", "(", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "args", ")", "==", "0", ":", "return", "ps", ".", "m", "(", ")", "m", "=", "{", "}", "for", "arg", "in", "args", ":", "for", "k", "in", "six", ".", "iterkeys", "(", "arg", ")", ":", "if", "k", "in", "m", ":", "m", "[", "k", "]", ".", "append", "(", "arg", ")", "else", ":", "m", "[", "k", "]", "=", "[", "arg", "]", "return", "ps", ".", "pmap", "(", "{", "k", ":", "tuple", "(", "v", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "m", ")", "}", ")" ]
collect(m1, m2, ...) yields a persistent map whose keys are the union of all keys in the given maps m1, m2, etc. and whose values are tuples containing each of the given maps (in provided order) that contain the given key. This function never evaluates the values in the maps so it implicitly supports laziness. The collect function first passes its arguments to flatten_maps, so it is fine to pass lists or nested lists of maps to this function; all will be collected.
[ "collect", "(", "m1", "m2", "...", ")", "yields", "a", "persistent", "map", "whose", "keys", "are", "the", "union", "of", "all", "keys", "in", "the", "given", "maps", "m1", "m2", "etc", ".", "and", "whose", "values", "are", "tuples", "containing", "each", "of", "the", "given", "maps", "(", "in", "provided", "order", ")", "that", "contain", "the", "given", "key", ".", "This", "function", "never", "evaluates", "the", "values", "in", "the", "maps", "so", "it", "implicitly", "supports", "laziness", "." ]
python
train
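A sketch of what collect does, using plain dicts in place of the pyrsistent maps the real function returns (ps.pmap is the only pimms-specific piece; the grouping logic is unchanged):

# Group maps by shared key, touching keys only -- values are never evaluated,
# which is what preserves laziness in the real implementation.
def collect_sketch(*maps):
    out = {}
    for m in maps:
        for k in m:
            out.setdefault(k, []).append(m)
    return {k: tuple(v) for k, v in out.items()}

m1 = {'a': 1, 'b': 2}
m2 = {'b': 20, 'c': 30}
grouped = collect_sketch(m1, m2)
assert grouped['a'] == (m1,)
assert grouped['b'] == (m1, m2)   # both maps contain 'b', in provided order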
CxAalto/gtfspy
gtfspy/import_loaders/table_loader.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/import_loaders/table_loader.py#L239-L259
def create_table(self, conn):
    """Make table definitions"""
    # Make cursor
    cur = conn.cursor()
    # Drop table if it already exists, to be recreated. This
    # could in the future abort if table already exists, and not
    # recreate it from scratch.
    #cur.execute('''DROP TABLE IF EXISTS %s'''%self.table)
    #conn.commit()
    if self.tabledef is None:
        return
    if not self.tabledef.startswith('CREATE'):
        # "normal" table creation.
        cur.execute('CREATE TABLE IF NOT EXISTS %s %s'
                    % (self.table, self.tabledef)
                    )
    else:
        # When tabledef contains the full CREATE statement (for
        # virtual tables).
        cur.execute(self.tabledef)
    conn.commit()
[ "def", "create_table", "(", "self", ",", "conn", ")", ":", "# Make cursor", "cur", "=", "conn", ".", "cursor", "(", ")", "# Drop table if it already exists, to be recreated. This", "# could in the future abort if table already exists, and not", "# recreate it from scratch.", "#cur.execute('''DROP TABLE IF EXISTS %s'''%self.table)", "#conn.commit()", "if", "self", ".", "tabledef", "is", "None", ":", "return", "if", "not", "self", ".", "tabledef", ".", "startswith", "(", "'CREATE'", ")", ":", "# \"normal\" table creation.", "cur", ".", "execute", "(", "'CREATE TABLE IF NOT EXISTS %s %s'", "%", "(", "self", ".", "table", ",", "self", ".", "tabledef", ")", ")", "else", ":", "# When tabledef contains the full CREATE statement (for", "# virtual tables).", "cur", ".", "execute", "(", "self", ".", "tabledef", ")", "conn", ".", "commit", "(", ")" ]
Make table definitions
[ "Make", "table", "definitions" ]
python
valid
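The two tabledef forms the method accepts can be exercised directly against an in-memory SQLite database; the table and column names below are made up for illustration:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()

# "normal" form: tabledef is just the parenthesised column list
table, tabledef = 'stops', '(stop_id TEXT PRIMARY KEY, name TEXT)'
cur.execute('CREATE TABLE IF NOT EXISTS %s %s' % (table, tabledef))

# full-statement form: tabledef starts with CREATE and is executed verbatim
# (used for virtual tables in the real loader)
tabledef_full = 'CREATE TABLE IF NOT EXISTS agencies (agency_id TEXT, name TEXT)'
cur.execute(tabledef_full)
conn.commit()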
spyder-ide/conda-manager
conda_manager/api/download_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/download_api.py#L506-L515
def _is_valid_url(self, url):
    """Callback for is_valid_url."""
    try:
        r = requests.head(url, proxies=self.proxy_servers)
        value = r.status_code in [200]
    except Exception as error:
        logger.error(str(error))
        value = False
    return value
[ "def", "_is_valid_url", "(", "self", ",", "url", ")", ":", "try", ":", "r", "=", "requests", ".", "head", "(", "url", ",", "proxies", "=", "self", ".", "proxy_servers", ")", "value", "=", "r", ".", "status_code", "in", "[", "200", "]", "except", "Exception", "as", "error", ":", "logger", ".", "error", "(", "str", "(", "error", ")", ")", "value", "=", "False", "return", "value" ]
Callback for is_valid_url.
[ "Callback", "for", "is_valid_url", "." ]
python
train
gem/oq-engine
openquake/commands/info.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/info.py#L38-L62
def source_model_info(nodes):
    """
    Extract information about NRML/0.5 source models. Returns a table
    with TRTs as rows and source classes as columns.
    """
    c = collections.Counter()
    for node in nodes:
        for src_group in node:
            trt = src_group['tectonicRegion']
            for src in src_group:
                src_class = src.tag.split('}')[1]
                c[trt, src_class] += 1
    trts, classes = zip(*c)
    trts = sorted(set(trts))
    classes = sorted(set(classes))
    dtlist = [('TRT', (bytes, 30))] + [(name, int) for name in classes]
    out = numpy.zeros(len(trts) + 1, dtlist)  # +1 for the totals
    for i, trt in enumerate(trts):
        out[i]['TRT'] = trt
        for src_class in classes:
            out[i][src_class] = c[trt, src_class]
    out[-1]['TRT'] = 'Total'
    for name in out.dtype.names[1:]:
        out[-1][name] = out[name][:-1].sum()
    return rst_table(out)
[ "def", "source_model_info", "(", "nodes", ")", ":", "c", "=", "collections", ".", "Counter", "(", ")", "for", "node", "in", "nodes", ":", "for", "src_group", "in", "node", ":", "trt", "=", "src_group", "[", "'tectonicRegion'", "]", "for", "src", "in", "src_group", ":", "src_class", "=", "src", ".", "tag", ".", "split", "(", "'}'", ")", "[", "1", "]", "c", "[", "trt", ",", "src_class", "]", "+=", "1", "trts", ",", "classes", "=", "zip", "(", "*", "c", ")", "trts", "=", "sorted", "(", "set", "(", "trts", ")", ")", "classes", "=", "sorted", "(", "set", "(", "classes", ")", ")", "dtlist", "=", "[", "(", "'TRT'", ",", "(", "bytes", ",", "30", ")", ")", "]", "+", "[", "(", "name", ",", "int", ")", "for", "name", "in", "classes", "]", "out", "=", "numpy", ".", "zeros", "(", "len", "(", "trts", ")", "+", "1", ",", "dtlist", ")", "# +1 for the totals", "for", "i", ",", "trt", "in", "enumerate", "(", "trts", ")", ":", "out", "[", "i", "]", "[", "'TRT'", "]", "=", "trt", "for", "src_class", "in", "classes", ":", "out", "[", "i", "]", "[", "src_class", "]", "=", "c", "[", "trt", ",", "src_class", "]", "out", "[", "-", "1", "]", "[", "'TRT'", "]", "=", "'Total'", "for", "name", "in", "out", ".", "dtype", ".", "names", "[", "1", ":", "]", ":", "out", "[", "-", "1", "]", "[", "name", "]", "=", "out", "[", "name", "]", "[", ":", "-", "1", "]", ".", "sum", "(", ")", "return", "rst_table", "(", "out", ")" ]
Extract information about NRML/0.5 source models. Returns a table with TRTs as rows and source classes as columns.
[ "Extract", "information", "about", "NRML", "/", "0", ".", "5", "source", "models", ".", "Returns", "a", "table", "with", "TRTs", "as", "rows", "and", "source", "classes", "as", "columns", "." ]
python
train
shoebot/shoebot
shoebot/grammar/drawbot.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/drawbot.py#L75-L83
def line(self, x1, y1, x2, y2, draw=True):
    '''Draws a line from (x1,y1) to (x2,y2)'''
    p = self._path
    self.newpath()
    self.moveto(x1, y1)
    self.lineto(x2, y2)
    self.endpath(draw=draw)
    self._path = p
    return p
[ "def", "line", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "draw", "=", "True", ")", ":", "p", "=", "self", ".", "_path", "self", ".", "newpath", "(", ")", "self", ".", "moveto", "(", "x1", ",", "y1", ")", "self", ".", "lineto", "(", "x2", ",", "y2", ")", "self", ".", "endpath", "(", "draw", "=", "draw", ")", "self", ".", "_path", "=", "p", "return", "p" ]
Draws a line from (x1,y1) to (x2,y2)
[ "Draws", "a", "line", "from", "(", "x1", "y1", ")", "to", "(", "x2", "y2", ")" ]
python
valid
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fcoe_ext.py#L353-L367
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_fdiscs(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_rx_fdiscs = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-fdiscs")
    fcoe_intf_rx_fdiscs.text = kwargs.pop('fcoe_intf_rx_fdiscs')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_fdiscs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe_get_interface", "=", "ET", ".", "Element", "(", "\"fcoe_get_interface\"", ")", "config", "=", "fcoe_get_interface", "output", "=", "ET", ".", "SubElement", "(", "fcoe_get_interface", ",", "\"output\"", ")", "fcoe_intf_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"fcoe-intf-list\"", ")", "fcoe_intf_fcoe_port_id_key", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-fcoe-port-id\"", ")", "fcoe_intf_fcoe_port_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_fcoe_port_id'", ")", "fcoe_intf_rx_fdiscs", "=", "ET", ".", "SubElement", "(", "fcoe_intf_list", ",", "\"fcoe-intf-rx-fdiscs\"", ")", "fcoe_intf_rx_fdiscs", ".", "text", "=", "kwargs", ".", "pop", "(", "'fcoe_intf_rx_fdiscs'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
astropy/astropy-helpers
astropy_helpers/setup_helpers.py
https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/setup_helpers.py#L291-L306
def generate_hooked_command(cmd_name, cmd_cls, hooks):
    """
    Returns a generated subclass of ``cmd_cls`` that runs the pre- and
    post-command hooks for that command before and after the ``cmd_cls.run``
    method.
    """

    def run(self, orig_run=cmd_cls.run):
        self.run_command_hooks('pre_hooks')
        orig_run(self)
        self.run_command_hooks('post_hooks')

    return type(cmd_name, (cmd_cls, object),
                {'run': run,
                 'run_command_hooks': run_command_hooks,
                 'pre_hooks': hooks.get('pre', []),
                 'post_hooks': hooks.get('post', [])})
[ "def", "generate_hooked_command", "(", "cmd_name", ",", "cmd_cls", ",", "hooks", ")", ":", "def", "run", "(", "self", ",", "orig_run", "=", "cmd_cls", ".", "run", ")", ":", "self", ".", "run_command_hooks", "(", "'pre_hooks'", ")", "orig_run", "(", "self", ")", "self", ".", "run_command_hooks", "(", "'post_hooks'", ")", "return", "type", "(", "cmd_name", ",", "(", "cmd_cls", ",", "object", ")", ",", "{", "'run'", ":", "run", ",", "'run_command_hooks'", ":", "run_command_hooks", ",", "'pre_hooks'", ":", "hooks", ".", "get", "(", "'pre'", ",", "[", "]", ")", ",", "'post_hooks'", ":", "hooks", ".", "get", "(", "'post'", ",", "[", "]", ")", "}", ")" ]
Returns a generated subclass of ``cmd_cls`` that runs the pre- and post-command hooks for that command before and after the ``cmd_cls.run`` method.
[ "Returns", "a", "generated", "subclass", "of", "cmd_cls", "that", "runs", "the", "pre", "-", "and", "post", "-", "command", "hooks", "for", "that", "command", "before", "and", "after", "the", "cmd_cls", ".", "run", "method", "." ]
python
train
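The same pattern in a self-contained form, showing how type() builds the hooked subclass (Base and the hooks below are invented for illustration):

class Base:
    def run(self):
        print('running')

def run_command_hooks(self, attr):
    # call each registered hook with the command instance
    for hook in getattr(self, attr):
        hook(self)

def run(self, orig_run=Base.run):
    self.run_command_hooks('pre_hooks')
    orig_run(self)
    self.run_command_hooks('post_hooks')

Hooked = type('Hooked', (Base, object),
              {'run': run,
               'run_command_hooks': run_command_hooks,
               'pre_hooks': [lambda self: print('pre')],
               'post_hooks': [lambda self: print('post')]})

Hooked().run()   # prints: pre / running / post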
rerb/django-fortune
fortune/views.py
https://github.com/rerb/django-fortune/blob/f84d34f616ecabd4fab8351ad7d3062cc9d6b127/fortune/views.py#L30-L35
def loaded(self, request, *args, **kwargs):
    """Return a list of loaded Packs.
    """
    serializer = self.get_serializer(list(Pack.objects.all()),
                                     many=True)
    return Response(serializer.data)
[ "def", "loaded", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serializer", "=", "self", ".", "get_serializer", "(", "list", "(", "Pack", ".", "objects", ".", "all", "(", ")", ")", ",", "many", "=", "True", ")", "return", "Response", "(", "serializer", ".", "data", ")" ]
Return a list of loaded Packs.
[ "Return", "a", "list", "of", "loaded", "Packs", "." ]
python
train
pyviz/holoviews
holoviews/element/comparison.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/element/comparison.py#L50-L56
def simple_equality(cls, first, second, msg=None):
    """
    Classmethod equivalent to unittest.TestCase method (longMessage = False.)
    """
    if not first == second:
        standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
        raise cls.failureException(msg or standardMsg)
[ "def", "simple_equality", "(", "cls", ",", "first", ",", "second", ",", "msg", "=", "None", ")", ":", "if", "not", "first", "==", "second", ":", "standardMsg", "=", "'%s != %s'", "%", "(", "safe_repr", "(", "first", ")", ",", "safe_repr", "(", "second", ")", ")", "raise", "cls", ".", "failureException", "(", "msg", "or", "standardMsg", ")" ]
Classmethod equivalent to unittest.TestCase method (longMessage = False.)
[ "Classmethod", "equivalent", "to", "unittest", ".", "TestCase", "method", "(", "longMessage", "=", "False", ".", ")" ]
python
train
zetaops/zengine
zengine/messaging/views.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L874-L896
def get_message_actions(current):
    """
    Returns applicable actions for current user for given message key

    .. code-block:: python

        # request:
        {
            'view':'_zops_get_message_actions',
            'key': key,
        }

        # response:
        {
            'actions':[('name_string', 'cmd_string'),]
            'status': string,   # 'OK' for success
            'code': int,        # 200 for success
        }
    """
    current.output = {'status': 'OK',
                      'code': 200,
                      'actions': Message.objects.get(
                          current.input['key']).get_actions_for(current.user)}
[ "def", "get_message_actions", "(", "current", ")", ":", "current", ".", "output", "=", "{", "'status'", ":", "'OK'", ",", "'code'", ":", "200", ",", "'actions'", ":", "Message", ".", "objects", ".", "get", "(", "current", ".", "input", "[", "'key'", "]", ")", ".", "get_actions_for", "(", "current", ".", "user", ")", "}" ]
Returns applicable actions for current user for given message key .. code-block:: python # request: { 'view':'_zops_get_message_actions', 'key': key, } # response: { 'actions':[('name_string', 'cmd_string'),] 'status': string, # 'OK' for success 'code': int, # 200 for success }
[ "Returns", "applicable", "actions", "for", "current", "user", "for", "given", "message", "key" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/vcs/git.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/vcs/git.py#L63-L81
def check_rev_options(self, rev, dest, rev_options):
    """Check the revision options before checkout to compensate that tags
    and branches may need origin/ as a prefix.
    Returns the SHA1 of the branch or tag if found.
    """
    revisions = self.get_refs(dest)

    origin_rev = 'origin/%s' % rev
    if origin_rev in revisions:
        # remote branch
        return [revisions[origin_rev]]
    elif rev in revisions:
        # a local tag or branch name
        return [revisions[rev]]
    else:
        logger.warning(
            "Could not find a tag or branch '%s', assuming commit.", rev,
        )
        return rev_options
[ "def", "check_rev_options", "(", "self", ",", "rev", ",", "dest", ",", "rev_options", ")", ":", "revisions", "=", "self", ".", "get_refs", "(", "dest", ")", "origin_rev", "=", "'origin/%s'", "%", "rev", "if", "origin_rev", "in", "revisions", ":", "# remote branch", "return", "[", "revisions", "[", "origin_rev", "]", "]", "elif", "rev", "in", "revisions", ":", "# a local tag or branch name", "return", "[", "revisions", "[", "rev", "]", "]", "else", ":", "logger", ".", "warning", "(", "\"Could not find a tag or branch '%s', assuming commit.\"", ",", "rev", ",", ")", "return", "rev_options" ]
Check the revision options before checkout to compensate that tags and branches may need origin/ as a prefix. Returns the SHA1 of the branch or tag if found.
[ "Check", "the", "revision", "options", "before", "checkout", "to", "compensate", "that", "tags", "and", "branches", "may", "need", "origin", "/", "as", "a", "prefix", ".", "Returns", "the", "SHA1", "of", "the", "branch", "or", "tag", "if", "found", "." ]
python
test
jrspruitt/ubi_reader
ubireader/ubi/block/sort.py
https://github.com/jrspruitt/ubi_reader/blob/7079dd380c1c9896bced30d6d34e8780b9181597/ubireader/ubi/block/sort.py#L55-L82
def by_vol_id(blocks, slist=None):
    """Sort blocks by volume id

    Arguments:
    Obj:blocks -- List of block objects.
    List:slist -- (optional) List of block indexes.

    Return:
    Dict -- blocks grouped in lists with dict key as volume id.
    """
    vol_blocks = {}

    # sort block by volume
    # not reliable with multiple partitions (fifo)
    for i in blocks:
        if slist and i not in slist:
            continue
        elif not blocks[i].is_valid:
            continue

        if blocks[i].vid_hdr.vol_id not in vol_blocks:
            vol_blocks[blocks[i].vid_hdr.vol_id] = []

        vol_blocks[blocks[i].vid_hdr.vol_id].append(blocks[i].peb_num)

    return vol_blocks
[ "def", "by_vol_id", "(", "blocks", ",", "slist", "=", "None", ")", ":", "vol_blocks", "=", "{", "}", "# sort block by volume", "# not reliable with multiple partitions (fifo)", "for", "i", "in", "blocks", ":", "if", "slist", "and", "i", "not", "in", "slist", ":", "continue", "elif", "not", "blocks", "[", "i", "]", ".", "is_valid", ":", "continue", "if", "blocks", "[", "i", "]", ".", "vid_hdr", ".", "vol_id", "not", "in", "vol_blocks", ":", "vol_blocks", "[", "blocks", "[", "i", "]", ".", "vid_hdr", ".", "vol_id", "]", "=", "[", "]", "vol_blocks", "[", "blocks", "[", "i", "]", ".", "vid_hdr", ".", "vol_id", "]", ".", "append", "(", "blocks", "[", "i", "]", ".", "peb_num", ")", "return", "vol_blocks" ]
Sort blocks by volume id Arguments: Obj:blocks -- List of block objects. List:slist -- (optional) List of block indexes. Return: Dict -- blocks grouped in lists with dict key as volume id.
[ "Sort", "blocks", "by", "volume", "id" ]
python
train
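The grouping idiom can be written more compactly with collections.defaultdict, which removes the explicit "key not in dict" check; Block below is a stand-in for the real block objects:

from collections import defaultdict
from types import SimpleNamespace as Block

blocks = {
    0: Block(is_valid=True,  peb_num=0, vid_hdr=Block(vol_id=1)),
    1: Block(is_valid=True,  peb_num=1, vid_hdr=Block(vol_id=2)),
    2: Block(is_valid=False, peb_num=2, vid_hdr=Block(vol_id=1)),
}

vol_blocks = defaultdict(list)
for i, b in blocks.items():
    if b.is_valid:
        vol_blocks[b.vid_hdr.vol_id].append(b.peb_num)

assert dict(vol_blocks) == {1: [0], 2: [1]}   # invalid block 2 is skipped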
pymc-devs/pymc
pymc/utils.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L525-L539
def autocorr(x, lag=1):
    """Sample autocorrelation at specified lag.
    The autocorrelation is the correlation of x_i with x_{i+lag}.
    """
    if not lag:
        return 1
    if lag < 0:
        return
    # x = np.squeeze(asarray(x))
    # mu = x.mean()
    # v = x.var()
    # return ((x[:-lag]-mu)*(x[lag:]-mu)).sum()/v/(len(x) - lag)
    S = autocov(x, lag)
    return S[0, 1] / sqrt(prod(diag(S)))
[ "def", "autocorr", "(", "x", ",", "lag", "=", "1", ")", ":", "if", "not", "lag", ":", "return", "1", "if", "lag", "<", "0", ":", "return", "# x = np.squeeze(asarray(x))", "# mu = x.mean()", "# v = x.var()", "# return ((x[:-lag]-mu)*(x[lag:]-mu)).sum()/v/(len(x) - lag)", "S", "=", "autocov", "(", "x", ",", "lag", ")", "return", "S", "[", "0", ",", "1", "]", "/", "sqrt", "(", "prod", "(", "diag", "(", "S", ")", ")", ")" ]
Sample autocorrelation at specified lag. The autocorrelation is the correlation of x_i with x_{i+lag}.
[ "Sample", "autocorrelation", "at", "specified", "lag", ".", "The", "autocorrelation", "is", "the", "correlation", "of", "x_i", "with", "x_", "{", "i", "+", "lag", "}", "." ]
python
train
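The commented-out fallback in the body is the textbook definition; computed directly with NumPy it looks like this (standalone, without pymc's autocov):

import numpy as np

def autocorr_sketch(x, lag=1):
    x = np.asarray(x, dtype=float)
    mu, v = x.mean(), x.var()
    # correlation of x_i with x_{i+lag}, normalised by the sample variance
    return ((x[:-lag] - mu) * (x[lag:] - mu)).sum() / v / (len(x) - lag)

rng = np.random.default_rng(0)
x = np.sin(np.linspace(0, 10, 200)) + rng.normal(0, 0.1, 200)
print(autocorr_sketch(x, lag=1))   # close to 1: neighbouring samples co-vary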
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L506-L521
def override_from_dict(self, values_dict):
    """Override existing hyperparameter values, parsing new values from a
    dictionary.

    Args:
      values_dict: Dictionary of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      KeyError: If a hyperparameter in `values_dict` doesn't exist.
      ValueError: If `values_dict` cannot be parsed.
    """
    for name, value in values_dict.items():
        self.set_hparam(name, value)
    return self
[ "def", "override_from_dict", "(", "self", ",", "values_dict", ")", ":", "for", "name", ",", "value", "in", "values_dict", ".", "items", "(", ")", ":", "self", ".", "set_hparam", "(", "name", ",", "value", ")", "return", "self" ]
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "dictionary", "." ]
python
train
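Typical usage, assuming tensor2tensor is installed; set_hparam raises KeyError for names that were never registered, so only existing hyperparameters can be overridden:

from tensor2tensor.utils.hparam import HParams

hp = HParams(learning_rate=0.1, batch_size=32)
hp.override_from_dict({'learning_rate': 3e-4, 'batch_size': 64})
print(hp.learning_rate, hp.batch_size)   # 0.0003 64
# hp.override_from_dict({'optimizer': 'adam'})  # KeyError: unknown name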
newville/wxmplot
wxmplot/basepanel.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/basepanel.py#L430-L449
def __onMouseButtonEvent(self, event=None):
    """ general mouse press/release events. Here, event is
    a MplEvent from matplotlib. This routine just dispatches
    to the appropriate onLeftDown, onLeftUp, onRightDown,
    onRightUp.... methods.
    """
    if event is None:
        return
    button = event.button or 1
    handlers = {(1, 'button_press_event'): self.onLeftDown,
                (1, 'button_release_event'): self.onLeftUp,
                (3, 'button_press_event'): self.onRightDown,
                }
    # (3,'button_release_event'): self.onRightUp}
    handle_event = handlers.get((button, event.name), None)
    if hasattr(handle_event, '__call__'):
        handle_event(event)
    event.guiEvent.Skip()
[ "def", "__onMouseButtonEvent", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "None", ":", "return", "button", "=", "event", ".", "button", "or", "1", "handlers", "=", "{", "(", "1", ",", "'button_press_event'", ")", ":", "self", ".", "onLeftDown", ",", "(", "1", ",", "'button_release_event'", ")", ":", "self", ".", "onLeftUp", ",", "(", "3", ",", "'button_press_event'", ")", ":", "self", ".", "onRightDown", ",", "}", "# (3,'button_release_event'): self.onRightUp}", "handle_event", "=", "handlers", ".", "get", "(", "(", "button", ",", "event", ".", "name", ")", ",", "None", ")", "if", "hasattr", "(", "handle_event", ",", "'__call__'", ")", ":", "handle_event", "(", "event", ")", "event", ".", "guiEvent", ".", "Skip", "(", ")" ]
general mouse press/release events. Here, event is a MplEvent from matplotlib. This routine just dispatches to the appropriate onLeftDown, onLeftUp, onRightDown, onRightUp.... methods.
[ "general", "mouse", "press", "/", "release", "events", ".", "Here", "event", "is", "a", "MplEvent", "from", "matplotlib", ".", "This", "routine", "just", "dispatches", "to", "the", "appropriate", "onLeftDown", "onLeftUp", "onRightDown", "onRightUp", "....", "methods", "." ]
python
train
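The dispatch-table idiom in isolation: route (button, event_name) pairs to handlers through a dict lookup instead of an if/elif chain (handler names below are invented):

def on_left_down(event): print('left down at', event)
def on_left_up(event):   print('left up at', event)

handlers = {(1, 'button_press_event'):   on_left_down,
            (1, 'button_release_event'): on_left_up}

def dispatch(button, name, event):
    handle_event = handlers.get((button, name))
    if callable(handle_event):   # same guard as hasattr(h, '__call__')
        handle_event(event)

dispatch(1, 'button_press_event', (10, 20))   # -> left down at (10, 20)
dispatch(3, 'button_press_event', (10, 20))   # no handler registered: no-op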
mosdef-hub/mbuild
mbuild/pattern.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/pattern.py#L47-L73
def apply(self, compound, orientation='', compound_port=''):
    """Arrange copies of a Compound as specified by the Pattern.

    Parameters
    ----------
    compound
    orientation

    Returns
    -------

    """
    compounds = list()
    if self.orientations.get(orientation):
        for port in self.orientations[orientation]:
            new_compound = clone(compound)
            new_port = new_compound.labels[compound_port]
            # NOTE: the next line builds a tuple and discards it; a call that
            # overlaps new_port['up'] with port['up'] appears to be missing.
            (new_compound, new_port['up'], port['up'])
            compounds.append(new_compound)
    else:
        for point in self.points:
            new_compound = clone(compound)
            new_compound.translate(point)
            compounds.append(new_compound)
    return compounds
[ "def", "apply", "(", "self", ",", "compound", ",", "orientation", "=", "''", ",", "compound_port", "=", "''", ")", ":", "compounds", "=", "list", "(", ")", "if", "self", ".", "orientations", ".", "get", "(", "orientation", ")", ":", "for", "port", "in", "self", ".", "orientations", "[", "orientation", "]", ":", "new_compound", "=", "clone", "(", "compound", ")", "new_port", "=", "new_compound", ".", "labels", "[", "compound_port", "]", "(", "new_compound", ",", "new_port", "[", "'up'", "]", ",", "port", "[", "'up'", "]", ")", "compounds", ".", "append", "(", "new_compound", ")", "else", ":", "for", "point", "in", "self", ".", "points", ":", "new_compound", "=", "clone", "(", "compound", ")", "new_compound", ".", "translate", "(", "point", ")", "compounds", ".", "append", "(", "new_compound", ")", "return", "compounds" ]
Arrange copies of a Compound as specified by the Pattern. Parameters ---------- compound orientation Returns -------
[ "Arrange", "copies", "of", "a", "Compound", "as", "specified", "by", "the", "Pattern", "." ]
python
train
oscarbranson/latools
latools/latools.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/latools.py#L1817-L1907
def filter_threshold_percentile(self, analyte, percentiles, level='population',
                                filt=False, samples=None, subset=None):
    """
    Applies a threshold filter to the data.

    Generates two filters above and below the threshold value for a
    given analyte.

    Parameters
    ----------
    analyte : str
        The analyte that the filter applies to.
    percentiles : float or iterable of len=2
        The percentile values.
    level : str
        Whether to calculate percentiles from the entire dataset
        ('population') or for each individual sample ('individual')
    filt : bool
        Whether or not to apply existing filters to the data before
        calculating this filter.
    samples : array_like or None
        Which samples to apply this filter to. If None, applies to all
        samples.
    subset : str or number
        The subset of samples (defined by make_subset) you want to apply
        the filter to.

    Returns
    -------
    None
    """
    params = locals()
    del(params['self'])

    if samples is not None:
        subset = self.make_subset(samples)

    samples = self._get_samples(subset)

    self.minimal_analytes.update([analyte])

    if isinstance(percentiles, (int, float)):
        percentiles = [percentiles]

    if level == 'population':
        # Get all samples
        self.get_focus(filt=filt, subset=subset, nominal=True)
        dat = self.focus[analyte][~np.isnan(self.focus[analyte])]
        # calculate filter limits
        lims = np.percentile(dat, percentiles)

    # Calculate filter for individual samples
    with self.pbar.set(total=len(samples), desc='Percentile threshold filter') as prog:
        for s in samples:
            d = self.data[s]
            setn = d.filt.maxset + 1

            g = d.focus[analyte]

            if level == 'individual':
                gt = nominal_values(g)
                lims = np.percentile(gt[~np.isnan(gt)], percentiles)

            if len(lims) == 1:
                above = g >= lims[0]
                below = g < lims[0]

                d.filt.add(analyte + '_{:.1f}-pcnt_below'.format(percentiles[0]),
                           below,
                           'Values below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
                           params, setn=setn)
                d.filt.add(analyte + '_{:.1f}-pcnt_above'.format(percentiles[0]),
                           above,
                           'Values above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
                           params, setn=setn)

            elif len(lims) == 2:
                inside = (g >= min(lims)) & (g <= max(lims))
                outside = (g < min(lims)) | (g > max(lims))

                lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
                d.filt.add(analyte + '_' + lpc + '-pcnt_inside',
                           inside,
                           'Values between ' + lpc + ' ' + analyte + ' percentiles',
                           params, setn=setn)
                d.filt.add(analyte + '_' + lpc + '-pcnt_outside',
                           outside,
                           'Values outside ' + lpc + ' ' + analyte + ' percentiles',
                           params, setn=setn)

            prog.update()
    return
[ "def", "filter_threshold_percentile", "(", "self", ",", "analyte", ",", "percentiles", ",", "level", "=", "'population'", ",", "filt", "=", "False", ",", "samples", "=", "None", ",", "subset", "=", "None", ")", ":", "params", "=", "locals", "(", ")", "del", "(", "params", "[", "'self'", "]", ")", "if", "samples", "is", "not", "None", ":", "subset", "=", "self", ".", "make_subset", "(", "samples", ")", "samples", "=", "self", ".", "_get_samples", "(", "subset", ")", "self", ".", "minimal_analytes", ".", "update", "(", "[", "analyte", "]", ")", "if", "isinstance", "(", "percentiles", ",", "(", "int", ",", "float", ")", ")", ":", "percentiles", "=", "[", "percentiles", "]", "if", "level", "==", "'population'", ":", "# Get all samples", "self", ".", "get_focus", "(", "filt", "=", "filt", ",", "subset", "=", "subset", ",", "nominal", "=", "True", ")", "dat", "=", "self", ".", "focus", "[", "analyte", "]", "[", "~", "np", ".", "isnan", "(", "self", ".", "focus", "[", "analyte", "]", ")", "]", "# calculate filter limits", "lims", "=", "np", ".", "percentile", "(", "dat", ",", "percentiles", ")", "# Calculate filter for individual samples", "with", "self", ".", "pbar", ".", "set", "(", "total", "=", "len", "(", "samples", ")", ",", "desc", "=", "'Percentile theshold filter'", ")", "as", "prog", ":", "for", "s", "in", "samples", ":", "d", "=", "self", ".", "data", "[", "s", "]", "setn", "=", "d", ".", "filt", ".", "maxset", "+", "1", "g", "=", "d", ".", "focus", "[", "analyte", "]", "if", "level", "==", "'individual'", ":", "gt", "=", "nominal_values", "(", "g", ")", "lims", "=", "np", ".", "percentile", "(", "gt", "[", "~", "np", ".", "isnan", "(", "gt", ")", "]", ",", "percentiles", ")", "if", "len", "(", "lims", ")", "==", "1", ":", "above", "=", "g", ">=", "lims", "[", "0", "]", "below", "=", "g", "<", "lims", "[", "0", "]", "d", ".", "filt", ".", "add", "(", "analyte", "+", "'_{:.1f}-pcnt_below'", ".", "format", "(", "percentiles", "[", "0", "]", ")", ",", "below", ",", "'Values below {:.1f}th {:} percentile ({:.2e})'", ".", "format", "(", "percentiles", "[", "0", "]", ",", "analyte", ",", "lims", "[", "0", "]", ")", ",", "params", ",", "setn", "=", "setn", ")", "d", ".", "filt", ".", "add", "(", "analyte", "+", "'_{:.1f}-pcnt_above'", ".", "format", "(", "percentiles", "[", "0", "]", ")", ",", "above", ",", "'Values above {:.1f}th {:} percentile ({:.2e})'", ".", "format", "(", "percentiles", "[", "0", "]", ",", "analyte", ",", "lims", "[", "0", "]", ")", ",", "params", ",", "setn", "=", "setn", ")", "elif", "len", "(", "lims", ")", "==", "2", ":", "inside", "=", "(", "g", ">=", "min", "(", "lims", ")", ")", "&", "(", "g", "<=", "max", "(", "lims", ")", ")", "outside", "=", "(", "g", "<", "min", "(", "lims", ")", ")", "|", "(", "g", ">", "max", "(", "lims", ")", ")", "lpc", "=", "'-'", ".", "join", "(", "[", "'{:.1f}'", ".", "format", "(", "p", ")", "for", "p", "in", "percentiles", "]", ")", "d", ".", "filt", ".", "add", "(", "analyte", "+", "'_'", "+", "lpc", "+", "'-pcnt_inside'", ",", "inside", ",", "'Values between '", "+", "lpc", "+", "' '", "+", "analyte", "+", "'percentiles'", ",", "params", ",", "setn", "=", "setn", ")", "d", ".", "filt", ".", "add", "(", "analyte", "+", "'_'", "+", "lpc", "+", "'-pcnt_outside'", ",", "outside", ",", "'Values outside '", "+", "lpc", "+", "' '", "+", "analyte", "+", "'percentiles'", ",", "params", ",", "setn", "=", "setn", ")", "prog", ".", "update", "(", ")", "return" ]
Applies a threshold filter to the data. Generates two filters above and below the threshold value for a given analyte. Parameters ---------- analyte : str The analyte that the filter applies to. percentiles : float or iterable of len=2 The percentile values. level : str Whether to calculate percentiles from the entire dataset ('population') or for each individual sample ('individual') filt : bool Whether or not to apply existing filters to the data before calculating this filter. samples : array_like or None Which samples to apply this filter to. If None, applies to all samples. subset : str or number The subset of samples (defined by make_subset) you want to apply the filter to. Returns ------- None
[ "Applies", "a", "threshold", "filter", "to", "the", "data", "." ]
python
test
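The core of the filter in isolation: np.percentile yields one or two limits, and the filters are vectorised comparisons against them:

import numpy as np

g = np.random.default_rng(1).normal(size=1000)

lims = np.percentile(g, [5])             # one percentile: above/below split
below, above = g < lims[0], g >= lims[0]

lims = np.percentile(g, [2.5, 97.5])     # two percentiles: inside/outside split
inside = (g >= lims.min()) & (g <= lims.max())
outside = (g < lims.min()) | (g > lims.max())
print(inside.sum(), outside.sum())       # roughly 950 inside, 50 outside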
wbond/oscrypto
oscrypto/_osx/symmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/symmetric.py#L169-L208
def aes_cbc_pkcs7_decrypt(key, data, iv):
    """
    Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string 16-bytes long

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    if len(key) not in [16, 24, 32]:
        raise ValueError(pretty_message(
            '''
            key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
            long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))

    return _decrypt(Security.kSecAttrKeyTypeAES, key, data, iv, Security.kSecPaddingPKCS7Key)
[ "def", "aes_cbc_pkcs7_decrypt", "(", "key", ",", "data", ",", "iv", ")", ":", "if", "len", "(", "key", ")", "not", "in", "[", "16", ",", "24", ",", "32", "]", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)\n long - is %s\n '''", ",", "len", "(", "key", ")", ")", ")", "if", "len", "(", "iv", ")", "!=", "16", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n iv must be 16 bytes long - is %s\n '''", ",", "len", "(", "iv", ")", ")", ")", "return", "_decrypt", "(", "Security", ".", "kSecAttrKeyTypeAES", ",", "key", ",", "data", ",", "iv", ",", "Security", ".", "kSecPaddingPKCS7Key", ")" ]
Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key :param key: The encryption key - a byte string either 16, 24 or 32 bytes long :param data: The ciphertext - a byte string :param iv: The initialization vector - a byte string 16-bytes long :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext
[ "Decrypts", "AES", "ciphertext", "in", "CBC", "mode", "using", "a", "128", "192", "or", "256", "bit", "key" ]
python
valid
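A round-trip sketch through oscrypto's public symmetric API (the module above is the macOS backend behind it); assumes oscrypto is installed:

import os
from oscrypto import symmetric

key = os.urandom(32)   # 256-bit key
iv, ciphertext = symmetric.aes_cbc_pkcs7_encrypt(key, b'attack at dawn',
                                                 os.urandom(16))
plaintext = symmetric.aes_cbc_pkcs7_decrypt(key, ciphertext, iv)
assert plaintext == b'attack at dawn'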
diging/tethne
tethne/networks/base.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L143-L167
def multipartite(corpus, featureset_names, min_weight=1, filters={}):
    """
    A network of papers and one or more featuresets.
    """
    pairs = Counter()
    node_type = {corpus._generate_index(p): {'type': 'paper'}
                 for p in corpus.papers}
    for featureset_name in featureset_names:
        ftypes = {}
        featureset = _get_featureset(corpus, featureset_name)
        for paper, feature in featureset.iteritems():
            if featureset_name in filters:
                if not filters[featureset_name](featureset, feature):
                    continue
            if len(feature) < 1:
                continue
            for f in list(zip(*feature))[0]:
                ftypes[f] = {'type': featureset_name}
                pairs[(paper, f)] += 1
        node_type.update(ftypes)

    return _generate_graph(nx.DiGraph, pairs,
                           node_attrs=node_type,
                           min_weight=min_weight)
[ "def", "multipartite", "(", "corpus", ",", "featureset_names", ",", "min_weight", "=", "1", ",", "filters", "=", "{", "}", ")", ":", "pairs", "=", "Counter", "(", ")", "node_type", "=", "{", "corpus", ".", "_generate_index", "(", "p", ")", ":", "{", "'type'", ":", "'paper'", "}", "for", "p", "in", "corpus", ".", "papers", "}", "for", "featureset_name", "in", "featureset_names", ":", "ftypes", "=", "{", "}", "featureset", "=", "_get_featureset", "(", "corpus", ",", "featureset_name", ")", "for", "paper", ",", "feature", "in", "featureset", ".", "iteritems", "(", ")", ":", "if", "featureset_name", "in", "filters", ":", "if", "not", "filters", "[", "featureset_name", "]", "(", "featureset", ",", "feature", ")", ":", "continue", "if", "len", "(", "feature", ")", "<", "1", ":", "continue", "for", "f", "in", "list", "(", "zip", "(", "*", "feature", ")", ")", "[", "0", "]", ":", "ftypes", "[", "f", "]", "=", "{", "'type'", ":", "featureset_name", "}", "pairs", "[", "(", "paper", ",", "f", ")", "]", "+=", "1", "node_type", ".", "update", "(", "ftypes", ")", "return", "_generate_graph", "(", "nx", ".", "DiGraph", ",", "pairs", ",", "node_attrs", "=", "node_type", ",", "min_weight", "=", "min_weight", ")" ]
A network of papers and one or more featuresets.
[ "A", "network", "of", "papers", "and", "one", "or", "more", "featuresets", "." ]
python
train
gsi-upm/soil
soil/analysis.py
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/analysis.py#L102-L118
def process_one(df, *keys, columns=['key', 'agent_id'], values='value',
                fill=True, index=['t_step',], aggfunc='first', **kwargs):
    '''
    Process a dataframe in canonical form ``(t_step, agent_id, key, value, value_type)``
    into a dataframe with a column per key
    '''
    if df is None:
        return df
    if keys:
        df = df[df['key'].isin(keys)]

    df = df.pivot_table(values=values, index=index, columns=columns,
                        aggfunc=aggfunc, **kwargs)
    if fill:
        df = fillna(df)
    return df
[ "def", "process_one", "(", "df", ",", "*", "keys", ",", "columns", "=", "[", "'key'", ",", "'agent_id'", "]", ",", "values", "=", "'value'", ",", "fill", "=", "True", ",", "index", "=", "[", "'t_step'", ",", "]", ",", "aggfunc", "=", "'first'", ",", "*", "*", "kwargs", ")", ":", "if", "df", "is", "None", ":", "return", "df", "if", "keys", ":", "df", "=", "df", "[", "df", "[", "'key'", "]", ".", "isin", "(", "keys", ")", "]", "df", "=", "df", ".", "pivot_table", "(", "values", "=", "values", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "aggfunc", "=", "aggfunc", ",", "*", "*", "kwargs", ")", "if", "fill", ":", "df", "=", "fillna", "(", "df", ")", "return", "df" ]
Process a dataframe in canonical form ``(t_step, agent_id, key, value, value_type)`` into a dataframe with a column per key
[ "Process", "a", "dataframe", "in", "canonical", "form", "(", "t_step", "agent_id", "key", "value", "value_type", ")", "into", "a", "dataframe", "with", "a", "column", "per", "key" ]
python
train
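What the pivot_table step does to canonical long-form data (the fillna-style gap filling is omitted for brevity):

import pandas as pd

df = pd.DataFrame({'t_step':   [0, 0, 1, 1],
                   'agent_id': ['a', 'b', 'a', 'b'],
                   'key':      ['state'] * 4,
                   'value':    [1, 0, 1, 1]})

wide = df.pivot_table(values='value', index=['t_step'],
                      columns=['key', 'agent_id'], aggfunc='first')
print(wide)   # columns become a (key, agent_id) MultiIndex,
              # one row per t_step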
saltstack/salt
salt/grains/napalm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L107-L112
def _get_device_grain(name, proxy=None):
    '''
    Retrieves device-specific grains.
    '''
    device = _retrieve_device_cache(proxy=proxy)
    return device.get(name.upper())
[ "def", "_get_device_grain", "(", "name", ",", "proxy", "=", "None", ")", ":", "device", "=", "_retrieve_device_cache", "(", "proxy", "=", "proxy", ")", "return", "device", ".", "get", "(", "name", ".", "upper", "(", ")", ")" ]
Retrieves device-specific grains.
[ "Retrieves", "device", "-", "specific", "grains", "." ]
python
train
rhgrant10/Groupy
groupy/api/groups.py
https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/groups.py#L314-L317
def refresh_from_server(self):
    """Refresh the group from the server in place."""
    group = self.manager.get(id=self.id)
    self.__init__(self.manager, **group.data)
[ "def", "refresh_from_server", "(", "self", ")", ":", "group", "=", "self", ".", "manager", ".", "get", "(", "id", "=", "self", ".", "id", ")", "self", ".", "__init__", "(", "self", ".", "manager", ",", "*", "*", "group", ".", "data", ")" ]
Refresh the group from the server in place.
[ "Refresh", "the", "group", "from", "the", "server", "in", "place", "." ]
python
train
juju/python-libjuju
juju/client/_client1.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L7401-L7418
async def Import(self, bytes_, charms, tools):
    '''
    bytes_ : typing.Sequence[int]
    charms : typing.Sequence[str]
    tools : typing.Sequence[~SerializedModelTools]
    Returns -> None
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='MigrationTarget',
               request='Import',
               version=1,
               params=_params)
    _params['bytes'] = bytes_
    _params['charms'] = charms
    _params['tools'] = tools
    reply = await self.rpc(msg)
    return reply
[ "async", "def", "Import", "(", "self", ",", "bytes_", ",", "charms", ",", "tools", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'MigrationTarget'", ",", "request", "=", "'Import'", ",", "version", "=", "1", ",", "params", "=", "_params", ")", "_params", "[", "'bytes'", "]", "=", "bytes_", "_params", "[", "'charms'", "]", "=", "charms", "_params", "[", "'tools'", "]", "=", "tools", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
bytes_ : typing.Sequence[int] charms : typing.Sequence[str] tools : typing.Sequence[~SerializedModelTools] Returns -> None
[ "bytes_", ":", "typing", ".", "Sequence", "[", "int", "]", "charms", ":", "typing", ".", "Sequence", "[", "str", "]", "tools", ":", "typing", ".", "Sequence", "[", "~SerializedModelTools", "]", "Returns", "-", ">", "None" ]
python
train
kejbaly2/metrique
metrique/cubes/osinfo/rpm.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/cubes/osinfo/rpm.py#L79-L117
def get_objects(self, **kwargs):
    '''
    Run `rpm -q` command on a {local, remote} system to get back
    details of installed RPMs.

    Default rpm details extracted are as follows:
        * name
        * version
        * release
        * arch
        * nvra
        * license
        * os
        * packager
        * platform
        * sourcepackage
        * sourcerpm
        * summary
    '''
    fmt = ':::'.join('%%{%s}' % f for f in self._fields)
    if self.ssh_host:
        output = self._ssh_cmd(fmt)
    else:
        output = self._local_cmd(fmt)
    if isinstance(output, basestring):
        output = unicode(output, 'utf-8')
    output = output.strip().split('\n')
    lines = [l.strip().split(':::') for l in output]
    now = utcnow()
    host = self.ssh_host or socket.gethostname()
    for line in lines:
        obj = {'host': host, '_start': now}
        for i, item in enumerate(line):
            if item == '(none)':
                item = None
            obj[self._fields[i]] = item
        obj['_oid'] = '%s__%s' % (host, obj['nvra'])
        self.objects.add(obj)
    return super(Rpm, self).get_objects(**kwargs)
[ "def", "get_objects", "(", "self", ",", "*", "*", "kwargs", ")", ":", "fmt", "=", "':::'", ".", "join", "(", "'%%{%s}'", "%", "f", "for", "f", "in", "self", ".", "_fields", ")", "if", "self", ".", "ssh_host", ":", "output", "=", "self", ".", "_ssh_cmd", "(", "fmt", ")", "else", ":", "output", "=", "self", ".", "_local_cmd", "(", "fmt", ")", "if", "isinstance", "(", "output", ",", "basestring", ")", ":", "output", "=", "unicode", "(", "output", ",", "'utf-8'", ")", "output", "=", "output", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "lines", "=", "[", "l", ".", "strip", "(", ")", ".", "split", "(", "':::'", ")", "for", "l", "in", "output", "]", "now", "=", "utcnow", "(", ")", "host", "=", "self", ".", "ssh_host", "or", "socket", ".", "gethostname", "(", ")", "for", "line", "in", "lines", ":", "obj", "=", "{", "'host'", ":", "host", ",", "'_start'", ":", "now", "}", "for", "i", ",", "item", "in", "enumerate", "(", "line", ")", ":", "if", "item", "==", "'(none)'", ":", "item", "=", "None", "obj", "[", "self", ".", "_fields", "[", "i", "]", "]", "=", "item", "obj", "[", "'_oid'", "]", "=", "'%s__%s'", "%", "(", "host", ",", "obj", "[", "'nvra'", "]", ")", "self", ".", "objects", ".", "add", "(", "obj", ")", "return", "super", "(", "Rpm", ",", "self", ")", ".", "get_objects", "(", "*", "*", "kwargs", ")" ]
Run `rpm -q` command on a {local, remote} system to get back details of installed RPMs. Default rpm details extracted are as follows: * name * version * release * arch * nvra * license * os * packager * platform * sourcepackage * sourcerpm * summary
[ "Run", "rpm", "-", "q", "command", "on", "a", "{", "local", "remote", "}", "system", "to", "get", "back", "details", "of", "installed", "RPMs", "." ]
python
train
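What the ':::'.join('%%{%s}' % f ...) line builds: an rpm query-format string with one %{FIELD} tag per field, ':::'-separated, so each output line splits cleanly back into fields:

fields = ['name', 'version', 'release']
fmt = ':::'.join('%%{%s}' % f for f in fields)
print(fmt)                  # %{name}:::%{version}:::%{release}
print(fmt.split(':::'))     # ['%{name}', '%{version}', '%{release}']
# the rpm -q command is then run with this as its query format, and
# line.strip().split(':::') recovers one value per field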
chaoss/grimoirelab-elk
grimoire_elk/enriched/enrich.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L586-L605
def get_item_project(self, eitem):
    """ Get the project name related to the eitem
    :param eitem: enriched item for which to find the project
    :return: a dictionary with the project data
    """
    eitem_project = {}

    project = self.find_item_project(eitem)

    if project is None:
        project = DEFAULT_PROJECT

    eitem_project = {"project": project}

    # Time to add the project levels: eclipse.platform.releng.aggregator
    eitem_project.update(self.add_project_levels(project))

    # And now time to add the metadata
    eitem_project.update(self.get_item_metadata(eitem))

    return eitem_project
[ "def", "get_item_project", "(", "self", ",", "eitem", ")", ":", "eitem_project", "=", "{", "}", "project", "=", "self", ".", "find_item_project", "(", "eitem", ")", "if", "project", "is", "None", ":", "project", "=", "DEFAULT_PROJECT", "eitem_project", "=", "{", "\"project\"", ":", "project", "}", "# Time to add the project levels: eclipse.platform.releng.aggregator", "eitem_project", ".", "update", "(", "self", ".", "add_project_levels", "(", "project", ")", ")", "# And now time to add the metadata", "eitem_project", ".", "update", "(", "self", ".", "get_item_metadata", "(", "eitem", ")", ")", "return", "eitem_project" ]
Get the project name related to the eitem :param eitem: enriched item for which to find the project :return: a dictionary with the project data
[ "Get", "the", "project", "name", "related", "to", "the", "eitem", ":", "param", "eitem", ":", "enriched", "item", "for", "which", "to", "find", "the", "project", ":", "return", ":", "a", "dictionary", "with", "the", "project", "data" ]
python
train
opencivicdata/pupa
pupa/importers/base.py
https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L187-L196
def import_directory(self, datadir):
    """ import a JSON directory into the database """
    def json_stream():
        # load all json, mapped by json_id
        for fname in glob.glob(os.path.join(datadir, self._type + '_*.json')):
            with open(fname) as f:
                yield json.load(f)

    return self.import_data(json_stream())
[ "def", "import_directory", "(", "self", ",", "datadir", ")", ":", "def", "json_stream", "(", ")", ":", "# load all json, mapped by json_id", "for", "fname", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "datadir", ",", "self", ".", "_type", "+", "'_*.json'", ")", ")", ":", "with", "open", "(", "fname", ")", "as", "f", ":", "yield", "json", ".", "load", "(", "f", ")", "return", "self", ".", "import_data", "(", "json_stream", "(", ")", ")" ]
import a JSON directory into the database
[ "import", "a", "JSON", "directory", "into", "the", "database" ]
python
train
datasift/datasift-python
datasift/client.py
https://github.com/datasift/datasift-python/blob/bfaca1a47501a18e11065ecf630d9c31df818f65/datasift/client.py#L365-L379
def dpu(self, hash=None, historics_id=None):
    """ Calculate the DPU cost of consuming a stream.

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu

        :param hash: target CSDL filter hash
        :type hash: str
        :returns: dict with extra response data
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`,
            :class:`requests.exceptions.HTTPError`
    """
    if hash:
        return self.request.get('dpu', params=dict(hash=hash))
    if historics_id:
        return self.request.get('dpu', params=dict(historics_id=historics_id))
[ "def", "dpu", "(", "self", ",", "hash", "=", "None", ",", "historics_id", "=", "None", ")", ":", "if", "hash", ":", "return", "self", ".", "request", ".", "get", "(", "'dpu'", ",", "params", "=", "dict", "(", "hash", "=", "hash", ")", ")", "if", "historics_id", ":", "return", "self", ".", "request", ".", "get", "(", "'dpu'", ",", "params", "=", "dict", "(", "historics_id", "=", "historics_id", ")", ")" ]
Calculate the DPU cost of consuming a stream. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/dpu :param hash: target CSDL filter hash :type hash: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
[ "Calculate", "the", "DPU", "cost", "of", "consuming", "a", "stream", "." ]
python
train
pytries/DAWG-Python
dawg_python/dawgs.py
https://github.com/pytries/DAWG-Python/blob/e56241ec919b78735ff79014bf18d7fd1f8e08b9/dawg_python/dawgs.py#L65-L77
def similar_keys(self, key, replaces):
    """
    Returns all variants of ``key`` in this DAWG according to
    ``replaces``.

    ``replaces`` is an object obtained from
    ``DAWG.compile_replaces(mapping)`` where mapping is a dict
    that maps single-char unicode strings to other single-char
    unicode strings.

    This may be useful e.g. for handling single-character umlauts.
    """
    return self._similar_keys("", key, self.dct.ROOT, replaces)
[ "def", "similar_keys", "(", "self", ",", "key", ",", "replaces", ")", ":", "return", "self", ".", "_similar_keys", "(", "\"\"", ",", "key", ",", "self", ".", "dct", ".", "ROOT", ",", "replaces", ")" ]
Returns all variants of ``key`` in this DAWG according to ``replaces``. ``replaces`` is an object obtained from ``DAWG.compile_replaces(mapping)`` where mapping is a dict that maps single-char unicode strings to other single-char unicode strings. This may be useful e.g. for handling single-character umlauts.
[ "Returns", "all", "variants", "of", "key", "in", "this", "DAWG", "according", "to", "replaces", "." ]
python
train
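Hypothetical usage, assuming a DAWG file built elsewhere (the path and word list are illustrative); compile_replaces comes straight from the docstring above:

import dawg_python

d = dawg_python.DAWG().load('words.dawg')        # illustrative path
replaces = d.compile_replaces({u'u': u'ü'})
print(d.similar_keys(u'muller', replaces))       # e.g. ['muller', 'müller']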
cloudant/python-cloudant
src/cloudant/replicator.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/replicator.py#L155-L206
def follow_replication(self, repl_id):
    """
    Blocks and streams status of a given replication.

    For example:

    .. code-block:: python

        for doc in replicator.follow_replication(repl_doc_id):
            # Process replication information as it comes in

    :param str repl_id: Replication id used to identify the replication to
        inspect.

    :returns: Iterable stream of copies of the replication Document
        and replication state as a ``str`` for the specified replication id
    """
    def update_state():
        """
        Retrieves the replication state.
        """
        if "scheduler" in self.client.features():
            try:
                arepl_doc = Scheduler(self.client).get_doc(repl_id)
                return arepl_doc, arepl_doc['state']
            except HTTPError:
                return None, None
        else:
            try:
                arepl_doc = self.database[repl_id]
                arepl_doc.fetch()
                return arepl_doc, arepl_doc.get('_replication_state')
            except KeyError:
                return None, None

    while True:
        # Make sure we fetch the state up front, just in case it moves
        # too fast and we miss it in the changes feed.
        repl_doc, state = update_state()
        if repl_doc:
            yield repl_doc
        if state is not None and state in ['error', 'completed']:
            return

        # Now listen on changes feed for the state
        for change in self.database.changes():
            if change.get('id') == repl_id:
                repl_doc, state = update_state()
                if repl_doc is not None:
                    yield repl_doc
                if state is not None and state in ['error', 'completed']:
                    return
[ "def", "follow_replication", "(", "self", ",", "repl_id", ")", ":", "def", "update_state", "(", ")", ":", "\"\"\"\n Retrieves the replication state.\n \"\"\"", "if", "\"scheduler\"", "in", "self", ".", "client", ".", "features", "(", ")", ":", "try", ":", "arepl_doc", "=", "Scheduler", "(", "self", ".", "client", ")", ".", "get_doc", "(", "repl_id", ")", "return", "arepl_doc", ",", "arepl_doc", "[", "'state'", "]", "except", "HTTPError", ":", "return", "None", ",", "None", "else", ":", "try", ":", "arepl_doc", "=", "self", ".", "database", "[", "repl_id", "]", "arepl_doc", ".", "fetch", "(", ")", "return", "arepl_doc", ",", "arepl_doc", ".", "get", "(", "'_replication_state'", ")", "except", "KeyError", ":", "return", "None", ",", "None", "while", "True", ":", "# Make sure we fetch the state up front, just in case it moves", "# too fast and we miss it in the changes feed.", "repl_doc", ",", "state", "=", "update_state", "(", ")", "if", "repl_doc", ":", "yield", "repl_doc", "if", "state", "is", "not", "None", "and", "state", "in", "[", "'error'", ",", "'completed'", "]", ":", "return", "# Now listen on changes feed for the state", "for", "change", "in", "self", ".", "database", ".", "changes", "(", ")", ":", "if", "change", ".", "get", "(", "'id'", ")", "==", "repl_id", ":", "repl_doc", ",", "state", "=", "update_state", "(", ")", "if", "repl_doc", "is", "not", "None", ":", "yield", "repl_doc", "if", "state", "is", "not", "None", "and", "state", "in", "[", "'error'", ",", "'completed'", "]", ":", "return" ]
Blocks and streams status of a given replication. For example: .. code-block:: python for doc in replicator.follow_replication(repl_doc_id): # Process replication information as it comes in :param str repl_id: Replication id used to identify the replication to inspect. :returns: Iterable stream of copies of the replication Document and replication state as a ``str`` for the specified replication id
[ "Blocks", "and", "streams", "status", "of", "a", "given", "replication", "." ]
python
train
fabaff/python-hole
example.py
https://github.com/fabaff/python-hole/blob/1652ebd09b9c17d900c6e46b63b09390a3900b69/example.py#L34-L38
async def enable():
    """Enable a *hole instance."""
    async with aiohttp.ClientSession() as session:
        data = Hole('192.168.0.215', loop, session, api_token=API_TOKEN)
        await data.enable()
[ "async", "def", "enable", "(", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "data", "=", "Hole", "(", "'192.168.0.215'", ",", "loop", ",", "session", ",", "api_token", "=", "API_TOKEN", ")", "await", "data", ".", "enable", "(", ")" ]
Enable a *hole instance.
[ "Get", "the", "data", "from", "a", "*", "hole", "instance", "." ]
python
train
koordinates/python-client
koordinates/layers.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/layers.py#L88-L94
def get_published(self, layer_id, expand=[]):
    """
    Get the latest published version of this layer.
    :raises NotFound: if there is no published version.
    """
    target_url = self.client.get_url('VERSION', 'GET', 'published', {'layer_id': layer_id})
    return self._get(target_url, expand=expand)
[ "def", "get_published", "(", "self", ",", "layer_id", ",", "expand", "=", "[", "]", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'VERSION'", ",", "'GET'", ",", "'published'", ",", "{", "'layer_id'", ":", "layer_id", "}", ")", "return", "self", ".", "_get", "(", "target_url", ",", "expand", "=", "expand", ")" ]
Get the latest published version of this layer. :raises NotFound: if there is no published version.
[ "Get", "the", "latest", "published", "version", "of", "this", "layer", ".", ":", "raises", "NotFound", ":", "if", "there", "is", "no", "published", "version", "." ]
python
train
discogs/python-cas-client
cas_client/cas_client.py
https://github.com/discogs/python-cas-client/blob/f1efa2f49a22d43135014cb1b8d9dd3875304318/cas_client/cas_client.py#L311-L318
def session_exists(self, ticket):
    '''
    Test if a session record exists for a service ticket.
    '''
    assert isinstance(self.session_storage_adapter, CASSessionAdapter)
    exists = self.session_storage_adapter.exists(ticket)
    logging.debug('[CAS] Session [{}] exists: {}'.format(ticket, exists))
    return exists
[ "def", "session_exists", "(", "self", ",", "ticket", ")", ":", "assert", "isinstance", "(", "self", ".", "session_storage_adapter", ",", "CASSessionAdapter", ")", "exists", "=", "self", ".", "session_storage_adapter", ".", "exists", "(", "ticket", ")", "logging", ".", "debug", "(", "'[CAS] Session [{}] exists: {}'", ".", "format", "(", "ticket", ",", "exists", ")", ")", "return", "exists" ]
Test if a session record exists for a service ticket.
[ "Test", "if", "a", "session", "records", "exists", "for", "a", "service", "ticket", "." ]
python
train
tilde-lab/tilde
tilde/core/api.py
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/tilde/core/api.py#L830-L931
def augment(self, session, parent, addendum): ''' Augments a DATASET with some calcs NB: this is the PUBLIC method @returns error ''' parent_calc = session.query(model.Calculation).get(parent) if not parent_calc or not parent_calc.siblings_count: return 'Dataset is erroneously selected!' existing_children, filtered_addendum = [child.checksum for child in parent_calc.children], [] for child in addendum: if not child in existing_children: filtered_addendum.append(child) if not filtered_addendum: return 'All these data are already present in this dataset.' if parent_calc.checksum in filtered_addendum: return 'A dataset cannot be added into itself.' higher_lookup = {} more = parent_calc.parent distance = 0 while True: distance += 1 higher, more = more, [] if not higher: break for item in higher: try: higher_lookup[distance].add(item) except KeyError: higher_lookup[distance] = set([item]) if item.parent: more += item.parent for members in list(higher_lookup.values()): for member in members: if member.checksum in filtered_addendum: return 'A parent dataset cannot be added to its children dataset.' parent_meta = session.query(model.Metadata).get(parent) parent_grid = session.query(model.Grid).get(parent) info_obj = json.loads(parent_grid.info) for nested_depth, grid_item, download_size in session.query(model.Calculation.nested_depth, model.Grid.info, model.Metadata.download_size).filter(model.Calculation.checksum == model.Grid.checksum, model.Grid.checksum == model.Metadata.checksum, model.Calculation.checksum.in_(filtered_addendum)).all(): if nested_depth >= parent_calc.nested_depth: parent_calc.nested_depth = nested_depth + 1 grid_item = json.loads(grid_item) for entity in self.hierarchy: topic = grid_item.get(entity['source']) if not topic: continue if entity['source'] == 'standard': topic = [] if not isinstance(topic, list): topic = [ topic ] existing_term = info_obj.get(entity['source'], []) if not isinstance(existing_term, list): existing_term = [ existing_term ] # TODO info_obj[ entity['source'] ] = list(set( existing_term + topic )) parent_meta.download_size += download_size info_obj['standard'] = info_obj['standard'][0] # TODO parent_grid.info = json.dumps(info_obj) # tags ORM for entity in self.hierarchy: if not entity['creates_topic']: continue for item in info_obj.get( entity['source'], [] ): parent_calc.uitopics.append( model.Topic.as_unique(session, cid=entity['cid'], topic="%s" % item) ) for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(filtered_addendum)).all(): parent_calc.children.append(child) parent_calc.siblings_count = len(parent_calc.children) for distance, members in higher_lookup.items(): for member in members: d = parent_calc.nested_depth - member.nested_depth + distance if d > 0: member.nested_depth += d member.meta_data.download_size += parent_meta.download_size # FIXME session.add(member) session.add_all([parent_calc, parent_meta, parent_grid]) session.commit() return False
[ "def", "augment", "(", "self", ",", "session", ",", "parent", ",", "addendum", ")", ":", "parent_calc", "=", "session", ".", "query", "(", "model", ".", "Calculation", ")", ".", "get", "(", "parent", ")", "if", "not", "parent_calc", "or", "not", "parent_calc", ".", "siblings_count", ":", "return", "'Dataset is erroneously selected!'", "existing_children", ",", "filtered_addendum", "=", "[", "child", ".", "checksum", "for", "child", "in", "parent_calc", ".", "children", "]", ",", "[", "]", "for", "child", "in", "addendum", ":", "if", "not", "child", "in", "existing_children", ":", "filtered_addendum", ".", "append", "(", "child", ")", "if", "not", "filtered_addendum", ":", "return", "'All these data are already present in this dataset.'", "if", "parent_calc", ".", "checksum", "in", "filtered_addendum", ":", "return", "'A dataset cannot be added into itself.'", "higher_lookup", "=", "{", "}", "more", "=", "parent_calc", ".", "parent", "distance", "=", "0", "while", "True", ":", "distance", "+=", "1", "higher", ",", "more", "=", "more", ",", "[", "]", "if", "not", "higher", ":", "break", "for", "item", "in", "higher", ":", "try", ":", "higher_lookup", "[", "distance", "]", ".", "add", "(", "item", ")", "except", "KeyError", ":", "higher_lookup", "[", "distance", "]", "=", "set", "(", "[", "item", "]", ")", "if", "item", ".", "parent", ":", "more", "+=", "item", ".", "parent", "for", "members", "in", "list", "(", "higher_lookup", ".", "values", "(", ")", ")", ":", "for", "member", "in", "members", ":", "if", "member", ".", "checksum", "in", "filtered_addendum", ":", "return", "'A parent dataset cannot be added to its children dataset.'", "parent_meta", "=", "session", ".", "query", "(", "model", ".", "Metadata", ")", ".", "get", "(", "parent", ")", "parent_grid", "=", "session", ".", "query", "(", "model", ".", "Grid", ")", ".", "get", "(", "parent", ")", "info_obj", "=", "json", ".", "loads", "(", "parent_grid", ".", "info", ")", "for", "nested_depth", ",", "grid_item", ",", "download_size", "in", "session", ".", "query", "(", "model", ".", "Calculation", ".", "nested_depth", ",", "model", ".", "Grid", ".", "info", ",", "model", ".", "Metadata", ".", "download_size", ")", ".", "filter", "(", "model", ".", "Calculation", ".", "checksum", "==", "model", ".", "Grid", ".", "checksum", ",", "model", ".", "Grid", ".", "checksum", "==", "model", ".", "Metadata", ".", "checksum", ",", "model", ".", "Calculation", ".", "checksum", ".", "in_", "(", "filtered_addendum", ")", ")", ".", "all", "(", ")", ":", "if", "nested_depth", ">=", "parent_calc", ".", "nested_depth", ":", "parent_calc", ".", "nested_depth", "=", "nested_depth", "+", "1", "grid_item", "=", "json", ".", "loads", "(", "grid_item", ")", "for", "entity", "in", "self", ".", "hierarchy", ":", "topic", "=", "grid_item", ".", "get", "(", "entity", "[", "'source'", "]", ")", "if", "not", "topic", ":", "continue", "if", "entity", "[", "'source'", "]", "==", "'standard'", ":", "topic", "=", "[", "]", "if", "not", "isinstance", "(", "topic", ",", "list", ")", ":", "topic", "=", "[", "topic", "]", "existing_term", "=", "info_obj", ".", "get", "(", "entity", "[", "'source'", "]", ",", "[", "]", ")", "if", "not", "isinstance", "(", "existing_term", ",", "list", ")", ":", "existing_term", "=", "[", "existing_term", "]", "# TODO", "info_obj", "[", "entity", "[", "'source'", "]", "]", "=", "list", "(", "set", "(", "existing_term", "+", "topic", ")", ")", "parent_meta", ".", "download_size", "+=", "download_size", "info_obj", "[", "'standard'", "]", 
"=", "info_obj", "[", "'standard'", "]", "[", "0", "]", "# TODO", "parent_grid", ".", "info", "=", "json", ".", "dumps", "(", "info_obj", ")", "# tags ORM", "for", "entity", "in", "self", ".", "hierarchy", ":", "if", "not", "entity", "[", "'creates_topic'", "]", ":", "continue", "for", "item", "in", "info_obj", ".", "get", "(", "entity", "[", "'source'", "]", ",", "[", "]", ")", ":", "parent_calc", ".", "uitopics", ".", "append", "(", "model", ".", "Topic", ".", "as_unique", "(", "session", ",", "cid", "=", "entity", "[", "'cid'", "]", ",", "topic", "=", "\"%s\"", "%", "item", ")", ")", "for", "child", "in", "session", ".", "query", "(", "model", ".", "Calculation", ")", ".", "filter", "(", "model", ".", "Calculation", ".", "checksum", ".", "in_", "(", "filtered_addendum", ")", ")", ".", "all", "(", ")", ":", "parent_calc", ".", "children", ".", "append", "(", "child", ")", "parent_calc", ".", "siblings_count", "=", "len", "(", "parent_calc", ".", "children", ")", "for", "distance", ",", "members", "in", "higher_lookup", ".", "items", "(", ")", ":", "for", "member", "in", "members", ":", "d", "=", "parent_calc", ".", "nested_depth", "-", "member", ".", "nested_depth", "+", "distance", "if", "d", ">", "0", ":", "member", ".", "nested_depth", "+=", "d", "member", ".", "meta_data", ".", "download_size", "+=", "parent_meta", ".", "download_size", "# FIXME", "session", ".", "add", "(", "member", ")", "session", ".", "add_all", "(", "[", "parent_calc", ",", "parent_meta", ",", "parent_grid", "]", ")", "session", ".", "commit", "(", ")", "return", "False" ]
Augments a DATASET with some calcs NB: this is the PUBLIC method @returns error
[ "Augments", "a", "DATASET", "with", "some", "calcs", "NB", ":", "this", "is", "the", "PUBLIC", "method" ]
python
train
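Because augment returns an error string on refusal and False on success, callers can branch on its truthiness; a sketch assuming `api` is a configured tilde API instance, `session` an open SQLAlchemy session, and the checksum strings placeholders:

error = api.augment(session, parent='aaaa1111', addendum=['bbbb2222', 'cccc3333'])
if error:
    print('augmentation refused:', error)   # e.g. 'A dataset cannot be added into itself.'
else:
    print('dataset extended; changes committed by augment() itself')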
python-diamond/Diamond
src/collectors/users/users.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/users/users.py#L38-L47
def get_default_config(self): """ Returns the default collector settings """ config = super(UsersCollector, self).get_default_config() config.update({ 'path': 'users', 'utmp': None, }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "UsersCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'users'", ",", "'utmp'", ":", "None", ",", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
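The get_default_config pattern above layers a subclass's settings over its parent's via dict.update; a hypothetical subclass (MyUsersCollector and the utmp path are illustrative, and importing UsersCollector this way assumes Diamond's collector directory is on sys.path):

from users import UsersCollector

class MyUsersCollector(UsersCollector):
    def get_default_config(self):
        # start from the parent's defaults, then override selectively
        config = super(MyUsersCollector, self).get_default_config()
        config.update({'utmp': '/var/run/utmp'})
        return config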
acsone/git-aggregator
git_aggregator/main.py
https://github.com/acsone/git-aggregator/blob/8631b0e64f9e8ce1857b21adeddb890ebd8469a6/git_aggregator/main.py#L63-L139
def get_parser(): """Return :py:class:`argparse.ArgumentParser` instance for CLI.""" main_parser = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter) main_parser.add_argument( '-c', '--config', dest='config', type=str, nargs='?', help='Pull the latest repositories from config(s)' ).completer = argcomplete.completers.FilesCompleter( allowednames=('.yaml', '.yml', '.json'), directories=False ) main_parser.add_argument( '-p', '--push', dest='do_push', action='store_true', default=False, help='Push result to target', ) main_parser.add_argument( '-d', '--dirmatch', dest='dirmatch', type=str, nargs='?', help='Pull only from the directories. Accepts fnmatch(1)' 'by commands' ) main_parser.add_argument( '--log-level', default='INFO', dest='log_level', type=_log_level_string_to_int, nargs='?', help='Set the logging output level. {0}'.format(_LOG_LEVEL_STRINGS)) main_parser.add_argument( '-e', '--expand-env', dest='expand_env', default=False, action='store_true', help='Expand environment variables in configuration file', ) main_parser.add_argument( '-f', '--force', dest='force', default=False, action='store_true', help='Force cleanup and aggregation on dirty repositories.', ) main_parser.add_argument( '-j', '--jobs', dest='jobs', default=1, type=int, help='Amount of processes to use when aggregating repos. ' 'This is useful when there are a lot of large repos. ' 'Set `1` or less to disable multiprocessing (default).', ) main_parser.add_argument( 'command', nargs='?', default='aggregate', help='aggregate (default): run the aggregation process.\n' 'show-all-prs: show GitHub pull requests in merge sections\n' ' such pull requests are indentified as having\n' ' a github.com remote and a\n' ' refs/pull/NNN/head ref in the merge section.\n' 'show-closed-prs: show pull requests that are not open anymore.\n' ) return main_parser
[ "def", "get_parser", "(", ")", ":", "main_parser", "=", "argparse", ".", "ArgumentParser", "(", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "main_parser", ".", "add_argument", "(", "'-c'", ",", "'--config'", ",", "dest", "=", "'config'", ",", "type", "=", "str", ",", "nargs", "=", "'?'", ",", "help", "=", "'Pull the latest repositories from config(s)'", ")", ".", "completer", "=", "argcomplete", ".", "completers", ".", "FilesCompleter", "(", "allowednames", "=", "(", "'.yaml'", ",", "'.yml'", ",", "'.json'", ")", ",", "directories", "=", "False", ")", "main_parser", ".", "add_argument", "(", "'-p'", ",", "'--push'", ",", "dest", "=", "'do_push'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Push result to target'", ",", ")", "main_parser", ".", "add_argument", "(", "'-d'", ",", "'--dirmatch'", ",", "dest", "=", "'dirmatch'", ",", "type", "=", "str", ",", "nargs", "=", "'?'", ",", "help", "=", "'Pull only from the directories. Accepts fnmatch(1)'", "'by commands'", ")", "main_parser", ".", "add_argument", "(", "'--log-level'", ",", "default", "=", "'INFO'", ",", "dest", "=", "'log_level'", ",", "type", "=", "_log_level_string_to_int", ",", "nargs", "=", "'?'", ",", "help", "=", "'Set the logging output level. {0}'", ".", "format", "(", "_LOG_LEVEL_STRINGS", ")", ")", "main_parser", ".", "add_argument", "(", "'-e'", ",", "'--expand-env'", ",", "dest", "=", "'expand_env'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "'Expand environment variables in configuration file'", ",", ")", "main_parser", ".", "add_argument", "(", "'-f'", ",", "'--force'", ",", "dest", "=", "'force'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "'Force cleanup and aggregation on dirty repositories.'", ",", ")", "main_parser", ".", "add_argument", "(", "'-j'", ",", "'--jobs'", ",", "dest", "=", "'jobs'", ",", "default", "=", "1", ",", "type", "=", "int", ",", "help", "=", "'Amount of processes to use when aggregating repos. '", "'This is useful when there are a lot of large repos. '", "'Set `1` or less to disable multiprocessing (default).'", ",", ")", "main_parser", ".", "add_argument", "(", "'command'", ",", "nargs", "=", "'?'", ",", "default", "=", "'aggregate'", ",", "help", "=", "'aggregate (default): run the aggregation process.\\n'", "'show-all-prs: show GitHub pull requests in merge sections\\n'", "' such pull requests are indentified as having\\n'", "' a github.com remote and a\\n'", "' refs/pull/NNN/head ref in the merge section.\\n'", "'show-closed-prs: show pull requests that are not open anymore.\\n'", ")", "return", "main_parser" ]
Return :py:class:`argparse.ArgumentParser` instance for CLI.
[ "Return", ":", "py", ":", "class", ":", "argparse", ".", "ArgumentParser", "instance", "for", "CLI", "." ]
python
train
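Since get_parser wires defaults such as command='aggregate', its behaviour is easy to probe offline; a small demonstration assuming the function is imported from git_aggregator.main (the argv values are examples only):

from git_aggregator.main import get_parser

parser = get_parser()
args = parser.parse_args(['-c', 'repos.yaml', '--expand-env', '-j', '4'])
print(args.config, args.expand_env, args.jobs, args.command)
# -> repos.yaml True 4 aggregate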
chaoss/grimoirelab-sortinghat
sortinghat/parsing/grimoirelab.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/parsing/grimoirelab.py#L189-L247
def __parse_organizations(self, stream): """Parse GrimoireLab organizations. The GrimoireLab organizations format is a YAML element stored under the "organizations" key. The next example shows the structure of the document: - organizations: Bitergia: - bitergia.com - support.bitergia.com - biterg.io LibreSoft: - libresoft.es :param json: YAML object to parse :raises InvalidFormatError: raised when the format of the YAML is not valid. """ if not stream: return yaml_file = self.__load_yml(stream) try: for element in yaml_file: name = self.__encode(element['organization']) if not name: error = "Empty organization name" msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error} raise InvalidFormatError(cause=msg) o = Organization(name=name) if 'domains' in element: if not isinstance(element['domains'], list): error = "List of elements expected for organization %s" % name msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error} raise InvalidFormatError(cause=msg) for dom in element['domains']: if dom: d = Domain(domain=dom, is_top_domain=False) o.domains.append(d) else: error = "Empty domain name for organization %s" % name msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error} raise InvalidFormatError(cause=msg) self._organizations[name] = o except KeyError as e: error = "Attribute %s not found" % e.args msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error} raise InvalidFormatError(cause=msg) except TypeError as e: error = "%s" % e.args msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error} raise InvalidFormatError(cause=msg)
[ "def", "__parse_organizations", "(", "self", ",", "stream", ")", ":", "if", "not", "stream", ":", "return", "yaml_file", "=", "self", ".", "__load_yml", "(", "stream", ")", "try", ":", "for", "element", "in", "yaml_file", ":", "name", "=", "self", ".", "__encode", "(", "element", "[", "'organization'", "]", ")", "if", "not", "name", ":", "error", "=", "\"Empty organization name\"", "msg", "=", "self", ".", "GRIMOIRELAB_INVALID_FORMAT", "%", "{", "'error'", ":", "error", "}", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "o", "=", "Organization", "(", "name", "=", "name", ")", "if", "'domains'", "in", "element", ":", "if", "not", "isinstance", "(", "element", "[", "'domains'", "]", ",", "list", ")", ":", "error", "=", "\"List of elements expected for organization %s\"", "%", "name", "msg", "=", "self", ".", "GRIMOIRELAB_INVALID_FORMAT", "%", "{", "'error'", ":", "error", "}", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "for", "dom", "in", "element", "[", "'domains'", "]", ":", "if", "dom", ":", "d", "=", "Domain", "(", "domain", "=", "dom", ",", "is_top_domain", "=", "False", ")", "o", ".", "domains", ".", "append", "(", "d", ")", "else", ":", "error", "=", "\"Empty domain name for organization %s\"", "%", "name", "msg", "=", "self", ".", "GRIMOIRELAB_INVALID_FORMAT", "%", "{", "'error'", ":", "error", "}", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "self", ".", "_organizations", "[", "name", "]", "=", "o", "except", "KeyError", "as", "e", ":", "error", "=", "\"Attribute %s not found\"", "%", "e", ".", "args", "msg", "=", "self", ".", "GRIMOIRELAB_INVALID_FORMAT", "%", "{", "'error'", ":", "error", "}", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "except", "TypeError", "as", "e", ":", "error", "=", "\"%s\"", "%", "e", ".", "args", "msg", "=", "self", ".", "GRIMOIRELAB_INVALID_FORMAT", "%", "{", "'error'", ":", "error", "}", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")" ]
Parse GrimoireLab organizations. The GrimoireLab organizations format is a YAML element stored under the "organizations" key. The next example shows the structure of the document: - organizations: Bitergia: - bitergia.com - support.bitergia.com - biterg.io LibreSoft: - libresoft.es :param json: YAML object to parse :raises InvalidFormatError: raised when the format of the YAML is not valid.
[ "Parse", "GrimoireLab", "organizations", "." ]
python
train
numenta/nupic
src/nupic/encoders/pass_through.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/pass_through.py#L111-L113
def getBucketInfo(self, buckets): """See the function description in base.py""" return [EncoderResult(value=0, scalar=0, encoding=numpy.zeros(self.n))]
[ "def", "getBucketInfo", "(", "self", ",", "buckets", ")", ":", "return", "[", "EncoderResult", "(", "value", "=", "0", ",", "scalar", "=", "0", ",", "encoding", "=", "numpy", ".", "zeros", "(", "self", ".", "n", ")", ")", "]" ]
See the function description in base.py
[ "See", "the", "function", "description", "in", "base", ".", "py" ]
python
valid
fermiPy/fermipy
fermipy/scripts/dispatch.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/scripts/dispatch.py#L18-L58
def collect_jobs(dirs, runscript, overwrite=False, max_job_age=90): """Construct a list of job dictionaries.""" jobs = [] for dirname in sorted(dirs): o = dict(cfgfile=os.path.join(dirname, 'config.yaml'), logfile=os.path.join( dirname, os.path.splitext(runscript)[0] + '.log'), runscript=os.path.join(dirname, runscript)) if not os.path.isfile(o['cfgfile']): continue if not os.path.isfile(o['runscript']): continue if not os.path.isfile(o['logfile']): jobs.append(o) continue age = file_age_in_seconds(o['logfile']) / 60. job_status = check_log(o['logfile']) print(dirname, job_status, age) if job_status is False or overwrite: jobs.append(o) elif job_status == 'Exited': print("Job Exited. Resending command.") jobs.append(o) elif job_status == 'None' and age > max_job_age: print( "Job did not exit, but no activity on log file for > %.2f min. Resending command." % max_job_age) jobs.append(o) # elif job_status is True: # print("Job Completed. Resending command.") # jobs.append(o) return jobs
[ "def", "collect_jobs", "(", "dirs", ",", "runscript", ",", "overwrite", "=", "False", ",", "max_job_age", "=", "90", ")", ":", "jobs", "=", "[", "]", "for", "dirname", "in", "sorted", "(", "dirs", ")", ":", "o", "=", "dict", "(", "cfgfile", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "'config.yaml'", ")", ",", "logfile", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "os", ".", "path", ".", "splitext", "(", "runscript", ")", "[", "0", "]", "+", "'.log'", ")", ",", "runscript", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "runscript", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "o", "[", "'cfgfile'", "]", ")", ":", "continue", "if", "not", "os", ".", "path", ".", "isfile", "(", "o", "[", "'runscript'", "]", ")", ":", "continue", "if", "not", "os", ".", "path", ".", "isfile", "(", "o", "[", "'logfile'", "]", ")", ":", "jobs", ".", "append", "(", "o", ")", "continue", "age", "=", "file_age_in_seconds", "(", "o", "[", "'logfile'", "]", ")", "/", "60.", "job_status", "=", "check_log", "(", "o", "[", "'logfile'", "]", ")", "print", "(", "dirname", ",", "job_status", ",", "age", ")", "if", "job_status", "is", "False", "or", "overwrite", ":", "jobs", ".", "append", "(", "o", ")", "elif", "job_status", "==", "'Exited'", ":", "print", "(", "\"Job Exited. Resending command.\"", ")", "jobs", ".", "append", "(", "o", ")", "elif", "job_status", "==", "'None'", "and", "age", ">", "max_job_age", ":", "print", "(", "\"Job did not exit, but no activity on log file for > %.2f min. Resending command.\"", "%", "max_job_age", ")", "jobs", ".", "append", "(", "o", ")", "# elif job_status is True:", "# print(\"Job Completed. Resending command.\")", "# jobs.append(o)", "return", "jobs" ]
Construct a list of job dictionaries.
[ "Construct", "a", "list", "of", "job", "dictionaries", "." ]
python
train
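A hypothetical driver for collect_jobs above; the directory layout and runscript name are assumptions, with glob used only to build the candidate list:

import glob

dirs = glob.glob('analysis/*/')
jobs = collect_jobs(dirs, 'run.py', overwrite=False, max_job_age=90)
for job in jobs:
    print('resubmit:', job['runscript'], '(log: %s)' % job['logfile'])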
Parquery/icontract
icontract/_checkers.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_checkers.py#L349-L357
def _find_self(param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any: """Find the instance of ``self`` in the arguments.""" instance_i = param_names.index("self") if instance_i < len(args): instance = args[instance_i] else: instance = kwargs["self"] return instance
[ "def", "_find_self", "(", "param_names", ":", "List", "[", "str", "]", ",", "args", ":", "Tuple", "[", "Any", ",", "...", "]", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Any", ":", "instance_i", "=", "param_names", ".", "index", "(", "\"self\"", ")", "if", "instance_i", "<", "len", "(", "args", ")", ":", "instance", "=", "args", "[", "instance_i", "]", "else", ":", "instance", "=", "kwargs", "[", "\"self\"", "]", "return", "instance" ]
Find the instance of ``self`` in the arguments.
[ "Find", "the", "instance", "of", "self", "in", "the", "arguments", "." ]
python
train
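_find_self handles both positional and keyword call shapes, which a self-contained check makes concrete (Foo and the argument tuples are illustrative):

from icontract._checkers import _find_self

class Foo:
    def method(self, x):
        pass

foo = Foo()
param_names = ['self', 'x']
assert _find_self(param_names, args=(foo, 1), kwargs={}) is foo               # positional
assert _find_self(param_names, args=(), kwargs={'self': foo, 'x': 1}) is foo  # keyword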
xym-tool/xym
xym/xym.py
https://github.com/xym-tool/xym/blob/48984e6bd41595df8f383e6dc7e6eedfecc96898/xym/xym.py#L280-L294
def post_process_model(self, input_model, add_line_refs): """ This function defines the order and execution logic for actions that are performed in the model post-processing pipeline. :param input_model: The YANG model to be processed in the pipeline :param add_line_refs: Flag that controls whether line number references should be added to the model. :return: List of strings that constitute the final YANG model to be written to its module file. """ intermediate_model = self.remove_leading_spaces(input_model) intermediate_model = self.remove_extra_empty_lines(intermediate_model) if add_line_refs: intermediate_model = self.add_line_references(intermediate_model) return finalize_model(intermediate_model)
[ "def", "post_process_model", "(", "self", ",", "input_model", ",", "add_line_refs", ")", ":", "intermediate_model", "=", "self", ".", "remove_leading_spaces", "(", "input_model", ")", "intermediate_model", "=", "self", ".", "remove_extra_empty_lines", "(", "intermediate_model", ")", "if", "add_line_refs", ":", "intermediate_model", "=", "self", ".", "add_line_references", "(", "intermediate_model", ")", "return", "finalize_model", "(", "intermediate_model", ")" ]
This function defines the order and execution logic for actions that are performed in the model post-processing pipeline. :param input_model: The YANG model to be processed in the pipeline :param add_line_refs: Flag that controls whether line number references should be added to the model. :return: List of strings that constitute the final YANG model to be written to its module file.
[ "This", "function", "defines", "the", "order", "and", "execution", "logic", "for", "actions", "that", "are", "performed", "in", "the", "model", "post", "-", "processing", "pipeline", ".", ":", "param", "input_model", ":", "The", "YANG", "model", "to", "be", "processed", "in", "the", "pipeline", ":", "param", "add_line_refs", ":", "Flag", "that", "controls", "whether", "line", "number", "references", "should", "be", "added", "to", "the", "model", ".", ":", "return", ":", "List", "of", "strings", "that", "constitute", "the", "final", "YANG", "model", "to", "be", "written", "to", "its", "module", "file", "." ]
python
train
python-openxml/python-docx
docx/oxml/table.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/table.py#L39-L51
def tc_at_grid_col(self, idx): """ The ``<w:tc>`` element appearing at grid column *idx*. Raises |ValueError| if no ``w:tc`` element begins at that grid column. """ grid_col = 0 for tc in self.tc_lst: if grid_col == idx: return tc grid_col += tc.grid_span if grid_col > idx: raise ValueError('no cell on grid column %d' % idx) raise ValueError('index out of bounds')
[ "def", "tc_at_grid_col", "(", "self", ",", "idx", ")", ":", "grid_col", "=", "0", "for", "tc", "in", "self", ".", "tc_lst", ":", "if", "grid_col", "==", "idx", ":", "return", "tc", "grid_col", "+=", "tc", ".", "grid_span", "if", "grid_col", ">", "idx", ":", "raise", "ValueError", "(", "'no cell on grid column %d'", "%", "idx", ")", "raise", "ValueError", "(", "'index out of bounds'", ")" ]
The ``<w:tc>`` element appearing at grid column *idx*. Raises |ValueError| if no ``w:tc`` element begins at that grid column.
[ "The", "<w", ":", "tc", ">", "element", "appearing", "at", "grid", "column", "*", "idx", "*", ".", "Raises", "|ValueError|", "if", "no", "w", ":", "tc", "element", "begins", "at", "that", "grid", "column", "." ]
python
train
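The grid arithmetic above only returns cells that begin at the requested grid column, which matters once gridSpan merges exist; a sketch against python-docx (note that _tr and tc_at_grid_col are private API, so this is illustrative rather than supported usage):

from docx import Document

doc = Document()
table = doc.add_table(rows=1, cols=3)
a, b, _ = table.rows[0].cells
a.merge(b)                    # first cell now spans grid columns 0-1
tr = table.rows[0]._tr
tc0 = tr.tc_at_grid_col(0)    # the merged cell, which begins at column 0
tc2 = tr.tc_at_grid_col(2)    # the third column's cell
try:
    tr.tc_at_grid_col(1)      # no cell *begins* at column 1 inside the span
except ValueError as exc:
    print(exc)                # 'no cell on grid column 1'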
materialsproject/pymatgen
pymatgen/io/abinit/db.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/db.py#L14-L44
def mongo_getattr(rec, key): """ Get value from dict using MongoDB dot-separated path semantics. For example: >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b') == 1 >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'x') == 2 >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None :param rec: mongodb document :param key: path to mongo value :param default: default to return if not found :return: value, potentially nested, or default if not found :raise: AttributeError, if record is not a dict or key is not found. """ if not isinstance(rec, collections.Mapping): raise AttributeError('input record must act like a dict') if not rec: raise AttributeError('Empty dict') if not '.' in key: return rec.get(key) for key_part in key.split('.'): if not isinstance(rec, collections.Mapping): raise AttributeError('not a mapping for rec_part %s' % key_part) if not key_part in rec: raise AttributeError('key %s not in dict %s' % key) rec = rec[key_part] return rec
[ "def", "mongo_getattr", "(", "rec", ",", "key", ")", ":", "if", "not", "isinstance", "(", "rec", ",", "collections", ".", "Mapping", ")", ":", "raise", "AttributeError", "(", "'input record must act like a dict'", ")", "if", "not", "rec", ":", "raise", "AttributeError", "(", "'Empty dict'", ")", "if", "not", "'.'", "in", "key", ":", "return", "rec", ".", "get", "(", "key", ")", "for", "key_part", "in", "key", ".", "split", "(", "'.'", ")", ":", "if", "not", "isinstance", "(", "rec", ",", "collections", ".", "Mapping", ")", ":", "raise", "AttributeError", "(", "'not a mapping for rec_part %s'", "%", "key_part", ")", "if", "not", "key_part", "in", "rec", ":", "raise", "AttributeError", "(", "'key %s not in dict %s'", "%", "key", ")", "rec", "=", "rec", "[", "key_part", "]", "return", "rec" ]
Get value from dict using MongoDB dot-separated path semantics. For example: >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b') == 1 >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'x') == 2 >>> assert mongo_getattr({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None :param rec: mongodb document :param key: path to mongo value :param default: default to return if not found :return: value, potentially nested, or default if not found :raise: AttributeError, if record is not a dict or key is not found.
[ "Get", "value", "from", "dict", "using", "MongoDB", "dot", "-", "separated", "path", "semantics", ".", "For", "example", ":" ]
python
train
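Two quirks are worth flagging in the record above: the doctest promises None for the missing path 'a.b.c' while the body raises AttributeError, and the final raise formats two %s placeholders with a single argument. A standalone corrected variant that matches the doctest (a sketch, not pymatgen's implementation):

import collections.abc

def mongo_getattr_fixed(rec, key, default=None):
    """Resolve a MongoDB dot-separated path, returning `default` when absent."""
    if not isinstance(rec, collections.abc.Mapping):
        raise AttributeError('input record must act like a dict')
    for key_part in key.split('.'):
        if not isinstance(rec, collections.abc.Mapping) or key_part not in rec:
            return default
        rec = rec[key_part]
    return rec

assert mongo_getattr_fixed({'a': {'b': 1}, 'x': 2}, 'a.b') == 1
assert mongo_getattr_fixed({'a': {'b': 1}, 'x': 2}, 'x') == 2
assert mongo_getattr_fixed({'a': {'b': 1}, 'x': 2}, 'a.b.c') is None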
Microsoft/LightGBM
python-package/lightgbm/basic.py
https://github.com/Microsoft/LightGBM/blob/8d2ec69f4f685b0ab1c4624d59ee2d3287bb3147/python-package/lightgbm/basic.py#L1958-L1970
def num_model_per_iteration(self): """Get number of models per iteration. Returns ------- model_per_iter : int The number of models per iteration. """ model_per_iter = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterNumModelPerIteration( self.handle, ctypes.byref(model_per_iter))) return model_per_iter.value
[ "def", "num_model_per_iteration", "(", "self", ")", ":", "model_per_iter", "=", "ctypes", ".", "c_int", "(", "0", ")", "_safe_call", "(", "_LIB", ".", "LGBM_BoosterNumModelPerIteration", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "model_per_iter", ")", ")", ")", "return", "model_per_iter", ".", "value" ]
Get number of models per iteration. Returns ------- model_per_iter : int The number of models per iteration.
[ "Get", "number", "of", "models", "per", "iteration", "." ]
python
train
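num_model_per_iteration is most visible with a multiclass objective, where LightGBM builds one tree per class each round; a hedged sketch on tiny random data:

import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.randint(0, 3, 100)
params = {'objective': 'multiclass', 'num_class': 3, 'verbose': -1}
booster = lgb.train(params, lgb.Dataset(X, y), num_boost_round=5)
print(booster.num_model_per_iteration())   # 3: one model per class per iteration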
mabuchilab/QNET
docs/_extensions/inheritance_diagram.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/docs/_extensions/inheritance_diagram.py#L492-L505
def texinfo_visit_inheritance_diagram(self, node): # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for Texinfo. This will insert a PNG. """ graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash dotcode = graph.generate_dot(name, env=self.builder.env, graph_attrs={'size': '"6.0,6.0"'}) render_dot_texinfo(self, node, dotcode, {}, 'inheritance') raise nodes.SkipNode
[ "def", "texinfo_visit_inheritance_diagram", "(", "self", ",", "node", ")", ":", "# type: (nodes.NodeVisitor, inheritance_diagram) -> None", "graph", "=", "node", "[", "'graph'", "]", "graph_hash", "=", "get_graph_hash", "(", "node", ")", "name", "=", "'inheritance%s'", "%", "graph_hash", "dotcode", "=", "graph", ".", "generate_dot", "(", "name", ",", "env", "=", "self", ".", "builder", ".", "env", ",", "graph_attrs", "=", "{", "'size'", ":", "'\"6.0,6.0\"'", "}", ")", "render_dot_texinfo", "(", "self", ",", "node", ",", "dotcode", ",", "{", "}", ",", "'inheritance'", ")", "raise", "nodes", ".", "SkipNode" ]
Output the graph for Texinfo. This will insert a PNG.
[ "Output", "the", "graph", "for", "Texinfo", ".", "This", "will", "insert", "a", "PNG", "." ]
python
train
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1390-L1396
def __set_values(self, values): """ Sets values in this cell range from an iterable of iterables. """ # Tuple of tuples is required array = tuple(tuple(self._clean_value(col) for col in row) for row in values) self._get_target().setDataArray(array)
[ "def", "__set_values", "(", "self", ",", "values", ")", ":", "# Tuple of tuples is required", "array", "=", "tuple", "(", "tuple", "(", "self", ".", "_clean_value", "(", "col", ")", "for", "col", "in", "row", ")", "for", "row", "in", "values", ")", "self", ".", "_get_target", "(", ")", ".", "setDataArray", "(", "array", ")" ]
Sets values in this cell range from an iterable of iterables.
[ "Sets", "values", "in", "this", "cell", "range", "from", "an", "iterable", "of", "iterables", "." ]
python
train
ejeschke/ginga
ginga/util/heaptimer.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/heaptimer.py#L127-L156
def add(self, timer): """Add a timer to the heap""" with self.lock: if self.heap: top = self.heap[0] else: top = None assert timer not in self.timers self.timers[timer] = timer heapq.heappush(self.heap, timer) # Check to see if we need to reschedule our main timer. # Only do this if we aren't expiring in the other thread. if self.heap[0] != top and not self.expiring: if self.rtimer is not None: self.rtimer.cancel() # self.rtimer.join() self.rtimer = None # If we are expiring timers right now then that will reschedule # as appropriate otherwise let's start a timer if we don't have # one if self.rtimer is None and not self.expiring: top = self.heap[0] ival = top.expire - time.time() if ival < 0: ival = 0 self.rtimer = threading.Timer(ival, self.expire) self.rtimer.start()
[ "def", "add", "(", "self", ",", "timer", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "heap", ":", "top", "=", "self", ".", "heap", "[", "0", "]", "else", ":", "top", "=", "None", "assert", "timer", "not", "in", "self", ".", "timers", "self", ".", "timers", "[", "timer", "]", "=", "timer", "heapq", ".", "heappush", "(", "self", ".", "heap", ",", "timer", ")", "# Check to see if we need to reschedule our main timer.", "# Only do this if we aren't expiring in the other thread.", "if", "self", ".", "heap", "[", "0", "]", "!=", "top", "and", "not", "self", ".", "expiring", ":", "if", "self", ".", "rtimer", "is", "not", "None", ":", "self", ".", "rtimer", ".", "cancel", "(", ")", "# self.rtimer.join()", "self", ".", "rtimer", "=", "None", "# If we are expiring timers right now then that will reschedule", "# as appropriate otherwise let's start a timer if we don't have", "# one", "if", "self", ".", "rtimer", "is", "None", "and", "not", "self", ".", "expiring", ":", "top", "=", "self", ".", "heap", "[", "0", "]", "ival", "=", "top", ".", "expire", "-", "time", ".", "time", "(", ")", "if", "ival", "<", "0", ":", "ival", "=", "0", "self", ".", "rtimer", "=", "threading", ".", "Timer", "(", "ival", ",", "self", ".", "expire", ")", "self", ".", "rtimer", ".", "start", "(", ")" ]
Add a timer to the heap
[ "Add", "a", "timer", "to", "the", "heap" ]
python
train
roboogle/gtkmvc3
gtkmvco/gtkmvc3/adapters/basic.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/adapters/basic.py#L433-L447
def _resolve_to_func(self, what): """This method resolves whatever is passed: a string, a bound or unbound method, a function, to make it a function. This makes internal handling of setter and getter uniform and easier.""" if isinstance(what, str): what = getattr(Adapter._get_property(self), what) # makes it an unbounded function if needed if type(what) == types.MethodType: what = what.__func__ if not type(what) == types.FunctionType: raise TypeError("Expected a method name, a method or a function") return what
[ "def", "_resolve_to_func", "(", "self", ",", "what", ")", ":", "if", "isinstance", "(", "what", ",", "str", ")", ":", "what", "=", "getattr", "(", "Adapter", ".", "_get_property", "(", "self", ")", ",", "what", ")", "# makes it an unbounded function if needed", "if", "type", "(", "what", ")", "==", "types", ".", "MethodType", ":", "what", "=", "what", ".", "__func__", "if", "not", "type", "(", "what", ")", "==", "types", ".", "FunctionType", ":", "raise", "TypeError", "(", "\"Expected a method name, a method or a function\"", ")", "return", "what" ]
This method resolves whatever is passed: a string, a bound or unbound method, a function, to make it a function. This makes internal handling of setter and getter uniform and easier.
[ "This", "method", "resolves", "whatever", "is", "passed", ":", "a", "string", "a", "bound", "or", "unbound", "method", "a", "function", "to", "make", "it", "a", "function", ".", "This", "makes", "internal", "handling", "of", "setter", "and", "getter", "uniform", "and", "easier", "." ]
python
train
Capitains/flask-capitains-nemo
flask_nemo/__init__.py
https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/__init__.py#L761-L782
def view_maker(self, name, instance=None): """ Create a view :param name: Name of the route function to use for the view. :type name: str :return: Route function which makes use of Nemo context (such as menu informations) :rtype: function """ if instance is None: instance = self sig = "lang" in [ parameter.name for parameter in inspect.signature(getattr(instance, name)).parameters.values() ] def route(**kwargs): if sig and "lang" not in kwargs: kwargs["lang"] = self.get_locale() if "semantic" in kwargs: del kwargs["semantic"] return self.route(getattr(instance, name), **kwargs) return route
[ "def", "view_maker", "(", "self", ",", "name", ",", "instance", "=", "None", ")", ":", "if", "instance", "is", "None", ":", "instance", "=", "self", "sig", "=", "\"lang\"", "in", "[", "parameter", ".", "name", "for", "parameter", "in", "inspect", ".", "signature", "(", "getattr", "(", "instance", ",", "name", ")", ")", ".", "parameters", ".", "values", "(", ")", "]", "def", "route", "(", "*", "*", "kwargs", ")", ":", "if", "sig", "and", "\"lang\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"lang\"", "]", "=", "self", ".", "get_locale", "(", ")", "if", "\"semantic\"", "in", "kwargs", ":", "del", "kwargs", "[", "\"semantic\"", "]", "return", "self", ".", "route", "(", "getattr", "(", "instance", ",", "name", ")", ",", "*", "*", "kwargs", ")", "return", "route" ]
Create a view :param name: Name of the route function to use for the view. :type name: str :return: Route function which makes use of Nemo context (such as menu informations) :rtype: function
[ "Create", "a", "view" ]
python
valid
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L994-L1019
def CancelarAnticipo(self, pto_emision=None, nro_orden=None, coe=None, pdf=None): "Cancelar Anticipo de una Liquidación Primaria Electrónica de Granos" # llamo al webservice: ret = self.client.lpgCancelarAnticipo( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, ptoEmision=pto_emision, nroOrden=nro_orden, pdf="S" if pdf else "N", ) # analizo la respusta ret = ret['liqConsReturn'] self.__analizar_errores(ret) if 'liquidacion' in ret: aut = ret['autorizacion'] liq = ret['liquidacion'] self.AnalizarLiquidacion(aut, liq) # guardo el PDF si se indico archivo y vino en la respuesta: if pdf and 'pdf' in ret: open(pdf, "wb").write(ret['pdf']) return True
[ "def", "CancelarAnticipo", "(", "self", ",", "pto_emision", "=", "None", ",", "nro_orden", "=", "None", ",", "coe", "=", "None", ",", "pdf", "=", "None", ")", ":", "# llamo al webservice:", "ret", "=", "self", ".", "client", ".", "lpgCancelarAnticipo", "(", "auth", "=", "{", "'token'", ":", "self", ".", "Token", ",", "'sign'", ":", "self", ".", "Sign", ",", "'cuit'", ":", "self", ".", "Cuit", ",", "}", ",", "coe", "=", "coe", ",", "ptoEmision", "=", "pto_emision", ",", "nroOrden", "=", "nro_orden", ",", "pdf", "=", "\"S\"", "if", "pdf", "else", "\"N\"", ",", ")", "# analizo la respusta", "ret", "=", "ret", "[", "'liqConsReturn'", "]", "self", ".", "__analizar_errores", "(", "ret", ")", "if", "'liquidacion'", "in", "ret", ":", "aut", "=", "ret", "[", "'autorizacion'", "]", "liq", "=", "ret", "[", "'liquidacion'", "]", "self", ".", "AnalizarLiquidacion", "(", "aut", ",", "liq", ")", "# guardo el PDF si se indico archivo y vino en la respuesta:", "if", "pdf", "and", "'pdf'", "in", "ret", ":", "open", "(", "pdf", ",", "\"wb\"", ")", ".", "write", "(", "ret", "[", "'pdf'", "]", ")", "return", "True" ]
Cancelar Anticipo de una Liquidación Primaria Electrónica de Granos
[ "Cancelar", "Anticipo", "de", "una", "Liquidación", "Primaria", "Electrónica", "de", "Granos" ]
python
train
hammerlab/stanity
stanity/psisloo.py
https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/psisloo.py#L65-L74
def plot(self): """ Graphical summary of pointwise pareto-k importance-sampling indices Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis) """ seaborn.pointplot( y = self.pointwise.pareto_k, x = self.pointwise.index, join = False)
[ "def", "plot", "(", "self", ")", ":", "seaborn", ".", "pointplot", "(", "y", "=", "self", ".", "pointwise", ".", "pareto_k", ",", "x", "=", "self", ".", "pointwise", ".", "index", ",", "join", "=", "False", ")" ]
Graphical summary of pointwise pareto-k importance-sampling indices Pareto-k tail indices are plotted (on the y axis) for each observation unit (on the x axis)
[ "Graphical", "summary", "of", "pointwise", "pareto", "-", "k", "importance", "-", "sampling", "indices" ]
python
train
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L359-L373
def editMeta(self, title=None, description=None): """Set metadata for photo. (flickr.photos.setMeta)""" method = 'flickr.photosets.editMeta' if title is None: title = self.title if description is None: description = self.description _dopost(method, auth=True, title=title, \ description=description, photoset_id=self.id) self.__title = title self.__description = description return True
[ "def", "editMeta", "(", "self", ",", "title", "=", "None", ",", "description", "=", "None", ")", ":", "method", "=", "'flickr.photosets.editMeta'", "if", "title", "is", "None", ":", "title", "=", "self", ".", "title", "if", "description", "is", "None", ":", "description", "=", "self", ".", "description", "_dopost", "(", "method", ",", "auth", "=", "True", ",", "title", "=", "title", ",", "description", "=", "description", ",", "photoset_id", "=", "self", ".", "id", ")", "self", ".", "__title", "=", "title", "self", ".", "__description", "=", "description", "return", "True" ]
Set metadata for photo. (flickr.photos.setMeta)
[ "Set", "metadata", "for", "photo", ".", "(", "flickr", ".", "photos", ".", "setMeta", ")" ]
python
train
h2oai/h2o-3
h2o-py/h2o/frame.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L1073-L1110
def set_name(self, col=None, name=None): """ Set a new name for a column. :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames :param name: the new name of the column """ assert_is_type(col, None, int, str) assert_is_type(name, str) ncols = self.ncols col_index = None if is_type(col, int): if not(-ncols <= col < ncols): raise H2OValueError("Index %d is out of bounds for a frame with %d columns" % (col, ncols)) col_index = (col + ncols) % ncols # handle negative indices elif is_type(col, str): if col not in self.names: raise H2OValueError("Column %s doesn't exist in the frame." % col) col_index = self.names.index(col) # lookup the name else: assert col is None if ncols != 1: raise H2OValueError("The frame has %d columns; please specify which one to rename" % ncols) col_index = 0 if name != self.names[col_index] and name in self.types: raise H2OValueError("Column '%s' already exists in the frame" % name) oldname = self.names[col_index] old_cache = self._ex._cache self._ex = ExprNode("colnames=", self, col_index, name) # Update-in-place, but still lazy self._ex._cache.fill_from(old_cache) if self.names is None: self._frame()._ex._cache.fill() else: self._ex._cache._names = self.names[:col_index] + [name] + self.names[col_index + 1:] self._ex._cache._types[name] = self._ex._cache._types.pop(oldname) return
[ "def", "set_name", "(", "self", ",", "col", "=", "None", ",", "name", "=", "None", ")", ":", "assert_is_type", "(", "col", ",", "None", ",", "int", ",", "str", ")", "assert_is_type", "(", "name", ",", "str", ")", "ncols", "=", "self", ".", "ncols", "col_index", "=", "None", "if", "is_type", "(", "col", ",", "int", ")", ":", "if", "not", "(", "-", "ncols", "<=", "col", "<", "ncols", ")", ":", "raise", "H2OValueError", "(", "\"Index %d is out of bounds for a frame with %d columns\"", "%", "(", "col", ",", "ncols", ")", ")", "col_index", "=", "(", "col", "+", "ncols", ")", "%", "ncols", "# handle negative indices", "elif", "is_type", "(", "col", ",", "str", ")", ":", "if", "col", "not", "in", "self", ".", "names", ":", "raise", "H2OValueError", "(", "\"Column %s doesn't exist in the frame.\"", "%", "col", ")", "col_index", "=", "self", ".", "names", ".", "index", "(", "col", ")", "# lookup the name", "else", ":", "assert", "col", "is", "None", "if", "ncols", "!=", "1", ":", "raise", "H2OValueError", "(", "\"The frame has %d columns; please specify which one to rename\"", "%", "ncols", ")", "col_index", "=", "0", "if", "name", "!=", "self", ".", "names", "[", "col_index", "]", "and", "name", "in", "self", ".", "types", ":", "raise", "H2OValueError", "(", "\"Column '%s' already exists in the frame\"", "%", "name", ")", "oldname", "=", "self", ".", "names", "[", "col_index", "]", "old_cache", "=", "self", ".", "_ex", ".", "_cache", "self", ".", "_ex", "=", "ExprNode", "(", "\"colnames=\"", ",", "self", ",", "col_index", ",", "name", ")", "# Update-in-place, but still lazy", "self", ".", "_ex", ".", "_cache", ".", "fill_from", "(", "old_cache", ")", "if", "self", ".", "names", "is", "None", ":", "self", ".", "_frame", "(", ")", ".", "_ex", ".", "_cache", ".", "fill", "(", ")", "else", ":", "self", ".", "_ex", ".", "_cache", ".", "_names", "=", "self", ".", "names", "[", ":", "col_index", "]", "+", "[", "name", "]", "+", "self", ".", "names", "[", "col_index", "+", "1", ":", "]", "self", ".", "_ex", ".", "_cache", ".", "_types", "[", "name", "]", "=", "self", ".", "_ex", ".", "_cache", ".", "_types", ".", "pop", "(", "oldname", ")", "return" ]
Set a new name for a column. :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames :param name: the new name of the column
[ "Set", "a", "new", "name", "for", "a", "column", "." ]
python
test
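set_name accepts either a column index or the current column name; a hypothetical session (requires a reachable H2O cluster, and the dict-constructed frame's column order is assumed stable here):

import h2o

h2o.init()
fr = h2o.H2OFrame({'a': [1, 2], 'b': [3, 4]})
fr.set_name(col='a', name='alpha')   # rename by current name
fr.set_name(col=1, name='beta')      # rename by index
print(fr.names)                      # ['alpha', 'beta']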
mardiros/pyshop
pyshop/helpers/pypi.py
https://github.com/mardiros/pyshop/blob/b42510b9c3fa16e0e5710457401ac38fea5bf7a0/pyshop/helpers/pypi.py#L104-L111
def set_proxy(proxy_url, transport_proxy=None): """Create the proxy to PyPI XML-RPC Server""" global proxy, PYPI_URL PYPI_URL = proxy_url proxy = xmlrpc.ServerProxy( proxy_url, transport=RequestsTransport(proxy_url.startswith('https://')), allow_none=True)
[ "def", "set_proxy", "(", "proxy_url", ",", "transport_proxy", "=", "None", ")", ":", "global", "proxy", ",", "PYPI_URL", "PYPI_URL", "=", "proxy_url", "proxy", "=", "xmlrpc", ".", "ServerProxy", "(", "proxy_url", ",", "transport", "=", "RequestsTransport", "(", "proxy_url", ".", "startswith", "(", "'https://'", ")", ")", ",", "allow_none", "=", "True", ")" ]
Create the proxy to PyPI XML-RPC Server
[ "Create", "the", "proxy", "to", "PyPI", "XML", "-", "RPC", "Server" ]
python
train
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1815-L1817
def getTarget(self): """ Returns the location of the match click target (center by default, but may be offset) """ return self.getCenter().offset(self._target.x, self._target.y)
[ "def", "getTarget", "(", "self", ")", ":", "return", "self", ".", "getCenter", "(", ")", ".", "offset", "(", "self", ".", "_target", ".", "x", ",", "self", ".", "_target", ".", "y", ")" ]
Returns the location of the match click target (center by default, but may be offset)
[ "Returns", "the", "location", "of", "the", "match", "click", "target", "(", "center", "by", "default", "but", "may", "be", "offset", ")" ]
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/interconnect.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/interconnect.py#L1038-L1076
def send(self, message_type, data, connection_id, callback=None, one_way=False): """ Send a message of message_type :param connection_id: the identity for the connection to send to :param message_type: validator_pb2.Message.* enum value :param data: bytes serialized protobuf :return: future.Future """ if connection_id not in self._connections: raise ValueError("Unknown connection id: {}".format(connection_id)) connection_info = self._connections.get(connection_id) if connection_info.connection_type == \ ConnectionType.ZMQ_IDENTITY: message = validator_pb2.Message( correlation_id=_generate_id(), content=data, message_type=message_type) timer_tag = get_enum_name(message.message_type) timer_ctx = self._get_send_response_timer(timer_tag).time() fut = future.Future( message.correlation_id, message.content, callback, timeout=self._connection_timeout, timer_ctx=timer_ctx) if not one_way: self._futures.put(fut) self._send_receive_thread.send_message(msg=message, connection_id=connection_id) return fut return connection_info.connection.send( message_type, data, callback=callback, one_way=one_way)
[ "def", "send", "(", "self", ",", "message_type", ",", "data", ",", "connection_id", ",", "callback", "=", "None", ",", "one_way", "=", "False", ")", ":", "if", "connection_id", "not", "in", "self", ".", "_connections", ":", "raise", "ValueError", "(", "\"Unknown connection id: {}\"", ".", "format", "(", "connection_id", ")", ")", "connection_info", "=", "self", ".", "_connections", ".", "get", "(", "connection_id", ")", "if", "connection_info", ".", "connection_type", "==", "ConnectionType", ".", "ZMQ_IDENTITY", ":", "message", "=", "validator_pb2", ".", "Message", "(", "correlation_id", "=", "_generate_id", "(", ")", ",", "content", "=", "data", ",", "message_type", "=", "message_type", ")", "timer_tag", "=", "get_enum_name", "(", "message", ".", "message_type", ")", "timer_ctx", "=", "self", ".", "_get_send_response_timer", "(", "timer_tag", ")", ".", "time", "(", ")", "fut", "=", "future", ".", "Future", "(", "message", ".", "correlation_id", ",", "message", ".", "content", ",", "callback", ",", "timeout", "=", "self", ".", "_connection_timeout", ",", "timer_ctx", "=", "timer_ctx", ")", "if", "not", "one_way", ":", "self", ".", "_futures", ".", "put", "(", "fut", ")", "self", ".", "_send_receive_thread", ".", "send_message", "(", "msg", "=", "message", ",", "connection_id", "=", "connection_id", ")", "return", "fut", "return", "connection_info", ".", "connection", ".", "send", "(", "message_type", ",", "data", ",", "callback", "=", "callback", ",", "one_way", "=", "one_way", ")" ]
Send a message of message_type :param connection_id: the identity for the connection to send to :param message_type: validator_pb2.Message.* enum value :param data: bytes serialized protobuf :return: future.Future
[ "Send", "a", "message", "of", "message_type", ":", "param", "connection_id", ":", "the", "identity", "for", "the", "connection", "to", "send", "to", ":", "param", "message_type", ":", "validator_pb2", ".", "Message", ".", "*", "enum", "value", ":", "param", "data", ":", "bytes", "serialized", "protobuf", ":", "return", ":", "future", ".", "Future" ]
python
train
spookylukey/django-paypal
paypal/pro/helpers.py
https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/helpers.py#L126-L152
def doDirectPayment(self, params): """Call PayPal DoDirectPayment method.""" defaults = {"method": "DoDirectPayment", "paymentaction": "Sale"} required = ["creditcardtype", "acct", "expdate", "cvv2", "ipaddress", "firstname", "lastname", "street", "city", "state", "countrycode", "zip", "amt", ] nvp_obj = self._fetch(params, required, defaults) if nvp_obj.flag: raise PayPalFailure(nvp_obj.flag_info, nvp=nvp_obj) # @@@ Could check cvv2match / avscode are both 'X' or '0' # qd = django.http.QueryDict(nvp_obj.response) # if qd.get('cvv2match') not in ['X', '0']: # nvp_obj.set_flag("Invalid cvv2match: %s" % qd.get('cvv2match') # if qd.get('avscode') not in ['X', '0']: # nvp_obj.set_flag("Invalid avscode: %s" % qd.get('avscode') return nvp_obj
[ "def", "doDirectPayment", "(", "self", ",", "params", ")", ":", "defaults", "=", "{", "\"method\"", ":", "\"DoDirectPayment\"", ",", "\"paymentaction\"", ":", "\"Sale\"", "}", "required", "=", "[", "\"creditcardtype\"", ",", "\"acct\"", ",", "\"expdate\"", ",", "\"cvv2\"", ",", "\"ipaddress\"", ",", "\"firstname\"", ",", "\"lastname\"", ",", "\"street\"", ",", "\"city\"", ",", "\"state\"", ",", "\"countrycode\"", ",", "\"zip\"", ",", "\"amt\"", ",", "]", "nvp_obj", "=", "self", ".", "_fetch", "(", "params", ",", "required", ",", "defaults", ")", "if", "nvp_obj", ".", "flag", ":", "raise", "PayPalFailure", "(", "nvp_obj", ".", "flag_info", ",", "nvp", "=", "nvp_obj", ")", "# @@@ Could check cvv2match / avscode are both 'X' or '0'", "# qd = django.http.QueryDict(nvp_obj.response)", "# if qd.get('cvv2match') not in ['X', '0']:", "# nvp_obj.set_flag(\"Invalid cvv2match: %s\" % qd.get('cvv2match')", "# if qd.get('avscode') not in ['X', '0']:", "# nvp_obj.set_flag(\"Invalid avscode: %s\" % qd.get('avscode')", "return", "nvp_obj" ]
Call PayPal DoDirectPayment method.
[ "Call", "PayPal", "DoDirectPayment", "method", "." ]
python
train
kgaughan/dbkit
dbkit.py
https://github.com/kgaughan/dbkit/blob/2aef6376a60965d7820c91692046f4bcf7d43640/dbkit.py#L724-L732
def query_value(stmt, args=(), default=None): """ Execute a query, returning the first value in the first row of the result set. If the query returns no result set, a default value is returned, which is `None` by default. """ for row in query(stmt, args, TupleFactory): return row[0] return default
[ "def", "query_value", "(", "stmt", ",", "args", "=", "(", ")", ",", "default", "=", "None", ")", ":", "for", "row", "in", "query", "(", "stmt", ",", "args", ",", "TupleFactory", ")", ":", "return", "row", "[", "0", "]", "return", "default" ]
Execute a query, returning the first value in the first row of the result set. If the query returns no result set, a default value is returned, which is `None` by default.
[ "Execute", "a", "query", "returning", "the", "first", "value", "in", "the", "first", "row", "of", "the", "result", "set", ".", "If", "the", "query", "returns", "no", "result", "set", "a", "default", "value", "is", "returned", "which", "is", "None", "by", "default", "." ]
python
train
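Because query_value short-circuits on the first row and falls back to a default, it suits existence-style lookups; a sketch driving dbkit with an in-memory sqlite3 database (the with-block making the context current is the usage pattern assumed here):

import sqlite3
import dbkit

with dbkit.connect(sqlite3, ':memory:'):
    dbkit.execute('CREATE TABLE t (n INTEGER)')
    dbkit.execute('INSERT INTO t VALUES (7)')
    print(dbkit.query_value('SELECT n FROM t'))                          # 7
    print(dbkit.query_value('SELECT n FROM t WHERE n = 9', default=-1))  # -1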
koszullab/metaTOR
metator/scripts/hicstuff.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1288-L1321
def scalogram(M, circ=False): """Computes so-called 'scalograms' used to easily visualize contacts at different distance scales. Edge cases have been painstakingly taken care of. """ # Sanity checks if not type(M) is np.ndarray: M = np.array(M) if M.shape[0] != M.shape[1]: raise ValueError("Matrix is not square.") try: n = min(M.shape) except AttributeError: n = M.size N = np.zeros(M.shape) for i in range(n): for j in range(n): if i + j < n and i >= j: N[i, j] = M[i, i - j:i + j + 1].sum() elif circ and i + j < n and i < j: N[i, j] = M[i, i - j:].sum() + M[i, :i + j + 1].sum() elif circ and i >= j and i + j >= n: N[i, j] = M[i, i - j:].sum() + M[i, :i + j - n + 1].sum() elif circ and i < j and i + j >= n: N[i, j] = (M[i, i - j:].sum() + M[i, :].sum() + M[i, :i + j - n + 1].sum()) return N
[ "def", "scalogram", "(", "M", ",", "circ", "=", "False", ")", ":", "# Sanity checks", "if", "not", "type", "(", "M", ")", "is", "np", ".", "ndarray", ":", "M", "=", "np", ".", "array", "(", "M", ")", "if", "M", ".", "shape", "[", "0", "]", "!=", "M", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Matrix is not square.\"", ")", "try", ":", "n", "=", "min", "(", "M", ".", "shape", ")", "except", "AttributeError", ":", "n", "=", "M", ".", "size", "N", "=", "np", ".", "zeros", "(", "M", ".", "shape", ")", "for", "i", "in", "range", "(", "n", ")", ":", "for", "j", "in", "range", "(", "n", ")", ":", "if", "i", "+", "j", "<", "n", "and", "i", ">=", "j", ":", "N", "[", "i", ",", "j", "]", "=", "M", "[", "i", ",", "i", "-", "j", ":", "i", "+", "j", "+", "1", "]", ".", "sum", "(", ")", "elif", "circ", "and", "i", "+", "j", "<", "n", "and", "i", "<", "j", ":", "N", "[", "i", ",", "j", "]", "=", "M", "[", "i", ",", "i", "-", "j", ":", "]", ".", "sum", "(", ")", "+", "M", "[", "i", ",", ":", "i", "+", "j", "+", "1", "]", ".", "sum", "(", ")", "elif", "circ", "and", "i", ">=", "j", "and", "i", "+", "j", ">=", "n", ":", "N", "[", "i", ",", "j", "]", "=", "M", "[", "i", ",", "i", "-", "j", ":", "]", ".", "sum", "(", ")", "+", "M", "[", "i", ",", ":", "i", "+", "j", "-", "n", "+", "1", "]", ".", "sum", "(", ")", "elif", "circ", "and", "i", "<", "j", "and", "i", "+", "j", ">=", "n", ":", "N", "[", "i", ",", "j", "]", "=", "(", "M", "[", "i", ",", "i", "-", "j", ":", "]", ".", "sum", "(", ")", "+", "M", "[", "i", ",", ":", "]", ".", "sum", "(", ")", "+", "M", "[", "i", ",", ":", "i", "+", "j", "-", "n", "+", "1", "]", ".", "sum", "(", ")", ")", "return", "N" ]
Computes so-called 'scalograms' used to easily visualize contacts at different distance scales. Edge cases have been painstakingly taken care of.
[ "Computes", "so", "-", "called", "scalograms", "used", "to", "easily", "visualize", "contacts", "at", "different", "distance", "scales", ".", "Edge", "cases", "have", "been", "painstakingly", "taken", "care", "of", "." ]
python
train
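Assuming the `scalogram` function above is in scope (its only dependency is numpy), a quick check on a small matrix shows how each entry N[i, j] accumulates the contacts of bin i within distance j:

import numpy as np

M = np.ones((4, 4))   # toy contact map: every pair interacts once
N = scalogram(M)      # linear (non-circular) variant
print(N[0, 0])        # 1.0 -> only M[0, 0] is within distance 0 of bin 0
print(N[1, 1])        # 3.0 -> M[1, 0] + M[1, 1] + M[1, 2]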
JensAstrup/pyOutlook
pyOutlook/core/message.py
https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/core/message.py#L262-L295
def _make_api_call(self, http_type, endpoint, extra_headers = None, data=None):
        # type: (str, str, dict, Any) -> None
        """ Internal method to handle making calls to the Outlook API and logging both the request and response
        Args:
            http_type: (str) 'post' or 'delete'
            endpoint: (str) The endpoint the request will be made to
            extra_headers: A dict of headers to send to the requests module in addition to Authorization and Content-Type
            data: The data to provide to the requests module

        Raises:
            MiscError: For errors that aren't a 401
            AuthError: For 401 errors

        """
        headers = {"Authorization": "Bearer " + self.account.access_token, "Content-Type": "application/json"}

        if extra_headers is not None:
            headers.update(extra_headers)

        log.debug('Making Outlook API request for message (ID: {}) with Headers: {} Data: {}'
                  .format(self.message_id, headers, data))

        if http_type == 'post':
            r = requests.post(endpoint, headers=headers, data=data)

        elif http_type == 'delete':
            r = requests.delete(endpoint, headers=headers)

        elif http_type == 'patch':
            r = requests.patch(endpoint, headers=headers, data=data)

        else:
            raise NotImplementedError

        check_response(r)
[ "def", "_make_api_call", "(", "self", ",", "http_type", ",", "endpoint", ",", "extra_headers", "=", "None", ",", "data", "=", "None", ")", ":", "# type: (str, str, dict, Any) -> None", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer \"", "+", "self", ".", "account", ".", "access_token", ",", "\"Content-Type\"", ":", "\"application/json\"", "}", "if", "extra_headers", "is", "not", "None", ":", "headers", ".", "update", "(", "extra_headers", ")", "log", ".", "debug", "(", "'Making Outlook API request for message (ID: {}) with Headers: {} Data: {}'", ".", "format", "(", "self", ".", "message_id", ",", "headers", ",", "data", ")", ")", "if", "http_type", "==", "'post'", ":", "r", "=", "requests", ".", "post", "(", "endpoint", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "elif", "http_type", "==", "'delete'", ":", "r", "=", "requests", ".", "delete", "(", "endpoint", ",", "headers", "=", "headers", ")", "elif", "http_type", "==", "'patch'", ":", "r", "=", "requests", ".", "patch", "(", "endpoint", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "else", ":", "raise", "NotImplemented", "check_response", "(", "r", ")" ]
Internal method to handle making calls to the Outlook API and logging both the request and response
        Args:
            http_type: (str) 'post' or 'delete'
            endpoint: (str) The endpoint the request will be made to
            extra_headers: A dict of headers to send to the requests module in addition to Authorization and Content-Type
            data: The data to provide to the requests module

        Raises:
            MiscError: For errors that aren't a 401
            AuthError: For 401 errors
[ "Internal", "method", "to", "handle", "making", "calls", "to", "the", "Outlook", "API", "and", "logging", "both", "the", "request", "and", "response", "Args", ":", "http_type", ":", "(", "str", ")", "post", "or", "delete", "endpoint", ":", "(", "str", ")", "The", "endpoint", "the", "request", "will", "be", "made", "to", "headers", ":", "A", "dict", "of", "headers", "to", "send", "to", "the", "requests", "module", "in", "addition", "to", "Authorization", "and", "Content", "-", "Type", "data", ":", "The", "data", "to", "provide", "to", "the", "requests", "module" ]
python
train
nwilming/ocupy
ocupy/datamat.py
https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/datamat.py#L323-L408
def annotate (self, src_dm, data_field, key_field, take_first=True): """ Adds a new field (data_field) to the Datamat with data from the corresponding field of another Datamat (src_dm). This is accomplished through the use of a key_field, which is used to determine how the data is copied. This operation corresponds loosely to an SQL join operation. The two Datamats are essentially aligned by the unique values of key_field so that each block element of the new field of the target Datamat will consist of those elements of src_dm's data_field where the corresponding element in key_field matches. If 'take_first' is not true, and there is not only a single corresponding element (typical usage case) then the target element value will be a sequence (array) of all the matching elements. The target Datamat (self) must not have a field name data_field already, and both Datamats must have key_field. The new field in the target Datamat will be a masked array to handle non-existent data. TODO: Make example more generic, remove interoceptive reference TODO: Make standalone test Examples: >>> dm_intero = load_interoception_files ('test-ecg.csv', silent=True) >>> dm_emotiv = load_emotivestimuli_files ('test-bpm.csv', silent=True) >>> length(dm_intero) 4 >>> unique(dm_intero.subject_id) ['p05', 'p06'] >>> length(dm_emotiv) 3 >>> unique(dm_emotiv.subject_id) ['p04', 'p05', 'p06'] >>> 'interospective_awareness' in dm_intero.fieldnames() True >>> unique(dm_intero.interospective_awareness) == [0.5555, 0.6666] True >>> 'interospective_awareness' in dm_emotiv.fieldnames() False >>> dm_emotiv.copy_field(dm_intero, 'interospective_awareness', 'subject_id') >>> 'interospective_awareness' in dm_emotiv.fieldnames() True >>> unique(dm_emotiv.interospective_awareness) == [NaN, 0.5555, 0.6666] False """ if key_field not in self._fields or key_field not in src_dm._fields: raise AttributeError('key field (%s) must exist in both Datamats'%(key_field)) if data_field not in src_dm._fields: raise AttributeError('data field (%s) must exist in source Datamat' % (data_field)) if data_field in self._fields: raise AttributeError('data field (%s) already exists in target Datamat' % (data_field)) #Create a mapping of key_field value to data value. data_to_copy = dict([(x.field(key_field)[0], x.field(data_field)) for x in src_dm.by_field(key_field)]) data_element = list(data_to_copy.values())[0] #Create the new data array of correct size. # We use a masked array because it is possible that for some elements # of the target Datamat, there exist simply no data in the source # Datamat. NaNs are fine as indication of this for floats, but if the # field happens to hold booleans or integers or something else, NaN # does not work. new_shape = [len(self)] + list(data_element.shape) new_data = ma.empty(new_shape, data_element.dtype) new_data.mask=True if np.issubdtype(new_data.dtype, np.float): new_data.fill(np.NaN) #For backwards compatibility, if mask not used #Now we copy the data. If the data to copy contains only a single value, # it is added to the target as a scalar (single value). # Otherwise, it is copied as is, i.e. as a sequence. for (key, val) in list(data_to_copy.items()): if take_first: new_data[self.field(key_field) == key] = val[0] else: new_data[self.field(key_field) == key] = val self.add_field(data_field, new_data)
[ "def", "annotate", "(", "self", ",", "src_dm", ",", "data_field", ",", "key_field", ",", "take_first", "=", "True", ")", ":", "if", "key_field", "not", "in", "self", ".", "_fields", "or", "key_field", "not", "in", "src_dm", ".", "_fields", ":", "raise", "AttributeError", "(", "'key field (%s) must exist in both Datamats'", "%", "(", "key_field", ")", ")", "if", "data_field", "not", "in", "src_dm", ".", "_fields", ":", "raise", "AttributeError", "(", "'data field (%s) must exist in source Datamat'", "%", "(", "data_field", ")", ")", "if", "data_field", "in", "self", ".", "_fields", ":", "raise", "AttributeError", "(", "'data field (%s) already exists in target Datamat'", "%", "(", "data_field", ")", ")", "#Create a mapping of key_field value to data value.", "data_to_copy", "=", "dict", "(", "[", "(", "x", ".", "field", "(", "key_field", ")", "[", "0", "]", ",", "x", ".", "field", "(", "data_field", ")", ")", "for", "x", "in", "src_dm", ".", "by_field", "(", "key_field", ")", "]", ")", "data_element", "=", "list", "(", "data_to_copy", ".", "values", "(", ")", ")", "[", "0", "]", "#Create the new data array of correct size.", "# We use a masked array because it is possible that for some elements", "# of the target Datamat, there exist simply no data in the source", "# Datamat. NaNs are fine as indication of this for floats, but if the", "# field happens to hold booleans or integers or something else, NaN", "# does not work.", "new_shape", "=", "[", "len", "(", "self", ")", "]", "+", "list", "(", "data_element", ".", "shape", ")", "new_data", "=", "ma", ".", "empty", "(", "new_shape", ",", "data_element", ".", "dtype", ")", "new_data", ".", "mask", "=", "True", "if", "np", ".", "issubdtype", "(", "new_data", ".", "dtype", ",", "np", ".", "float", ")", ":", "new_data", ".", "fill", "(", "np", ".", "NaN", ")", "#For backwards compatibility, if mask not used", "#Now we copy the data. If the data to copy contains only a single value,", "# it is added to the target as a scalar (single value).", "# Otherwise, it is copied as is, i.e. as a sequence.", "for", "(", "key", ",", "val", ")", "in", "list", "(", "data_to_copy", ".", "items", "(", ")", ")", ":", "if", "take_first", ":", "new_data", "[", "self", ".", "field", "(", "key_field", ")", "==", "key", "]", "=", "val", "[", "0", "]", "else", ":", "new_data", "[", "self", ".", "field", "(", "key_field", ")", "==", "key", "]", "=", "val", "self", ".", "add_field", "(", "data_field", ",", "new_data", ")" ]
Adds a new field (data_field) to the Datamat with data from the corresponding field of another Datamat (src_dm).

This is accomplished through the use of a key_field, which is used to determine how the data is copied. This operation corresponds loosely to an SQL join operation.

The two Datamats are essentially aligned by the unique values of key_field so that each block element of the new field of the target Datamat will consist of those elements of src_dm's data_field where the corresponding element in key_field matches.

If 'take_first' is true (the typical usage case), only the first matching element is copied; otherwise the target element value will be a sequence (array) of all the matching elements.

The target Datamat (self) must not already have a field named data_field, and both Datamats must have key_field.

The new field in the target Datamat will be a masked array to handle non-existent data.

TODO: Make example more generic, remove interoceptive reference
TODO: Make standalone test

Examples:

>>> dm_intero = load_interoception_files ('test-ecg.csv', silent=True)
>>> dm_emotiv = load_emotivestimuli_files ('test-bpm.csv', silent=True)
>>> length(dm_intero)
4
>>> unique(dm_intero.subject_id)
['p05', 'p06']
>>> length(dm_emotiv)
3
>>> unique(dm_emotiv.subject_id)
['p04', 'p05', 'p06']
>>> 'interospective_awareness' in dm_intero.fieldnames()
True
>>> unique(dm_intero.interospective_awareness) == [0.5555, 0.6666]
True
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
False
>>> dm_emotiv.copy_field(dm_intero, 'interospective_awareness', 'subject_id')
>>> 'interospective_awareness' in dm_emotiv.fieldnames()
True
>>> unique(dm_emotiv.interospective_awareness) == [NaN, 0.5555, 0.6666]
False
[ "Adds", "a", "new", "field", "(", "data_field", ")", "to", "the", "Datamat", "with", "data", "from", "the", "corresponding", "field", "of", "another", "Datamat", "(", "src_dm", ")", "." ]
python
train
pandas-dev/pandas
pandas/core/reshape/merge.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L823-L933
def _get_merge_keys(self): """ Note: has side effects (copy/delete key columns) Parameters ---------- left right on Returns ------- left_keys, right_keys """ left_keys = [] right_keys = [] join_names = [] right_drop = [] left_drop = [] left, right = self.left, self.right is_lkey = lambda x: is_array_like(x) and len(x) == len(left) is_rkey = lambda x: is_array_like(x) and len(x) == len(right) # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A # user could, for example, request 'left_index' and 'left_by'. In a # regular pd.merge(), users cannot specify both 'left_index' and # 'left_on'. (Instead, users have a MultiIndex). That means the # self.left_on in this function is always empty in a pd.merge(), but # a pd.merge_asof(left_index=True, left_by=...) will result in a # self.left_on array with a None in the middle of it. This requires # a work-around as designated in the code below. # See _validate_specification() for where this happens. # ugh, spaghetti re #733 if _any(self.left_on) and _any(self.right_on): for lk, rk in zip(self.left_on, self.right_on): if is_lkey(lk): left_keys.append(lk) if is_rkey(rk): right_keys.append(rk) join_names.append(None) # what to do? else: if rk is not None: right_keys.append( right._get_label_or_level_values(rk)) join_names.append(rk) else: # work-around for merge_asof(right_index=True) right_keys.append(right.index) join_names.append(right.index.name) else: if not is_rkey(rk): if rk is not None: right_keys.append( right._get_label_or_level_values(rk)) else: # work-around for merge_asof(right_index=True) right_keys.append(right.index) if lk is not None and lk == rk: # avoid key upcast in corner case (length-0) if len(left) > 0: right_drop.append(rk) else: left_drop.append(lk) else: right_keys.append(rk) if lk is not None: left_keys.append(left._get_label_or_level_values(lk)) join_names.append(lk) else: # work-around for merge_asof(left_index=True) left_keys.append(left.index) join_names.append(left.index.name) elif _any(self.left_on): for k in self.left_on: if is_lkey(k): left_keys.append(k) join_names.append(None) else: left_keys.append(left._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.right.index, MultiIndex): right_keys = [lev._values.take(lev_codes) for lev, lev_codes in zip(self.right.index.levels, self.right.index.codes)] else: right_keys = [self.right.index._values] elif _any(self.right_on): for k in self.right_on: if is_rkey(k): right_keys.append(k) join_names.append(None) else: right_keys.append(right._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.left.index, MultiIndex): left_keys = [lev._values.take(lev_codes) for lev, lev_codes in zip(self.left.index.levels, self.left.index.codes)] else: left_keys = [self.left.index.values] if left_drop: self.left = self.left._drop_labels_or_levels(left_drop) if right_drop: self.right = self.right._drop_labels_or_levels(right_drop) return left_keys, right_keys, join_names
[ "def", "_get_merge_keys", "(", "self", ")", ":", "left_keys", "=", "[", "]", "right_keys", "=", "[", "]", "join_names", "=", "[", "]", "right_drop", "=", "[", "]", "left_drop", "=", "[", "]", "left", ",", "right", "=", "self", ".", "left", ",", "self", ".", "right", "is_lkey", "=", "lambda", "x", ":", "is_array_like", "(", "x", ")", "and", "len", "(", "x", ")", "==", "len", "(", "left", ")", "is_rkey", "=", "lambda", "x", ":", "is_array_like", "(", "x", ")", "and", "len", "(", "x", ")", "==", "len", "(", "right", ")", "# Note that pd.merge_asof() has separate 'on' and 'by' parameters. A", "# user could, for example, request 'left_index' and 'left_by'. In a", "# regular pd.merge(), users cannot specify both 'left_index' and", "# 'left_on'. (Instead, users have a MultiIndex). That means the", "# self.left_on in this function is always empty in a pd.merge(), but", "# a pd.merge_asof(left_index=True, left_by=...) will result in a", "# self.left_on array with a None in the middle of it. This requires", "# a work-around as designated in the code below.", "# See _validate_specification() for where this happens.", "# ugh, spaghetti re #733", "if", "_any", "(", "self", ".", "left_on", ")", "and", "_any", "(", "self", ".", "right_on", ")", ":", "for", "lk", ",", "rk", "in", "zip", "(", "self", ".", "left_on", ",", "self", ".", "right_on", ")", ":", "if", "is_lkey", "(", "lk", ")", ":", "left_keys", ".", "append", "(", "lk", ")", "if", "is_rkey", "(", "rk", ")", ":", "right_keys", ".", "append", "(", "rk", ")", "join_names", ".", "append", "(", "None", ")", "# what to do?", "else", ":", "if", "rk", "is", "not", "None", ":", "right_keys", ".", "append", "(", "right", ".", "_get_label_or_level_values", "(", "rk", ")", ")", "join_names", ".", "append", "(", "rk", ")", "else", ":", "# work-around for merge_asof(right_index=True)", "right_keys", ".", "append", "(", "right", ".", "index", ")", "join_names", ".", "append", "(", "right", ".", "index", ".", "name", ")", "else", ":", "if", "not", "is_rkey", "(", "rk", ")", ":", "if", "rk", "is", "not", "None", ":", "right_keys", ".", "append", "(", "right", ".", "_get_label_or_level_values", "(", "rk", ")", ")", "else", ":", "# work-around for merge_asof(right_index=True)", "right_keys", ".", "append", "(", "right", ".", "index", ")", "if", "lk", "is", "not", "None", "and", "lk", "==", "rk", ":", "# avoid key upcast in corner case (length-0)", "if", "len", "(", "left", ")", ">", "0", ":", "right_drop", ".", "append", "(", "rk", ")", "else", ":", "left_drop", ".", "append", "(", "lk", ")", "else", ":", "right_keys", ".", "append", "(", "rk", ")", "if", "lk", "is", "not", "None", ":", "left_keys", ".", "append", "(", "left", ".", "_get_label_or_level_values", "(", "lk", ")", ")", "join_names", ".", "append", "(", "lk", ")", "else", ":", "# work-around for merge_asof(left_index=True)", "left_keys", ".", "append", "(", "left", ".", "index", ")", "join_names", ".", "append", "(", "left", ".", "index", ".", "name", ")", "elif", "_any", "(", "self", ".", "left_on", ")", ":", "for", "k", "in", "self", ".", "left_on", ":", "if", "is_lkey", "(", "k", ")", ":", "left_keys", ".", "append", "(", "k", ")", "join_names", ".", "append", "(", "None", ")", "else", ":", "left_keys", ".", "append", "(", "left", ".", "_get_label_or_level_values", "(", "k", ")", ")", "join_names", ".", "append", "(", "k", ")", "if", "isinstance", "(", "self", ".", "right", ".", "index", ",", "MultiIndex", ")", ":", "right_keys", "=", "[", "lev", ".", "_values", ".", "take", "(", "lev_codes", ")", 
"for", "lev", ",", "lev_codes", "in", "zip", "(", "self", ".", "right", ".", "index", ".", "levels", ",", "self", ".", "right", ".", "index", ".", "codes", ")", "]", "else", ":", "right_keys", "=", "[", "self", ".", "right", ".", "index", ".", "_values", "]", "elif", "_any", "(", "self", ".", "right_on", ")", ":", "for", "k", "in", "self", ".", "right_on", ":", "if", "is_rkey", "(", "k", ")", ":", "right_keys", ".", "append", "(", "k", ")", "join_names", ".", "append", "(", "None", ")", "else", ":", "right_keys", ".", "append", "(", "right", ".", "_get_label_or_level_values", "(", "k", ")", ")", "join_names", ".", "append", "(", "k", ")", "if", "isinstance", "(", "self", ".", "left", ".", "index", ",", "MultiIndex", ")", ":", "left_keys", "=", "[", "lev", ".", "_values", ".", "take", "(", "lev_codes", ")", "for", "lev", ",", "lev_codes", "in", "zip", "(", "self", ".", "left", ".", "index", ".", "levels", ",", "self", ".", "left", ".", "index", ".", "codes", ")", "]", "else", ":", "left_keys", "=", "[", "self", ".", "left", ".", "index", ".", "values", "]", "if", "left_drop", ":", "self", ".", "left", "=", "self", ".", "left", ".", "_drop_labels_or_levels", "(", "left_drop", ")", "if", "right_drop", ":", "self", ".", "right", "=", "self", ".", "right", ".", "_drop_labels_or_levels", "(", "right_drop", ")", "return", "left_keys", ",", "right_keys", ",", "join_names" ]
Note: has side effects (copy/delete key columns) Parameters ---------- left right on Returns ------- left_keys, right_keys
[ "Note", ":", "has", "side", "effects", "(", "copy", "/", "delete", "key", "columns", ")" ]
python
train
spyder-ide/spyder
spyder/preferences/shortcuts.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L590-L607
def headerData(self, section, orientation, role=Qt.DisplayRole): """Qt Override.""" if role == Qt.TextAlignmentRole: if orientation == Qt.Horizontal: return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter)) return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter)) if role != Qt.DisplayRole: return to_qvariant() if orientation == Qt.Horizontal: if section == CONTEXT: return to_qvariant(_("Context")) elif section == NAME: return to_qvariant(_("Name")) elif section == SEQUENCE: return to_qvariant(_("Shortcut")) elif section == SEARCH_SCORE: return to_qvariant(_("Score")) return to_qvariant()
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignHCenter", "|", "Qt", ".", "AlignVCenter", ")", ")", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignRight", "|", "Qt", ".", "AlignVCenter", ")", ")", "if", "role", "!=", "Qt", ".", "DisplayRole", ":", "return", "to_qvariant", "(", ")", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "if", "section", "==", "CONTEXT", ":", "return", "to_qvariant", "(", "_", "(", "\"Context\"", ")", ")", "elif", "section", "==", "NAME", ":", "return", "to_qvariant", "(", "_", "(", "\"Name\"", ")", ")", "elif", "section", "==", "SEQUENCE", ":", "return", "to_qvariant", "(", "_", "(", "\"Shortcut\"", ")", ")", "elif", "section", "==", "SEARCH_SCORE", ":", "return", "to_qvariant", "(", "_", "(", "\"Score\"", ")", ")", "return", "to_qvariant", "(", ")" ]
Qt Override.
[ "Qt", "Override", "." ]
python
train
apache/airflow
airflow/contrib/hooks/gcp_vision_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L211-L222
def delete_product_set( self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None ): """ For the documentation see: :class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator` """ client = self.get_conn() name = ProductSearchClient.product_set_path(project_id, location, product_set_id) self.log.info('Deleting ProductSet: %s', name) client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata) self.log.info('ProductSet with the name [%s] deleted.', name)
[ "def", "delete_product_set", "(", "self", ",", "location", ",", "product_set_id", ",", "project_id", "=", "None", ",", "retry", "=", "None", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ")", ":", "client", "=", "self", ".", "get_conn", "(", ")", "name", "=", "ProductSearchClient", ".", "product_set_path", "(", "project_id", ",", "location", ",", "product_set_id", ")", "self", ".", "log", ".", "info", "(", "'Deleting ProductSet: %s'", ",", "name", ")", "client", ".", "delete_product_set", "(", "name", "=", "name", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")", "self", ".", "log", ".", "info", "(", "'ProductSet with the name [%s] deleted.'", ",", "name", ")" ]
For the documentation see: :class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator`
[ "For", "the", "documentation", "see", ":", ":", "class", ":", "~airflow", ".", "contrib", ".", "operators", ".", "gcp_vision_operator", ".", "CloudVisionProductSetDeleteOperator" ]
python
test
woolfson-group/isambard
isambard/external_programs/scwrl.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/external_programs/scwrl.py#L136-L156
def pack_sidechains(pdb, sequence, path=False): """Packs sidechains onto a given PDB file or string. Parameters ---------- pdb : str PDB string or a path to a PDB file. sequence : str Amino acid sequence for SCWRL to pack in single-letter code. path : bool, optional True if pdb is a path. Returns ------- scwrl_pdb : str String of packed SCWRL PDB. scwrl_score : float Scwrl packing score. """ scwrl_std_out, scwrl_pdb = run_scwrl(pdb, sequence, path=path) return parse_scwrl_out(scwrl_std_out, scwrl_pdb)
[ "def", "pack_sidechains", "(", "pdb", ",", "sequence", ",", "path", "=", "False", ")", ":", "scwrl_std_out", ",", "scwrl_pdb", "=", "run_scwrl", "(", "pdb", ",", "sequence", ",", "path", "=", "path", ")", "return", "parse_scwrl_out", "(", "scwrl_std_out", ",", "scwrl_pdb", ")" ]
Packs sidechains onto a given PDB file or string. Parameters ---------- pdb : str PDB string or a path to a PDB file. sequence : str Amino acid sequence for SCWRL to pack in single-letter code. path : bool, optional True if pdb is a path. Returns ------- scwrl_pdb : str String of packed SCWRL PDB. scwrl_score : float Scwrl packing score.
[ "Packs", "sidechains", "onto", "a", "given", "PDB", "file", "or", "string", "." ]
python
train
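A hypothetical call, assuming a local SCWRL install that isambard can find; the PDB filename and sequence below are placeholders, and the sequence length must match the chain being packed:

from isambard.external_programs.scwrl import pack_sidechains

packed_pdb, score = pack_sidechains('model.pdb', 'MKVLAT', path=True)
print(score)            # SCWRL packing score
print(packed_pdb[:60])  # start of the repacked PDB string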
urbn/Caesium
caesium/document.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L600-L611
def upsert(self, _id, dct, attribute="_id"):
        """Update or Insert a new document

        :param str _id: The document id
        :param dict dct: The dictionary to set on the document
        :param str attribute: The attribute to query for to find the object to set this data on
        :returns: JSON Mongo client response including the "n" key to show number of objects affected
        """
        mongo_response = yield self.update(_id, dct, upsert=True, attribute=attribute)

        raise Return(mongo_response)
[ "def", "upsert", "(", "self", ",", "_id", ",", "dct", ",", "attribute", "=", "\"_id\"", ")", ":", "mongo_response", "=", "yield", "self", ".", "update", "(", "_id", ",", "dct", ",", "upsert", "=", "True", ",", "attribute", "=", "attribute", ")", "raise", "Return", "(", "mongo_response", ")" ]
Update or Insert a new document

:param str _id: The document id
:param dict dct: The dictionary to set on the document
:param str attribute: The attribute to query for to find the object to set this data on
:returns: JSON Mongo client response including the "n" key to show number of objects affected
[ "Update", "or", "Insert", "a", "new", "document" ]
python
train
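Because `upsert` is a Tornado coroutine (`yield` plus `raise Return`), it has to be driven from a coroutine context. A sketch, where `revisions` stands in for a Caesium document collection instance (a hypothetical name, not part of the snippet above):

from tornado import gen, ioloop

@gen.coroutine
def main():
    resp = yield revisions.upsert('abc123', {'status': 'published'})
    print(resp.get('n'))  # number of documents affected

ioloop.IOLoop.current().run_sync(main)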
skorokithakis/shortuuid
shortuuid/main.py
https://github.com/skorokithakis/shortuuid/blob/4da632a986c3a43f75c7df64f27a90bbf7ff8039/shortuuid/main.py#L111-L121
def set_alphabet(self, alphabet):
        """Set the alphabet to be used for new UUIDs."""

        # Turn the alphabet into a set and sort it to prevent duplicates
        # and ensure reproducibility.
        new_alphabet = list(sorted(set(alphabet)))
        if len(new_alphabet) > 1:
            self._alphabet = new_alphabet
            self._alpha_len = len(self._alphabet)
        else:
            raise ValueError("Alphabet with more than "
                             "one unique symbol required.")
[ "def", "set_alphabet", "(", "self", ",", "alphabet", ")", ":", "# Turn the alphabet into a set and sort it to prevent duplicates", "# and ensure reproducibility.", "new_alphabet", "=", "list", "(", "sorted", "(", "set", "(", "alphabet", ")", ")", ")", "if", "len", "(", "new_alphabet", ")", ">", "1", ":", "self", ".", "_alphabet", "=", "new_alphabet", "self", ".", "_alpha_len", "=", "len", "(", "self", ".", "_alphabet", ")", "else", ":", "raise", "ValueError", "(", "\"Alphabet with more than \"", "\"one unique symbols required.\"", ")" ]
Set the alphabet to be used for new UUIDs.
[ "Set", "the", "alphabet", "to", "be", "used", "for", "new", "UUIDs", "." ]
python
train
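Since the alphabet is deduplicated and sorted, passing a string with repeats is harmless, while a single-symbol alphabet raises. For example, on the ShortUUID class this method belongs to:

import shortuuid

su = shortuuid.ShortUUID()
su.set_alphabet('0123456789abcdef')  # hex-style IDs
print(su.uuid())                     # ID length varies with alphabet size
su.set_alphabet('aabbcc')            # stored as 'abc' after dedup + sort
# su.set_alphabet('aaaa')            # would raise ValueError: only one unique symbol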
googleapis/google-cloud-python
spanner/benchmark/ycsb.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/benchmark/ycsb.py#L129-L140
def do_operation(database, keys, table, operation, latencies_ms): """Does a single operation and records latency.""" key = random.choice(keys) start = timeit.default_timer() if operation == 'read': read(database, table, key) elif operation == 'update': update(database, table, key) else: raise ValueError('Unknown operation: %s' % operation) end = timeit.default_timer() latencies_ms[operation].append((end - start) * 1000)
[ "def", "do_operation", "(", "database", ",", "keys", ",", "table", ",", "operation", ",", "latencies_ms", ")", ":", "key", "=", "random", ".", "choice", "(", "keys", ")", "start", "=", "timeit", ".", "default_timer", "(", ")", "if", "operation", "==", "'read'", ":", "read", "(", "database", ",", "table", ",", "key", ")", "elif", "operation", "==", "'update'", ":", "update", "(", "database", ",", "table", ",", "key", ")", "else", ":", "raise", "ValueError", "(", "'Unknown operation: %s'", "%", "operation", ")", "end", "=", "timeit", ".", "default_timer", "(", ")", "latencies_ms", "[", "operation", "]", ".", "append", "(", "(", "end", "-", "start", ")", "*", "1000", ")" ]
Does a single operation and records latency.
[ "Does", "a", "single", "operation", "and", "records", "latency", "." ]
python
train
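The timing pattern here (timeit.default_timer around a single call, appended to a per-operation list in milliseconds) works independently of Spanner; a self-contained sketch with a stand-in workload:

import random
import timeit

latencies_ms = {'read': [], 'update': []}
keys = ['user1', 'user2', 'user3']

def read(key):  # stand-in for the Spanner read
    sum(i * i for i in range(10000))

key = random.choice(keys)
start = timeit.default_timer()
read(key)
latencies_ms['read'].append((timeit.default_timer() - start) * 1000)
print(latencies_ms['read'])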
futapi/fut
fut/core.py
https://github.com/futapi/fut/blob/3792c9eee8f5884f38a02210e649c46c6c7a756d/fut/core.py#L1090-L1115
def bid(self, trade_id, bid, fast=False):
        """Make a bid.

        :param trade_id: Trade id.
        :param bid: Amount of credits You want to spend.
        :param fast: True for fastest bidding (skips trade status & credits check).
        """
        method = 'PUT'
        url = 'trade/%s/bid' % trade_id

        if not fast:
            rc = self.tradeStatus(trade_id)[0]
            # don't bid if current bid is equal or greater than our max bid
            if rc['currentBid'] >= bid or self.credits < bid:
                return False
        # TODO: add exceptions
        data = {'bid': bid}
        try:
            rc = self.__request__(method, url, data=json.dumps(data), params={'sku_b': self.sku_b}, fast=fast)[
                'auctionInfo'][0]
        except PermissionDenied:
            # too slow, somebody took it already :-(
            return False
        if rc['bidState'] == 'highest' or (
                rc['tradeState'] == 'closed' and rc['bidState'] == 'buyNow'):  # checking 'tradeState' is required?
            return True
        else:
            return False
[ "def", "bid", "(", "self", ",", "trade_id", ",", "bid", ",", "fast", "=", "False", ")", ":", "method", "=", "'PUT'", "url", "=", "'trade/%s/bid'", "%", "trade_id", "if", "not", "fast", ":", "rc", "=", "self", ".", "tradeStatus", "(", "trade_id", ")", "[", "0", "]", "# don't bid if current bid is equal or greater than our max bid", "if", "rc", "[", "'currentBid'", "]", ">=", "bid", "or", "self", ".", "credits", "<", "bid", ":", "return", "False", "# TODO: add exceptions", "data", "=", "{", "'bid'", ":", "bid", "}", "try", ":", "rc", "=", "self", ".", "__request__", "(", "method", ",", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "params", "=", "{", "'sku_b'", ":", "self", ".", "sku_b", "}", ",", "fast", "=", "fast", ")", "[", "'auctionInfo'", "]", "[", "0", "]", "except", "PermissionDenied", ":", "# too slow, somebody took it already :-(", "return", "False", "if", "rc", "[", "'bidState'", "]", "==", "'highest'", "or", "(", "rc", "[", "'tradeState'", "]", "==", "'closed'", "and", "rc", "[", "'bidState'", "]", "==", "'buyNow'", ")", ":", "# checking 'tradeState' is required?", "return", "True", "else", ":", "return", "False" ]
Make a bid.

:param trade_id: Trade id.
:param bid: Amount of credits You want to spend.
:param fast: True for fastest bidding (skips trade status & credits check).
[ "Make", "a", "bid", "." ]
python
valid
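A hypothetical bidding call, assuming a logged-in fut.Core session (the credentials and trade id are placeholders; with fast=True the pre-checks are skipped, so the caller must track its own credits):

import fut

f = fut.Core('email@example.com', 'password', 'secret answer')  # placeholders
trade_id = 123456789                                            # placeholder
if f.bid(trade_id, 250):
    print('highest bidder (or bought outright)')
else:
    print('outbid, insufficient credits, or too slow')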
Rapptz/discord.py
discord/abc.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/abc.py#L290-L303
def changed_roles(self): """Returns a :class:`list` of :class:`Roles` that have been overridden from their default values in the :attr:`Guild.roles` attribute.""" ret = [] g = self.guild for overwrite in filter(lambda o: o.type == 'role', self._overwrites): role = g.get_role(overwrite.id) if role is None: continue role = copy.copy(role) role.permissions.handle_overwrite(overwrite.allow, overwrite.deny) ret.append(role) return ret
[ "def", "changed_roles", "(", "self", ")", ":", "ret", "=", "[", "]", "g", "=", "self", ".", "guild", "for", "overwrite", "in", "filter", "(", "lambda", "o", ":", "o", ".", "type", "==", "'role'", ",", "self", ".", "_overwrites", ")", ":", "role", "=", "g", ".", "get_role", "(", "overwrite", ".", "id", ")", "if", "role", "is", "None", ":", "continue", "role", "=", "copy", ".", "copy", "(", "role", ")", "role", ".", "permissions", ".", "handle_overwrite", "(", "overwrite", ".", "allow", ",", "overwrite", ".", "deny", ")", "ret", ".", "append", "(", "role", ")", "return", "ret" ]
Returns a :class:`list` of :class:`Roles` that have been overridden from their default values in the :attr:`Guild.roles` attribute.
[ "Returns", "a", ":", "class", ":", "list", "of", ":", "class", ":", "Roles", "that", "have", "been", "overridden", "from", "their", "default", "values", "in", "the", ":", "attr", ":", "Guild", ".", "roles", "attribute", "." ]
python
train
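Usage inside a discord.py bot; the channel ID and token are placeholders. Each returned role is a copy with the channel's overwrites already applied, so mutating it does not touch the guild-level role:

import discord

client = discord.Client()

@client.event
async def on_ready():
    channel = client.get_channel(123456789012345678)  # placeholder ID
    for role in channel.changed_roles:
        print(role.name, role.permissions.value)

client.run('TOKEN')  # placeholder token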
outini/python-pylls
pylls/cachet.py
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/cachet.py#L139-L165
def update(self, component_id, name=None, status=None, description=None, link=None, order=None, group_id=None, enabled=True): """Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses """ data = ApiParams() data['component'] = component_id data['name'] = name data['status'] = status data['description'] = description data['link'] = link data['order'] = order data['group_id'] = group_id data['enabled'] = enabled return self._put('components/%s' % component_id, data=data)['data']
[ "def", "update", "(", "self", ",", "component_id", ",", "name", "=", "None", ",", "status", "=", "None", ",", "description", "=", "None", ",", "link", "=", "None", ",", "order", "=", "None", ",", "group_id", "=", "None", ",", "enabled", "=", "True", ")", ":", "data", "=", "ApiParams", "(", ")", "data", "[", "'component'", "]", "=", "component_id", "data", "[", "'name'", "]", "=", "name", "data", "[", "'status'", "]", "=", "status", "data", "[", "'description'", "]", "=", "description", "data", "[", "'link'", "]", "=", "link", "data", "[", "'order'", "]", "=", "order", "data", "[", "'group_id'", "]", "=", "group_id", "data", "[", "'enabled'", "]", "=", "enabled", "return", "self", ".", "_put", "(", "'components/%s'", "%", "component_id", ",", "data", "=", "data", ")", "[", "'data'", "]" ]
Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses
[ "Update", "a", "component" ]
python
train
jason-weirather/py-seq-tools
seqtools/range/locus.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/range/locus.py#L68-L84
def update_loci(self): """Goes through and combines loci until we have one set meeting our overlap definition""" # Create sub-loci for each chromosome lbc = {} chroms = sorted([x.range.chr for x in self.loci]) for chrom in chroms: lbc[chrom] = Loci() for x in self.loci: lbc[x.range.chr].add_locus(x) for chrom in sorted(lbc.keys()): if self.verbose: lbc[chrom].verbose = True sys.stderr.write(chrom+"\n") lbc[chrom].overhang = self.overhang lbc[chrom].use_direction = self.use_direction lbc[chrom].merge_down_loci() self.loci = [] for chrom in sorted(lbc.keys()): for locus in lbc[chrom].loci: self.loci.append(locus)
[ "def", "update_loci", "(", "self", ")", ":", "# Create sub-loci for each chromosome", "lbc", "=", "{", "}", "chroms", "=", "sorted", "(", "[", "x", ".", "range", ".", "chr", "for", "x", "in", "self", ".", "loci", "]", ")", "for", "chrom", "in", "chroms", ":", "lbc", "[", "chrom", "]", "=", "Loci", "(", ")", "for", "x", "in", "self", ".", "loci", ":", "lbc", "[", "x", ".", "range", ".", "chr", "]", ".", "add_locus", "(", "x", ")", "for", "chrom", "in", "sorted", "(", "lbc", ".", "keys", "(", ")", ")", ":", "if", "self", ".", "verbose", ":", "lbc", "[", "chrom", "]", ".", "verbose", "=", "True", "sys", ".", "stderr", ".", "write", "(", "chrom", "+", "\"\\n\"", ")", "lbc", "[", "chrom", "]", ".", "overhang", "=", "self", ".", "overhang", "lbc", "[", "chrom", "]", ".", "use_direction", "=", "self", ".", "use_direction", "lbc", "[", "chrom", "]", ".", "merge_down_loci", "(", ")", "self", ".", "loci", "=", "[", "]", "for", "chrom", "in", "sorted", "(", "lbc", ".", "keys", "(", ")", ")", ":", "for", "locus", "in", "lbc", "[", "chrom", "]", ".", "loci", ":", "self", ".", "loci", ".", "append", "(", "locus", ")" ]
Goes through and combines loci until we have one set meeting our overlap definition
[ "Goes", "through", "and", "combines", "loci", "until", "we", "have", "one", "set", "meeting", "our", "overlap", "definition" ]
python
train
ArduPilot/MAVProxy
MAVProxy/modules/lib/MacOS/backend_wx.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wx.py#L1795-L1803
def _create_menu(self): """ Creates the 'menu' - implemented as a button which opens a pop-up menu since wxPython does not allow a menu as a control """ DEBUG_MSG("_create_menu()", 1, self) self._menu = MenuButtonWx(self) self.AddControl(self._menu) self.AddSeparator()
[ "def", "_create_menu", "(", "self", ")", ":", "DEBUG_MSG", "(", "\"_create_menu()\"", ",", "1", ",", "self", ")", "self", ".", "_menu", "=", "MenuButtonWx", "(", "self", ")", "self", ".", "AddControl", "(", "self", ".", "_menu", ")", "self", ".", "AddSeparator", "(", ")" ]
Creates the 'menu' - implemented as a button which opens a pop-up menu since wxPython does not allow a menu as a control
[ "Creates", "the", "menu", "-", "implemented", "as", "a", "button", "which", "opens", "a", "pop", "-", "up", "menu", "since", "wxPython", "does", "not", "allow", "a", "menu", "as", "a", "control" ]
python
train
google/budou
budou/parser.py
https://github.com/google/budou/blob/101224e6523186851f38ee57a6b2e7bdbd826de2/budou/parser.py#L169-L182
def preprocess(source): """Removes unnecessary break lines and white spaces. Args: source (str): Input sentence. Returns: Preprocessed sentence. (str) """ doc = html5lib.parseFragment(source) source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8') source = source.replace(u'\n', u'').strip() source = re.sub(r'\s\s+', u' ', source) return source
[ "def", "preprocess", "(", "source", ")", ":", "doc", "=", "html5lib", ".", "parseFragment", "(", "source", ")", "source", "=", "ET", ".", "tostring", "(", "doc", ",", "encoding", "=", "'utf-8'", ",", "method", "=", "'text'", ")", ".", "decode", "(", "'utf-8'", ")", "source", "=", "source", ".", "replace", "(", "u'\\n'", ",", "u''", ")", ".", "strip", "(", ")", "source", "=", "re", ".", "sub", "(", "r'\\s\\s+'", ",", "u' '", ",", "source", ")", "return", "source" ]
Removes unnecessary break lines and white spaces. Args: source (str): Input sentence. Returns: Preprocessed sentence. (str)
[ "Removes", "unnecessary", "break", "lines", "and", "white", "spaces", "." ]
python
train
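Because `preprocess` parses its input as an HTML fragment before normalising whitespace, tags are dropped, newlines are removed, and runs of spaces collapse to a single space (requires budou and its html5lib dependency to be installed):

from budou.parser import preprocess

print(preprocess(u'<p>Hello \n  world</p>'))  # 'Hello world'
print(preprocess(u' one  two\n three '))      # 'one two three'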
Azure/azure-uamqp-python
uamqp/message.py
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/message.py#L214-L228
def get_message_encoded_size(self):
        """Pre-emptively get the size of the message once it has been encoded
        to go over the wire so we can raise an error if the message will be
        rejected for being too large.

        This method is not available for messages that have been received.

        :rtype: int
        """
        if not self._message:
            raise ValueError("No message data to encode.")
        cloned_data = self._message.clone()
        self._populate_message_attributes(cloned_data)
        encoded_data = []
        return c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
[ "def", "get_message_encoded_size", "(", "self", ")", ":", "if", "not", "self", ".", "_message", ":", "raise", "ValueError", "(", "\"No message data to encode.\"", ")", "cloned_data", "=", "self", ".", "_message", ".", "clone", "(", ")", "self", ".", "_populate_message_attributes", "(", "cloned_data", ")", "encoded_data", "=", "[", "]", "return", "c_uamqp", ".", "get_encoded_message_size", "(", "cloned_data", ",", "encoded_data", ")" ]
Pre-emptively get the size of the message once it has been encoded
to go over the wire so we can raise an error if the message will be
rejected for being too large.

This method is not available for messages that have been received.

:rtype: int
[ "Pre", "-", "emptively", "get", "the", "size", "of", "the", "message", "once", "it", "has", "been", "encoded", "to", "go", "over", "the", "wire", "so", "we", "can", "raise", "an", "error", "if", "the", "message", "will", "be", "rejected", "for", "being", "to", "large", "." ]
python
train
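A sketch of the intended pre-flight check, assuming uamqp is installed; the 1 MB limit below is an illustrative number, not a real broker setting:

import uamqp

MAX_SIZE = 1024 * 1024  # illustrative limit only
message = uamqp.Message(body=b'payload')
if message.get_message_encoded_size() > MAX_SIZE:
    raise ValueError('message would be rejected as too large')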
ppo/django-guitar
guitar/utils/admin.py
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/utils/admin.py#L114-L128
def get_list_index(lst, index_or_name): """ Return the index of an element in the list. Args: lst (list): The list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Returns: (int) The index of the element in the list. """ if isinstance(index_or_name, six.integer_types): return index_or_name return lst.index(index_or_name)
[ "def", "get_list_index", "(", "lst", ",", "index_or_name", ")", ":", "if", "isinstance", "(", "index_or_name", ",", "six", ".", "integer_types", ")", ":", "return", "index_or_name", "return", "lst", ".", "index", "(", "index_or_name", ")" ]
Return the index of an element in the list. Args: lst (list): The list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Returns: (int) The index of the element in the list.
[ "Return", "the", "index", "of", "an", "element", "in", "the", "list", "." ]
python
train
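The pass-through for integers means callers can reference an element either by value or by position, which is handy when reordering admin field lists:

from guitar.utils.admin import get_list_index

fields = ['title', 'slug', 'created']
print(get_list_index(fields, 'slug'))  # 1 (looked up by value)
print(get_list_index(fields, 2))       # 2 (numeric index passed through)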
raiden-network/raiden
raiden/storage/sqlite.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/storage/sqlite.py#L615-L626
def write_events(self, state_change_identifier, events, log_time):
        """ Save events.

        Args:
            state_change_identifier: Id of the state change that generated these events.
            events: List of Event objects.
        """
        events_data = [
            (None, state_change_identifier, log_time, self.serializer.serialize(event))
            for event in events
        ]
        return super().write_events(events_data)
[ "def", "write_events", "(", "self", ",", "state_change_identifier", ",", "events", ",", "log_time", ")", ":", "events_data", "=", "[", "(", "None", ",", "state_change_identifier", ",", "log_time", ",", "self", ".", "serializer", ".", "serialize", "(", "event", ")", ")", "for", "event", "in", "events", "]", "return", "super", "(", ")", ".", "write_events", "(", "events_data", ")" ]
Save events. Args: state_change_identifier: Id of the state change that generate these events. events: List of Event objects.
[ "Save", "events", "." ]
python
train
rorr73/LifeSOSpy
lifesospy/client.py
https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/client.py#L44-L50
async def async_open(self) -> None: """Opens connection to the LifeSOS ethernet interface.""" await self._loop.create_connection( lambda: self, self._host, self._port)
[ "async", "def", "async_open", "(", "self", ")", "->", "None", ":", "await", "self", ".", "_loop", ".", "create_connection", "(", "lambda", ":", "self", ",", "self", ".", "_host", ",", "self", ".", "_port", ")" ]
Opens connection to the LifeSOS ethernet interface.
[ "Opens", "connection", "to", "the", "LifeSOS", "ethernet", "interface", "." ]
python
train
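The method is a thin wrapper over loop.create_connection, with the client object acting as its own protocol factory. The same pattern stripped down to the standard library (host and port are placeholders):

import asyncio

class LineClient(asyncio.Protocol):
    def connection_made(self, transport):
        transport.write(b'ping\r\n')

async def main():
    loop = asyncio.get_running_loop()
    # The factory returns the protocol instance; the loop wires it to the socket.
    await loop.create_connection(lambda: LineClient(), '192.168.1.100', 1680)

asyncio.run(main())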
klahnakoski/pyLibrary
jx_python/group_by.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/group_by.py#L27-L81
def groupby(data, keys=None, size=None, min_size=None, max_size=None, contiguous=False): """ :param data: :param keys: :param size: :param min_size: :param max_size: :param contiguous: MAINTAIN THE ORDER OF THE DATA, STARTING THE NEW GROUP WHEN THE SELECTOR CHANGES :return: return list of (keys, values) PAIRS, WHERE keys IS IN LEAF FORM (FOR USE WITH {"eq": terms} OPERATOR values IS GENERATOR OF ALL VALUE THAT MATCH keys contiguous - """ if isinstance(data, Container): return data.groupby(keys) if size != None or min_size != None or max_size != None: if size != None: max_size = size return groupby_min_max_size(data, min_size=min_size, max_size=max_size) try: keys = listwrap(keys) if not contiguous: from jx_python import jx data = jx.sort(data, keys) if not data: return Null if any(is_expression(k) for k in keys): Log.error("can not handle expressions") else: accessor = jx_expression_to_function(jx_expression({"tuple": keys})) # CAN RETURN Null, WHICH DOES NOT PLAY WELL WITH __cmp__ def _output(): start = 0 prev = accessor(data[0]) for i, d in enumerate(data): curr = accessor(d) if curr != prev: group = {} for k, gg in zip(keys, prev): group[k] = gg yield Data(group), data[start:i:] start = i prev = curr group = {} for k, gg in zip(keys, prev): group[k] = gg yield Data(group), data[start::] return _output() except Exception as e: Log.error("Problem grouping", cause=e)
[ "def", "groupby", "(", "data", ",", "keys", "=", "None", ",", "size", "=", "None", ",", "min_size", "=", "None", ",", "max_size", "=", "None", ",", "contiguous", "=", "False", ")", ":", "if", "isinstance", "(", "data", ",", "Container", ")", ":", "return", "data", ".", "groupby", "(", "keys", ")", "if", "size", "!=", "None", "or", "min_size", "!=", "None", "or", "max_size", "!=", "None", ":", "if", "size", "!=", "None", ":", "max_size", "=", "size", "return", "groupby_min_max_size", "(", "data", ",", "min_size", "=", "min_size", ",", "max_size", "=", "max_size", ")", "try", ":", "keys", "=", "listwrap", "(", "keys", ")", "if", "not", "contiguous", ":", "from", "jx_python", "import", "jx", "data", "=", "jx", ".", "sort", "(", "data", ",", "keys", ")", "if", "not", "data", ":", "return", "Null", "if", "any", "(", "is_expression", "(", "k", ")", "for", "k", "in", "keys", ")", ":", "Log", ".", "error", "(", "\"can not handle expressions\"", ")", "else", ":", "accessor", "=", "jx_expression_to_function", "(", "jx_expression", "(", "{", "\"tuple\"", ":", "keys", "}", ")", ")", "# CAN RETURN Null, WHICH DOES NOT PLAY WELL WITH __cmp__", "def", "_output", "(", ")", ":", "start", "=", "0", "prev", "=", "accessor", "(", "data", "[", "0", "]", ")", "for", "i", ",", "d", "in", "enumerate", "(", "data", ")", ":", "curr", "=", "accessor", "(", "d", ")", "if", "curr", "!=", "prev", ":", "group", "=", "{", "}", "for", "k", ",", "gg", "in", "zip", "(", "keys", ",", "prev", ")", ":", "group", "[", "k", "]", "=", "gg", "yield", "Data", "(", "group", ")", ",", "data", "[", "start", ":", "i", ":", "]", "start", "=", "i", "prev", "=", "curr", "group", "=", "{", "}", "for", "k", ",", "gg", "in", "zip", "(", "keys", ",", "prev", ")", ":", "group", "[", "k", "]", "=", "gg", "yield", "Data", "(", "group", ")", ",", "data", "[", "start", ":", ":", "]", "return", "_output", "(", ")", "except", "Exception", "as", "e", ":", "Log", ".", "error", "(", "\"Problem grouping\"", ",", "cause", "=", "e", ")" ]
:param data: :param keys: :param size: :param min_size: :param max_size: :param contiguous: MAINTAIN THE ORDER OF THE DATA, STARTING THE NEW GROUP WHEN THE SELECTOR CHANGES :return: return list of (keys, values) PAIRS, WHERE keys IS IN LEAF FORM (FOR USE WITH {"eq": terms} OPERATOR values IS GENERATOR OF ALL VALUE THAT MATCH keys contiguous -
[ ":", "param", "data", ":", ":", "param", "keys", ":", ":", "param", "size", ":", ":", "param", "min_size", ":", ":", "param", "max_size", ":", ":", "param", "contiguous", ":", "MAINTAIN", "THE", "ORDER", "OF", "THE", "DATA", "STARTING", "THE", "NEW", "GROUP", "WHEN", "THE", "SELECTOR", "CHANGES", ":", "return", ":", "return", "list", "of", "(", "keys", "values", ")", "PAIRS", "WHERE", "keys", "IS", "IN", "LEAF", "FORM", "(", "FOR", "USE", "WITH", "{", "eq", ":", "terms", "}", "OPERATOR", "values", "IS", "GENERATOR", "OF", "ALL", "VALUE", "THAT", "MATCH", "keys", "contiguous", "-" ]
python
train
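With contiguous=True the generator behaves like itertools.groupby: a new group starts whenever the key changes, so unsorted input yields repeated keys. A standard-library sketch of that behaviour:

from itertools import groupby

rows = [
    {'k': 'a', 'v': 1},
    {'k': 'a', 'v': 2},
    {'k': 'b', 'v': 3},
    {'k': 'a', 'v': 4},  # 'a' again -> a second 'a' group
]
for key, grp in groupby(rows, key=lambda r: r['k']):
    print(key, [r['v'] for r in grp])
# a [1, 2]
# b [3]
# a [4]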
markrwilliams/txdarn
txdarn/protocol.py
https://github.com/markrwilliams/txdarn/blob/154d25a1ac78c4e2877c0656e3b9cea4332eda57/txdarn/protocol.py#L140-L143
def _writeToTransport(self, data): '''Frame the array-like thing and write it.''' self.transport.writeData(data) self.heartbeater.schedule()
[ "def", "_writeToTransport", "(", "self", ",", "data", ")", ":", "self", ".", "transport", ".", "writeData", "(", "data", ")", "self", ".", "heartbeater", ".", "schedule", "(", ")" ]
Frame the array-like thing and write it.
[ "Frame", "the", "array", "-", "like", "thing", "and", "write", "it", "." ]
python
train
duniter/duniter-python-api
duniterpy/documents/membership.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/membership.py#L138-L158
def raw(self) -> str: """ Return signed raw format string of the Membership instance :return: """ return """Version: {0} Type: Membership Currency: {1} Issuer: {2} Block: {3} Membership: {4} UserID: {5} CertTS: {6} """.format(self.version, self.currency, self.issuer, self.membership_ts, self.membership_type, self.uid, self.identity_ts)
[ "def", "raw", "(", "self", ")", "->", "str", ":", "return", "\"\"\"Version: {0}\nType: Membership\nCurrency: {1}\nIssuer: {2}\nBlock: {3}\nMembership: {4}\nUserID: {5}\nCertTS: {6}\n\"\"\"", ".", "format", "(", "self", ".", "version", ",", "self", ".", "currency", ",", "self", ".", "issuer", ",", "self", ".", "membership_ts", ",", "self", ".", "membership_type", ",", "self", ".", "uid", ",", "self", ".", "identity_ts", ")" ]
Return signed raw format string of the Membership instance :return:
[ "Return", "signed", "raw", "format", "string", "of", "the", "Membership", "instance" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/hparam.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L470-L480
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
[ "def", "del_hparam", "(", "self", ",", "name", ")", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "delattr", "(", "self", ",", "name", ")", "del", "self", ".", "_hparam_types", "[", "name", "]" ]
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
[ "Removes", "the", "hyperparameter", "with", "key", "name", "." ]
python
train
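Usage against the HParams class this method belongs to; constructing HParams from keyword arguments is the usual entry point in tensor2tensor. Note the guard makes deleting a missing name a no-op:

from tensor2tensor.utils.hparam import HParams

hp = HParams(learning_rate=0.1, hidden_size=128)
hp.del_hparam('hidden_size')
hp.del_hparam('hidden_size')  # no-op: the name is already gone
print(hp.values())            # {'learning_rate': 0.1}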
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10493-L10510
def set_attitude_target_encode(self, time_boot_ms, target_system, target_component, type_mask, q, body_roll_rate, body_pitch_rate, body_yaw_rate, thrust):
                '''
                Sets a desired vehicle attitude. Used by an external controller to
                command the vehicle (manual controller or other system).

                time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
                target_system : System ID (uint8_t)
                target_component : Component ID (uint8_t)
                type_mask : Mappings: If any of these bits are set, the corresponding input should be ignored: bit 1: body roll rate, bit 2: body pitch rate, bit 3: body yaw rate. bit 4-bit 6: reserved, bit 7: throttle, bit 8: attitude (uint8_t)
                q : Attitude quaternion (w, x, y, z order, zero-rotation is 1, 0, 0, 0) (float)
                body_roll_rate : Body roll rate in radians per second (float)
                body_pitch_rate : Body pitch rate in radians per second (float)
                body_yaw_rate : Body yaw rate in radians per second (float)
                thrust : Collective thrust, normalized to 0 .. 1 (-1 .. 1 for vehicles capable of reverse thrust) (float)

                '''
                return MAVLink_set_attitude_target_message(time_boot_ms, target_system, target_component, type_mask, q, body_roll_rate, body_pitch_rate, body_yaw_rate, thrust)
[ "def", "set_attitude_target_encode", "(", "self", ",", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "type_mask", ",", "q", ",", "body_roll_rate", ",", "body_pitch_rate", ",", "body_yaw_rate", ",", "thrust", ")", ":", "return", "MAVLink_set_attitude_target_message", "(", "time_boot_ms", ",", "target_system", ",", "target_component", ",", "type_mask", ",", "q", ",", "body_roll_rate", ",", "body_pitch_rate", ",", "body_yaw_rate", ",", "thrust", ")" ]
Sets a desired vehicle attitude. Used by an external controller to
                command the vehicle (manual controller or other system).

                time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
                target_system : System ID (uint8_t)
                target_component : Component ID (uint8_t)
                type_mask : Mappings: If any of these bits are set, the corresponding input should be ignored: bit 1: body roll rate, bit 2: body pitch rate, bit 3: body yaw rate. bit 4-bit 6: reserved, bit 7: throttle, bit 8: attitude (uint8_t)
                q : Attitude quaternion (w, x, y, z order, zero-rotation is 1, 0, 0, 0) (float)
                body_roll_rate : Body roll rate in radians per second (float)
                body_pitch_rate : Body pitch rate in radians per second (float)
                body_yaw_rate : Body yaw rate in radians per second (float)
                thrust : Collective thrust, normalized to 0 .. 1 (-1 .. 1 for vehicles capable of reverse thrust) (float)
[ "Sets", "a", "desired", "vehicle", "attitude", ".", "Used", "by", "an", "external", "controller", "to", "command", "the", "vehicle", "(", "manual", "controller", "or", "other", "system", ")", "." ]
python
train
PayEx/pypayex
payex/handlers.py
https://github.com/PayEx/pypayex/blob/549ba7cc47f112a7aa3417fcf87ff07bc74cd9ab/payex/handlers.py#L109-L128
def client_factory(self): """ Custom client factory to set proxy options. """ if self._service.production: url = self.production_url else: url = self.testing_url proxy_options = dict() https_proxy_setting = os.environ.get('PAYEX_HTTPS_PROXY') or os.environ.get('https_proxy') http_proxy_setting = os.environ.get('PAYEX_HTTP_PROXY') or os.environ.get('http_proxy') if https_proxy_setting: proxy_options['https'] = https_proxy_setting if http_proxy_setting: proxy_options['http'] = http_proxy_setting return client.Client(url, proxy=proxy_options)
[ "def", "client_factory", "(", "self", ")", ":", "if", "self", ".", "_service", ".", "production", ":", "url", "=", "self", ".", "production_url", "else", ":", "url", "=", "self", ".", "testing_url", "proxy_options", "=", "dict", "(", ")", "https_proxy_setting", "=", "os", ".", "environ", ".", "get", "(", "'PAYEX_HTTPS_PROXY'", ")", "or", "os", ".", "environ", ".", "get", "(", "'https_proxy'", ")", "http_proxy_setting", "=", "os", ".", "environ", ".", "get", "(", "'PAYEX_HTTP_PROXY'", ")", "or", "os", ".", "environ", ".", "get", "(", "'http_proxy'", ")", "if", "https_proxy_setting", ":", "proxy_options", "[", "'https'", "]", "=", "https_proxy_setting", "if", "http_proxy_setting", ":", "proxy_options", "[", "'http'", "]", "=", "http_proxy_setting", "return", "client", ".", "Client", "(", "url", ",", "proxy", "=", "proxy_options", ")" ]
Custom client factory to set proxy options.
[ "Custom", "client", "factory", "to", "set", "proxy", "options", "." ]
python
train
CivicSpleen/ambry
ambry/valuetype/types.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/types.py#L45-L54
def nullify(v): """Convert empty strings and strings with only spaces to None values. """ if isinstance(v, six.string_types): v = v.strip() if v is None or v == '': return None else: return v
[ "def", "nullify", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", ":", "v", "=", "v", ".", "strip", "(", ")", "if", "v", "is", "None", "or", "v", "==", "''", ":", "return", "None", "else", ":", "return", "v" ]
Convert empty strings and strings with only spaces to None values.
[ "Convert", "empty", "strings", "and", "strings", "with", "only", "spaces", "to", "None", "values", "." ]
python
train
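nullify is a pure helper, so it can be checked directly; these calls follow straight from the code above.

assert nullify('   ') is None     # whitespace-only strings become None
assert nullify('') is None
assert nullify(None) is None
assert nullify(' x ') == 'x'      # surrounding whitespace is stripped
assert nullify(42) == 42          # non-strings pass through unchanged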
google/grumpy
third_party/stdlib/uu.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/uu.py#L90-L156
def decode(in_file, out_file=None, mode=None, quiet=0):
    """Decode uuencoded file"""
    #
    # Open the input file, if needed.
    #
    opened_files = []
    if in_file == '-':
        in_file = sys.stdin
    elif isinstance(in_file, basestring):
        in_file = open(in_file)
        opened_files.append(in_file)
    try:
        #
        # Read until a begin is encountered or we've exhausted the file
        #
        while True:
            hdr = in_file.readline()
            if not hdr:
                raise Error('No valid begin line found in input file')
            if not hdr.startswith('begin'):
                continue
            hdrfields = hdr.split(' ', 2)
            if len(hdrfields) == 3 and hdrfields[0] == 'begin':
                try:
                    int(hdrfields[1], 8)
                    break
                except ValueError:
                    pass
        if out_file is None:
            out_file = hdrfields[2].rstrip()
            if os.path.exists(out_file):
                raise Error('Cannot overwrite existing file: %s' % out_file)
        if mode is None:
            mode = int(hdrfields[1], 8)
        #
        # Open the output file
        #
        if out_file == '-':
            out_file = sys.stdout
        elif isinstance(out_file, basestring):
            fp = open(out_file, 'wb')
            try:
                os.chmod(out_file, mode)
            except AttributeError:
                pass
            out_file = fp
            opened_files.append(out_file)
        #
        # Main decoding loop
        #
        s = in_file.readline()
        while s and s.strip() != 'end':
            try:
                data = binascii.a2b_uu(s)
            except binascii.Error, v:
                # Workaround for broken uuencoders by /Fredrik Lundh
                nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
                data = binascii.a2b_uu(s[:nbytes])
                if not quiet:
                    sys.stderr.write("Warning: %s\n" % v)
            out_file.write(data)
            s = in_file.readline()
        if not s:
            raise Error('Truncated input file')
    finally:
        for f in opened_files:
            f.close()
[ "def", "decode", "(", "in_file", ",", "out_file", "=", "None", ",", "mode", "=", "None", ",", "quiet", "=", "0", ")", ":", "#", "# Open the input file, if needed.", "#", "opened_files", "=", "[", "]", "if", "in_file", "==", "'-'", ":", "in_file", "=", "sys", ".", "stdin", "elif", "isinstance", "(", "in_file", ",", "basestring", ")", ":", "in_file", "=", "open", "(", "in_file", ")", "opened_files", ".", "append", "(", "in_file", ")", "try", ":", "#", "# Read until a begin is encountered or we've exhausted the file", "#", "while", "True", ":", "hdr", "=", "in_file", ".", "readline", "(", ")", "if", "not", "hdr", ":", "raise", "Error", "(", "'No valid begin line found in input file'", ")", "if", "not", "hdr", ".", "startswith", "(", "'begin'", ")", ":", "continue", "hdrfields", "=", "hdr", ".", "split", "(", "' '", ",", "2", ")", "if", "len", "(", "hdrfields", ")", "==", "3", "and", "hdrfields", "[", "0", "]", "==", "'begin'", ":", "try", ":", "int", "(", "hdrfields", "[", "1", "]", ",", "8", ")", "break", "except", "ValueError", ":", "pass", "if", "out_file", "is", "None", ":", "out_file", "=", "hdrfields", "[", "2", "]", ".", "rstrip", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "out_file", ")", ":", "raise", "Error", "(", "'Cannot overwrite existing file: %s'", "%", "out_file", ")", "if", "mode", "is", "None", ":", "mode", "=", "int", "(", "hdrfields", "[", "1", "]", ",", "8", ")", "#", "# Open the output file", "#", "if", "out_file", "==", "'-'", ":", "out_file", "=", "sys", ".", "stdout", "elif", "isinstance", "(", "out_file", ",", "basestring", ")", ":", "fp", "=", "open", "(", "out_file", ",", "'wb'", ")", "try", ":", "os", ".", "path", ".", "chmod", "(", "out_file", ",", "mode", ")", "except", "AttributeError", ":", "pass", "out_file", "=", "fp", "opened_files", ".", "append", "(", "out_file", ")", "#", "# Main decoding loop", "#", "s", "=", "in_file", ".", "readline", "(", ")", "while", "s", "and", "s", ".", "strip", "(", ")", "!=", "'end'", ":", "try", ":", "data", "=", "binascii", ".", "a2b_uu", "(", "s", ")", "except", "binascii", ".", "Error", ",", "v", ":", "# Workaround for broken uuencoders by /Fredrik Lundh", "nbytes", "=", "(", "(", "(", "ord", "(", "s", "[", "0", "]", ")", "-", "32", ")", "&", "63", ")", "*", "4", "+", "5", ")", "//", "3", "data", "=", "binascii", ".", "a2b_uu", "(", "s", "[", ":", "nbytes", "]", ")", "if", "not", "quiet", ":", "sys", ".", "stderr", ".", "write", "(", "\"Warning: %s\\n\"", "%", "v", ")", "out_file", ".", "write", "(", "data", ")", "s", "=", "in_file", ".", "readline", "(", ")", "if", "not", "s", ":", "raise", "Error", "(", "'Truncated input file'", ")", "finally", ":", "for", "f", "in", "opened_files", ":", "f", ".", "close", "(", ")" ]
Decode uuencoded file
[ "Decode", "uuencoded", "file" ]
python
valid
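A short round-trip sketch for the decoder above (Python 2, matching this grumpy stdlib copy); the file names are illustrative.

encode('payload.bin', 'payload.uu')  # encode() is the sibling function in uu.py
decode('payload.uu', 'payload.out')  # explicit output path
decode('payload.uu')                 # falls back to the name in the 'begin' header,
                                     # raising Error if that file already exists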
capless/python-jose-cryptodome
jose/jwt.py
https://github.com/capless/python-jose-cryptodome/blob/a169236e2380cff7f1380bbe8eba70cda7393e28/jose/jwt.py#L193-L218
def get_unverified_claims(token): """Returns the decoded claims without verification of any kind. Args: token (str): A signed JWT to decode the headers from. Returns: dict: The dict representation of the token claims. Raises: JWTError: If there is an exception decoding the token. """ try: claims = jws.get_unverified_claims(token) except: raise JWTError('Error decoding token claims.') try: claims = json.loads(claims.decode('utf-8')) except ValueError as e: raise JWTError('Invalid claims string: %s' % e) if not isinstance(claims, Mapping): raise JWTError('Invalid claims string: must be a json object') return claims
[ "def", "get_unverified_claims", "(", "token", ")", ":", "try", ":", "claims", "=", "jws", ".", "get_unverified_claims", "(", "token", ")", "except", ":", "raise", "JWTError", "(", "'Error decoding token claims.'", ")", "try", ":", "claims", "=", "json", ".", "loads", "(", "claims", ".", "decode", "(", "'utf-8'", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "JWTError", "(", "'Invalid claims string: %s'", "%", "e", ")", "if", "not", "isinstance", "(", "claims", ",", "Mapping", ")", ":", "raise", "JWTError", "(", "'Invalid claims string: must be a json object'", ")", "return", "claims" ]
Returns the decoded claims without verification of any kind. Args: token (str): A signed JWT to decode the headers from. Returns: dict: The dict representation of the token claims. Raises: JWTError: If there is an exception decoding the token.
[ "Returns", "the", "decoded", "claims", "without", "verification", "of", "any", "kind", "." ]
python
train
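The same function is exposed at module level, so peeking at claims needs no key material; `token` is assumed to be a compact-serialized JWT obtained elsewhere.

from jose import jwt

claims = jwt.get_unverified_claims(token)  # dict of claims; the signature is NOT verified
subject = claims.get('sub')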
hotdoc/hotdoc
hotdoc/core/config.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/config.py#L207-L230
def get_index(self, prefix=''): """ Retrieve the absolute path to an index, according to `prefix`. Args: prefix: str, the desired prefix or `None`. Returns: str: An absolute path, or `None` """ if prefix: prefixed = '%s_index' % prefix else: prefixed = 'index' if prefixed in self.__cli and self.__cli[prefixed]: index = self.__cli.get(prefixed) from_conf = False else: index = self.__config.get(prefixed) from_conf = True return self.__abspath(index, from_conf)
[ "def", "get_index", "(", "self", ",", "prefix", "=", "''", ")", ":", "if", "prefix", ":", "prefixed", "=", "'%s_index'", "%", "prefix", "else", ":", "prefixed", "=", "'index'", "if", "prefixed", "in", "self", ".", "__cli", "and", "self", ".", "__cli", "[", "prefixed", "]", ":", "index", "=", "self", ".", "__cli", ".", "get", "(", "prefixed", ")", "from_conf", "=", "False", "else", ":", "index", "=", "self", ".", "__config", ".", "get", "(", "prefixed", ")", "from_conf", "=", "True", "return", "self", ".", "__abspath", "(", "index", ",", "from_conf", ")" ]
Retrieve the absolute path to an index, according to `prefix`. Args: prefix: str, the desired prefix or `None`. Returns: str: An absolute path, or `None`
[ "Retrieve", "the", "absolute", "path", "to", "an", "index", "according", "to", "prefix", "." ]
python
train
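A hedged sketch of get_index; the Config constructor arguments are assumptions about how hotdoc loads CLI and JSON settings, and the file name is hypothetical.

config = Config(conf_file='hotdoc.json')  # assumed constructor form
main_index = config.get_index()           # resolves the plain 'index' key
c_index = config.get_index(prefix='c')    # resolves 'c_index'; CLI values win over the conf file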
chrislit/abydos
abydos/fingerprint/_omission_key.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/fingerprint/_omission_key.py#L49-L88
def fingerprint(self, word): """Return the omission key. Parameters ---------- word : str The word to transform into its omission key Returns ------- str The omission key Examples -------- >>> ok = OmissionKey() >>> ok.fingerprint('The quick brown fox jumped over the lazy dog.') 'JKQXZVWYBFMGPDHCLNTREUIOA' >>> ok.fingerprint('Christopher') 'PHCTSRIOE' >>> ok.fingerprint('Niall') 'LNIA' """ word = unicode_normalize('NFKD', text_type(word.upper())) word = ''.join(c for c in word if c in self._letters) key = '' # add consonants in order supplied by _consonants (no duplicates) for char in self._consonants: if char in word: key += char # add vowels in order they appeared in the word (no duplicates) for char in word: if char not in self._consonants and char not in key: key += char return key
[ "def", "fingerprint", "(", "self", ",", "word", ")", ":", "word", "=", "unicode_normalize", "(", "'NFKD'", ",", "text_type", "(", "word", ".", "upper", "(", ")", ")", ")", "word", "=", "''", ".", "join", "(", "c", "for", "c", "in", "word", "if", "c", "in", "self", ".", "_letters", ")", "key", "=", "''", "# add consonants in order supplied by _consonants (no duplicates)", "for", "char", "in", "self", ".", "_consonants", ":", "if", "char", "in", "word", ":", "key", "+=", "char", "# add vowels in order they appeared in the word (no duplicates)", "for", "char", "in", "word", ":", "if", "char", "not", "in", "self", ".", "_consonants", "and", "char", "not", "in", "key", ":", "key", "+=", "char", "return", "key" ]
Return the omission key. Parameters ---------- word : str The word to transform into its omission key Returns ------- str The omission key Examples -------- >>> ok = OmissionKey() >>> ok.fingerprint('The quick brown fox jumped over the lazy dog.') 'JKQXZVWYBFMGPDHCLNTREUIOA' >>> ok.fingerprint('Christopher') 'PHCTSRIOE' >>> ok.fingerprint('Niall') 'LNIA'
[ "Return", "the", "omission", "key", "." ]
python
valid
aouyar/PyMunin
pymunin/__init__.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/__init__.py#L938-L973
def muninMain(pluginClass, argv=None, env=None, debug=False): """Main Block for Munin Plugins. @param pluginClass: Child class of MuninPlugin that implements plugin. @param argv: List of command line arguments to Munin Plugin. @param env: Dictionary of environment variables passed to Munin Plugin. @param debug: Print debugging messages if True. (Default: False) """ if argv is None: argv = sys.argv if env is None: env = os.environ debug = debug or env.has_key('MUNIN_DEBUG') if len(argv) > 1 and argv[1] == 'autoconf': autoconf = True else: autoconf = False try: plugin = pluginClass(argv, env, debug) ret = plugin.run() if ret: return 0 else: return 1 except Exception: print >> sys.stderr, "ERROR: %s" % repr(sys.exc_info()[1]) if autoconf: print "no" if debug: raise else: if autoconf: return 0 else: return 1
[ "def", "muninMain", "(", "pluginClass", ",", "argv", "=", "None", ",", "env", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "debug", "=", "debug", "or", "env", ".", "has_key", "(", "'MUNIN_DEBUG'", ")", "if", "len", "(", "argv", ")", ">", "1", "and", "argv", "[", "1", "]", "==", "'autoconf'", ":", "autoconf", "=", "True", "else", ":", "autoconf", "=", "False", "try", ":", "plugin", "=", "pluginClass", "(", "argv", ",", "env", ",", "debug", ")", "ret", "=", "plugin", ".", "run", "(", ")", "if", "ret", ":", "return", "0", "else", ":", "return", "1", "except", "Exception", ":", "print", ">>", "sys", ".", "stderr", ",", "\"ERROR: %s\"", "%", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "if", "autoconf", ":", "print", "\"no\"", "if", "debug", ":", "raise", "else", ":", "if", "autoconf", ":", "return", "0", "else", ":", "return", "1" ]
Main Block for Munin Plugins. @param pluginClass: Child class of MuninPlugin that implements plugin. @param argv: List of command line arguments to Munin Plugin. @param env: Dictionary of environment variables passed to Munin Plugin. @param debug: Print debugging messages if True. (Default: False)
[ "Main", "Block", "for", "Munin", "Plugins", "." ]
python
train
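Sketch of the intended entry-point pattern: subclass MuninPlugin (defined earlier in this module) and pass the class, not an instance, to muninMain. The plugin body and method names below follow the PyMunin API as best recalled, so treat them as assumptions.

class MyLoadPlugin(MuninPlugin):               # hypothetical plugin
    plugin_name = 'myload'

    def retrieveVals(self):
        self.setGraphVal('load', 'avg', 0.42)  # illustrative value

if __name__ == '__main__':
    sys.exit(muninMain(MyLoadPlugin))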
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L1797-L1838
def delete_service_certificate(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Delete a specific certificate associated with the service CLI Examples: .. code-block:: bash salt-cloud -f delete_service_certificate my-azure name=my_service_certificate \\ thumbalgorithm=sha1 thumbprint=0123456789ABCDEF ''' if call != 'function': raise SaltCloudSystemExit( 'The delete_service_certificate function must be called with -f or --function.' ) if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'thumbalgorithm' not in kwargs: raise SaltCloudSystemExit('A thumbalgorithm must be specified as "thumbalgorithm"') if 'thumbprint' not in kwargs: raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"') if not conn: conn = get_conn() try: data = conn.delete_service_certificate( kwargs['name'], kwargs['thumbalgorithm'], kwargs['thumbprint'], ) return {'Success': 'The service certificate was successfully deleted'} except AzureMissingResourceHttpError as exc: raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message))
[ "def", "delete_service_certificate", "(", "kwargs", "=", "None", ",", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The delete_service_certificate function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A name must be specified as \"name\"'", ")", "if", "'thumbalgorithm'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A thumbalgorithm must be specified as \"thumbalgorithm\"'", ")", "if", "'thumbprint'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'A thumbprint must be specified as \"thumbprint\"'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "try", ":", "data", "=", "conn", ".", "delete_service_certificate", "(", "kwargs", "[", "'name'", "]", ",", "kwargs", "[", "'thumbalgorithm'", "]", ",", "kwargs", "[", "'thumbprint'", "]", ",", ")", "return", "{", "'Success'", ":", "'The service certificate was successfully deleted'", "}", "except", "AzureMissingResourceHttpError", "as", "exc", ":", "raise", "SaltCloudSystemExit", "(", "'{0}: {1}'", ".", "format", "(", "kwargs", "[", "'name'", "]", ",", "exc", ".", "message", ")", ")" ]
.. versionadded:: 2015.8.0 Delete a specific certificate associated with the service CLI Examples: .. code-block:: bash salt-cloud -f delete_service_certificate my-azure name=my_service_certificate \\ thumbalgorithm=sha1 thumbprint=0123456789ABCDEF
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
coddingtonbear/python-myfitnesspal
myfitnesspal/client.py
https://github.com/coddingtonbear/python-myfitnesspal/blob/29aad88d31adc025eacaddd3390cb521b6012b73/myfitnesspal/client.py#L588-L667
def set_measurements( self, measurement='Weight', value=None ): """ Sets measurement for today's date.""" if value is None: raise ValueError( "Cannot update blank value." ) # get the URL for the main check in page # this is left in because we need to parse # the 'measurement' name to set the value. document = self._get_document_for_url( self._get_url_for_measurements() ) # gather the IDs for all measurement types measurement_ids = self._get_measurement_ids(document) # check if the measurement exists before going too far if measurement not in measurement_ids.keys(): raise ValueError( "Measurement '%s' does not exist." % measurement ) # build the update url. update_url = parse.urljoin( self.BASE_URL, 'measurements/save' ) # setup a dict for the post data = {} # here's where we need that required element data['authenticity_token'] = self._authenticity_token # Weight has it's own key value pair if measurement == 'Weight': data['weight[display_value]'] = value # the other measurements have generic names with # an incrementing numeric index. measurement_index = 0 # iterate all the measurement_ids for measurement_id in measurement_ids.keys(): # create the measurement_type[n] # key value pair n = str(measurement_index) meas_type = 'measurement_type[' + n + ']' meas_val = 'measurement_value[' + n + ']' data[meas_type] = measurement_ids[measurement_id] # and if it corresponds to the value we want to update if measurement == measurement_id: # create the measurement_value[n] # key value pair and assign it the value. data[meas_val] = value else: # otherwise, create the key value pair and leave it blank data[meas_val] = "" measurement_index += 1 # now post it. result = self.session.post( update_url, data=data ) # throw an error if it failed. if not result.ok: raise RuntimeError( "Unable to update measurement in MyFitnessPal: " "status code: {status}".format( status=result.status_code ) )
[ "def", "set_measurements", "(", "self", ",", "measurement", "=", "'Weight'", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "\"Cannot update blank value.\"", ")", "# get the URL for the main check in page", "# this is left in because we need to parse", "# the 'measurement' name to set the value.", "document", "=", "self", ".", "_get_document_for_url", "(", "self", ".", "_get_url_for_measurements", "(", ")", ")", "# gather the IDs for all measurement types", "measurement_ids", "=", "self", ".", "_get_measurement_ids", "(", "document", ")", "# check if the measurement exists before going too far", "if", "measurement", "not", "in", "measurement_ids", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"Measurement '%s' does not exist.\"", "%", "measurement", ")", "# build the update url.", "update_url", "=", "parse", ".", "urljoin", "(", "self", ".", "BASE_URL", ",", "'measurements/save'", ")", "# setup a dict for the post", "data", "=", "{", "}", "# here's where we need that required element", "data", "[", "'authenticity_token'", "]", "=", "self", ".", "_authenticity_token", "# Weight has it's own key value pair", "if", "measurement", "==", "'Weight'", ":", "data", "[", "'weight[display_value]'", "]", "=", "value", "# the other measurements have generic names with", "# an incrementing numeric index.", "measurement_index", "=", "0", "# iterate all the measurement_ids", "for", "measurement_id", "in", "measurement_ids", ".", "keys", "(", ")", ":", "# create the measurement_type[n]", "# key value pair", "n", "=", "str", "(", "measurement_index", ")", "meas_type", "=", "'measurement_type['", "+", "n", "+", "']'", "meas_val", "=", "'measurement_value['", "+", "n", "+", "']'", "data", "[", "meas_type", "]", "=", "measurement_ids", "[", "measurement_id", "]", "# and if it corresponds to the value we want to update", "if", "measurement", "==", "measurement_id", ":", "# create the measurement_value[n]", "# key value pair and assign it the value.", "data", "[", "meas_val", "]", "=", "value", "else", ":", "# otherwise, create the key value pair and leave it blank", "data", "[", "meas_val", "]", "=", "\"\"", "measurement_index", "+=", "1", "# now post it.", "result", "=", "self", ".", "session", ".", "post", "(", "update_url", ",", "data", "=", "data", ")", "# throw an error if it failed.", "if", "not", "result", ".", "ok", ":", "raise", "RuntimeError", "(", "\"Unable to update measurement in MyFitnessPal: \"", "\"status code: {status}\"", ".", "format", "(", "status", "=", "result", ".", "status_code", ")", ")" ]
Sets measurement for today's date.
[ "Sets", "measurement", "for", "today", "s", "date", "." ]
python
train
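Usage sketch for set_measurements; the credentials and measurement names are placeholders and must exist for the account.

import myfitnesspal

client = myfitnesspal.Client('user@example.com', 'password')  # placeholder login
client.set_measurements(measurement='Weight', value=80.5)     # logs today's weight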
jrigden/pyPodcastParser
pyPodcastParser/Podcast.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Podcast.py#L289-L294
def set_itunes_author_name(self): """Parses author name from itunes tags and sets value""" try: self.itunes_author_name = self.soup.find('itunes:author').string except AttributeError: self.itunes_author_name = None
[ "def", "set_itunes_author_name", "(", "self", ")", ":", "try", ":", "self", ".", "itunes_author_name", "=", "self", ".", "soup", ".", "find", "(", "'itunes:author'", ")", ".", "string", "except", "AttributeError", ":", "self", ".", "itunes_author_name", "=", "None" ]
Parses author name from itunes tags and sets value
[ "Parses", "author", "name", "from", "itunes", "tags", "and", "sets", "value" ]
python
train
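The setter above runs inside Podcast's constructor, so callers only read the resulting attribute; the feed URL is illustrative.

import requests
from pyPodcastParser.Podcast import Podcast

feed = requests.get('http://example.com/feed.rss').content  # illustrative feed
podcast = Podcast(feed)
print(podcast.itunes_author_name)  # None when the <itunes:author> tag is absent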
ARMmbed/icetea
icetea_lib/tools/GitTool.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/GitTool.py#L68-L78
def get_remote_url(path, remote="origin"): """ Run git config --get remote.<remote>.url in path. :param path: Path where git is to be run :param remote: Remote name :return: str or None """ path = get_path(path) cmd = ["config", "--get", "remote.%s.url" % remote] return __run_git(cmd, path)[0]
[ "def", "get_remote_url", "(", "path", ",", "remote", "=", "\"origin\"", ")", ":", "path", "=", "get_path", "(", "path", ")", "cmd", "=", "[", "\"config\"", ",", "\"--get\"", ",", "\"remote.%s.url\"", "%", "remote", "]", "return", "__run_git", "(", "cmd", ",", "path", ")", "[", "0", "]" ]
Run git config --get remote.<remote>.url in path. :param path: Path where git is to be run :param remote: Remote name :return: str or None
[ "Run", "git", "config", "--", "get", "remote", ".", "<remote", ">", ".", "url", "in", "path", "." ]
python
train
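get_remote_url is a module-level function, so a sketch only needs the module import; the repository path is illustrative.

from icetea_lib.tools import GitTool

url = GitTool.get_remote_url('.', remote='origin')  # stdout of `git config --get remote.origin.url`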
darothen/xbpch
xbpch/uff.py
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L125-L139
def writeline(self, fmt, *args):
    """
    Pack `args` with the given `fmt` and write them to the file as a
    single record, framed by leading and trailing 4-byte size markers
    (Fortran unformatted style).
    """
    fmt = self.endian + fmt
    size = struct.calcsize(fmt)

    fix = struct.pack(self.endian + 'i', size)
    line = struct.pack(fmt, *args)

    self.write(fix)
    self.write(line)
    self.write(fix)
[ "def", "writeline", "(", "self", ",", "fmt", ",", "*", "args", ")", ":", "fmt", "=", "self", ".", "endian", "+", "fmt", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "fix", "=", "struct", ".", "pack", "(", "self", ".", "endian", "+", "'i'", ",", "size", ")", "line", "=", "struct", ".", "pack", "(", "fmt", ",", "*", "args", ")", "self", ".", "write", "(", "fix", ")", "self", ".", "write", "(", "line", ")", "self", ".", "write", "(", "fix", ")" ]
Pack `args` with the given `fmt` and write them to the file as a single record, framed by leading and trailing 4-byte size markers (Fortran unformatted style).
[ "Write", "line", "(", "list", "of", "objects", ")", "with", "given", "fmt", "to", "file", ".", "The", "line", "will", "be", "chained", "if", "object", "is", "iterable", "(", "except", "for", "basestrings", ")", "." ]
python
train
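A hedged sketch; FortranFile is the unformatted-file wrapper this method belongs to in xbpch.uff, and its constructor arguments here are assumptions.

f = FortranFile('scratch.bin', mode='wb', endian='>')  # assumed open-for-write form
f.writeline('2i', 10, 20)  # writes: 4-byte size marker, two big-endian ints, trailing marker
f.close()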
ratt-ru/PyMORESANE
pymoresane/iuwt_toolbox.py
https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt_toolbox.py#L292-L306
def snr_ratio(in1, in2):
    """
    The following function simply calculates the signal to noise ratio between two signals.

    INPUTS:
    in1         (no default):   Array containing values for signal 1.
    in2         (no default):   Array containing values for signal 2.

    OUTPUTS:
    out1                        The signal-to-noise ratio in decibels, computed as
                                20*log10(||in1|| / ||in1 - in2||).
    """

    out1 = 20*(np.log10(np.linalg.norm(in1)/np.linalg.norm(in1-in2)))

    return out1
[ "def", "snr_ratio", "(", "in1", ",", "in2", ")", ":", "out1", "=", "20", "*", "(", "np", ".", "log10", "(", "np", ".", "linalg", ".", "norm", "(", "in1", ")", "/", "np", ".", "linalg", ".", "norm", "(", "in1", "-", "in2", ")", ")", ")", "return", "out1" ]
The following function simply calculates the signal to noise ratio between two signals.

INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.

OUTPUTS:
out1 The signal-to-noise ratio in decibels, computed as 20*log10(||in1|| / ||in1 - in2||).
[ "The", "following", "function", "simply", "calculates", "the", "signal", "to", "noise", "ratio", "between", "two", "signals", "." ]
python
train
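Quick numeric check of the formula above: with in2 = 0.9 * in1, the residual norm is one tenth of the signal norm, so the result is exactly 20 dB.

import numpy as np

in1 = np.ones(4)          # ||in1|| = 2.0
in2 = 0.9 * in1           # ||in1 - in2|| = 0.2
snr_ratio(in1, in2)       # 20.0 == 20 * log10(10)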
tensorflow/tensor2tensor
tensor2tensor/models/research/attention_lm_moe.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/attention_lm_moe.py#L599-L611
def attention_lm_ae_extended(): """Experiment with the exp_factor params.""" hparams = attention_lm_moe_base_long_seq() hparams.attention_layers = "eeee" hparams.attention_local = True # hparams.factored_logits=1 # Necessary when the number of expert grow bigger hparams.attention_moe_k = 2 hparams.attention_exp_factor = 4 # hparams.attention_exp_inputdim = 128 hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" return hparams
[ "def", "attention_lm_ae_extended", "(", ")", ":", "hparams", "=", "attention_lm_moe_base_long_seq", "(", ")", "hparams", ".", "attention_layers", "=", "\"eeee\"", "hparams", ".", "attention_local", "=", "True", "# hparams.factored_logits=1 # Necessary when the number of expert grow bigger", "hparams", ".", "attention_moe_k", "=", "2", "hparams", ".", "attention_exp_factor", "=", "4", "# hparams.attention_exp_inputdim = 128", "hparams", ".", "layer_preprocess_sequence", "=", "\"n\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"da\"", "return", "hparams" ]
Experiment with the exp_factor params.
[ "Experiment", "with", "the", "exp_factor", "params", "." ]
python
train
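The hparams builders are plain functions, so the override chain can be inspected directly; in Trainer runs they are normally selected by registered name instead.

hparams = attention_lm_ae_extended()
hparams.attention_layers            # 'eeee'
hparams.attention_moe_k             # 2
hparams.attention_exp_factor        # 4
hparams.layer_postprocess_sequence  # 'da'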
eyurtsev/FlowCytometryTools
FlowCytometryTools/gui/fc_widget.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L347-L375
def get_generation_code(self, **gencode): """ Generates python code that can create the gate. """ channels, verts = self.coordinates channels = ', '.join(["'{}'".format(ch) for ch in channels]) verts = list(verts) ## Formatting the vertexes # List level (must be first), used for gates that may have multiple vertexes like a polygon if len(verts) == 1: verts = verts[0] # Tuple level (must be second), used for catching the number of dimensions # on which a vertex is defined if len(verts) == 1: verts = verts[0] # Format vertices to include less sigfigs verts = apply_format(verts, '{:.3e}') gencode.setdefault('name', self.name) gencode.setdefault('region', self.region) gencode.setdefault('gate_type', self._gencode_gate_class) gencode.setdefault('verts', verts) gencode.setdefault('channels', channels) format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')" return format_string.format(**gencode)
[ "def", "get_generation_code", "(", "self", ",", "*", "*", "gencode", ")", ":", "channels", ",", "verts", "=", "self", ".", "coordinates", "channels", "=", "', '", ".", "join", "(", "[", "\"'{}'\"", ".", "format", "(", "ch", ")", "for", "ch", "in", "channels", "]", ")", "verts", "=", "list", "(", "verts", ")", "## Formatting the vertexes", "# List level (must be first), used for gates that may have multiple vertexes like a polygon", "if", "len", "(", "verts", ")", "==", "1", ":", "verts", "=", "verts", "[", "0", "]", "# Tuple level (must be second), used for catching the number of dimensions", "# on which a vertex is defined", "if", "len", "(", "verts", ")", "==", "1", ":", "verts", "=", "verts", "[", "0", "]", "# Format vertices to include less sigfigs", "verts", "=", "apply_format", "(", "verts", ",", "'{:.3e}'", ")", "gencode", ".", "setdefault", "(", "'name'", ",", "self", ".", "name", ")", "gencode", ".", "setdefault", "(", "'region'", ",", "self", ".", "region", ")", "gencode", ".", "setdefault", "(", "'gate_type'", ",", "self", ".", "_gencode_gate_class", ")", "gencode", ".", "setdefault", "(", "'verts'", ",", "verts", ")", "gencode", ".", "setdefault", "(", "'channels'", ",", "channels", ")", "format_string", "=", "\"{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')\"", "return", "format_string", ".", "format", "(", "*", "*", "gencode", ")" ]
Generates python code that can create the gate.
[ "Generates", "python", "code", "that", "can", "create", "the", "gate", "." ]
python
train
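A hedged sketch; `gate` stands for an interactive gate drawn in the FlowCytometryTools GUI (its construction is out of scope for this method), and the printed line is only indicative.

code = gate.get_generation_code()
print(code)
# e.g. gate1 = PolyGate([...], ('FSC-A', 'SSC-A'), region='in', name='gate1')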
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1973-L1979
def parse_package(self, inputstring, addhash=True): """Parse package code.""" if addhash: use_hash = self.genhash(True, inputstring) else: use_hash = None return self.parse(inputstring, self.file_parser, {"nl_at_eof_check": True}, {"header": "package", "use_hash": use_hash})
[ "def", "parse_package", "(", "self", ",", "inputstring", ",", "addhash", "=", "True", ")", ":", "if", "addhash", ":", "use_hash", "=", "self", ".", "genhash", "(", "True", ",", "inputstring", ")", "else", ":", "use_hash", "=", "None", "return", "self", ".", "parse", "(", "inputstring", ",", "self", ".", "file_parser", ",", "{", "\"nl_at_eof_check\"", ":", "True", "}", ",", "{", "\"header\"", ":", "\"package\"", ",", "\"use_hash\"", ":", "use_hash", "}", ")" ]
Parse package code.
[ "Parse", "package", "code", "." ]
python
train
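Sketch of compiling a package module with parse_package; Compiler's default construction is assumed to work as-is, and the Coconut snippet is illustrative.

from coconut.compiler.compiler import Compiler

comp = Compiler()                                      # assumed default construction
py_source = comp.parse_package('x = 1\nx |> print\n')  # Python source with the package header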
marl/jams
jams/sonify.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/sonify.py#L25-L36
def mkclick(freq, sr=22050, duration=0.1): '''Generate a click sample. This replicates functionality from mir_eval.sonify.clicks, but exposes the target frequency and duration. ''' times = np.arange(int(sr * duration)) click = np.sin(2 * np.pi * times * freq / float(sr)) click *= np.exp(- times / (1e-2 * sr)) return click
[ "def", "mkclick", "(", "freq", ",", "sr", "=", "22050", ",", "duration", "=", "0.1", ")", ":", "times", "=", "np", ".", "arange", "(", "int", "(", "sr", "*", "duration", ")", ")", "click", "=", "np", ".", "sin", "(", "2", "*", "np", ".", "pi", "*", "times", "*", "freq", "/", "float", "(", "sr", ")", ")", "click", "*=", "np", ".", "exp", "(", "-", "times", "/", "(", "1e-2", "*", "sr", ")", ")", "return", "click" ]
Generate a click sample. This replicates functionality from mir_eval.sonify.clicks, but exposes the target frequency and duration.
[ "Generate", "a", "click", "sample", "." ]
python
valid
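mkclick depends only on numpy, so the decaying sinusoid is easy to sanity-check.

import numpy as np

click = mkclick(440.0, sr=22050, duration=0.1)
click.shape                        # (2205,): int(sr * duration) samples
bool(np.abs(click).max() <= 1.0)   # True: unit sine under an exponential decay envelope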