Dataset schema (one row per column, with dtype and value range):

column            dtype          range
id                int32          0 .. 252k
repo              stringlengths  7 .. 55
path              stringlengths  4 .. 127
func_name         stringlengths  1 .. 88
original_string   stringlengths  75 .. 19.8k
language          stringclasses  1 value
code              stringlengths  75 .. 19.8k
code_tokens       sequence
docstring         stringlengths  3 .. 17.3k
docstring_tokens  sequence
sha               stringlengths  40 .. 40
url               stringlengths  87 .. 242
id: 248,900
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.add_condition
language: python
code:

```python
def add_condition(self, conkey, cond):
    """Add a condition, one of the addable ones.

    conkey: str
        One of 'cond', 'startcond' or 'stopcond'. 'start' or 'stop'
        is accepted as shorts for 'startcond' or 'stopcond'. If the
        conkey is given with an explicit number (like 'stopcond3')
        and already exists, it will be over-written, else created.
        When the trailing number is implicit, the first condition
        with a value of None is taken. If no None value is found, a
        new condition is added.

    cond: str
        The condition string. See ...

    .. note::
        Updates the mask if not no_auto.

    .. seealso::
        :meth:`~channelpack.ChannelPack.set_duration`
        :meth:`~channelpack.ChannelPack.set_samplerate`
        :meth:`~channelpack.ChannelPack.set_stopextend`
        :meth:`~channelpack.ChannelPack.clear_conditions`
    """
    # Audit:
    if conkey == 'start' or conkey == 'stop':
        conkey += 'cond'
    if not any(conkey.startswith(addable) for addable in _ADDABLES):
        raise KeyError(conkey)
    if not self.conconf.valid_conkey(conkey):
        raise KeyError(conkey)

    self._parse_cond(cond)      # Checking

    conkey = self.conconf.next_conkey(conkey)
    self.conconf.set_condition(conkey, cond)
    if not self.no_auto:
        self.make_mask()
```
[ "def", "add_condition", "(", "self", ",", "conkey", ",", "cond", ")", ":", "# Audit:", "if", "conkey", "==", "'start'", "or", "conkey", "==", "'stop'", ":", "conkey", "+=", "'cond'", "if", "not", "any", "(", "conkey", ".", "startswith", "(", "addable", ")", "for", "addable", "in", "_ADDABLES", ")", ":", "raise", "KeyError", "(", "conkey", ")", "if", "not", "self", ".", "conconf", ".", "valid_conkey", "(", "conkey", ")", ":", "raise", "KeyError", "(", "conkey", ")", "self", ".", "_parse_cond", "(", "cond", ")", "# Checking", "conkey", "=", "self", ".", "conconf", ".", "next_conkey", "(", "conkey", ")", "self", ".", "conconf", ".", "set_condition", "(", "conkey", ",", "cond", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Add a condition, one of the addable ones. conkey: str One of 'cond', startcond' or 'stopcond'. 'start' or 'stop' is accepted as shorts for 'startcond' or 'stopcond'. If the conkey is given with an explicit number (like 'stopcond3') and already exist, it will be over-written, else created. When the trailing number is implicit, the first condition with a value of None is taken. If no None value is found, a new condition is added. cond: str The condition string. See ... .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_duration` :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.set_stopextend` :meth:`~channelpack.ChannelPack.clear_conditions`
[ "Add", "a", "condition", "one", "of", "the", "addable", "ones", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L431-L472
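A runnable sketch of the conkey shorthand and prefix check described above; `_ADDABLES` is given an assumed value here, and the function is a standalone stand-in, not the library method.

```python
# Standalone sketch of the conkey handling in add_condition.
# _ADDABLES is assumed to hold the addable prefixes; the real value
# lives in channelpack/pack.py.
_ADDABLES = ('cond', 'startcond', 'stopcond')

def normalize_conkey(conkey):
    # 'start' and 'stop' are accepted as shorts for the full keys:
    if conkey in ('start', 'stop'):
        conkey += 'cond'
    if not any(conkey.startswith(a) for a in _ADDABLES):
        raise KeyError(conkey)
    return conkey

print(normalize_conkey('start'))      # -> 'startcond'
print(normalize_conkey('stopcond3'))  # -> 'stopcond3'
```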
id: 248,901
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.spit_config
language: python
code:

```python
def spit_config(self, conf_file=None, firstwordonly=False):
    """Write a config_file based on this instance.

    conf_file: str (or Falseish)
        If conf_file is Falseish, write the file to the directory
        where self.filename sits, if self is not already associated
        with such a file. If associated, and conf_file is Falseish,
        use self.conf_file. If conf_file is a file name, write to
        that file and set self.conf_file to conf_file.

    firstwordonly: bool or "pattern"
        Same meaning as in name method, and applies to the channel
        names spitted. There is no effect on the instance channel
        names until eat_config is called.

    Sections in the ini/cfg kind of file can be:

    [channels]
        A mapping of self.D integer keys to channel names. Options
        are numbers corresponding to the keys. Values are the
        channel names, being the fallback names if custom names are
        not available (self.chnames). (When spitting that is).

    [conditions]
        Options correspond to the keys in self.conditions, values
        correspond to the values in the same.
    """
    chroot = os.path.dirname(self.filename)
    chroot = os.path.abspath(chroot)

    # Figure out file name of conf_file:
    if hasattr(self, 'conf_file') and not conf_file:
        cfgfn = self.conf_file
    elif conf_file:
        cfgfn = conf_file
    else:
        cfgfn = os.path.join(chroot, CONFIG_FILE)

    with open(cfgfn, 'wb') as fo:
        self.conconf.spit_config(fo, firstwordonly=firstwordonly)

    self.conf_file = os.path.abspath(cfgfn)
```
[ "def", "spit_config", "(", "self", ",", "conf_file", "=", "None", ",", "firstwordonly", "=", "False", ")", ":", "chroot", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "chroot", "=", "os", ".", "path", ".", "abspath", "(", "chroot", ")", "# Figure out file name of conf_file:", "if", "hasattr", "(", "self", ",", "'conf_file'", ")", "and", "not", "conf_file", ":", "cfgfn", "=", "self", ".", "conf_file", "elif", "conf_file", ":", "cfgfn", "=", "conf_file", "else", ":", "cfgfn", "=", "os", ".", "path", ".", "join", "(", "chroot", ",", "CONFIG_FILE", ")", "with", "open", "(", "cfgfn", ",", "'wb'", ")", "as", "fo", ":", "self", ".", "conconf", ".", "spit_config", "(", "fo", ",", "firstwordonly", "=", "firstwordonly", ")", "self", ".", "conf_file", "=", "os", ".", "path", ".", "abspath", "(", "cfgfn", ")" ]
Write a config_file based on this instance. conf_file: str (or Falseish) If conf_file is Falseish, write the file to the directory where self.filename sits, if self is not already associated with such a file. If associated, and conf_file is Falseish, use self.conf_file. If conf_file is a file name, write to that file and set self.conf_file to conf_file. firstwordonly: bool or "pattern" Same meaning as in name method, and applies to the channel names spitted. There is no effect on the instance channel names until eat_config is called. Sections in the ini/cfg kind of file can be: [channels] A mapping of self.D integer keys to channel names. Options are numbers corresponding to the keys. Values are the channel names, being the fallback names if custom names are not available (self.chnames). (When spitting that is). [conditions] Options correspond to the keys in self.conditions, values correspond to the values in the same.
[ "Write", "a", "config_file", "based", "on", "this", "instance", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L506-L548
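The ini layout the docstring describes can be produced with the stdlib parser alone. A minimal sketch, assuming made-up channel keys and a purely illustrative condition string (the module is spelled ConfigParser in the Python 2 code above):

```python
# Sketch of the [channels]/[conditions] file layout spit_config writes.
try:
    import configparser                  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2

cfg = configparser.RawConfigParser()
cfg.add_section('channels')
cfg.add_section('conditions')
cfg.set('channels', '0', 'RPT')          # made-up channel names
cfg.set('channels', '1', 'B_CACT')
cfg.set('conditions', 'cond1', 'ch0 > 120')  # illustrative syntax only

with open('conf_file.cfg', 'w') as fo:
    cfg.write(fo)
```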
id: 248,902
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.eat_config
language: python
code:

```python
def eat_config(self, conf_file=None):
    """
    Read the conf_file and update this instance accordingly.

    conf_file: str or Falseish
        If conf_file is Falseish, look in the directory where
        self.filename sits if self is not already associated with a
        conf_file. If associated, and conf_file arg is Falseish,
        read self.conf_file. If conf_file arg is a file name, read
        from that file, but do not update self.conf_file
        accordingly. An implicit IOError is raised if no conf_file
        was found.

    See spit_config for documentation on the file layout.

    .. note::
        Updates the mask if not no_auto.

    .. note::
        If the config_file exists because of an earlier spit, and
        custom channel names were not available, channels are listed
        as the fallback names in the file. Then after this eat,
        self.chnames will be set to the list in the conf_file
        section 'channels'. The result can be that self.chnames and
        self.chnames_0 will be equal. The message then is that, if
        channel names are updated, you should spit before you eat.
    """
    chroot = os.path.dirname(self.filename)  # "channels root dir"
    chroot = os.path.abspath(chroot)

    # Figure out file name of conf_file:
    if hasattr(self, 'conf_file') and not conf_file:
        cfgfn = self.conf_file
    elif conf_file:
        cfgfn = conf_file
    else:
        cfgfn = os.path.join(chroot, CONFIG_FILE)

    with open(cfgfn, 'r') as fo:
        self.conconf.eat_config(fo)

    # Update mask:
    if not self.no_auto:
        self.make_mask()
    else:
        self.make_mask(dry=True)
```
[ "def", "eat_config", "(", "self", ",", "conf_file", "=", "None", ")", ":", "chroot", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "# \"channels root dir\"", "chroot", "=", "os", ".", "path", ".", "abspath", "(", "chroot", ")", "# Figure out file name of conf_file:", "if", "hasattr", "(", "self", ",", "'conf_file'", ")", "and", "not", "conf_file", ":", "cfgfn", "=", "self", ".", "conf_file", "elif", "conf_file", ":", "cfgfn", "=", "conf_file", "else", ":", "cfgfn", "=", "os", ".", "path", ".", "join", "(", "chroot", ",", "CONFIG_FILE", ")", "with", "open", "(", "cfgfn", ",", "'r'", ")", "as", "fo", ":", "self", ".", "conconf", ".", "eat_config", "(", "fo", ")", "# Update mask:", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")", "else", ":", "self", ".", "make_mask", "(", "dry", "=", "True", ")" ]
Read the the conf_file and update this instance accordingly. conf_file: str or Falseish If conf_file is Falseish, look in the directory where self.filename sits if self is not already associated with a conf_file. If associated, and conf_file arg is Falseish, read self.conf_file. If conf_file arg is a file name, read from that file, but do not update self.conf_file accordingly. An Implicit IOError is raised if no conf_file was found. See spit_config for documentation on the file layout. .. note:: Updates the mask if not no_auto. .. note:: If the config_file exist because of an earlier spit, and custom channel names was not available, channels are listed as the fallback names in the file. Then after this eat, self.chnames will be set to the list in the conf_file section 'channels'. The result can be that self.chnames and self.chnames_0 will be equal. The message then is that, if channel names are updated, you should spit before you eat.
[ "Read", "the", "the", "conf_file", "and", "update", "this", "instance", "accordingly", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L550-L599
id: 248,903
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.set_stopextend
language: python
code:

```python
def set_stopextend(self, n):
    """Extend the True elements by n when setting the conditions
    based on a 'stopcond' condition.

    n is an integer >= 0.

    .. note::
        Updates the mask if not no_auto.
    """
    self.conconf.set_condition('stopextend', n)
    if not self.no_auto:
        self.make_mask()
```
[ "def", "set_stopextend", "(", "self", ",", "n", ")", ":", "self", ".", "conconf", ".", "set_condition", "(", "'stopextend'", ",", "n", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Extend the True elements by n when setting the conditions based on a 'stopcond' condition. n is an integer >= 0. .. note:: Updates the mask if not no_auto.
[ "Extend", "the", "True", "elements", "by", "n", "when", "setting", "the", "conditions", "based", "on", "a", "stopcond", "condition", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L613-L624
id: 248,904
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.set_duration
language: python
code:

```python
def set_duration(self, rule):
    """Set the duration according to rule.

    rule: str
        The rule operating on the variable ``dur``.

    rule is an expression like::

        >>> rule = 'dur == 150 or dur > 822'

    setting a duration rule assuming a pack sp::

        >>> sp.set_duration(rule)

    The identifier ``dur`` must be present or the rule will fail.

    .. note::
        The logical ``or`` and ``and`` operators must be used.
        ``dur`` is a primitive, not an array.

    .. note::
        Updates the mask if not no_auto.

    .. seealso::
        :meth:`~channelpack.ChannelPack.set_samplerate`
        :meth:`~channelpack.ChannelPack.add_condition`
        :meth:`~channelpack.ChannelPack.pprint_conditions`
    """
    self.conconf.set_condition('duration', rule)
    if not self.no_auto:
        self.make_mask()
```
[ "def", "set_duration", "(", "self", ",", "rule", ")", ":", "self", ".", "conconf", ".", "set_condition", "(", "'duration'", ",", "rule", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Set the duration according to rule. rule: str The rule operating on the variable ``dur``. rule is an expression like:: >>> rule = 'dur == 150 or dur > 822' setting a duration rule assuming a pack sp:: >>> sp.set_duration(rule) The identifier ``dur`` must be present or the rule will fail. .. note:: The logical ``or`` and ``and`` operators must be used. ``dur`` is a primitive, not an array. .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.add_condition` :meth:`~channelpack.ChannelPack.pprint_conditions`
[ "Set", "the", "duration", "according", "to", "rule", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L626-L658
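A sketch of how an `or`/`and` rule over the scalar ``dur`` can be checked; whether channelpack evaluates rules exactly this way is not shown in this record, so the eval() call is purely illustrative:

```python
# The docstring requires plain 'or'/'and' because ``dur`` is a scalar,
# not an array. A rule string can be tested against scalar values:
rule = 'dur == 150 or dur > 822'

for dur in (100, 150, 900):
    # restrict builtins so only the rule expression is evaluated
    print(dur, eval(rule, {'__builtins__': {}}, {'dur': dur}))
# 100 False / 150 True / 900 True
```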
id: 248,905
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.clear_conditions
language: python
code:

```python
def clear_conditions(self, *conkeys, **noclear):
    """Clear conditions.

    Clear only the conditions conkeys if specified. Clear only the
    conditions not specified by conkeys if noclear is True (False
    default).

    .. note::
        Updates the mask if not no_auto.
    """
    offenders = set(conkeys) - set(self.conconf.conditions.keys())
    if offenders:
        raise KeyError(', '.join([off for off in offenders]))

    # Valid keywords subtracted
    offenders = set(noclear) - set({'noclear'})
    if offenders:
        raise KeyError(', '.join([off for off in offenders]))

    noclear = noclear.get('noclear', False)

    for ck in self.conconf.conditions:
        if not conkeys:
            # self.conconf.set_condition(ck, None)
            self.conconf.reset()
            break
        elif not noclear and ck in conkeys:
            self.conconf.set_condition(ck, None)
        elif noclear and ck not in conkeys:
            self.conconf.set_condition(ck, None)

    if not self.no_auto:
        self.make_mask()
```
[ "def", "clear_conditions", "(", "self", ",", "*", "conkeys", ",", "*", "*", "noclear", ")", ":", "offenders", "=", "set", "(", "conkeys", ")", "-", "set", "(", "self", ".", "conconf", ".", "conditions", ".", "keys", "(", ")", ")", "if", "offenders", ":", "raise", "KeyError", "(", "', '", ".", "join", "(", "[", "off", "for", "off", "in", "offenders", "]", ")", ")", "# Valid keywords subtracted", "offenders", "=", "set", "(", "noclear", ")", "-", "set", "(", "{", "'noclear'", "}", ")", "if", "offenders", ":", "raise", "KeyError", "(", "', '", ".", "join", "(", "[", "off", "for", "off", "in", "offenders", "]", ")", ")", "noclear", "=", "noclear", ".", "get", "(", "'noclear'", ",", "False", ")", "for", "ck", "in", "self", ".", "conconf", ".", "conditions", ":", "if", "not", "conkeys", ":", "# self.conconf.set_condition(ck, None)", "self", ".", "conconf", ".", "reset", "(", ")", "break", "elif", "not", "noclear", "and", "ck", "in", "conkeys", ":", "self", ".", "conconf", ".", "set_condition", "(", "ck", ",", "None", ")", "elif", "noclear", "and", "ck", "not", "in", "conkeys", ":", "self", ".", "conconf", ".", "set_condition", "(", "ck", ",", "None", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Clear conditions. Clear only the conditions conkeys if specified. Clear only the conditions not specified by conkeys if noclear is True (False default). .. note:: Updates the mask if not no_auto.
[ "Clear", "conditions", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L660-L693
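The conkeys/noclear semantics reduce to one membership test per key. A pure-dict sketch (not the library code; the reset-on-empty branch is simplified to clearing every key):

```python
# Clear the named keys, or with noclear=True clear everything *except*
# the named keys.
def clear(conditions, *conkeys, **kwargs):
    noclear = kwargs.get('noclear', False)
    for ck in conditions:
        if not conkeys:
            conditions[ck] = None           # no keys given: clear all
        elif (ck in conkeys) != noclear:
            conditions[ck] = None
    return conditions

conds = {'cond1': 'a > 0', 'startcond1': 'b == 1', 'stopcond1': 'b == 0'}
print(clear(dict(conds), 'cond1'))                 # only cond1 cleared
print(clear(dict(conds), 'cond1', noclear=True))   # all but cond1 cleared
```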
id: 248,906
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.make_mask
language: python
code:

```python
def make_mask(self, clean=True, dry=False):
    """Set the attribute self.mask to a mask based on the conditions.

    clean: bool
        If not True, let the current mask be a condition as well. If
        True, the mask is set solely on the pack's current
        conditions.

    dry: bool
        If True, only try to make a mask, but don't touch self.mask.

    This method is called automatically unless ``no_auto`` is set to
    True, whenever conditions are updated.

    .. seealso::
        :meth:`~channelpack.ChannelPack.pprint_conditions`
    """
    cc = self.conconf
    # All True initially.
    mask = np.ones(self.rec_cnt) == True  # NOQA

    for cond in cc.conditions_list('cond'):
        try:
            mask = mask & self._mask_array(cond)
        except Exception:
            print cond
            print 'produced an error:'
            raise               # re-raise

    mask = mask & datautils.startstop_bool(self)
    samplerate = cc.get_condition('samplerate')
    if samplerate is not None:
        samplerate = float(samplerate)
    mask = datautils.duration_bool(mask, cc.get_condition('duration'),
                                   samplerate)

    if dry:
        return

    if not clean and self.mask is not None:
        self.mask = self.mask & mask
    else:
        self.mask = mask
```
[ "def", "make_mask", "(", "self", ",", "clean", "=", "True", ",", "dry", "=", "False", ")", ":", "cc", "=", "self", ".", "conconf", "# All True initially.", "mask", "=", "np", ".", "ones", "(", "self", ".", "rec_cnt", ")", "==", "True", "# NOQA", "for", "cond", "in", "cc", ".", "conditions_list", "(", "'cond'", ")", ":", "try", ":", "mask", "=", "mask", "&", "self", ".", "_mask_array", "(", "cond", ")", "except", "Exception", ":", "print", "cond", "print", "'produced an error:'", "raise", "# re-raise", "mask", "=", "mask", "&", "datautils", ".", "startstop_bool", "(", "self", ")", "samplerate", "=", "cc", ".", "get_condition", "(", "'samplerate'", ")", "if", "samplerate", "is", "not", "None", ":", "samplerate", "=", "float", "(", "samplerate", ")", "mask", "=", "datautils", ".", "duration_bool", "(", "mask", ",", "cc", ".", "get_condition", "(", "'duration'", ")", ",", "samplerate", ")", "if", "dry", ":", "return", "if", "not", "clean", "and", "self", ".", "mask", "is", "not", "None", ":", "self", ".", "mask", "=", "self", ".", "mask", "&", "mask", "else", ":", "self", ".", "mask", "=", "mask" ]
Set the attribute self.mask to a mask based on the conditions. clean: bool If not True, let the current mask be a condition as well. If True, the mask is set solely on the pack's current conditions dry: bool If True, only try to make a mask, but don't touch self.mask This method is called automatically unless ``no_auto`` is set to True, whenever conditions are updated. .. seealso:: :meth:`~channelpack.ChannelPack.pprint_conditions`
[ "Set", "the", "attribute", "self", ".", "mask", "to", "a", "mask", "based", "on", "the", "conditions", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L695-L738
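The mask composition is plain numpy boolean AND-ing; `np.ones(n, dtype=bool)` is the idiomatic spelling of the all-True start the code gets via `np.ones(...) == True`. A sketch with made-up condition arrays:

```python
import numpy as np

rec_cnt = 6
mask = np.ones(rec_cnt, dtype=bool)          # all True initially
cond_a = np.array([1, 1, 0, 1, 1, 0], dtype=bool)
cond_b = np.array([1, 0, 0, 1, 1, 1], dtype=bool)

# AND in each condition's boolean array, as make_mask does per cond:
for cond in (cond_a, cond_b):
    mask = mask & cond

print(mask)   # [ True False False  True  True False]
```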
id: 248,907
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.set_channel_names
language: python
code:

```python
def set_channel_names(self, names):
    """
    Set self.chnames. Custom channel names that can be used in calls
    on this object and in condition strings.

    names: list or None
        It is the caller's responsibility to make sure the list is
        in column order. self.chnames will be a dict with channel
        integer indexes as keys. If names is None, self.chnames will
        be None.
    """
    if not names:
        self.chnames = None
        return

    if len(names) != len(self.keys):
        raise ValueError('len(names) != len(self.D.keys())')

    self.chnames = dict(zip(self.keys, names))
```
[ "def", "set_channel_names", "(", "self", ",", "names", ")", ":", "if", "not", "names", ":", "self", ".", "chnames", "=", "None", "return", "if", "len", "(", "names", ")", "!=", "len", "(", "self", ".", "keys", ")", ":", "raise", "ValueError", "(", "'len(names) != len(self.D.keys())'", ")", "self", ".", "chnames", "=", "dict", "(", "zip", "(", "self", ".", "keys", ",", "names", ")", ")" ]
Set self.chnames. Custom channel names that can be used in calls on this object and in condition strings. names: list or None It is the callers responsibility to make sure the list is in column order. self.chnames will be a dict with channel integer indexes as keys. If names is None, self.chnames will be None.
[ "Set", "self", ".", "chnames", ".", "Custom", "channel", "names", "that", "can", "be", "used", "in", "calls", "on", "this", "object", "and", "in", "condition", "strings", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L740-L759
id: 248,908
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.counter
language: python
code:

```python
def counter(self, ch, part=None):
    """Return a counter on the channel ch.

    ch: string or integer.
        The channel index number or channel name.

    part: int or None
        The 0-based enumeration of a True part to return. This has
        an effect whether or not the mask or filter is turned on.
        Raise IndexError if the part does not exist.

    See `Counter
    <https://docs.python.org/2.7/library/collections.html#counter-objects>`_
    for the counter object returned.
    """
    return Counter(self(self._key(ch), part=part))
```
[ "def", "counter", "(", "self", ",", "ch", ",", "part", "=", "None", ")", ":", "return", "Counter", "(", "self", "(", "self", ".", "_key", "(", "ch", ")", ",", "part", "=", "part", ")", ")" ]
Return a counter on the channel ch. ch: string or integer. The channel index number or channel name. part: int or None The 0-based enumeration of a True part to return. This has an effect whether or not the mask or filter is turned on. Raise IndexError if the part does not exist. See `Counter <https://docs.python.org/2.7/library/collections.html#counter-objects>`_ for the counter object returned.
[ "Return", "a", "counter", "on", "the", "channel", "ch", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L791-L807
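The return value is a stdlib collections.Counter; a quick demonstration with stand-in channel data:

```python
from collections import Counter

channel = [0, 0, 1, 1, 1, 2]            # stand-in for a channel slice
print(Counter(channel))                  # Counter({1: 3, 0: 2, 2: 1})
print(Counter(channel).most_common(1))   # [(1, 3)]
```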
id: 248,909
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.records
language: python
code:

```python
def records(self, part=None, fallback=True):
    """Return an iterator over the records in the pack.

    Each record is supplied as a namedtuple with the channel names
    as field names. This is useful if each record makes a meaningful
    data set on its own.

    part: int or None
        Same meaning as in :meth:`~channelpack.ChannelPack.__call__`.

    fallback: boolean
        The named tuple requires python-valid naming. If fallback is
        False, there will be an error if ``self.chnames`` does not
        hold valid names and is not None. If True, fall back to
        ``self.chnames_0`` on error.

    .. note::
        The error produced on invalid names if fallback is False is
        not produced until iteration start.

    Here is a good post on stack overflow on the subject `231767
    <http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python>`_
    """
    names_0 = [self.chnames_0[k] for k in sorted(self.chnames_0.keys())]
    if self.chnames is not None:
        names = [self.chnames[k] for k in sorted(self.chnames.keys())]

    try:
        Record = namedtuple('Record', names)
    except NameError:           # no names
        Record = namedtuple('Record', names_0)
        names = names_0
    except ValueError:          # no good names
        if fallback:
            Record = namedtuple('Record', names_0)
            names = names_0
        else:
            raise

    for tup in zip(*[self(name, part) for name in names]):
        yield Record(*tup)
```
[ "def", "records", "(", "self", ",", "part", "=", "None", ",", "fallback", "=", "True", ")", ":", "names_0", "=", "[", "self", ".", "chnames_0", "[", "k", "]", "for", "k", "in", "sorted", "(", "self", ".", "chnames_0", ".", "keys", "(", ")", ")", "]", "if", "self", ".", "chnames", "is", "not", "None", ":", "names", "=", "[", "self", ".", "chnames", "[", "k", "]", "for", "k", "in", "sorted", "(", "self", ".", "chnames", ".", "keys", "(", ")", ")", "]", "try", ":", "Record", "=", "namedtuple", "(", "'Record'", ",", "names", ")", "except", "NameError", ":", "# no names", "Record", "=", "namedtuple", "(", "'Record'", ",", "names_0", ")", "names", "=", "names_0", "except", "ValueError", ":", "# no good names", "if", "fallback", ":", "Record", "=", "namedtuple", "(", "'Record'", ",", "names_0", ")", "names", "=", "names_0", "else", ":", "raise", "for", "tup", "in", "zip", "(", "*", "[", "self", "(", "name", ",", "part", ")", "for", "name", "in", "names", "]", ")", ":", "yield", "Record", "(", "*", "tup", ")" ]
Return an iterator over the records in the pack. Each record is supplied as a namedtuple with the channel names as field names. This is useful if each record make a meaningful data set on its own. part: int or None Same meaning as in :meth:`~channelpack.ChannelPack.__call__`. fallback: boolean The named tuple requires python-valid naming. If fallback is False, there will be an error if ``self.chnames`` is not valid names and not None. If True, fall back to the ``self.chnames_0`` on error. .. note:: The error produced on invalid names if fallback is False is not produced until iteration start. Here is a good post on stack overflow on the subject `231767 <http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python>`_
[ "Return", "an", "iterator", "over", "the", "records", "in", "the", "pack", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L837-L878
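Why the fallback exists: namedtuple raises ValueError for field names that are not valid Python identifiers. A minimal demonstration with made-up channel names:

```python
from collections import namedtuple

good = ['speed', 'torque']
bad = ['speed (rpm)', 'torque']     # space and parens: invalid fields

Record = namedtuple('Record', good)
print(Record(1200, 85))             # Record(speed=1200, torque=85)

try:
    namedtuple('Record', bad)
except ValueError as err:
    print('would fall back because:', err)
```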
id: 248,910
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack._key
language: python
code:

```python
def _key(self, ch):
    """Return the integer key for ch. It is the key for the first
    value found in chnames and chnames_0, that matches ch. Or if ch
    is an int, ch is returned if it is a key in self.D"""

    if ch in self.D:
        return ch
    if isinstance(ch, int):
        raise KeyError(ch)      # don't accept integers as custom names

    if self.chnames:
        for item in self.chnames.items():
            if item[1] == ch:
                return item[0]

    for item in self.chnames_0.items():
        if item[1] == ch:
            return item[0]

    # If we got here, ch can be an int represented by a string if it comes
    # from a condition string:
    try:
        chint = int(ch)
        if chint in self.D:
            return chint
    except ValueError:
        pass

    raise KeyError(ch)
```
[ "def", "_key", "(", "self", ",", "ch", ")", ":", "if", "ch", "in", "self", ".", "D", ":", "return", "ch", "if", "isinstance", "(", "ch", ",", "int", ")", ":", "raise", "KeyError", "(", "ch", ")", "# dont accept integers as custom names", "if", "self", ".", "chnames", ":", "for", "item", "in", "self", ".", "chnames", ".", "items", "(", ")", ":", "if", "item", "[", "1", "]", "==", "ch", ":", "return", "item", "[", "0", "]", "for", "item", "in", "self", ".", "chnames_0", ".", "items", "(", ")", ":", "if", "item", "[", "1", "]", "==", "ch", ":", "return", "item", "[", "0", "]", "# If we got here, ch can be an int represented by a string if it comes", "# from a condition string:", "try", ":", "chint", "=", "int", "(", "ch", ")", "if", "chint", "in", "self", ".", "D", ":", "return", "chint", "except", "ValueError", ":", "pass", "raise", "KeyError", "(", "ch", ")" ]
Return the integer key for ch. It is the key for the first value found in chnames and chnames_0, that matches ch. Or if ch is an int, ch is returned if it is a key in self.D
[ "Return", "the", "integer", "key", "for", "ch", ".", "It", "is", "the", "key", "for", "the", "first", "value", "found", "in", "chnames", "and", "chnames_0", "that", "matches", "ch", ".", "Or", "if", "ch", "is", "an", "int", "ch", "is", "returned", "if", "it", "is", "a", "key", "in", "self", ".", "D" ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L880-L908
id: 248,911
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.name
language: python
code:

```python
def name(self, ch, firstwordonly=False):
    """Return channel name for ch. ch is the channel name or the
    index number for the channel name, 0-based.

    ch: str or int.
        The channel name or indexed number.

    firstwordonly: bool or "pattern".
        If True, return only the first non-spaced word in the name.
        If a string, use as a re-pattern to re.findall and return
        the first element found. There will be an error if no match.
        r'\w+' is a good pattern for excluding leading and trailing
        obscure characters.

    Returned channel name is the fallback string if "custom" names
    are not available.
    """
    names = self.chnames or self.chnames_0
    i = self._key(ch)
    if not firstwordonly:
        return names[i]
    elif firstwordonly is True or firstwordonly == 1:
        return names[i].split()[0].strip()

    # According to user pattern
    return re.findall(firstwordonly, names[i])[0]
```
[ "def", "name", "(", "self", ",", "ch", ",", "firstwordonly", "=", "False", ")", ":", "names", "=", "self", ".", "chnames", "or", "self", ".", "chnames_0", "i", "=", "self", ".", "_key", "(", "ch", ")", "if", "not", "firstwordonly", ":", "return", "names", "[", "i", "]", "elif", "firstwordonly", "is", "True", "or", "firstwordonly", "==", "1", ":", "return", "names", "[", "i", "]", ".", "split", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "# According to user pattern", "return", "re", ".", "findall", "(", "firstwordonly", ",", "names", "[", "i", "]", ")", "[", "0", "]" ]
Return channel name for ch. ch is the channel name or the index number for the channel name, 0-based. ch: str or int. The channel name or indexed number. firstwordonly: bool or "pattern". If True, return only the first non-spaced word in the name. If a string, use as a re-pattern to re.findall and return the first element found. There will be error if no match. r'\w+' is good pattern for excluding leading and trailing obscure characters. Returned channel name is the fallback string if "custom" names are not available.
[ "Return", "channel", "name", "for", "ch", ".", "ch", "is", "the", "channel", "name", "or", "the", "index", "number", "for", "the", "channel", "name", "0", "-", "based", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L910-L938
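The firstwordonly="pattern" branch is a plain re.findall; a short comparison with the split() branch, on a made-up raw name:

```python
import re

raw_name = '"RPT" [1/min]'                 # made-up messy channel name
print(raw_name.split()[0])                 # '"RPT"' (quotes kept)
print(re.findall(r'\w+', raw_name)[0])     # 'RPT' (obscure chars dropped)
```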
id: 248,912
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: ChannelPack.query_names
language: python
code:

```python
def query_names(self, pat):
    """pat a shell pattern. See fnmatch.fnmatchcase. Print the
    results to stdout."""
    for item in self.chnames.items():
        if fnmatch.fnmatchcase(item[1], pat):
            print item
```
[ "def", "query_names", "(", "self", ",", "pat", ")", ":", "for", "item", "in", "self", ".", "chnames", ".", "items", "(", ")", ":", "if", "fnmatch", ".", "fnmatchcase", "(", "item", "[", "1", "]", ",", "pat", ")", ":", "print", "item" ]
pat a shell pattern. See fnmatch.fnmatchcase. Print the results to stdout.
[ "pat", "a", "shell", "pattern", ".", "See", "fnmatch", ".", "fnmatchcase", ".", "Print", "the", "results", "to", "stdout", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L940-L946
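fnmatch.fnmatchcase does case-sensitive shell-style matching; a sketch with made-up channel names:

```python
import fnmatch

names = {0: 'RPT', 1: 'B_CACT', 2: 'P_CACT', 3: 'RPT_filtered'}
for key, name in sorted(names.items()):
    if fnmatch.fnmatchcase(name, 'RPT*'):
        print(key, name)
# 0 RPT
# 3 RPT_filtered
```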
id: 248,913
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.set_condition
language: python
code:

```python
def set_condition(self, conkey, val):
    """Set condition conkey to value val. Convert val to str if not
    None.

    conkey: str
        A valid condition key.

    val: str, int, float, None
        Can always be None. Can be number or string depending on
        conkey.
    """
    if not any([conkey.startswith(c) for c in _COND_PREFIXES]):
        raise KeyError(conkey)

    if val in NONES:
        self.conditions[conkey] = None
    else:
        self.conditions[conkey] = str(val)
```
[ "def", "set_condition", "(", "self", ",", "conkey", ",", "val", ")", ":", "if", "not", "any", "(", "[", "conkey", ".", "startswith", "(", "c", ")", "for", "c", "in", "_COND_PREFIXES", "]", ")", ":", "raise", "KeyError", "(", "conkey", ")", "if", "val", "in", "NONES", ":", "self", ".", "conditions", "[", "conkey", "]", "=", "None", "else", ":", "self", ".", "conditions", "[", "conkey", "]", "=", "str", "(", "val", ")" ]
Set condition conkey to value val. Convert val to str if not None. conkey: str A valid condition key. val: str, int, float, None Can always be None. Can be number or string depending on conkey.
[ "Set", "condition", "conkey", "to", "value", "val", ".", "Convert", "val", "to", "str", "if", "not", "None", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1013-L1030
id: 248,914
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.spit_config
language: python
code:

```python
def spit_config(self, conf_file, firstwordonly=False):
    """conf_file a file opened for writing."""

    cfg = ConfigParser.RawConfigParser()

    for sec in _CONFIG_SECS:
        cfg.add_section(sec)

    sec = 'channels'
    for i in sorted(self.pack.D):
        cfg.set(sec, str(i),
                self.pack.name(i, firstwordonly=firstwordonly))

    sec = 'conditions'
    for k in self.sorted_conkeys():
        cfg.set(sec, k, self.conditions[k])

    cfg.write(conf_file)
```
[ "def", "spit_config", "(", "self", ",", "conf_file", ",", "firstwordonly", "=", "False", ")", ":", "cfg", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "for", "sec", "in", "_CONFIG_SECS", ":", "cfg", ".", "add_section", "(", "sec", ")", "sec", "=", "'channels'", "for", "i", "in", "sorted", "(", "self", ".", "pack", ".", "D", ")", ":", "cfg", ".", "set", "(", "sec", ",", "str", "(", "i", ")", ",", "self", ".", "pack", ".", "name", "(", "i", ",", "firstwordonly", "=", "firstwordonly", ")", ")", "sec", "=", "'conditions'", "for", "k", "in", "self", ".", "sorted_conkeys", "(", ")", ":", "cfg", ".", "set", "(", "sec", ",", "k", ",", "self", ".", "conditions", "[", "k", "]", ")", "cfg", ".", "write", "(", "conf_file", ")" ]
conf_file a file opened for writing.
[ "conf_file", "a", "file", "opened", "for", "writing", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1032-L1048
id: 248,915
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.eat_config
language: python
code:

```python
def eat_config(self, conf_file):
    """conf_file a file opened for reading.

    Update the pack's channel names and the conditions, accordingly.
    """
    # Read the file:
    cfg = ConfigParser.RawConfigParser()
    cfg.readfp(conf_file)

    # Update channel names:
    sec = 'channels'
    mess = 'mismatch of channel keys'
    assert(set(self.pack.D.keys()) == set([int(i) for i in cfg.options(sec)])), mess  # NOQA
    if not self.pack.chnames:
        self.pack.chnames = dict(self.pack.chnames_0)
    for i in cfg.options(sec):  # i is a string.
        self.pack.chnames[self.pack._key(int(i))] = cfg.get(sec, i)

    # Update conditions:
    sec = 'conditions'
    # conkeys = set(self.conditions.keys())
    # conops = set(cfg.options(sec))

    # This check should be superfluous:
    # --------------------------------------------------
    # for conkey in conkeys:
    #     if not any([conkey.startswith(c) for c in _COND_PREFIXES]):
    #         raise KeyError(conkey)
    # --------------------------------------------------

    # for con in conkeys - conops:  # Removed conditions.
    #     self.set_condition(con, None)

    conops = cfg.options(sec)
    self.reset()                # Scary
    for con in conops:
        self.set_condition(con, cfg.get(sec, con))
```
[ "def", "eat_config", "(", "self", ",", "conf_file", ")", ":", "# Read the file:", "cfg", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "cfg", ".", "readfp", "(", "conf_file", ")", "# Update channel names:", "sec", "=", "'channels'", "mess", "=", "'missmatch of channel keys'", "assert", "(", "set", "(", "self", ".", "pack", ".", "D", ".", "keys", "(", ")", ")", "==", "set", "(", "[", "int", "(", "i", ")", "for", "i", "in", "cfg", ".", "options", "(", "sec", ")", "]", ")", ")", ",", "mess", "# NOQA", "if", "not", "self", ".", "pack", ".", "chnames", ":", "self", ".", "pack", ".", "chnames", "=", "dict", "(", "self", ".", "pack", ".", "chnames_0", ")", "for", "i", "in", "cfg", ".", "options", "(", "sec", ")", ":", "# i is a string.", "self", ".", "pack", ".", "chnames", "[", "self", ".", "pack", ".", "_key", "(", "int", "(", "i", ")", ")", "]", "=", "cfg", ".", "get", "(", "sec", ",", "i", ")", "# Update conditions:", "sec", "=", "'conditions'", "# conkeys = set(self.conditions.keys())", "# conops = set(cfg.options(sec))", "# This check should be superfluous:", "# --------------------------------------------------", "# for conkey in conkeys:", "# if not any([conkey.startswith(c) for c in _COND_PREFIXES]):", "# raise KeyError(conkey)", "# --------------------------------------------------", "# for con in conkeys - conops: # Removed conditions.", "# self.set_condition(con, None)", "conops", "=", "cfg", ".", "options", "(", "sec", ")", "self", ".", "reset", "(", ")", "# Scary", "for", "con", "in", "conops", ":", "self", ".", "set_condition", "(", "con", ",", "cfg", ".", "get", "(", "sec", ",", "con", ")", ")" ]
conf_file a file opened for reading. Update the packs channel names and the conditions, accordingly.
[ "conf_file", "a", "file", "opened", "for", "reading", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1050-L1088
id: 248,916
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.cond_int
language: python
code:

```python
def cond_int(self, conkey):
    """Return the trailing number from cond if any, as an int. If no
    trailing number, return the string conkey as is.

    This is used for sorting the conditions properly even when
    passing the number 10. The name of this function could be
    improved since it might return a string."""

    m = re.match(self.numrx, conkey)
    if not m:
        return conkey
    return int(m.group(1))
```
[ "def", "cond_int", "(", "self", ",", "conkey", ")", ":", "m", "=", "re", ".", "match", "(", "self", ".", "numrx", ",", "conkey", ")", "if", "not", "m", ":", "return", "conkey", "return", "int", "(", "m", ".", "group", "(", "1", ")", ")" ]
Return the trailing number from cond if any, as an int. If no trailing number, return the string conkey as is. This is used for sorting the conditions properly even when passing the number 10. The name of this function could be improved since it might return a string.
[ "Return", "the", "trailing", "number", "from", "cond", "if", "any", "as", "an", "int", ".", "If", "no", "trailing", "number", "return", "the", "string", "conkey", "as", "is", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1125-L1136
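Why the int conversion matters for sorting: lexicographic order puts 'cond10' before 'cond2'. A sketch with an assumed pattern standing in for self.numrx:

```python
import re

numrx = r'[a-z]+(\d+)$'        # stand-in for self.numrx

def cond_int(conkey):
    m = re.match(numrx, conkey)
    return int(m.group(1)) if m else conkey

keys = ['cond10', 'cond2', 'cond1']
print(sorted(keys))                # ['cond1', 'cond10', 'cond2']
print(sorted(keys, key=cond_int))  # ['cond1', 'cond2', 'cond10']
```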
id: 248,917
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.valid_conkey
language: python
code:

```python
def valid_conkey(self, conkey):
    """Check that the conkey is a valid one. Return True if valid.

    A condition key is valid if it is one in the _COND_PREFIXES
    list. With the prefix removed, the remaining string must be
    either a number or the empty string."""

    for prefix in _COND_PREFIXES:
        trailing = conkey.lstrip(prefix)
        if trailing == '' and conkey:  # conkey is not empty
            return True
        try:
            int(trailing)
            return True
        except ValueError:
            pass

    return False
```
[ "def", "valid_conkey", "(", "self", ",", "conkey", ")", ":", "for", "prefix", "in", "_COND_PREFIXES", ":", "trailing", "=", "conkey", ".", "lstrip", "(", "prefix", ")", "if", "trailing", "==", "''", "and", "conkey", ":", "# conkey is not empty", "return", "True", "try", ":", "int", "(", "trailing", ")", "return", "True", "except", "ValueError", ":", "pass", "return", "False" ]
Check that the conkey is a valid one. Return True if valid. A condition key is valid if it is one in the _COND_PREFIXES list. With the prefix removed, the remaining string must be either a number or the empty string.
[ "Check", "that", "the", "conkey", "is", "a", "valid", "one", ".", "Return", "True", "if", "valid", ".", "A", "condition", "key", "is", "valid", "if", "it", "is", "one", "in", "the", "_COND_PREFIXES", "list", ".", "With", "the", "prefix", "removed", "the", "remaining", "string", "must", "be", "either", "a", "number", "or", "the", "empty", "string", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1138-L1154
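One thing to keep in mind when reading valid_conkey: str.lstrip takes a set of characters, not a literal prefix, so it can strip more than the prefix itself:

```python
print('startcond'.lstrip('start'))     # 'cond'  (chars s,t,a,r stripped)
print('stopcond2'.lstrip('stopcond'))  # '2'
print('conddon3'.lstrip('cond'))       # '3'     (more than 'cond' goes)
```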
id: 248,918
repo: tomnor/channelpack
path: channelpack/pack.py
func_name: _ConditionConfigure.sorted_conkeys
language: python
code:

```python
def sorted_conkeys(self, prefix=None):
    """Return all condition keys in self.conditions as a list sorted
    suitable for print or write to a file. If prefix is given return
    only the ones prefixed with prefix."""

    # Make for defined and sorted output:
    conkeys = []
    for cond in _COND_PREFIXES:
        conkeys += sorted([key for key in self.conditions
                           if key.startswith(cond)], key=self.cond_int)

    if not prefix:
        return conkeys
    return [key for key in conkeys if key.startswith(prefix)]
```
[ "def", "sorted_conkeys", "(", "self", ",", "prefix", "=", "None", ")", ":", "# Make for defined and sorted output:", "conkeys", "=", "[", "]", "for", "cond", "in", "_COND_PREFIXES", ":", "conkeys", "+=", "sorted", "(", "[", "key", "for", "key", "in", "self", ".", "conditions", "if", "key", ".", "startswith", "(", "cond", ")", "]", ",", "key", "=", "self", ".", "cond_int", ")", "if", "not", "prefix", ":", "return", "conkeys", "return", "[", "key", "for", "key", "in", "conkeys", "if", "key", ".", "startswith", "(", "prefix", ")", "]" ]
Return all condition keys in self.conditions as a list sorted suitable for print or write to a file. If prefix is given return only the ones prefixed with prefix.
[ "Return", "all", "condition", "keys", "in", "self", ".", "conditions", "as", "a", "list", "sorted", "suitable", "for", "print", "or", "write", "to", "a", "file", ".", "If", "prefix", "is", "given", "return", "only", "the", "ones", "prefixed", "with", "prefix", "." ]
sha: 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae
url: https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1186-L1198
id: 248,919
repo: ulf1/oxyba
path: oxyba/isordinal.py
func_name: isordinal
language: python
code:

```python
def isordinal(x):
    """Checks if a list or array contains ordinal data.

    Warning:
    --------
    This is not a reliable check for a variable being ordinal.
    The following criteria are used

    - There are more observations than unique values. Why? Ordinal
      means discrete or countable and I just assume that an ordinal
      sample should have some recurring (countable) values.

    - Values are integers or strings. Why? Ordinal scale data are
      usually labels (e.g. strings) or are encoded labels (e.g. as
      integers).

    - Ordinal scale data is sortable. Integers imply that the
      Analyst encoded the labels according to the ordinal data's
      "natural order". Strings would imply that the alphabetic
      order would be the natural order (which is usually not the
      case).

    Usage:
    ------
    obs = 10
    np.random.seed(42)
    x1 = np.random.randint(1, 50, (obs,))
    x2 = np.random.randint(0, 3, (obs,))
    x3 = np.random.uniform(0, 3, (obs,))

    flag, msg = isordinal(x1)
    if not flag:
        warnings.warn(msg)
    """
    import numpy as np

    if len(x) == len(np.unique(x)):
        return False, ("number of observations equals the "
                       "number of unique values.")

    if not isinstance(x[0], str):
        if not np.all(np.equal(np.mod(x, 1), 0)):
            return False, "elements are not integer or strings."

    return True, "is ordinal"
```
[ "def", "isordinal", "(", "x", ")", ":", "import", "numpy", "as", "np", "if", "len", "(", "x", ")", "==", "len", "(", "np", ".", "unique", "(", "x", ")", ")", ":", "return", "False", ",", "(", "\"number of observations equals the \"", "\"number of unique values.\"", ")", "if", "not", "isinstance", "(", "x", "[", "0", "]", ",", "str", ")", ":", "if", "not", "np", ".", "all", "(", "np", ".", "equal", "(", "np", ".", "mod", "(", "x", ",", "1", ")", ",", "0", ")", ")", ":", "return", "False", ",", "\"elements are not integer or strings.\"", "return", "True", ",", "\"is ordinal\"" ]
Checks if a list or array contains ordinal data. Warning: -------- This is not a reliable check for a variable being ordinal. The following criteria are used - There are more observations than unique values. Why? Ordinal means discrete or countable and I just assume that an ordinal sample should have some recurring (countable) values. - Values are integers or strings. Why? Ordinal scale data are usually labels (e.g. strings) or are encoded labels (e.g. as integers). - Ordinal scale data is sortable Integers imply that the Analyst encoded the labels according to the ordinal data's "natural order". Strings would imply that the alphabetic order would be the natual order (what is usually not the case) Usage: ------ obs = 10 np.random.seed(42) x1 = np.random.randint(1,50, (obs,)) x2 = np.random.randint(0,3, (obs,)) x3 = np.random.uniform(0,3, (obs,)) flag, msg = isordinal(x1) if not flag: warnings.warn(msg)
[ "Checks", "if", "a", "list", "or", "array", "contains", "ordinal", "data", "." ]
b3043116050de275124365cb11e7df91fb40169d
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/isordinal.py#L2-L49
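The Usage block in the docstring above leaves out its imports; a minimal runnable version of that sketch (assuming the function is importable as `oxyba.isordinal.isordinal`, per the recorded path) could be:

# Hypothetical usage sketch for isordinal; the import path is assumed
# from the record's module path, not confirmed by the source.
import warnings
import numpy as np
from oxyba.isordinal import isordinal

np.random.seed(42)
x1 = np.random.randint(1, 50, (10,))  # few repeats -> likely flagged as not ordinal
x2 = np.random.randint(0, 3, (10,))   # recurring integers -> passes the checks
x3 = np.random.uniform(0, 3, (10,))   # floats -> rejected as not integer/string

for x in (x1, x2, x3):
    flag, msg = isordinal(x)
    if not flag:
        warnings.warn(msg)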
248,920
rbarrois/confutils
confutils/merged_config.py
MergedConfig.get
def get(self, key, default=NoDefault): """Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`). """ key = normalize_key(key) if default is NoDefault: defaults = [] else: defaults = [default] for options in self.options: try: value = options[key] except KeyError: continue if isinstance(value, Default): defaults.append(value.value) continue else: return value if defaults: return defaults[0] return NoDefault
python
def get(self, key, default=NoDefault): """Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`). """ key = normalize_key(key) if default is NoDefault: defaults = [] else: defaults = [default] for options in self.options: try: value = options[key] except KeyError: continue if isinstance(value, Default): defaults.append(value.value) continue else: return value if defaults: return defaults[0] return NoDefault
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "NoDefault", ")", ":", "key", "=", "normalize_key", "(", "key", ")", "if", "default", "is", "NoDefault", ":", "defaults", "=", "[", "]", "else", ":", "defaults", "=", "[", "default", "]", "for", "options", "in", "self", ".", "options", ":", "try", ":", "value", "=", "options", "[", "key", "]", "except", "KeyError", ":", "continue", "if", "isinstance", "(", "value", ",", "Default", ")", ":", "defaults", ".", "append", "(", "value", ".", "value", ")", "continue", "else", ":", "return", "value", "if", "defaults", ":", "return", "defaults", "[", "0", "]", "return", "NoDefault" ]
Retrieve a value from its key. Retrieval steps are: 1) Normalize the key 2) For each option group: a) Retrieve the value at that key b) If no value exists, continue c) If the value is an instance of 'Default', continue d) Otherwise, return the value 3) If no option had a non-default value for the key, return the first Default() option for the key (or :arg:`default`).
[ "Retrieve", "a", "value", "from", "its", "key", "." ]
26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4
https://github.com/rbarrois/confutils/blob/26bbb3f31c09a99ee2104263a9e97d6d3fc8e4f4/confutils/merged_config.py#L92-L126
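The retrieval steps in the docstring above are easiest to see in isolation. Below is a standalone sketch of the same lookup order, not the real confutils API: option groups are scanned in priority order, `Default` wrappers are remembered, and a remembered default is returned only if no group holds a plain value.

# Standalone sketch of the layered lookup described above (not the real
# confutils classes): the explicit `default` argument, if given, wins
# over any Default() wrapper found in the option groups.
class Default:
    def __init__(self, value):
        self.value = value

NO_DEFAULT = object()

def merged_get(option_groups, key, default=NO_DEFAULT):
    defaults = [] if default is NO_DEFAULT else [default]
    for options in option_groups:
        if key not in options:
            continue
        value = options[key]
        if isinstance(value, Default):
            defaults.append(value.value)
            continue
        return value
    return defaults[0] if defaults else NO_DEFAULT

groups = [{'timeout': Default(30)}, {'timeout': 10}]
print(merged_get(groups, 'timeout'))  # -> 10: a plain value beats an earlier Default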
248,921
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_all_children
def get_all_children(self, include_self=False): """ Return all subsidiaries of this company. """ ownership = Ownership.objects.filter(parent=self) subsidiaries = Company.objects.filter(child__in=ownership) for sub in subsidiaries: subsidiaries = subsidiaries | sub.get_all_children() if include_self is True: self_company = Company.objects.filter(id=self.id) subsidiaries = subsidiaries | self_company return subsidiaries
python
def get_all_children(self, include_self=False): """ Return all subsidiaries of this company. """ ownership = Ownership.objects.filter(parent=self) subsidiaries = Company.objects.filter(child__in=ownership) for sub in subsidiaries: subsidiaries = subsidiaries | sub.get_all_children() if include_self is True: self_company = Company.objects.filter(id=self.id) subsidiaries = subsidiaries | self_company return subsidiaries
[ "def", "get_all_children", "(", "self", ",", "include_self", "=", "False", ")", ":", "ownership", "=", "Ownership", ".", "objects", ".", "filter", "(", "parent", "=", "self", ")", "subsidiaries", "=", "Company", ".", "objects", ".", "filter", "(", "child__in", "=", "ownership", ")", "for", "sub", "in", "subsidiaries", ":", "subsidiaries", "=", "subsidiaries", "|", "sub", ".", "get_all_children", "(", ")", "if", "include_self", "is", "True", ":", "self_company", "=", "Company", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", "subsidiaries", "=", "subsidiaries", "|", "self_company", "return", "subsidiaries" ]
Return all subsidiaries of this company.
[ "Return", "all", "subsidiaries", "of", "this", "company", "." ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L190-L203
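A hypothetical call (Django ORM assumed in scope); note that the method unions querysets recursively, so it re-queries at every level and would not terminate on a cyclic ownership graph:

# Hypothetical records; only the API shown in the record above is used.
acme = Company.objects.get(name='Acme Ltd')
subs = acme.get_all_children(include_self=True)  # QuerySet of Acme and all descendants
print(subs.count())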
248,922
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_all_parents
def get_all_parents(self): """ Return all parents of this company. """ ownership = Ownership.objects.filter(child=self) parents = Company.objects.filter(parent__in=ownership) for parent in parents: parents = parents | parent.get_all_parents() return parents
python
def get_all_parents(self): """ Return all parents of this company. """ ownership = Ownership.objects.filter(child=self) parents = Company.objects.filter(parent__in=ownership) for parent in parents: parents = parents | parent.get_all_parents() return parents
[ "def", "get_all_parents", "(", "self", ")", ":", "ownership", "=", "Ownership", ".", "objects", ".", "filter", "(", "child", "=", "self", ")", "parents", "=", "Company", ".", "objects", ".", "filter", "(", "parent__in", "=", "ownership", ")", "for", "parent", "in", "parents", ":", "parents", "=", "parents", "|", "parent", ".", "get_all_parents", "(", ")", "return", "parents" ]
Return all parents of this company.
[ "Return", "all", "parents", "of", "this", "company", "." ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L205-L213
248,923
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_all_related_companies
def get_all_related_companies(self, include_self=False): """ Return all parents and subsidiaries of the company Include the company if include_self = True """ parents = self.get_all_parents() subsidiaries = self.get_all_children() related_companies = parents | subsidiaries if include_self is True: company_qs = Company.objects.filter(id=self.id) related_companies = related_companies | company_qs related_companies_ids = [company.id for company in list(set(related_companies))] related_companies = Company.objects.filter(id__in=related_companies_ids) return related_companies
python
def get_all_related_companies(self, include_self=False): """ Return all parents and subsidiaries of the company Include the company if include_self = True """ parents = self.get_all_parents() subsidiaries = self.get_all_children() related_companies = parents | subsidiaries if include_self is True: company_qs = Company.objects.filter(id=self.id) related_companies = related_companies | company_qs related_companies_ids = [company.id for company in list(set(related_companies))] related_companies = Company.objects.filter(id__in=related_companies_ids) return related_companies
[ "def", "get_all_related_companies", "(", "self", ",", "include_self", "=", "False", ")", ":", "parents", "=", "self", ".", "get_all_parents", "(", ")", "subsidiaries", "=", "self", ".", "get_all_children", "(", ")", "related_companies", "=", "parents", "|", "subsidiaries", "if", "include_self", "is", "True", ":", "company_qs", "=", "Company", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", "related_companies", "=", "related_companies", "|", "company_qs", "related_companies_ids", "=", "[", "company", ".", "id", "for", "company", "in", "list", "(", "set", "(", "related_companies", ")", ")", "]", "related_companies", "=", "Company", ".", "objects", ".", "filter", "(", "id__in", "=", "related_companies_ids", ")", "return", "related_companies" ]
Return all parents and subsidiaries of the company Include the company if include_self = True
[ "Return", "all", "parents", "and", "subsidiaries", "of", "the", "company", "Include", "the", "company", "if", "include_self", "=", "True" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L215-L231
248,924
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_immediate_children
def get_immediate_children(self): """ Return all direct subsidiaries of this company. Excludes subsidiaries of subsidiaries """ ownership = Ownership.objects.filter(parent=self) subsidiaries = Company.objects.filter(child__in=ownership).distinct() return subsidiaries
python
def get_immediate_children(self): """ Return all direct subsidiaries of this company. Excludes subsidiaries of subsidiaries """ ownership = Ownership.objects.filter(parent=self) subsidiaries = Company.objects.filter(child__in=ownership).distinct() return subsidiaries
[ "def", "get_immediate_children", "(", "self", ")", ":", "ownership", "=", "Ownership", ".", "objects", ".", "filter", "(", "parent", "=", "self", ")", "subsidiaries", "=", "Company", ".", "objects", ".", "filter", "(", "child__in", "=", "ownership", ")", ".", "distinct", "(", ")", "return", "subsidiaries" ]
Return all direct subsidiaries of this company. Excludes subsidiaries of subsidiaries
[ "Return", "all", "direct", "subsidiaries", "of", "this", "company", ".", "Excludes", "subsidiaries", "of", "subsidiaries" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L233-L240
248,925
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_immediate_children_ownership
def get_immediate_children_ownership(self): """ Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS. Excludes subsidiaries of subsidiaries. """ ownership = Ownership.objects.filter(parent=self).select_related('child', 'child__country') return ownership
python
def get_immediate_children_ownership(self): """ Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS. Excludes subsidiaries of subsidiaries. """ ownership = Ownership.objects.filter(parent=self).select_related('child', 'child__country') return ownership
[ "def", "get_immediate_children_ownership", "(", "self", ")", ":", "ownership", "=", "Ownership", ".", "objects", ".", "filter", "(", "parent", "=", "self", ")", ".", "select_related", "(", "'child'", ",", "'child__country'", ")", "return", "ownership" ]
Return all direct subsidiaries of this company AS OWNERSHIP OBJECTS. Excludes subsidiaries of subsidiaries.
[ "Return", "all", "direct", "subsidiaries", "of", "this", "company", "AS", "OWNERSHIP", "OBJECTS", ".", "Excludes", "subsidiaries", "of", "subsidiaries", "." ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L242-L249
248,926
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_immediate_parents
def get_immediate_parents(self): """ Return all direct parents of this company. Excludes parents of parents """ ownership = Ownership.objects.filter(child=self) parents = Company.objects.filter(parent__in=ownership).distinct() return parents
python
def get_immediate_parents(self): """ Return all direct parents of this company. Excludes parents of parents """ ownership = Ownership.objects.filter(child=self) parents = Company.objects.filter(parent__in=ownership).distinct() return parents
[ "def", "get_immediate_parents", "(", "self", ")", ":", "ownership", "=", "Ownership", ".", "objects", ".", "filter", "(", "child", "=", "self", ")", "parents", "=", "Company", ".", "objects", ".", "filter", "(", "parent__in", "=", "ownership", ")", ".", "distinct", "(", ")", "return", "parents" ]
Return all direct parents of this company. Excludes parents of parents
[ "Return", "all", "direct", "parents", "of", "this", "company", ".", "Excludes", "parents", "of", "parents" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L251-L257
248,927
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_directors
def get_directors(self): """ Return all directors for this company """ directors = Director.objects.filter(company=self, is_current=True).select_related('person') return directors
python
def get_directors(self): """ Return all directors for this company """ directors = Director.objects.filter(company=self, is_current=True).select_related('person') return directors
[ "def", "get_directors", "(", "self", ")", ":", "directors", "=", "Director", ".", "objects", ".", "filter", "(", "company", "=", "self", ",", "is_current", "=", "True", ")", ".", "select_related", "(", "'person'", ")", "return", "directors" ]
Return all directors for this company
[ "Return", "all", "directors", "for", "this", "company" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L259-L264
248,928
Valuehorizon/valuehorizon-companies
companies/models.py
Company.cache_data
def cache_data(self): """ Cache some basic data such as financial statement metrics """ # Set Slug if not set if not self.slug_name: self.slug_name = slugify(self.name).strip() if len(self.slug_name) > 255: self.slug_name = self.slug_name[0:254]
python
def cache_data(self): """ Cache some basic data such as financial statement metrics """ # Set Slug if not set if not self.slug_name: self.slug_name = slugify(self.name).strip() if len(self.slug_name) > 255: self.slug_name = self.slug_name[0:254]
[ "def", "cache_data", "(", "self", ")", ":", "# Set Slug if not set", "if", "not", "self", ".", "slug_name", ":", "self", ".", "slug_name", "=", "slugify", "(", "self", ".", "name", ")", ".", "strip", "(", ")", "if", "len", "(", "self", ".", "slug_name", ")", ">", "255", ":", "self", ".", "slug_name", "=", "self", ".", "slug_name", "[", "0", ":", "254", "]" ]
Cache some basic data such as financial statement metrics
[ "Cache", "some", "basic", "data", "such", "as", "financial", "statement", "metrics" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L266-L274
248,929
Valuehorizon/valuehorizon-companies
companies/models.py
Company.get_name_on_date
def get_name_on_date(self, date): """ Get the name of a company on a given date. This takes into account any name changes that may have occurred. """ if date is None: return self.name post_name_changes = CompanyNameChange.objects.filter(company=self, date__gte=date).order_by('date') if post_name_changes.count() == 0: return self.name else: return post_name_changes[0].name_before
python
def get_name_on_date(self, date): """ Get the name of a company on a given date. This takes into account any name changes that may have occurred. """ if date is None: return self.name post_name_changes = CompanyNameChange.objects.filter(company=self, date__gte=date).order_by('date') if post_name_changes.count() == 0: return self.name else: return post_name_changes[0].name_before
[ "def", "get_name_on_date", "(", "self", ",", "date", ")", ":", "if", "date", "is", "None", ":", "return", "self", ".", "name", "post_name_changes", "=", "CompanyNameChange", ".", "objects", ".", "filter", "(", "company", "=", "self", ",", "date__gte", "=", "date", ")", ".", "order_by", "(", "'date'", ")", "if", "post_name_changes", ".", "count", "(", ")", "==", "0", ":", "return", "self", ".", "name", "else", ":", "return", "post_name_changes", "[", "0", "]", ".", "name_before" ]
Get the name of a company on a given date. This takes into account any name changes that may have occurred.
[ "Get", "the", "name", "of", "a", "company", "on", "a", "given", "date", ".", "This", "takes", "into", "account", "any", "name", "changes", "that", "may", "have", "occurred", "." ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L276-L289
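The lookup above relies on each `CompanyNameChange` row storing the name in force *before* the change took effect; hence the earliest change on or after the query date carries the historical name. A pure-Python sketch with hypothetical data (no ORM) shows the same logic:

# Pure-Python sketch of the point-in-time name lookup; the change
# history below is invented for illustration.
from datetime import date

name_changes = [  # (change_date, name_before)
    (date(2010, 1, 1), 'Acme Ltd'),
    (date(2015, 6, 1), 'Acme Holdings Ltd'),
]
current_name = 'Acme Global Ltd'

def name_on_date(query_date):
    later = sorted(c for c in name_changes if c[0] >= query_date)
    return later[0][1] if later else current_name

print(name_on_date(date(2012, 3, 1)))  # -> 'Acme Holdings Ltd'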
248,930
Valuehorizon/valuehorizon-companies
companies/models.py
Company.save
def save(self, *args, **kwargs): """ This method autogenerates the auto_generated_description field """ # Cache basic data self.cache_data() # Ensure slug doesn't change if self.id is not None: db_company = Company.objects.get(id=self.id) if self.slug_name != db_company.slug_name: raise ValueError("Cannot reset slug_name") if str(self.trade_name).strip() == "": self.trade_name = None # Short description check if len(str(self.short_description)) > 370: raise AssertionError("Short description must be no more than 370 characters") if self.sub_industry is not None: # Cache GICS self.industry = self.sub_industry.industry self.industry_group = self.sub_industry.industry.industry_group self.sector = self.sub_industry.industry.industry_group.sector # Cache GICS names self.sub_industry_name = self.sub_industry.name self.industry_name = self.industry.name self.industry_group_name = self.industry_group.name self.sector_name = self.sector.name # Call save method super(Company, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ This method autogenerates the auto_generated_description field """ # Cache basic data self.cache_data() # Ensure slug doesn't change if self.id is not None: db_company = Company.objects.get(id=self.id) if self.slug_name != db_company.slug_name: raise ValueError("Cannot reset slug_name") if str(self.trade_name).strip() == "": self.trade_name = None # Short description check if len(str(self.short_description)) > 370: raise AssertionError("Short description must be no more than 370 characters") if self.sub_industry is not None: # Cache GICS self.industry = self.sub_industry.industry self.industry_group = self.sub_industry.industry.industry_group self.sector = self.sub_industry.industry.industry_group.sector # Cache GICS names self.sub_industry_name = self.sub_industry.name self.industry_name = self.industry.name self.industry_group_name = self.industry_group.name self.sector_name = self.sector.name # Call save method super(Company, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Cache basic data", "self", ".", "cache_data", "(", ")", "# Ensure slug doesn't change", "if", "self", ".", "id", "is", "not", "None", ":", "db_company", "=", "Company", ".", "objects", ".", "get", "(", "id", "=", "self", ".", "id", ")", "if", "self", ".", "slug_name", "!=", "db_company", ".", "slug_name", ":", "raise", "ValueError", "(", "\"Cannot reset slug_name\"", ")", "if", "str", "(", "self", ".", "trade_name", ")", ".", "strip", "(", ")", "==", "\"\"", ":", "self", ".", "trade_name", "=", "None", "# Short description check", "if", "len", "(", "str", "(", "self", ".", "short_description", ")", ")", ">", "370", ":", "raise", "AssertionError", "(", "\"Short description must be no more than 370 characters\"", ")", "if", "self", ".", "sub_industry", "is", "not", "None", ":", "# Cache GICS", "self", ".", "industry", "=", "self", ".", "sub_industry", ".", "industry", "self", ".", "industry_group", "=", "self", ".", "sub_industry", ".", "industry", ".", "industry_group", "self", ".", "sector", "=", "self", ".", "sub_industry", ".", "industry", ".", "industry_group", ".", "sector", "# Cache GICS names", "self", ".", "sub_industry_name", "=", "self", ".", "sub_industry", ".", "name", "self", ".", "industry_name", "=", "self", ".", "industry", ".", "name", "self", ".", "industry_group_name", "=", "self", ".", "industry_group", ".", "name", "self", ".", "sector_name", "=", "self", ".", "sector", ".", "name", "# Call save method", "super", "(", "Company", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
This method autogenerates the auto_generated_description field
[ "This", "method", "autogenerates", "the", "auto_generated_description", "field" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L291-L324
248,931
Valuehorizon/valuehorizon-companies
companies/models.py
Ownership.save
def save(self, *args, **kwargs): """ Generate a name, and ensure amount is less than or equal to 100 """ self.name = str(self.parent.name) + " - " + str(self.child.name) + " - " + str(self.ownership_type) if self.amount > 100: raise ValueError("Ownership amount cannot be more than 100%") elif self.amount < 0: raise ValueError("Ownership amount cannot be less than 0%") else: super(Ownership, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """ Generate a name, and ensure amount is less than or equal to 100 """ self.name = str(self.parent.name) + " - " + str(self.child.name) + " - " + str(self.ownership_type) if self.amount > 100: raise ValueError("Ownership amount cannot be more than 100%") elif self.amount < 0: raise ValueError("Ownership amount cannot be less than 0%") else: super(Ownership, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "name", "=", "str", "(", "self", ".", "parent", ".", "name", ")", "+", "\" - \"", "+", "str", "(", "self", ".", "child", ".", "name", ")", "+", "\" - \"", "+", "str", "(", "self", ".", "ownership_type", ")", "if", "self", ".", "amount", ">", "100", ":", "raise", "ValueError", "(", "\"Ownership amount cannot be more than 100%\"", ")", "elif", "self", ".", "amount", "<", "0", ":", "raise", "ValueError", "(", "\"Ownership amount cannot be less than 0%\"", ")", "else", ":", "super", "(", "Ownership", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Generate a name, and ensure amount is less than or equal to 100
[ "Generate", "a", "name", "and", "ensure", "amount", "is", "less", "than", "or", "equal", "to", "100" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L386-L398
248,932
Valuehorizon/valuehorizon-companies
companies/models.py
Director.tenure
def tenure(self): """ Calculates board tenure in years """ if self.end_date: return round((self.end_date - self.start_date).days / 365., 2) else: return round((date.today() - self.start_date).days / 365., 2)
python
def tenure(self): """ Calculates board tenure in years """ if self.end_date: return round((self.end_date - self.start_date).days / 365., 2) else: return round((date.today() - self.start_date).days / 365., 2)
[ "def", "tenure", "(", "self", ")", ":", "if", "self", ".", "end_date", ":", "return", "round", "(", "(", "self", ".", "end_date", "-", "self", ".", "start_date", ")", ".", "days", "/", "365.", ",", "2", ")", "else", ":", "return", "round", "(", "(", "date", ".", "today", "(", ")", "-", "self", ".", "start_date", ")", ".", "days", "/", "365.", ",", "2", ")" ]
Calculates board tenure in years
[ "Calculates", "board", "tenure", "in", "years" ]
5366e230da69ee30fcdc1bf4beddc99310f6b767
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L423-L430
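The tenure arithmetic above is a plain day count divided by 365; a quick standalone check with hypothetical dates:

# Minimal check of the tenure arithmetic (invented dates).
from datetime import date

start, end = date(2018, 1, 1), date(2021, 1, 1)
print(round((end - start).days / 365., 2))           # -> 3.0 (1096 days)
print(round((date.today() - start).days / 365., 2))  # open-ended tenure, grows daily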
248,933
KelSolaar/Oncilla
oncilla/build_toc_tree.py
build_toc_tree
def build_toc_tree(title, input, output, content_directory): """ Builds Sphinx documentation table of content tree file. :param title: Package title. :type title: unicode :param input: Input file to convert. :type input: unicode :param output: Output file. :type output: unicode :param content_directory: Directory containing the content to be included in the table of content. :type content_directory: unicode :return: Definition success. :rtype: bool """ LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(build_toc_tree.__name__, output)) file = File(input) file.cache() existing_files = [foundations.strings.get_splitext_basename(item) for item in glob.glob("{0}/*{1}".format(content_directory, FILES_EXTENSION))] relative_directory = content_directory.replace("{0}/".format(os.path.dirname(output)), "") toc_tree = ["\n"] for line in file.content: search = re.search(r"`([a-zA-Z_ ]+)`_", line) if not search: continue item = search.groups()[0] code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:]) if code in existing_files: link = "{0}/{1}".format(relative_directory, code) data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link) LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(build_toc_tree.__name__, data.replace("\n", ""))) toc_tree.append(data) toc_tree.append("\n") TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title) TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0])) content = TOCTREE_TEMPLATE_BEGIN content.extend(toc_tree) content.extend(TOCTREE_TEMPLATE_END) file = File(output) file.content = content file.write() return True
python
def build_toc_tree(title, input, output, content_directory): """ Builds Sphinx documentation table of content tree file. :param title: Package title. :type title: unicode :param input: Input file to convert. :type input: unicode :param output: Output file. :type output: unicode :param content_directory: Directory containing the content to be included in the table of content. :type content_directory: unicode :return: Definition success. :rtype: bool """ LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(build_toc_tree.__name__, output)) file = File(input) file.cache() existing_files = [foundations.strings.get_splitext_basename(item) for item in glob.glob("{0}/*{1}".format(content_directory, FILES_EXTENSION))] relative_directory = content_directory.replace("{0}/".format(os.path.dirname(output)), "") toc_tree = ["\n"] for line in file.content: search = re.search(r"`([a-zA-Z_ ]+)`_", line) if not search: continue item = search.groups()[0] code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:]) if code in existing_files: link = "{0}/{1}".format(relative_directory, code) data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link) LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(build_toc_tree.__name__, data.replace("\n", ""))) toc_tree.append(data) toc_tree.append("\n") TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title) TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0])) content = TOCTREE_TEMPLATE_BEGIN content.extend(toc_tree) content.extend(TOCTREE_TEMPLATE_END) file = File(output) file.content = content file.write() return True
[ "def", "build_toc_tree", "(", "title", ",", "input", ",", "output", ",", "content_directory", ")", ":", "LOGGER", ".", "info", "(", "\"{0} | Building Sphinx documentation index '{1}' file!\"", ".", "format", "(", "build_toc_tree", ".", "__name__", ",", "output", ")", ")", "file", "=", "File", "(", "input", ")", "file", ".", "cache", "(", ")", "existing_files", "=", "[", "foundations", ".", "strings", ".", "get_splitext_basename", "(", "item", ")", "for", "item", "in", "glob", ".", "glob", "(", "\"{0}/*{1}\"", ".", "format", "(", "content_directory", ",", "FILES_EXTENSION", ")", ")", "]", "relative_directory", "=", "content_directory", ".", "replace", "(", "\"{0}/\"", ".", "format", "(", "os", ".", "path", ".", "dirname", "(", "output", ")", ")", ",", "\"\"", ")", "toc_tree", "=", "[", "\"\\n\"", "]", "for", "line", "in", "file", ".", "content", ":", "search", "=", "re", ".", "search", "(", "r\"`([a-zA-Z_ ]+)`_\"", ",", "line", ")", "if", "not", "search", ":", "continue", "item", "=", "search", ".", "groups", "(", ")", "[", "0", "]", "code", "=", "\"{0}{1}\"", ".", "format", "(", "item", "[", "0", "]", ".", "lower", "(", ")", ",", "item", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "[", "1", ":", "]", ")", "if", "code", "in", "existing_files", ":", "link", "=", "\"{0}/{1}\"", ".", "format", "(", "relative_directory", ",", "code", ")", "data", "=", "\"{0}{1}{2} <{3}>\\n\"", ".", "format", "(", "\" \"", ",", "\" \"", "*", "line", ".", "index", "(", "\"-\"", ")", ",", "item", ",", "link", ")", "LOGGER", ".", "info", "(", "\"{0} | Adding '{1}' entry to Toc Tree!\"", ".", "format", "(", "build_toc_tree", ".", "__name__", ",", "data", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", ")", "toc_tree", ".", "append", "(", "data", ")", "toc_tree", ".", "append", "(", "\"\\n\"", ")", "TOCTREE_TEMPLATE_BEGIN", "[", "0", "]", "=", "TOCTREE_TEMPLATE_BEGIN", "[", "0", "]", ".", "format", "(", "title", ")", "TOCTREE_TEMPLATE_BEGIN", "[", "1", "]", "=", "TOCTREE_TEMPLATE_BEGIN", "[", "1", "]", ".", "format", "(", "\"=\"", "*", "len", "(", "TOCTREE_TEMPLATE_BEGIN", "[", "0", "]", ")", ")", "content", "=", "TOCTREE_TEMPLATE_BEGIN", "content", ".", "extend", "(", "toc_tree", ")", "content", ".", "extend", "(", "TOCTREE_TEMPLATE_END", ")", "file", "=", "File", "(", "output", ")", "file", ".", "content", "=", "content", "file", ".", "write", "(", ")", "return", "True" ]
Builds Sphinx documentation table of content tree file. :param title: Package title. :type title: unicode :param input: Input file to convert. :type input: unicode :param output: Output file. :type output: unicode :param content_directory: Directory containing the content to be included in the table of content. :type content_directory: unicode :return: Definition success. :rtype: bool
[ "Builds", "Sphinx", "documentation", "table", "of", "content", "tree", "file", "." ]
2b4db3704cf2c22a09a207681cb041fff555a994
https://github.com/KelSolaar/Oncilla/blob/2b4db3704cf2c22a09a207681cb041fff555a994/oncilla/build_toc_tree.py#L73-L124
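The least obvious step in `build_toc_tree` is mapping a reStructuredText link title such as "Api Usage" to its content file basename: the expression lower-cases the first letter and strips spaces from the rest. In isolation:

# Illustration of the title -> basename mapping used above.
def title_to_code(item):
    return "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])

print(title_to_code("Api Usage"))  # -> 'apiUsage'
print(title_to_code("Changes"))    # -> 'changes'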
248,934
KelSolaar/Oncilla
oncilla/build_toc_tree.py
get_command_line_arguments
def get_command_line_arguments(): """ Retrieves command line arguments. :return: Namespace. :rtype: Namespace """ parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-h", "--help", action="help", help="'Displays this help message and exit.'") parser.add_argument("-t", "--title", type=unicode, dest="title", help="'Package title.'") parser.add_argument("-i", "--input", type=unicode, dest="input", help="'Input file to convert.'") parser.add_argument("-o", "--output", type=unicode, dest="output", help="'Output file.'") parser.add_argument("-c", "--content_directory", type=unicode, dest="content_directory", help="'Content directory.'") if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args()
python
def get_command_line_arguments(): """ Retrieves command line arguments. :return: Namespace. :rtype: Namespace """ parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-h", "--help", action="help", help="'Displays this help message and exit.'") parser.add_argument("-t", "--title", type=unicode, dest="title", help="'Package title.'") parser.add_argument("-i", "--input", type=unicode, dest="input", help="'Input file to convert.'") parser.add_argument("-o", "--output", type=unicode, dest="output", help="'Output file.'") parser.add_argument("-c", "--content_directory", type=unicode, dest="content_directory", help="'Content directory.'") if len(sys.argv) == 1: parser.print_help() sys.exit(1) return parser.parse_args()
[ "def", "get_command_line_arguments", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "parser", ".", "add_argument", "(", "\"-h\"", ",", "\"--help\"", ",", "action", "=", "\"help\"", ",", "help", "=", "\"'Displays this help message and exit.'\"", ")", "parser", ".", "add_argument", "(", "\"-t\"", ",", "\"--title\"", ",", "type", "=", "unicode", ",", "dest", "=", "\"title\"", ",", "help", "=", "\"'Package title.'\"", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "\"--input\"", ",", "type", "=", "unicode", ",", "dest", "=", "\"input\"", ",", "help", "=", "\"'Input file to convert.'\"", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "\"--output\"", ",", "type", "=", "unicode", ",", "dest", "=", "\"output\"", ",", "help", "=", "\"'Output file.'\"", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--content_directory\"", ",", "type", "=", "unicode", ",", "dest", "=", "\"content_directory\"", ",", "help", "=", "\"'Content directory.'\"", ")", "if", "len", "(", "sys", ".", "argv", ")", "==", "1", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Retrieves command line arguments. :return: Namespace. :rtype: Namespace
[ "Retrieves", "command", "line", "arguments", "." ]
2b4db3704cf2c22a09a207681cb041fff555a994
https://github.com/KelSolaar/Oncilla/blob/2b4db3704cf2c22a09a207681cb041fff555a994/oncilla/build_toc_tree.py#L127-L170
248,935
jmgilman/Neolib
neolib/neocodex/blowfish.py
Blowfish.initCTR
def initCTR(self, iv=0): """Initializes CTR mode of the cypher""" assert struct.calcsize("Q") == self.blocksize() self.ctr_iv = iv self._calcCTRBUF()
python
def initCTR(self, iv=0): """Initializes CTR mode of the cypher""" assert struct.calcsize("Q") == self.blocksize() self.ctr_iv = iv self._calcCTRBUF()
[ "def", "initCTR", "(", "self", ",", "iv", "=", "0", ")", ":", "assert", "struct", ".", "calcsize", "(", "\"Q\"", ")", "==", "self", ".", "blocksize", "(", ")", "self", ".", "ctr_iv", "=", "iv", "self", ".", "_calcCTRBUF", "(", ")" ]
Initializes CTR mode of the cypher
[ "Initializes", "CTR", "mode", "of", "the", "cypher" ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/neocodex/blowfish.py#L496-L500
248,936
jmgilman/Neolib
neolib/neocodex/blowfish.py
Blowfish._calcCTRBUF
def _calcCTRBUF(self): """Calculates one block of CTR keystream""" self.ctr_cks = self.encrypt(struct.pack("Q", self.ctr_iv)) # keystream block self.ctr_iv += 1 self.ctr_pos = 0
python
def _calcCTRBUF(self): """Calculates one block of CTR keystream""" self.ctr_cks = self.encrypt(struct.pack("Q", self.ctr_iv)) # keystream block self.ctr_iv += 1 self.ctr_pos = 0
[ "def", "_calcCTRBUF", "(", "self", ")", ":", "self", ".", "ctr_cks", "=", "self", ".", "encrypt", "(", "struct", ".", "pack", "(", "\"Q\"", ",", "self", ".", "ctr_iv", ")", ")", "# keystream block", "self", ".", "ctr_iv", "+=", "1", "self", ".", "ctr_pos", "=", "0" ]
Calculates one block of CTR keystream
[ "Calculates", "one", "block", "of", "CTR", "keystream" ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/neocodex/blowfish.py#L503-L507
248,937
jmgilman/Neolib
neolib/neocodex/blowfish.py
Blowfish._nextCTRByte
def _nextCTRByte(self): """Returns one byte of CTR keystream""" b = ord(self.ctr_cks[self.ctr_pos]) self.ctr_pos += 1 if self.ctr_pos >= len(self.ctr_cks): self._calcCTRBUF() return b
python
def _nextCTRByte(self): """Returns one byte of CTR keystream""" b = ord(self.ctr_cks[self.ctr_pos]) self.ctr_pos += 1 if self.ctr_pos >= len(self.ctr_cks): self._calcCTRBUF() return b
[ "def", "_nextCTRByte", "(", "self", ")", ":", "b", "=", "ord", "(", "self", ".", "ctr_cks", "[", "self", ".", "ctr_pos", "]", ")", "self", ".", "ctr_pos", "+=", "1", "if", "self", ".", "ctr_pos", ">=", "len", "(", "self", ".", "ctr_cks", ")", ":", "self", ".", "_calcCTRBUF", "(", ")", "return", "b" ]
Returns one byte of CTR keystream
[ "Returns", "one", "byte", "of", "CTR", "keystream" ]
228fafeaed0f3195676137732384a14820ae285c
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/neocodex/blowfish.py#L510-L516
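Together, `initCTR`, `_calcCTRBUF` and `_nextCTRByte` implement a byte-at-a-time CTR keystream: encrypt an incrementing counter block, hand out the resulting bytes, and refill when the block is exhausted. A self-contained toy sketch of the same pattern, with a hash standing in for the block cipher (illustrative only, not Blowfish and not secure):

# Toy CTR sketch: encrypt a counter, XOR the keystream with the data.
# Decryption is the same operation applied again.
import hashlib
import struct

def keystream(key, iv, n):
    out = b''
    ctr = iv
    while len(out) < n:
        out += hashlib.sha256(key + struct.pack("Q", ctr)).digest()
        ctr += 1
    return out[:n]

def ctr_xor(key, iv, data):
    return bytes(a ^ b for a, b in zip(data, keystream(key, iv, len(data))))

ct = ctr_xor(b'k3y', 0, b'attack at dawn')
assert ctr_xor(b'k3y', 0, ct) == b'attack at dawn'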
248,938
defensio/defensio-python
defensio/__init__.py
Defensio._call
def _call(self, method, path, data=None): """ Do the actual HTTP request """ if is_python3(): conn = http.client.HTTPConnection(API_HOST) else: conn = httplib.HTTPConnection(API_HOST) headers = {'User-Agent' : USER_AGENT} if data: headers.update( {'Content-type': 'application/x-www-form-urlencoded'} ) conn.request(method, path, self._urlencode(data), headers) else: conn.request(method, path, None, headers) response = conn.getresponse() result = [response.status, self._parse_body(response.read())] conn.close() return result
python
def _call(self, method, path, data=None): """ Do the actual HTTP request """ if is_python3(): conn = http.client.HTTPConnection(API_HOST) else: conn = httplib.HTTPConnection(API_HOST) headers = {'User-Agent' : USER_AGENT} if data: headers.update( {'Content-type': 'application/x-www-form-urlencoded'} ) conn.request(method, path, self._urlencode(data), headers) else: conn.request(method, path, None, headers) response = conn.getresponse() result = [response.status, self._parse_body(response.read())] conn.close() return result
[ "def", "_call", "(", "self", ",", "method", ",", "path", ",", "data", "=", "None", ")", ":", "if", "is_python3", "(", ")", ":", "conn", "=", "http", ".", "client", ".", "HTTPConnection", "(", "API_HOST", ")", "else", ":", "conn", "=", "httplib", ".", "HTTPConnection", "(", "API_HOST", ")", "headers", "=", "{", "'User-Agent'", ":", "USER_AGENT", "}", "if", "data", ":", "headers", ".", "update", "(", "{", "'Content-type'", ":", "'application/x-www-form-urlencoded'", "}", ")", "conn", ".", "request", "(", "method", ",", "path", ",", "self", ".", "_urlencode", "(", "data", ")", ",", "headers", ")", "else", ":", "conn", ".", "request", "(", "method", ",", "path", ",", "None", ",", "headers", ")", "response", "=", "conn", ".", "getresponse", "(", ")", "result", "=", "[", "response", ".", "status", ",", "self", ".", "_parse_body", "(", "response", ".", "read", "(", ")", ")", "]", "conn", ".", "close", "(", ")", "return", "result" ]
Do the actual HTTP request
[ "Do", "the", "actual", "HTTP", "request" ]
c1d2b64be941acb63c452a6d9a5526c59cb37007
https://github.com/defensio/defensio-python/blob/c1d2b64be941acb63c452a6d9a5526c59cb37007/defensio/__init__.py#L86-L105
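`_call` straddles Python 2 and 3; a Python-3-only sketch of the same form-encoded request pattern (placeholder host and user agent, not the real Defensio endpoint) could look like:

# Python-3-only sketch of the request pattern above; host and agent
# strings are placeholders, not values from the source.
import http.client
import json
from urllib.parse import urlencode

def call(method, path, data=None, host='api.example.com'):
    conn = http.client.HTTPConnection(host)
    headers = {'User-Agent': 'example-agent'}
    body = None
    if data:
        headers['Content-type'] = 'application/x-www-form-urlencoded'
        body = urlencode(data)
    conn.request(method, path, body, headers)
    response = conn.getresponse()
    result = [response.status, json.loads(response.read().decode('UTF-8'))]
    conn.close()
    return result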
248,939
defensio/defensio-python
defensio/__init__.py
Defensio._parse_body
def _parse_body(self, body): """ Just call a deserializer for FORMAT""" if is_python3(): return json.loads(body.decode('UTF-8')) else: return json.loads(body)
python
def _parse_body(self, body): """ Just call a deserializer for FORMAT""" if is_python3(): return json.loads(body.decode('UTF-8')) else: return json.loads(body)
[ "def", "_parse_body", "(", "self", ",", "body", ")", ":", "if", "is_python3", "(", ")", ":", "return", "json", ".", "loads", "(", "body", ".", "decode", "(", "'UTF-8'", ")", ")", "else", ":", "return", "json", ".", "loads", "(", "body", ")" ]
Just call a deserializer for FORMAT
[ "Just", "call", "a", "deserializer", "for", "FORMAT" ]
c1d2b64be941acb63c452a6d9a5526c59cb37007
https://github.com/defensio/defensio-python/blob/c1d2b64be941acb63c452a6d9a5526c59cb37007/defensio/__init__.py#L114-L119
248,940
praekelt/jmbo-competition
competition/admin.py
CompetitionEntryAdmin.get_urls
def get_urls(self): """ Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model """ urls = super(CompetitionEntryAdmin, self).get_urls() csv_urls = patterns('', url( r'^exportcsv/$', self.admin_site.admin_view(self.csv_export), name='competition-csv-export' ) ) return csv_urls + urls
python
def get_urls(self): """ Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model """ urls = super(CompetitionEntryAdmin, self).get_urls() csv_urls = patterns('', url( r'^exportcsv/$', self.admin_site.admin_view(self.csv_export), name='competition-csv-export' ) ) return csv_urls + urls
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "super", "(", "CompetitionEntryAdmin", ",", "self", ")", ".", "get_urls", "(", ")", "csv_urls", "=", "patterns", "(", "''", ",", "url", "(", "r'^exportcsv/$'", ",", "self", ".", "admin_site", ".", "admin_view", "(", "self", ".", "csv_export", ")", ",", "name", "=", "'competition-csv-export'", ")", ")", "return", "csv_urls", "+", "urls" ]
Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model
[ "Extend", "the", "admin", "urls", "for", "the", "CompetitionEntryAdmin", "model", "to", "be", "able", "to", "invoke", "a", "CSV", "export", "view", "on", "the", "admin", "model" ]
7efdc6d2d57077229108e7eb2ae99f87c32210ee
https://github.com/praekelt/jmbo-competition/blob/7efdc6d2d57077229108e7eb2ae99f87c32210ee/competition/admin.py#L152-L163
248,941
praekelt/jmbo-competition
competition/admin.py
CompetitionEntryAdmin.csv_export
def csv_export(self, request): """ Return a CSV document of the competition entry and its user details """ response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=competitionentries.csv' # create the csv writer with the response as the output file writer = UnicodeWriter(response) writer.writerow([ 'Competition ID', 'Competition', 'First Name', 'Last Name', 'Email Address', 'Cell Number', 'Question', 'Answer File', 'Answer Option', 'Answer Text', 'Has Correct Answer', 'Winner', 'Time Stamp' ]) # This sucks big time. get_urls is cached upon first call, which means # it has no concept of a filter currently being applied to the # changelist. Grab the querystring from the referrer and re-use # changelist API to apply the filtering for us. try: dc, qs = request.META.get('HTTP_REFERER', '').split('?') except ValueError: qs = '' request.META['QUERY_STRING'] = qs queryset = self.get_changelist(request)( request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self ).get_query_set(request) # select_related is too slow, so cache for fast lookups. This will not # scale indefinitely. competition_map = {} ids = queryset.distinct('competition').values_list( 'competition_id', flat=True ) for obj in Competition.objects.filter(id__in=ids): competition_map[obj.id] = obj # Looking up individual members is too slow, so cache for fast # lookups. This will not scale indefinitely. member_mobile_number_map = {} ids = queryset.distinct('user').values_list( 'user_id', flat=True ) for di in Member.objects.filter(id__in=ids).values( 'id', 'mobile_number' ): member_mobile_number_map[di['id']] = di['mobile_number'] for entry in queryset: competition = competition_map[entry.competition_id] entry.competition = competition row = [ entry.competition.id, entry.competition.title, entry.user.first_name, entry.user.last_name, entry.user.email, member_mobile_number_map.get(entry.user_id, ''), entry.competition.question, entry.answer_file.name if entry.answer_file else '', entry.answer_option.text if entry.answer_option else '', entry.answer_text, entry.has_correct_answer(), entry.winner, entry.timestamp ] writer.writerow(['' if f is None else unicode(f) for f in row]) # '' instead of None return response
python
def csv_export(self, request): """ Return a CSV document of the competition entry and its user details """ response = HttpResponse(content_type='text/csv') response['Content-Disposition'] = 'attachment; filename=competitionentries.csv' # create the csv writer with the response as the output file writer = UnicodeWriter(response) writer.writerow([ 'Competition ID', 'Competition', 'First Name', 'Last Name', 'Email Address', 'Cell Number', 'Question', 'Answer File', 'Answer Option', 'Answer Text', 'Has Correct Answer', 'Winner', 'Time Stamp' ]) # This sucks big time. get_urls is cached upon first call, which means # it has no concept of a filter currently being applied to the # changelist. Grab the querystring from the referrer and re-use # changelist API to apply the filtering for us. try: dc, qs = request.META.get('HTTP_REFERER', '').split('?') except ValueError: qs = '' request.META['QUERY_STRING'] = qs queryset = self.get_changelist(request)( request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self ).get_query_set(request) # select_related is too slow, so cache for fast lookups. This will not # scale indefinitely. competition_map = {} ids = queryset.distinct('competition').values_list( 'competition_id', flat=True ) for obj in Competition.objects.filter(id__in=ids): competition_map[obj.id] = obj # Looking up individual members is too slow, so cache for fast # lookups. This will not scale indefinitely. member_mobile_number_map = {} ids = queryset.distinct('user').values_list( 'user_id', flat=True ) for di in Member.objects.filter(id__in=ids).values( 'id', 'mobile_number' ): member_mobile_number_map[di['id']] = di['mobile_number'] for entry in queryset: competition = competition_map[entry.competition_id] entry.competition = competition row = [ entry.competition.id, entry.competition.title, entry.user.first_name, entry.user.last_name, entry.user.email, member_mobile_number_map.get(entry.user_id, ''), entry.competition.question, entry.answer_file.name if entry.answer_file else '', entry.answer_option.text if entry.answer_option else '', entry.answer_text, entry.has_correct_answer(), entry.winner, entry.timestamp ] writer.writerow(['' if f is None else unicode(f) for f in row]) # '' instead of None return response
[ "def", "csv_export", "(", "self", ",", "request", ")", ":", "response", "=", "HttpResponse", "(", "content_type", "=", "'text/csv'", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=competitionentries.csv'", "# create the csv writer with the response as the output file", "writer", "=", "UnicodeWriter", "(", "response", ")", "writer", ".", "writerow", "(", "[", "'Competition ID'", ",", "'Competition'", ",", "'First Name'", ",", "'Last Name'", ",", "'Email Address'", ",", "'Cell Number'", ",", "'Question'", ",", "'Answer File'", ",", "'Answer Option'", ",", "'Answer Text'", ",", "'Has Correct Answer'", ",", "'Winner'", ",", "'Time Stamp'", "]", ")", "# This sucks big time. get_urls is cached upon first call, which means", "# it has no concept of a filter currently being applied to the", "# changelist. Grab the querystring from the referrer and re-use", "# changelist API to apply the filtering for us.", "try", ":", "dc", ",", "qs", "=", "request", ".", "META", ".", "get", "(", "'HTTP_REFERER'", ",", "''", ")", ".", "split", "(", "'?'", ")", "except", "ValueError", ":", "qs", "=", "''", "request", ".", "META", "[", "'QUERY_STRING'", "]", "=", "qs", "queryset", "=", "self", ".", "get_changelist", "(", "request", ")", "(", "request", ",", "self", ".", "model", ",", "self", ".", "list_display", ",", "self", ".", "list_display_links", ",", "self", ".", "list_filter", ",", "self", ".", "date_hierarchy", ",", "self", ".", "search_fields", ",", "self", ".", "list_select_related", ",", "self", ".", "list_per_page", ",", "self", ".", "list_max_show_all", ",", "self", ".", "list_editable", ",", "self", ")", ".", "get_query_set", "(", "request", ")", "# select_related is too slow, so cache for fast lookups. This will not", "# scale indefinitely.", "competition_map", "=", "{", "}", "ids", "=", "queryset", ".", "distinct", "(", "'competition'", ")", ".", "values_list", "(", "'competition_id'", ",", "flat", "=", "True", ")", "for", "obj", "in", "Competition", ".", "objects", ".", "filter", "(", "id__in", "=", "ids", ")", ":", "competition_map", "[", "obj", ".", "id", "]", "=", "obj", "# Looking up individual members is too slow, so cache for fast", "# lookups. 
This will not scale indefinitely.", "member_mobile_number_map", "=", "{", "}", "ids", "=", "queryset", ".", "distinct", "(", "'user'", ")", ".", "values_list", "(", "'user_id'", ",", "flat", "=", "True", ")", "for", "di", "in", "Member", ".", "objects", ".", "filter", "(", "id__in", "=", "ids", ")", ".", "values", "(", "'id'", ",", "'mobile_number'", ")", ":", "member_mobile_number_map", "[", "di", "[", "'id'", "]", "]", "=", "di", "[", "'mobile_number'", "]", "for", "entry", "in", "queryset", ":", "competition", "=", "competition_map", "[", "entry", ".", "competition_id", "]", "entry", ".", "competition", "=", "competition", "row", "=", "[", "entry", ".", "competition", ".", "id", ",", "entry", ".", "competition", ".", "title", ",", "entry", ".", "user", ".", "first_name", ",", "entry", ".", "user", ".", "last_name", ",", "entry", ".", "user", ".", "email", ",", "member_mobile_number_map", ".", "get", "(", "entry", ".", "user_id", ",", "''", ")", ",", "entry", ".", "competition", ".", "question", ",", "entry", ".", "answer_file", ".", "name", "if", "entry", ".", "answer_file", "else", "''", ",", "entry", ".", "answer_option", ".", "text", "if", "entry", ".", "answer_option", "else", "''", ",", "entry", ".", "answer_text", ",", "entry", ".", "has_correct_answer", "(", ")", ",", "entry", ".", "winner", ",", "entry", ".", "timestamp", "]", "writer", ".", "writerow", "(", "[", "''", "if", "f", "is", "None", "else", "unicode", "(", "f", ")", "for", "f", "in", "row", "]", ")", "# '' instead of None", "return", "response" ]
Return a CSV document of the competition entry and its user details
[ "Return", "a", "CSV", "document", "of", "the", "competition", "entry", "and", "its", "user", "details" ]
7efdc6d2d57077229108e7eb2ae99f87c32210ee
https://github.com/praekelt/jmbo-competition/blob/7efdc6d2d57077229108e7eb2ae99f87c32210ee/competition/admin.py#L165-L235
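The core trick in `csv_export` is that Django's `HttpResponse` is file-like, so CSV rows can be streamed straight into it. A minimal sketch of that response pattern, using the stdlib `csv` module in place of the project's `UnicodeWriter` and a hypothetical column set:

# Django-style sketch of writing CSV directly into the response object;
# the header names and `rows` input are invented for illustration.
import csv
from django.http import HttpResponse

def entries_csv(rows):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=entries.csv'
    writer = csv.writer(response)  # HttpResponse exposes write(), so this works
    writer.writerow(['Competition', 'Winner'])
    for row in rows:
        writer.writerow(['' if f is None else f for f in row])
    return response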
248,942
klingtnet/sblgntparser
sblgntparser/tools.py
filelist
def filelist(folderpath, ext=None): ''' Returns a list of all the files contained in the folder specified by `folderpath`. To filter the files by extension, simply pass a list containing all the extensions (with leading `.`) as the second argument. ''' if not ext: ext = [] if os.path.exists(folderpath) and os.path.isdir(folderpath): return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ] else: log.warn('"{}" does not exist or is not a directory'.format(folderpath))
python
def filelist(folderpath, ext=None): ''' Returns a list of all the files contained in the folder specified by `folderpath`. To filter the files by extension, simply pass a list containing all the extensions (with leading `.`) as the second argument. ''' if not ext: ext = [] if os.path.exists(folderpath) and os.path.isdir(folderpath): return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ] else: log.warn('"{}" does not exist or is not a directory'.format(folderpath))
[ "def", "filelist", "(", "folderpath", ",", "ext", "=", "None", ")", ":", "if", "not", "ext", ":", "ext", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "folderpath", ")", "and", "os", ".", "path", ".", "isdir", "(", "folderpath", ")", ":", "return", "[", "os", ".", "path", ".", "join", "(", "folderpath", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "folderpath", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "folderpath", ",", "f", ")", ")", "and", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", "in", "ext", "]", "else", ":", "log", ".", "warn", "(", "'\"{}\" does not exist or is not a directory'", ".", "format", "(", "folderpath", ")", ")" ]
Returns a list of all the files contained in the folder specified by `folderpath`. To filter the files by extension, simply pass a list containing all the extensions (with leading `.`) as the second argument.
[ "Returns", "a", "list", "of", "all", "the", "files", "contained", "in", "the", "folder", "specified", "by", "folderpath", ".", "To", "filter", "the", "files", "by", "extension", ",", "simply", "pass", "a", "list", "containing", "all", "the", "extensions", "(", "with", "leading", ".", ")", "as", "the", "second", "argument", "." ]
535931a833203e5d9065072ec988c575b493d67f
https://github.com/klingtnet/sblgntparser/blob/535931a833203e5d9065072ec988c575b493d67f/sblgntparser/tools.py#L10-L21
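Assuming `filelist` as defined above is in scope (with its `os` and `log` dependencies), a hypothetical call, alongside a `pathlib` expression doing the same filtering:

# Hypothetical usage; the pathlib line reproduces the same selection.
from pathlib import Path

print(filelist('.', ext=['.py']))
print([str(p) for p in Path('.').iterdir()
       if p.is_file() and p.suffix in {'.py'}])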
248,943
klingtnet/sblgntparser
sblgntparser/tools.py
particles
def particles(category=None): ''' Returns a dict containing old greek particles grouped by category. ''' filepath = os.path.join(os.path.dirname(__file__), './particles.json') with open(filepath) as f: try: particles = json.load(f) except ValueError as e: log.error('Bad json format in "{}"'.format(filepath)) else: if category: if category in particles: return particles[category] else: log.warn('Category "{}" not contained in particle dictionary!'.format(category)) return particles
python
def particles(category=None): ''' Returns a dict containing old greek particles grouped by category. ''' filepath = os.path.join(os.path.dirname(__file__), './particles.json') with open(filepath) as f: try: particles = json.load(f) except ValueError as e: log.error('Bad json format in "{}"'.format(filepath)) else: if category: if category in particles: return particles[category] else: log.warn('Category "{}" not contained in particle dictionary!'.format(category)) return particles
[ "def", "particles", "(", "category", "=", "None", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'./particles.json'", ")", "with", "open", "(", "filepath", ")", "as", "f", ":", "try", ":", "particles", "=", "json", ".", "load", "(", "f", ")", "except", "ValueError", "as", "e", ":", "log", ".", "error", "(", "'Bad json format in \"{}\"'", ".", "format", "(", "filepath", ")", ")", "else", ":", "if", "category", ":", "if", "category", "in", "particles", ":", "return", "particles", "[", "category", "]", "else", ":", "log", ".", "warn", "(", "'Category \"{}\" not contained in particle dictionary!'", ".", "format", "(", "category", ")", ")", "return", "particles" ]
Returns a dict containing old greek particles grouped by category.
[ "Returns", "a", "dict", "containing", "old", "greek", "particles", "grouped", "by", "category", "." ]
535931a833203e5d9065072ec988c575b493d67f
https://github.com/klingtnet/sblgntparser/blob/535931a833203e5d9065072ec988c575b493d67f/sblgntparser/tools.py#L23-L39
248,944
klingtnet/sblgntparser
sblgntparser/tools.py
parts
def parts(): ''' Returns the dictionary with the part as key and the indices of the contained books as value. ''' parts = { 'Canon': [ _ for _ in range(1, 5) ], 'Apostle': [ 5 ], 'Paul': [ _ for _ in range(6, 19) ], 'General': [ _ for _ in range(19, 26) ], 'Apocalypse': [ 27 ] } return parts
python
def parts(): ''' Returns the dictionary with the part as key and the indices of the contained books as value. ''' parts = { 'Canon': [ _ for _ in range(1, 5) ], 'Apostle': [ 5 ], 'Paul': [ _ for _ in range(6, 19) ], 'General': [ _ for _ in range(19, 26) ], 'Apocalypse': [ 27 ] } return parts
[ "def", "parts", "(", ")", ":", "parts", "=", "{", "'Canon'", ":", "[", "_", "for", "_", "in", "range", "(", "1", ",", "5", ")", "]", ",", "'Apostle'", ":", "[", "5", "]", ",", "'Paul'", ":", "[", "_", "for", "_", "in", "range", "(", "6", ",", "19", ")", "]", ",", "'General'", ":", "[", "_", "for", "_", "in", "range", "(", "19", ",", "26", ")", "]", ",", "'Apocalypse'", ":", "[", "27", "]", "}", "return", "parts" ]
Returns the dictionary with the part as key and the contained book as indices.
[ "Returns", "the", "dictionary", "with", "the", "part", "as", "key", "and", "the", "contained", "book", "as", "indices", "." ]
535931a833203e5d9065072ec988c575b493d67f
https://github.com/klingtnet/sblgntparser/blob/535931a833203e5d9065072ec988c575b493d67f/sblgntparser/tools.py#L77-L88
248,945
cogniteev/docido-python-sdk
docido_sdk/env.py
Environment.component_activated
def component_activated(self, component): """Initialize additional member variables for components. Every component activated through the `Environment` object gets an additional member variable: `env` (the environment object) """ component.env = self super(Environment, self).component_activated(component)
python
def component_activated(self, component): """Initialize additional member variables for components. Every component activated through the `Environment` object gets an additional member variable: `env` (the environment object) """ component.env = self super(Environment, self).component_activated(component)
[ "def", "component_activated", "(", "self", ",", "component", ")", ":", "component", ".", "env", "=", "self", "super", "(", "Environment", ",", "self", ")", ".", "component_activated", "(", "component", ")" ]
Initialize additional member variables for components. Every component activated through the `Environment` object gets an additional member variable: `env` (the environment object)
[ "Initialize", "additional", "member", "variables", "for", "components", "." ]
58ecb6c6f5757fd40c0601657ab18368da7ddf33
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/env.py#L17-L24
248,946
openbermuda/ripl
ripl/show.py
SlideShow.interpret
def interpret(self, msg): """ Create a slide show """ self.captions = msg.get('captions', '.') for item in msg['slides']: self.add(item)
python
def interpret(self, msg): """ Create a slide show """ self.captions = msg.get('captions', '.') for item in msg['slides']: self.add(item)
[ "def", "interpret", "(", "self", ",", "msg", ")", ":", "self", ".", "captions", "=", "msg", ".", "get", "(", "'captions'", ",", "'.'", ")", "for", "item", "in", "msg", "[", "'slides'", "]", ":", "self", ".", "add", "(", "item", ")" ]
Create a slide show
[ "Create", "a", "slide", "show" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/show.py#L21-L27
248,947
openbermuda/ripl
ripl/show.py
SlideShow.set_duration
def set_duration(self, duration): """ Calculate how long each slide should show """ fixed = sum(int(x.get('time', 0)) for x in self.slides) nfixed = len([x for x in self.slides if x.get('time', 0) > 0]) unfixed = len(self.slides) - nfixed self.wait = max(1, int(duration / unfixed))
python
def set_duration(self, duration): """ Calculate how long each slide should show """ fixed = sum(int(x.get('time', 0)) for x in self.slides) nfixed = len([x for x in self.slides if x.get('time', 0) > 0]) unfixed = len(self.slides) - nfixed self.wait = max(1, int(duration / unfixed))
[ "def", "set_duration", "(", "self", ",", "duration", ")", ":", "fixed", "=", "sum", "(", "int", "(", "x", ".", "get", "(", "'time'", ",", "0", ")", ")", "for", "x", "in", "self", ".", "slides", ")", "nfixed", "=", "len", "(", "[", "x", "for", "x", "in", "self", ".", "slides", "if", "x", ".", "get", "(", "'time'", ",", "0", ")", ">", "0", "]", ")", "unfixed", "=", "len", "(", "self", ".", "slides", ")", "-", "nfixed", "self", ".", "wait", "=", "max", "(", "1", ",", "int", "(", "duration", "/", "unfixed", ")", ")" ]
Calculate how long each slide should show
[ "Calculate", "how", "long", "each", "slide", "should", "show" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/show.py#L50-L58
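set_duration above splits a total duration across the slides that carry no fixed time. Two things stand out: int(duration / unfixed) divides by zero when every slide has its own time, and the `fixed` total is computed but never subtracted from the budget. A small worked sketch with both points guarded; this is my reading of the intent, not the library's actual behavior:

    slides = [{'time': 10}, {}, {}, {'time': 5}]
    duration = 65
    fixed = sum(int(s.get('time', 0)) for s in slides)           # 15 seconds spoken for
    unfixed = len([s for s in slides if not s.get('time', 0)])   # 2 un-timed slides
    wait = max(1, int((duration - fixed) / unfixed)) if unfixed else 1
    print(wait)  # 25 seconds for each un-timed slide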
248,948
openbermuda/ripl
ripl/show.py
SlideShow.run
def run(self): """ Run the show """ self.show() if not self.wait: return for image in self.slides: wait = image.get('time', 0) wait = max(self.wait, wait) print('waiting %d seconds %s' % ( wait, image.get('image', ''))) yield image time.sleep(wait) self.next()
python
def run(self): """ Run the show """ self.show() if not self.wait: return for image in self.slides: wait = image.get('time', 0) wait = max(self.wait, wait) print('waiting %d seconds %s' % ( wait, image.get('image', ''))) yield image time.sleep(wait) self.next()
[ "def", "run", "(", "self", ")", ":", "self", ".", "show", "(", ")", "if", "not", "self", ".", "wait", ":", "return", "for", "image", "in", "self", ".", "slides", ":", "wait", "=", "image", ".", "get", "(", "'time'", ",", "0", ")", "wait", "=", "max", "(", "self", ".", "wait", ",", "wait", ")", "print", "(", "'waiting %d seconds %s'", "%", "(", "wait", ",", "image", ".", "get", "(", "'image'", ",", "''", ")", ")", ")", "yield", "image", "time", ".", "sleep", "(", "wait", ")", "self", ".", "next", "(", ")" ]
Run the show
[ "Run", "the", "show" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/show.py#L60-L75
248,949
pjuren/pyokit
src/pyokit/io/fastaIterators.py
fastaIterator
def fastaIterator(fn, useMutableString=False, verbose=False): """ A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated it as a file-like object, which must have a readline() method. :param useMustableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress """ fh = fn if type(fh).__name__ == "str": fh = open(fh) if verbose: try: pind = __build_progress_indicator(fh) except ProgressIndicatorError as e: sys.stderr.write("Warning: unable to show progress for stream. " + "Reason: " + str(e)) verbose = False prev_line = None while True: seqHeader = __read_seq_header(fh, prev_line) name = seqHeader[1:].strip() seq_data, prev_line = __read_seq_data(fh) if verbose: pind.done = fh.tell() pind.showProgress(to_strm=sys.stderr) yield Sequence(name, seq_data, useMutableString) # remember where we stopped for next call, or finish if prev_line == "": break
python
def fastaIterator(fn, useMutableString=False, verbose=False): """ A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated it as a file-like object, which must have a readline() method. :param useMustableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress """ fh = fn if type(fh).__name__ == "str": fh = open(fh) if verbose: try: pind = __build_progress_indicator(fh) except ProgressIndicatorError as e: sys.stderr.write("Warning: unable to show progress for stream. " + "Reason: " + str(e)) verbose = False prev_line = None while True: seqHeader = __read_seq_header(fh, prev_line) name = seqHeader[1:].strip() seq_data, prev_line = __read_seq_data(fh) if verbose: pind.done = fh.tell() pind.showProgress(to_strm=sys.stderr) yield Sequence(name, seq_data, useMutableString) # remember where we stopped for next call, or finish if prev_line == "": break
[ "def", "fastaIterator", "(", "fn", ",", "useMutableString", "=", "False", ",", "verbose", "=", "False", ")", ":", "fh", "=", "fn", "if", "type", "(", "fh", ")", ".", "__name__", "==", "\"str\"", ":", "fh", "=", "open", "(", "fh", ")", "if", "verbose", ":", "try", ":", "pind", "=", "__build_progress_indicator", "(", "fh", ")", "except", "ProgressIndicatorError", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "\"Warning: unable to show progress for stream. \"", "+", "\"Reason: \"", "+", "str", "(", "e", ")", ")", "verbose", "=", "False", "prev_line", "=", "None", "while", "True", ":", "seqHeader", "=", "__read_seq_header", "(", "fh", ",", "prev_line", ")", "name", "=", "seqHeader", "[", "1", ":", "]", ".", "strip", "(", ")", "seq_data", ",", "prev_line", "=", "__read_seq_data", "(", "fh", ")", "if", "verbose", ":", "pind", ".", "done", "=", "fh", ".", "tell", "(", ")", "pind", ".", "showProgress", "(", "to_strm", "=", "sys", ".", "stderr", ")", "yield", "Sequence", "(", "name", ",", "seq_data", ",", "useMutableString", ")", "# remember where we stopped for next call, or finish", "if", "prev_line", "==", "\"\"", ":", "break" ]
A generator function which yields fastaSequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated as a file-like object, which must have a readline() method. :param useMutableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress
[ "A", "generator", "function", "which", "yields", "fastaSequence", "objects", "from", "a", "fasta", "-", "format", "file", "or", "stream", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/fastaIterators.py#L148-L186
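fastaIterator reads a header, then sequence data until the next header, carrying the look-ahead line between yields. The __read_seq_header/__read_seq_data helpers are not shown in the record, so here is a simplified, self-contained re-implementation of the same generator pattern rather than the library's exact code:

    import io

    def fasta_records(fh):
        name, chunks = None, []
        for line in fh:
            line = line.strip()
            if line.startswith('>'):
                if name is not None:
                    yield name, ''.join(chunks)   # flush the previous record
                name, chunks = line[1:], []
            elif line:
                chunks.append(line)
        if name is not None:
            yield name, ''.join(chunks)           # flush the final record

    demo = io.StringIO('>seq1\nACGT\nTTGA\n>seq2\nGGCC\n')
    print(list(fasta_records(demo)))  # [('seq1', 'ACGTTTGA'), ('seq2', 'GGCC')]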
248,950
tylerbutler/propane
propane/django/fields.py
PickledObjectField.get_default
def get_default(self): """ Returns the default value for this field. The default implementation on models.Field calls force_unicode on the default, which means you can't set arbitrary Python objects as the default. To fix this, we just return the value without calling force_unicode on it. Note that if you set a callable as a default, the field will still call it. It will *not* try to pickle and encode it. """ if self.has_default(): if callable(self.default): return self.default() return self.default # If the field doesn't have a default, then we punt to models.Field. return super(PickledObjectField, self).get_default()
python
def get_default(self): """ Returns the default value for this field. The default implementation on models.Field calls force_unicode on the default, which means you can't set arbitrary Python objects as the default. To fix this, we just return the value without calling force_unicode on it. Note that if you set a callable as a default, the field will still call it. It will *not* try to pickle and encode it. """ if self.has_default(): if callable(self.default): return self.default() return self.default # If the field doesn't have a default, then we punt to models.Field. return super(PickledObjectField, self).get_default()
[ "def", "get_default", "(", "self", ")", ":", "if", "self", ".", "has_default", "(", ")", ":", "if", "callable", "(", "self", ".", "default", ")", ":", "return", "self", ".", "default", "(", ")", "return", "self", ".", "default", "# If the field doesn't have a default, then we punt to models.Field.", "return", "super", "(", "PickledObjectField", ",", "self", ")", ".", "get_default", "(", ")" ]
Returns the default value for this field. The default implementation on models.Field calls force_unicode on the default, which means you can't set arbitrary Python objects as the default. To fix this, we just return the value without calling force_unicode on it. Note that if you set a callable as a default, the field will still call it. It will *not* try to pickle and encode it.
[ "Returns", "the", "default", "value", "for", "this", "field", "." ]
6c404285ab8d78865b7175a5c8adf8fae12d6be5
https://github.com/tylerbutler/propane/blob/6c404285ab8d78865b7175a5c8adf8fae12d6be5/propane/django/fields.py#L89-L106
248,951
tylerbutler/propane
propane/django/fields.py
PickledObjectField.to_python
def to_python(self, value): """ B64decode and unpickle the object, optionally decompressing it. If an error is raised in de-pickling and we're sure the value is a definite pickle, the error is allowed to propogate. If we aren't sure if the value is a pickle or not, then we catch the error and return the original value instead. """ if value is not None: try: value = dbsafe_decode(value, self.compress) except: # If the value is a definite pickle; and an error is raised in # de-pickling it should be allowed to propogate. if isinstance(value, PickledObject): raise return value
python
def to_python(self, value): """ B64decode and unpickle the object, optionally decompressing it. If an error is raised in de-pickling and we're sure the value is a definite pickle, the error is allowed to propogate. If we aren't sure if the value is a pickle or not, then we catch the error and return the original value instead. """ if value is not None: try: value = dbsafe_decode(value, self.compress) except: # If the value is a definite pickle; and an error is raised in # de-pickling it should be allowed to propogate. if isinstance(value, PickledObject): raise return value
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "dbsafe_decode", "(", "value", ",", "self", ".", "compress", ")", "except", ":", "# If the value is a definite pickle; and an error is raised in", "# de-pickling it should be allowed to propogate.", "if", "isinstance", "(", "value", ",", "PickledObject", ")", ":", "raise", "return", "value" ]
B64decode and unpickle the object, optionally decompressing it. If an error is raised in de-pickling and we're sure the value is a definite pickle, the error is allowed to propagate. If we aren't sure if the value is a pickle or not, then we catch the error and return the original value instead.
[ "B64decode", "and", "unpickle", "the", "object", "optionally", "decompressing", "it", "." ]
6c404285ab8d78865b7175a5c8adf8fae12d6be5
https://github.com/tylerbutler/propane/blob/6c404285ab8d78865b7175a5c8adf8fae12d6be5/propane/django/fields.py#L109-L127
248,952
tylerbutler/propane
propane/django/fields.py
PickledObjectField.get_db_prep_value
def get_db_prep_value(self, value): """ Pickle and b64encode the object, optionally compressing it. The pickling protocol is specified explicitly (by default 2), rather than as -1 or HIGHEST_PROTOCOL, because we don't want the protocol to change over time. If it did, ``exact`` and ``in`` lookups would likely fail, since pickle would now be generating a different string. """ if value is not None and not isinstance(value, PickledObject): # We call force_unicode here explicitly, so that the encoded string # isn't rejected by the postgresql_psycopg2 backend. Alternatively, # we could have just registered PickledObject with the psycopg # marshaller (telling it to store it like it would a string), but # since both of these methods result in the same value being stored, # doing things this way is much easier. value = force_unicode(dbsafe_encode(value, self.compress)) return value
python
def get_db_prep_value(self, value): """ Pickle and b64encode the object, optionally compressing it. The pickling protocol is specified explicitly (by default 2), rather than as -1 or HIGHEST_PROTOCOL, because we don't want the protocol to change over time. If it did, ``exact`` and ``in`` lookups would likely fail, since pickle would now be generating a different string. """ if value is not None and not isinstance(value, PickledObject): # We call force_unicode here explicitly, so that the encoded string # isn't rejected by the postgresql_psycopg2 backend. Alternatively, # we could have just registered PickledObject with the psycopg # marshaller (telling it to store it like it would a string), but # since both of these methods result in the same value being stored, # doing things this way is much easier. value = force_unicode(dbsafe_encode(value, self.compress)) return value
[ "def", "get_db_prep_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", "and", "not", "isinstance", "(", "value", ",", "PickledObject", ")", ":", "# We call force_unicode here explicitly, so that the encoded string", "# isn't rejected by the postgresql_psycopg2 backend. Alternatively,", "# we could have just registered PickledObject with the psycopg", "# marshaller (telling it to store it like it would a string), but", "# since both of these methods result in the same value being stored,", "# doing things this way is much easier.", "value", "=", "force_unicode", "(", "dbsafe_encode", "(", "value", ",", "self", ".", "compress", ")", ")", "return", "value" ]
Pickle and b64encode the object, optionally compressing it. The pickling protocol is specified explicitly (by default 2), rather than as -1 or HIGHEST_PROTOCOL, because we don't want the protocol to change over time. If it did, ``exact`` and ``in`` lookups would likely fail, since pickle would now be generating a different string.
[ "Pickle", "and", "b64encode", "the", "object", "optionally", "compressing", "it", "." ]
6c404285ab8d78865b7175a5c8adf8fae12d6be5
https://github.com/tylerbutler/propane/blob/6c404285ab8d78865b7175a5c8adf8fae12d6be5/propane/django/fields.py#L129-L148
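The PickledObjectField records lean on dbsafe_encode/dbsafe_decode helpers whose bodies are not shown. A plausible round-trip sketch, assuming they are pickle plus base64 with optional zlib compression; the fixed protocol matters because `exact` and `in` lookups compare the stored strings byte for byte:

    import base64, pickle, zlib

    def dbsafe_encode(value, compress=False, protocol=2):
        raw = pickle.dumps(value, protocol)   # fixed protocol keeps output stable
        if compress:
            raw = zlib.compress(raw)
        return base64.b64encode(raw).decode('ascii')

    def dbsafe_decode(value, compress=False):
        raw = base64.b64decode(value)
        if compress:
            raw = zlib.decompress(raw)
        return pickle.loads(raw)

    blob = dbsafe_encode({'a': [1, 2]}, compress=True)
    print(dbsafe_decode(blob, compress=True))  # {'a': [1, 2]}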
248,953
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.nextComment
def nextComment(self, text, start=0): """Return the next comment found in text starting at start. """ m = min([self.lineComment(text, start), self.blockComment(text, start), self._emptylineregex.search(text, start)], key=lambda m: m.start(0) if m else len(text)) return m
python
def nextComment(self, text, start=0): """Return the next comment found in text starting at start. """ m = min([self.lineComment(text, start), self.blockComment(text, start), self._emptylineregex.search(text, start)], key=lambda m: m.start(0) if m else len(text)) return m
[ "def", "nextComment", "(", "self", ",", "text", ",", "start", "=", "0", ")", ":", "m", "=", "min", "(", "[", "self", ".", "lineComment", "(", "text", ",", "start", ")", ",", "self", ".", "blockComment", "(", "text", ",", "start", ")", ",", "self", ".", "_emptylineregex", ".", "search", "(", "text", ",", "start", ")", "]", ",", "key", "=", "lambda", "m", ":", "m", ".", "start", "(", "0", ")", "if", "m", "else", "len", "(", "text", ")", ")", "return", "m" ]
Return the next comment found in text starting at start.
[ "Return", "the", "next", "comment", "found", "in", "text", "starting", "at", "start", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L57-L66
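nextComment picks the earliest of several regex matches, and the key function treats a failed match (None) as if it sat at end-of-text, so it never wins. The same trick in isolation, including a pattern that does not match:

    import re

    text = 'code # line comment\n/* block */'
    line = re.search(r'#.*', text)                 # match at index 5
    block = re.search(r'/\*.*?\*/', text, re.S)    # match at index 20
    html = re.search(r'<!--.*?-->', text)          # None: no match
    earliest = min([line, block, html],
                   key=lambda m: m.start(0) if m else len(text))
    print(earliest.group(0))  # '# line comment' wins: it starts first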
248,954
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.isLineComment
def isLineComment(self, text): """Return true if the text is a line comment. """ m = self.lineComment(text, 0) return m and m.start(0) == 0 and m.end(0) == len(text)
python
def isLineComment(self, text): """Return true if the text is a line comment. """ m = self.lineComment(text, 0) return m and m.start(0) == 0 and m.end(0) == len(text)
[ "def", "isLineComment", "(", "self", ",", "text", ")", ":", "m", "=", "self", ".", "lineComment", "(", "text", ",", "0", ")", "return", "m", "and", "m", ".", "start", "(", "0", ")", "==", "0", "and", "m", ".", "end", "(", "0", ")", "==", "len", "(", "text", ")" ]
Return true if the text is a line comment.
[ "Return", "true", "if", "the", "text", "is", "a", "line", "comment", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L75-L81
248,955
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.nextValidComment
def nextValidComment(self, text, start=0): """Return the next actual comment. """ m = min([self.lineComment(text, start), self.blockComment(text, start)], key=lambda m: m.start(0) if m else len(text)) return m
python
def nextValidComment(self, text, start=0): """Return the next actual comment. """ m = min([self.lineComment(text, start), self.blockComment(text, start)], key=lambda m: m.start(0) if m else len(text)) return m
[ "def", "nextValidComment", "(", "self", ",", "text", ",", "start", "=", "0", ")", ":", "m", "=", "min", "(", "[", "self", ".", "lineComment", "(", "text", ",", "start", ")", ",", "self", ".", "blockComment", "(", "text", ",", "start", ")", "]", ",", "key", "=", "lambda", "m", ":", "m", ".", "start", "(", "0", ")", "if", "m", "else", "len", "(", "text", ")", ")", "return", "m" ]
Return the next actual comment.
[ "Return", "the", "next", "actual", "comment", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L83-L91
248,956
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.extractContent
def extractContent(self, text): """Extract the content of comment text. """ m = self.nextValidComment(text) return '' if m is None else m.group(1)
python
def extractContent(self, text): """Extract the content of comment text. """ m = self.nextValidComment(text) return '' if m is None else m.group(1)
[ "def", "extractContent", "(", "self", ",", "text", ")", ":", "m", "=", "self", ".", "nextValidComment", "(", "text", ")", "return", "''", "if", "m", "is", "None", "else", "m", ".", "group", "(", "1", ")" ]
Extract the content of comment text.
[ "Extract", "the", "content", "of", "comment", "text", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L93-L99
248,957
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.chunkComment
def chunkComment(self, text, start=0): """Return a list of chunks of comments. """ # Build a list of comments comm, out = self.nextComment(text, start), [] while comm: out.append(comm.group(0)) comm = self.nextComment(text, comm.start(0) + 1) # Collect the comments according to whether they are line # comments or block comments. out = [list(g) for (_, g) in groupby(out, self.isLineComment)] # Filter out seperator lines. out = [i for i in out if i != ['']] return out
python
def chunkComment(self, text, start=0): """Return a list of chunks of comments. """ # Build a list of comments comm, out = self.nextComment(text, start), [] while comm: out.append(comm.group(0)) comm = self.nextComment(text, comm.start(0) + 1) # Collect the comments according to whether they are line # comments or block comments. out = [list(g) for (_, g) in groupby(out, self.isLineComment)] # Filter out seperator lines. out = [i for i in out if i != ['']] return out
[ "def", "chunkComment", "(", "self", ",", "text", ",", "start", "=", "0", ")", ":", "# Build a list of comments", "comm", ",", "out", "=", "self", ".", "nextComment", "(", "text", ",", "start", ")", ",", "[", "]", "while", "comm", ":", "out", ".", "append", "(", "comm", ".", "group", "(", "0", ")", ")", "comm", "=", "self", ".", "nextComment", "(", "text", ",", "comm", ".", "start", "(", "0", ")", "+", "1", ")", "# Collect the comments according to whether they are line", "# comments or block comments.", "out", "=", "[", "list", "(", "g", ")", "for", "(", "_", ",", "g", ")", "in", "groupby", "(", "out", ",", "self", ".", "isLineComment", ")", "]", "# Filter out seperator lines.", "out", "=", "[", "i", "for", "i", "in", "out", "if", "i", "!=", "[", "''", "]", "]", "return", "out" ]
Return a list of chunks of comments.
[ "Return", "a", "list", "of", "chunks", "of", "comments", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L109-L127
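chunkComment relies on itertools.groupby to bunch consecutive comments by kind (line vs. block). groupby only groups adjacent items, which is exactly what makes this chunking work; a standalone illustration:

    from itertools import groupby

    comments = ['# a', '# b', '/* c */', '# d']
    is_line = lambda c: c.startswith('#')
    chunks = [list(g) for _, g in groupby(comments, is_line)]
    print(chunks)  # [['# a', '# b'], ['/* c */'], ['# d']]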
248,958
kcolford/txt2boil
txt2boil/core/extractor.py
Extractor.code
def code(self, text): """Return the code instead of the comments. """ comm = self.nextValidComment(text) while comm: text = text[:comm.start()] + text[comm.end():] comm = self.nextValidComment(text, comm.end(0)) return text
python
def code(self, text): """Return the code instead of the comments. """ comm = self.nextValidComment(text) while comm: text = text[:comm.start()] + text[comm.end():] comm = self.nextValidComment(text, comm.end(0)) return text
[ "def", "code", "(", "self", ",", "text", ")", ":", "comm", "=", "self", ".", "nextValidComment", "(", "text", ")", "while", "comm", ":", "text", "=", "text", "[", ":", "comm", ".", "start", "(", ")", "]", "+", "text", "[", "comm", ".", "end", "(", ")", ":", "]", "comm", "=", "self", ".", "nextValidComment", "(", "text", ",", "comm", ".", "end", "(", "0", ")", ")", "return", "text" ]
Return the code instead of the comments.
[ "Return", "the", "code", "instead", "of", "the", "comments", "." ]
853a47bb8db27c0224531f24dfd02839c983d027
https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/core/extractor.py#L138-L147
248,959
emencia/emencia-django-forum
forum/views/post.py
PostEditView.get_object
def get_object(self, *args, **kwargs): """ Should memoize the object to avoid multiple query if get_object is used many times in the view """ self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug']) return get_object_or_404(Post, thread__id=self.kwargs['thread_id'], thread__category=self.category_instance, pk=self.kwargs['post_id'])
python
def get_object(self, *args, **kwargs): """ Should memoize the object to avoid multiple query if get_object is used many times in the view """ self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug']) return get_object_or_404(Post, thread__id=self.kwargs['thread_id'], thread__category=self.category_instance, pk=self.kwargs['post_id'])
[ "def", "get_object", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "category_instance", "=", "get_object_or_404", "(", "Category", ",", "slug", "=", "self", ".", "kwargs", "[", "'category_slug'", "]", ")", "return", "get_object_or_404", "(", "Post", ",", "thread__id", "=", "self", ".", "kwargs", "[", "'thread_id'", "]", ",", "thread__category", "=", "self", ".", "category_instance", ",", "pk", "=", "self", ".", "kwargs", "[", "'post_id'", "]", ")" ]
Should memoize the object to avoid multiple queries if get_object is used many times in the view
[ "Should", "memoize", "the", "object", "to", "avoid", "multiple", "query", "if", "get_object", "is", "used", "many", "times", "in", "the", "view" ]
cda74ed7e5822675c340ee5ec71548d981bccd3b
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/views/post.py#L42-L47
248,960
eliasdorneles/jk
jk/__init__.py
each_cons
def each_cons(sequence, size): """Iterates lazily through a sequence looking at a sliding window with given size, for each time. each_cons([1, 2, 3, 4], 2) --> [(0, 1), (1, 2), (2, 3), (3, 4)] """ return zip(*(islice(it, start, None) for start, it in enumerate(tee(sequence, size))))
python
def each_cons(sequence, size): """Iterates lazily through a sequence looking at a sliding window with given size, for each time. each_cons([1, 2, 3, 4], 2) --> [(0, 1), (1, 2), (2, 3), (3, 4)] """ return zip(*(islice(it, start, None) for start, it in enumerate(tee(sequence, size))))
[ "def", "each_cons", "(", "sequence", ",", "size", ")", ":", "return", "zip", "(", "*", "(", "islice", "(", "it", ",", "start", ",", "None", ")", "for", "start", ",", "it", "in", "enumerate", "(", "tee", "(", "sequence", ",", "size", ")", ")", ")", ")" ]
Iterates lazily through a sequence, looking at a sliding window of the given size each time. each_cons([1, 2, 3, 4], 2) --> [(1, 2), (2, 3), (3, 4)]
[ "Iterates", "lazily", "through", "a", "sequence", "looking", "at", "a", "sliding", "window", "with", "given", "size", "for", "each", "time", "." ]
bf28563cc80e380cc51e837b8ae9a1b0b0ab8704
https://github.com/eliasdorneles/jk/blob/bf28563cc80e380cc51e837b8ae9a1b0b0ab8704/jk/__init__.py#L29-L36
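each_cons tees the sequence `size` times and offsets each copy with islice, so zip produces the overlapping windows lazily (on Python 3, zip itself is an iterator, so nothing is materialized until consumed). A quick check:

    from itertools import islice, tee

    def each_cons(sequence, size):
        return zip(*(islice(it, start, None)
                     for start, it in enumerate(tee(sequence, size))))

    print(list(each_cons([1, 2, 3, 4], 2)))  # [(1, 2), (2, 3), (3, 4)]
    print(list(each_cons('abcd', 3)))        # [('a', 'b', 'c'), ('b', 'c', 'd')]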
248,961
smetj/wishbone-decode-perfdata
wishbone_decode_perfdata/perfdata.py
PerfData.__filter
def __filter(self, name): '''Filter out problematic characters. This should become a separate module allowing the user to define filter rules from a bootstrap file and most likely become a separate module. ''' name = name.replace("'", '') name = name.replace('"', '') name = name.replace('!(null)', '') name = name.replace(" ", "_") name = name.replace("/", "_") name = name.replace(".", "_") return name.lower()
python
def __filter(self, name): '''Filter out problematic characters. This should become a separate module allowing the user to define filter rules from a bootstrap file and most likely become a separate module. ''' name = name.replace("'", '') name = name.replace('"', '') name = name.replace('!(null)', '') name = name.replace(" ", "_") name = name.replace("/", "_") name = name.replace(".", "_") return name.lower()
[ "def", "__filter", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "replace", "(", "\"'\"", ",", "''", ")", "name", "=", "name", ".", "replace", "(", "'\"'", ",", "''", ")", "name", "=", "name", ".", "replace", "(", "'!(null)'", ",", "''", ")", "name", "=", "name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "name", "=", "name", ".", "replace", "(", "\"/\"", ",", "\"_\"", ")", "name", "=", "name", ".", "replace", "(", "\".\"", ",", "\"_\"", ")", "return", "name", ".", "lower", "(", ")" ]
Filter out problematic characters. This should most likely become a separate module allowing the user to define filter rules from a bootstrap file.
[ "Filter", "out", "problematic", "characters", "." ]
70dbbc7c27055c730db61de0a8906f3acbad9532
https://github.com/smetj/wishbone-decode-perfdata/blob/70dbbc7c27055c730db61de0a8906f3acbad9532/wishbone_decode_perfdata/perfdata.py#L140-L153
248,962
vuolter/autoupgrade
autoupgrade/package.py
Package.check
def check(self): """ Check if pkg has a later version Returns true if later version exists """ current = self._get_current() highest = self._get_highest_version() return highest > current
python
def check(self): """ Check if pkg has a later version Returns true if later version exists """ current = self._get_current() highest = self._get_highest_version() return highest > current
[ "def", "check", "(", "self", ")", ":", "current", "=", "self", ".", "_get_current", "(", ")", "highest", "=", "self", ".", "_get_highest_version", "(", ")", "return", "highest", ">", "current" ]
Check if pkg has a later version. Returns true if a later version exists.
[ "Check", "if", "pkg", "has", "a", "later", "version", "Returns", "true", "if", "later", "version", "exists" ]
e34aca9eacd6a6f5c7a7634a67c2ee911d48ac68
https://github.com/vuolter/autoupgrade/blob/e34aca9eacd6a6f5c7a7634a67c2ee911d48ac68/autoupgrade/package.py#L115-L122
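check compares whatever _get_current and _get_highest_version return with `>`, which is only meaningful if those are proper version objects rather than plain strings. A hedged sketch of a safe comparison using the packaging library; this is an assumption, since the record does not show how the versions are obtained:

    from packaging.version import Version  # pip install packaging

    current, highest = Version('1.9.0'), Version('1.10.0')
    print(highest > current)       # True: compared component by component
    print('1.10.0' > '1.9.0')      # False: naive string comparison misleads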
248,963
thomasvandoren/bugzscout-py
doc/example/src/simple_wsgi.py
bugzscout_app
def bugzscout_app(environ, start_response): """Simple WSGI application that returns 200 OK response with 'Hellow world!' in the body. If an uncaught exception is thrown, it is reported to BugzScout. :param environ: WSGI environ :param start_response: function that accepts status string and headers """ try: start_response('200 OK', [('content-type', 'text/html')]) return ['Hellow world!'] except Exception as ex: # Set the description to a familiar string with the exception # message. Add the stack trace to extra. b.submit_error('An error occurred in MyApp: {0}'.format(ex.message), extra=traceback.extract_tb(*sys.exc_info())) # Reraise the exception. raise ex
python
def bugzscout_app(environ, start_response): """Simple WSGI application that returns 200 OK response with 'Hellow world!' in the body. If an uncaught exception is thrown, it is reported to BugzScout. :param environ: WSGI environ :param start_response: function that accepts status string and headers """ try: start_response('200 OK', [('content-type', 'text/html')]) return ['Hellow world!'] except Exception as ex: # Set the description to a familiar string with the exception # message. Add the stack trace to extra. b.submit_error('An error occurred in MyApp: {0}'.format(ex.message), extra=traceback.extract_tb(*sys.exc_info())) # Reraise the exception. raise ex
[ "def", "bugzscout_app", "(", "environ", ",", "start_response", ")", ":", "try", ":", "start_response", "(", "'200 OK'", ",", "[", "(", "'content-type'", ",", "'text/html'", ")", "]", ")", "return", "[", "'Hellow world!'", "]", "except", "Exception", "as", "ex", ":", "# Set the description to a familiar string with the exception", "# message. Add the stack trace to extra.", "b", ".", "submit_error", "(", "'An error occurred in MyApp: {0}'", ".", "format", "(", "ex", ".", "message", ")", ",", "extra", "=", "traceback", ".", "extract_tb", "(", "*", "sys", ".", "exc_info", "(", ")", ")", ")", "# Reraise the exception.", "raise", "ex" ]
Simple WSGI application that returns 200 OK response with 'Hellow world!' in the body. If an uncaught exception is thrown, it is reported to BugzScout. :param environ: WSGI environ :param start_response: function that accepts status string and headers
[ "Simple", "WSGI", "application", "that", "returns", "200", "OK", "response", "with", "Hellow", "world!", "in", "the", "body", ".", "If", "an", "uncaught", "exception", "is", "thrown", "it", "is", "reported", "to", "BugzScout", "." ]
514528e958a97e0e7b36870037c5c69661511824
https://github.com/thomasvandoren/bugzscout-py/blob/514528e958a97e0e7b36870037c5c69661511824/doc/example/src/simple_wsgi.py#L35-L53
248,964
openpermissions/chub
chub/oauth2.py
RequestToken._request
def _request(self, base_url, client_id, client_secret, parameters, **kwargs): """Make an API request to get the token""" logging.debug('Getting an OAuth token for client "%s" with scope "%s"', client_id, parameters.get('scope')) headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'} api = API(base_url, auth_username=client_id, auth_password=client_secret, **kwargs) endpoint = api.auth.token response = yield endpoint.post(body=urllib.urlencode(parameters), request_timeout=60, headers=headers) logging.debug('Received token: %s', response.get('access_token')) raise Return(response)
python
def _request(self, base_url, client_id, client_secret, parameters, **kwargs): """Make an API request to get the token""" logging.debug('Getting an OAuth token for client "%s" with scope "%s"', client_id, parameters.get('scope')) headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'} api = API(base_url, auth_username=client_id, auth_password=client_secret, **kwargs) endpoint = api.auth.token response = yield endpoint.post(body=urllib.urlencode(parameters), request_timeout=60, headers=headers) logging.debug('Received token: %s', response.get('access_token')) raise Return(response)
[ "def", "_request", "(", "self", ",", "base_url", ",", "client_id", ",", "client_secret", ",", "parameters", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "'Getting an OAuth token for client \"%s\" with scope \"%s\"'", ",", "client_id", ",", "parameters", ".", "get", "(", "'scope'", ")", ")", "headers", "=", "{", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", ",", "'Accept'", ":", "'application/json'", "}", "api", "=", "API", "(", "base_url", ",", "auth_username", "=", "client_id", ",", "auth_password", "=", "client_secret", ",", "*", "*", "kwargs", ")", "endpoint", "=", "api", ".", "auth", ".", "token", "response", "=", "yield", "endpoint", ".", "post", "(", "body", "=", "urllib", ".", "urlencode", "(", "parameters", ")", ",", "request_timeout", "=", "60", ",", "headers", "=", "headers", ")", "logging", ".", "debug", "(", "'Received token: %s'", ",", "response", ".", "get", "(", "'access_token'", ")", ")", "raise", "Return", "(", "response", ")" ]
Make an API request to get the token
[ "Make", "an", "API", "request", "to", "get", "the", "token" ]
00762aa17015f4b3010673d1570c708eab3c34ed
https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/oauth2.py#L227-L246
248,965
openpermissions/chub
chub/oauth2.py
RequestToken._cached_request
def _cached_request(self, base_url, client_id, client_secret, parameters, **kwargs): """Cache the token request and use cached responses if available""" key = (base_url, client_id, tuple(parameters.items())) cached = self._cache.get(key, {}) if not cached.get('access_token') or self._expired(cached): cached = yield self._request(base_url, client_id, client_secret, parameters, **kwargs) self._cache[key] = cached # Purge cache when adding a new item so it doesn't grow too large # It's assumed the cache size is small enough that it's OK to loop # over the whole cache regularly. If not, could change this to # just pop off the oldest one self.purge_cache() logging.debug('Using a cached token: %s', cached.get('access_token')) raise Return(cached)
python
def _cached_request(self, base_url, client_id, client_secret, parameters, **kwargs): """Cache the token request and use cached responses if available""" key = (base_url, client_id, tuple(parameters.items())) cached = self._cache.get(key, {}) if not cached.get('access_token') or self._expired(cached): cached = yield self._request(base_url, client_id, client_secret, parameters, **kwargs) self._cache[key] = cached # Purge cache when adding a new item so it doesn't grow too large # It's assumed the cache size is small enough that it's OK to loop # over the whole cache regularly. If not, could change this to # just pop off the oldest one self.purge_cache() logging.debug('Using a cached token: %s', cached.get('access_token')) raise Return(cached)
[ "def", "_cached_request", "(", "self", ",", "base_url", ",", "client_id", ",", "client_secret", ",", "parameters", ",", "*", "*", "kwargs", ")", ":", "key", "=", "(", "base_url", ",", "client_id", ",", "tuple", "(", "parameters", ".", "items", "(", ")", ")", ")", "cached", "=", "self", ".", "_cache", ".", "get", "(", "key", ",", "{", "}", ")", "if", "not", "cached", ".", "get", "(", "'access_token'", ")", "or", "self", ".", "_expired", "(", "cached", ")", ":", "cached", "=", "yield", "self", ".", "_request", "(", "base_url", ",", "client_id", ",", "client_secret", ",", "parameters", ",", "*", "*", "kwargs", ")", "self", ".", "_cache", "[", "key", "]", "=", "cached", "# Purge cache when adding a new item so it doesn't grow too large", "# It's assumed the cache size is small enough that it's OK to loop", "# over the whole cache regularly. If not, could change this to", "# just pop off the oldest one", "self", ".", "purge_cache", "(", ")", "logging", ".", "debug", "(", "'Using a cached token: %s'", ",", "cached", ".", "get", "(", "'access_token'", ")", ")", "raise", "Return", "(", "cached", ")" ]
Cache the token request and use cached responses if available
[ "Cache", "the", "token", "request", "and", "use", "cached", "responses", "if", "available" ]
00762aa17015f4b3010673d1570c708eab3c34ed
https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/oauth2.py#L257-L274
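The token cache above is keyed on (base_url, client_id, parameters) and refreshed whenever the entry is missing or expired. A minimal dictionary-based sketch of that check, assuming an 'expiry' timestamp in each cached entry; the record does not show _expired, so that part is guessed:

    import time

    cache = {}

    def get_token(base_url, client_id, parameters, fetch):
        # dicts preserve insertion order on Python 3.7+, so the key is stable
        key = (base_url, client_id, tuple(parameters.items()))
        cached = cache.get(key, {})
        if not cached.get('access_token') or cached.get('expiry', 0) <= time.time():
            cached = fetch()          # stands in for the real _request call
            cache[key] = cached
        return cached

    tok = get_token('https://auth', 'me', {'scope': 'read'},
                    lambda: {'access_token': 'abc', 'expiry': time.time() + 60})
    print(tok['access_token'])  # 'abc'; a second call reuses the cached entry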
248,966
openpermissions/chub
chub/oauth2.py
RequestToken.purge_cache
def purge_cache(self): """ Purge expired cached tokens and oldest tokens if more than cache_size """ if len(self._cache) > self.max_cache_size: items = sorted(self._cache.items(), key=lambda (k, v): v['expiry']) self._cache = {k: v for k, v in items[self.max_cache_size:] if not self._expired(v)}
python
def purge_cache(self): """ Purge expired cached tokens and oldest tokens if more than cache_size """ if len(self._cache) > self.max_cache_size: items = sorted(self._cache.items(), key=lambda (k, v): v['expiry']) self._cache = {k: v for k, v in items[self.max_cache_size:] if not self._expired(v)}
[ "def", "purge_cache", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_cache", ")", ">", "self", ".", "max_cache_size", ":", "items", "=", "sorted", "(", "self", ".", "_cache", ".", "items", "(", ")", ",", "key", "=", "lambda", "(", "k", ",", "v", ")", ":", "v", "[", "'expiry'", "]", ")", "self", ".", "_cache", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "items", "[", "self", ".", "max_cache_size", ":", "]", "if", "not", "self", ".", "_expired", "(", "v", ")", "}" ]
Purge expired cached tokens and oldest tokens if more than cache_size
[ "Purge", "expired", "cached", "tokens", "and", "oldest", "tokens", "if", "more", "than", "cache_size" ]
00762aa17015f4b3010673d1570c708eab3c34ed
https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/oauth2.py#L280-L287
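purge_cache uses `lambda (k, v): v['expiry']`, Python 2 tuple-parameter unpacking that is a SyntaxError on Python 3. The equivalent Python 3 form indexes the pair instead; for instance, mirroring the record's "sort by expiry, drop the oldest, keep only unexpired" logic:

    import time

    cache = {'a': {'expiry': time.time() - 1},    # already expired
             'b': {'expiry': time.time() + 60},
             'c': {'expiry': time.time() + 120}}
    max_size = 1

    items = sorted(cache.items(), key=lambda kv: kv[1]['expiry'])  # oldest first
    cache = {k: v for k, v in items[max_size:]
             if v['expiry'] > time.time()}
    print(sorted(cache))  # ['b', 'c'] survive the purge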
248,967
totokaka/pySpaceGDN
pyspacegdn/spacegdn.py
SpaceGDN._create_user_agent
def _create_user_agent(self): """ Create the user agent and return it as a string. """ user_agent = '{}/{} {}'.format(pyspacegdn.__title__, pyspacegdn.__version__, default_user_agent()) if self.client_name: user_agent = '{}/{} {}'.format(self.client_name, self.client_version, user_agent) return user_agent
python
def _create_user_agent(self): """ Create the user agent and return it as a string. """ user_agent = '{}/{} {}'.format(pyspacegdn.__title__, pyspacegdn.__version__, default_user_agent()) if self.client_name: user_agent = '{}/{} {}'.format(self.client_name, self.client_version, user_agent) return user_agent
[ "def", "_create_user_agent", "(", "self", ")", ":", "user_agent", "=", "'{}/{} {}'", ".", "format", "(", "pyspacegdn", ".", "__title__", ",", "pyspacegdn", ".", "__version__", ",", "default_user_agent", "(", ")", ")", "if", "self", ".", "client_name", ":", "user_agent", "=", "'{}/{} {}'", ".", "format", "(", "self", ".", "client_name", ",", "self", ".", "client_version", ",", "user_agent", ")", "return", "user_agent" ]
Create the user agent and return it as a string.
[ "Create", "the", "user", "agent", "and", "return", "it", "as", "a", "string", "." ]
55c8be8d751e24873e0a7f7e99d2b715442ec878
https://github.com/totokaka/pySpaceGDN/blob/55c8be8d751e24873e0a7f7e99d2b715442ec878/pyspacegdn/spacegdn.py#L64-L72
248,968
yv/pathconfig
py_src/pathconfig/config.py
load_configuration
def load_configuration(app_name): ''' creates a new configuration and loads the appropriate files. ''' if sys.prefix == '/usr': conf_dir = '/etc' share_dir = '/usr/share' else: conf_dir = os.path.join(sys.prefix, 'etc') share_dir = os.path.join(sys.prefix, 'share') # Step 1: try to locate pynlp.yml yml_config = {} for fname in [ '%s.yml'%(app_name,), os.path.expanduser('~/.%s.yml'%(app_name,)), os.path.join(conf_dir, '%s.yml'%(app_name,))]: if os.path.exists(fname): yml_config = yaml.load(open(fname)) break try: data_dir = yml_config['paths']['data_dir'] except KeyError: try: data_dir = os.environ[app_name.upper()] except KeyError: data_dir = os.path.join(share_dir, app_name) return AppContext(yml_config, data_dir)
python
def load_configuration(app_name): ''' creates a new configuration and loads the appropriate files. ''' if sys.prefix == '/usr': conf_dir = '/etc' share_dir = '/usr/share' else: conf_dir = os.path.join(sys.prefix, 'etc') share_dir = os.path.join(sys.prefix, 'share') # Step 1: try to locate pynlp.yml yml_config = {} for fname in [ '%s.yml'%(app_name,), os.path.expanduser('~/.%s.yml'%(app_name,)), os.path.join(conf_dir, '%s.yml'%(app_name,))]: if os.path.exists(fname): yml_config = yaml.load(open(fname)) break try: data_dir = yml_config['paths']['data_dir'] except KeyError: try: data_dir = os.environ[app_name.upper()] except KeyError: data_dir = os.path.join(share_dir, app_name) return AppContext(yml_config, data_dir)
[ "def", "load_configuration", "(", "app_name", ")", ":", "if", "sys", ".", "prefix", "==", "'/usr'", ":", "conf_dir", "=", "'/etc'", "share_dir", "=", "'/usr/share'", "else", ":", "conf_dir", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "prefix", ",", "'etc'", ")", "share_dir", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "prefix", ",", "'share'", ")", "# Step 1: try to locate pynlp.yml", "yml_config", "=", "{", "}", "for", "fname", "in", "[", "'%s.yml'", "%", "(", "app_name", ",", ")", ",", "os", ".", "path", ".", "expanduser", "(", "'~/.%s.yml'", "%", "(", "app_name", ",", ")", ")", ",", "os", ".", "path", ".", "join", "(", "conf_dir", ",", "'%s.yml'", "%", "(", "app_name", ",", ")", ")", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "fname", ")", ":", "yml_config", "=", "yaml", ".", "load", "(", "open", "(", "fname", ")", ")", "break", "try", ":", "data_dir", "=", "yml_config", "[", "'paths'", "]", "[", "'data_dir'", "]", "except", "KeyError", ":", "try", ":", "data_dir", "=", "os", ".", "environ", "[", "app_name", ".", "upper", "(", ")", "]", "except", "KeyError", ":", "data_dir", "=", "os", ".", "path", ".", "join", "(", "share_dir", ",", "app_name", ")", "return", "AppContext", "(", "yml_config", ",", "data_dir", ")" ]
creates a new configuration and loads the appropriate files.
[ "creates", "a", "new", "configuration", "and", "loads", "the", "appropriate", "files", "." ]
ae13901773b8465061e2aa93b2a53fd436ab6c69
https://github.com/yv/pathconfig/blob/ae13901773b8465061e2aa93b2a53fd436ab6c69/py_src/pathconfig/config.py#L89-L116
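load_configuration probes the working directory, the home directory, and the prefix config directory in that order, then falls back to an environment variable for data_dir. Two details worth a sketch: bare yaml.load is deprecated and unsafe on modern PyYAML (prefer yaml.safe_load), and the open() handle is never closed. A hedged variant of the lookup loop:

    import os
    import yaml  # pip install pyyaml

    def first_config(app_name, conf_dir='/etc'):
        candidates = ['%s.yml' % app_name,
                      os.path.expanduser('~/.%s.yml' % app_name),
                      os.path.join(conf_dir, '%s.yml' % app_name)]
        for fname in candidates:
            if os.path.exists(fname):
                with open(fname) as f:           # closed deterministically
                    return yaml.safe_load(f)     # safe_load: no arbitrary objects
        return {}

    print(first_config('definitely-missing-app'))  # {} when nothing is found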
248,969
hirokiky/uiro
uiro/view.py
get_base_wrappers
def get_base_wrappers(method='get', template_name='', predicates=(), wrappers=()): """ basic View Wrappers used by view_config. """ wrappers += (preserve_view(MethodPredicate(method), *predicates),) if template_name: wrappers += (render_template(template_name),) return wrappers
python
def get_base_wrappers(method='get', template_name='', predicates=(), wrappers=()): """ basic View Wrappers used by view_config. """ wrappers += (preserve_view(MethodPredicate(method), *predicates),) if template_name: wrappers += (render_template(template_name),) return wrappers
[ "def", "get_base_wrappers", "(", "method", "=", "'get'", ",", "template_name", "=", "''", ",", "predicates", "=", "(", ")", ",", "wrappers", "=", "(", ")", ")", ":", "wrappers", "+=", "(", "preserve_view", "(", "MethodPredicate", "(", "method", ")", ",", "*", "predicates", ")", ",", ")", "if", "template_name", ":", "wrappers", "+=", "(", "render_template", "(", "template_name", ")", ",", ")", "return", "wrappers" ]
basic View Wrappers used by view_config.
[ "basic", "View", "Wrappers", "used", "by", "view_config", "." ]
8436976b21ac9b0eac4243768f5ada12479b9e00
https://github.com/hirokiky/uiro/blob/8436976b21ac9b0eac4243768f5ada12479b9e00/uiro/view.py#L14-L22
248,970
hirokiky/uiro
uiro/view.py
view_config
def view_config( method='get', template_name='', predicates=(), wrappers=(), base_wrappers_getter=get_base_wrappers, ): """ Creating Views applied some configurations and store it to _wrapped attribute on each Views. * _wrapped expects to be called by Controller (subclasses of uiro.controller.BaseController) * The original view will not be affected by this decorator. """ wrappers = base_wrappers_getter(method, template_name, predicates, wrappers) def wrapper(view_callable): def _wrapped(*args, **kwargs): return reduce( lambda a, b: b(a), reversed(wrappers + (view_callable,)) )(*args, **kwargs) view_callable._wrapped = _wrapped view_callable._order = next(_counter) return view_callable return wrapper
python
def view_config( method='get', template_name='', predicates=(), wrappers=(), base_wrappers_getter=get_base_wrappers, ): """ Creating Views applied some configurations and store it to _wrapped attribute on each Views. * _wrapped expects to be called by Controller (subclasses of uiro.controller.BaseController) * The original view will not be affected by this decorator. """ wrappers = base_wrappers_getter(method, template_name, predicates, wrappers) def wrapper(view_callable): def _wrapped(*args, **kwargs): return reduce( lambda a, b: b(a), reversed(wrappers + (view_callable,)) )(*args, **kwargs) view_callable._wrapped = _wrapped view_callable._order = next(_counter) return view_callable return wrapper
[ "def", "view_config", "(", "method", "=", "'get'", ",", "template_name", "=", "''", ",", "predicates", "=", "(", ")", ",", "wrappers", "=", "(", ")", ",", "base_wrappers_getter", "=", "get_base_wrappers", ",", ")", ":", "wrappers", "=", "base_wrappers_getter", "(", "method", ",", "template_name", ",", "predicates", ",", "wrappers", ")", "def", "wrapper", "(", "view_callable", ")", ":", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "reduce", "(", "lambda", "a", ",", "b", ":", "b", "(", "a", ")", ",", "reversed", "(", "wrappers", "+", "(", "view_callable", ",", ")", ")", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "view_callable", ".", "_wrapped", "=", "_wrapped", "view_callable", ".", "_order", "=", "next", "(", "_counter", ")", "return", "view_callable", "return", "wrapper" ]
Create Views with the given configurations applied and store the result in the _wrapped attribute on each View. * _wrapped expects to be called by a Controller (subclasses of uiro.controller.BaseController) * The original view will not be affected by this decorator.
[ "Creating", "Views", "applied", "some", "configurations", "and", "store", "it", "to", "_wrapped", "attribute", "on", "each", "Views", "." ]
8436976b21ac9b0eac4243768f5ada12479b9e00
https://github.com/hirokiky/uiro/blob/8436976b21ac9b0eac4243768f5ada12479b9e00/uiro/view.py#L28-L53
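The reduce(lambda a, b: b(a), reversed(wrappers + (view_callable,))) line composes decorators so that the first wrapper in the tuple ends up outermost. A tiny standalone demonstration of that folding order:

    from functools import reduce

    def shout(fn):
        return lambda *a, **kw: fn(*a, **kw).upper()

    def exclaim(fn):
        return lambda *a, **kw: fn(*a, **kw) + '!'

    def view():
        return 'hello'

    wrappers = (shout, exclaim)
    composed = reduce(lambda acc, wrap: wrap(acc), reversed(wrappers + (view,)))
    print(composed())  # 'HELLO!': exclaim runs inside, shout outside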
248,971
hirokiky/uiro
uiro/view.py
preserve_view
def preserve_view(*predicates): """ Raising ViewNotMatched when applied request was not apposite. preserve_view calls all Predicates and when return values of them was all True it will call a wrapped view. It raises ViewNotMatched if this is not the case. Predicates: This decorator takes Predicates one or more, Predicate is callable to return True or False in response to inputted request. If the request was apposite it should return True. """ def wrapper(view_callable): def _wrapped(self, request, context, *args, **kwargs): if all([predicate(request, context) for predicate in predicates]): return view_callable(self, request, context, *args, **kwargs) else: raise ViewNotMatched return _wrapped return wrapper
python
def preserve_view(*predicates): """ Raising ViewNotMatched when applied request was not apposite. preserve_view calls all Predicates and when return values of them was all True it will call a wrapped view. It raises ViewNotMatched if this is not the case. Predicates: This decorator takes Predicates one or more, Predicate is callable to return True or False in response to inputted request. If the request was apposite it should return True. """ def wrapper(view_callable): def _wrapped(self, request, context, *args, **kwargs): if all([predicate(request, context) for predicate in predicates]): return view_callable(self, request, context, *args, **kwargs) else: raise ViewNotMatched return _wrapped return wrapper
[ "def", "preserve_view", "(", "*", "predicates", ")", ":", "def", "wrapper", "(", "view_callable", ")", ":", "def", "_wrapped", "(", "self", ",", "request", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "all", "(", "[", "predicate", "(", "request", ",", "context", ")", "for", "predicate", "in", "predicates", "]", ")", ":", "return", "view_callable", "(", "self", ",", "request", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "ViewNotMatched", "return", "_wrapped", "return", "wrapper" ]
Raise ViewNotMatched when the applied request was not apposite. preserve_view calls all Predicates and, when their return values are all True, it will call the wrapped view. It raises ViewNotMatched if this is not the case. Predicates: This decorator takes one or more Predicates; a Predicate is a callable that returns True or False in response to the inputted request. If the request was apposite it should return True.
[ "Raising", "ViewNotMatched", "when", "applied", "request", "was", "not", "apposite", "." ]
8436976b21ac9b0eac4243768f5ada12479b9e00
https://github.com/hirokiky/uiro/blob/8436976b21ac9b0eac4243768f5ada12479b9e00/uiro/view.py#L56-L75
248,972
hirokiky/uiro
uiro/view.py
render_template
def render_template(template_name, template_getter=get_app_template): """ Decorator to specify which template to use for Wrapped Views. It will return string rendered by specified template and returned dictionary from wrapped views as a context for template. The returned value was not dictionary, it does nothing, just returns the result. """ def wrapper(func): template = template_getter(template_name) def _wraped(self, request, context, *args, **kwargs): res = func(self, request, context, *args, **kwargs) if isinstance(res, dict): return template.render(**res) else: return res return _wraped return wrapper
python
def render_template(template_name, template_getter=get_app_template): """ Decorator to specify which template to use for Wrapped Views. It will return string rendered by specified template and returned dictionary from wrapped views as a context for template. The returned value was not dictionary, it does nothing, just returns the result. """ def wrapper(func): template = template_getter(template_name) def _wraped(self, request, context, *args, **kwargs): res = func(self, request, context, *args, **kwargs) if isinstance(res, dict): return template.render(**res) else: return res return _wraped return wrapper
[ "def", "render_template", "(", "template_name", ",", "template_getter", "=", "get_app_template", ")", ":", "def", "wrapper", "(", "func", ")", ":", "template", "=", "template_getter", "(", "template_name", ")", "def", "_wraped", "(", "self", ",", "request", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "res", "=", "func", "(", "self", ",", "request", ",", "context", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "res", ",", "dict", ")", ":", "return", "template", ".", "render", "(", "*", "*", "res", ")", "else", ":", "return", "res", "return", "_wraped", "return", "wrapper" ]
Decorator to specify which template to use for Wrapped Views. It will return the string rendered by the specified template, using the dictionary returned from the wrapped view as the context for the template. If the returned value is not a dictionary, it does nothing and just returns the result.
[ "Decorator", "to", "specify", "which", "template", "to", "use", "for", "Wrapped", "Views", "." ]
8436976b21ac9b0eac4243768f5ada12479b9e00
https://github.com/hirokiky/uiro/blob/8436976b21ac9b0eac4243768f5ada12479b9e00/uiro/view.py#L91-L109
248,973
xtrementl/focus
focus/environment/io.py
IOStream.prompt
def prompt(self, prompt_msg=None, newline=False): """ Writes prompt message to output stream and reads line from standard input stream. `prompt_msg` Message to write. `newline` Append newline character to prompt message before writing. Return string. """ if prompt_msg is not None: self.write(prompt_msg, newline) return self._input.readline().rstrip(os.linesep)
python
def prompt(self, prompt_msg=None, newline=False): """ Writes prompt message to output stream and reads line from standard input stream. `prompt_msg` Message to write. `newline` Append newline character to prompt message before writing. Return string. """ if prompt_msg is not None: self.write(prompt_msg, newline) return self._input.readline().rstrip(os.linesep)
[ "def", "prompt", "(", "self", ",", "prompt_msg", "=", "None", ",", "newline", "=", "False", ")", ":", "if", "prompt_msg", "is", "not", "None", ":", "self", ".", "write", "(", "prompt_msg", ",", "newline", ")", "return", "self", ".", "_input", ".", "readline", "(", ")", ".", "rstrip", "(", "os", ".", "linesep", ")" ]
Writes prompt message to output stream and reads line from standard input stream. `prompt_msg` Message to write. `newline` Append newline character to prompt message before writing. Return string.
[ "Writes", "prompt", "message", "to", "output", "stream", "and", "reads", "line", "from", "standard", "input", "stream", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/io.py#L95-L110
248,974
xtrementl/focus
focus/environment/io.py
IOStream.write
def write(self, buf, newline=True): """ Writes buffer to output stream. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ buf = buf or '' if newline: buf += os.linesep try: self._output.write(buf) if hasattr(self._output, 'flush'): self._output.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise
python
def write(self, buf, newline=True): """ Writes buffer to output stream. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ buf = buf or '' if newline: buf += os.linesep try: self._output.write(buf) if hasattr(self._output, 'flush'): self._output.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise
[ "def", "write", "(", "self", ",", "buf", ",", "newline", "=", "True", ")", ":", "buf", "=", "buf", "or", "''", "if", "newline", ":", "buf", "+=", "os", ".", "linesep", "try", ":", "self", ".", "_output", ".", "write", "(", "buf", ")", "if", "hasattr", "(", "self", ".", "_output", ",", "'flush'", ")", ":", "self", ".", "_output", ".", "flush", "(", ")", "except", "IOError", "as", "exc", ":", "if", "exc", ".", "errno", "!=", "errno", ".", "EPIPE", ":", "# silence EPIPE errors", "raise" ]
Writes buffer to output stream. `buf` Data buffer to write. `newline` Append newline character to buffer before writing.
[ "Writes", "buffer", "to", "output", "stream", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/io.py#L112-L134
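The EPIPE guard matters when output is piped to a reader that exits early (e.g. `prog | head`); the silencing pattern in isolation, as a sketch:

```python
import errno
import os
import sys

def safe_write(stream, buf):
    # Ignore EPIPE (the reader went away); re-raise every other IOError.
    try:
        stream.write(buf + os.linesep)
        if hasattr(stream, 'flush'):
            stream.flush()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise

safe_write(sys.stdout, 'hello')
```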
248,975
xtrementl/focus
focus/environment/io.py
IOStream.success
def success(self, buf, newline=True): """ Same as `write`, but adds success coloring if enabled. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ if self._colored: buf = self.ESCAPE_GREEN + buf + self.ESCAPE_CLEAR self.write(buf, newline)
python
def success(self, buf, newline=True): """ Same as `write`, but adds success coloring if enabled. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ if self._colored: buf = self.ESCAPE_GREEN + buf + self.ESCAPE_CLEAR self.write(buf, newline)
[ "def", "success", "(", "self", ",", "buf", ",", "newline", "=", "True", ")", ":", "if", "self", ".", "_colored", ":", "buf", "=", "self", ".", "ESCAPE_GREEN", "+", "buf", "+", "self", ".", "ESCAPE_CLEAR", "self", ".", "write", "(", "buf", ",", "newline", ")" ]
Same as `write`, but adds success coloring if enabled. `buf` Data buffer to write. `newline` Append newline character to buffer before writing.
[ "Same", "as", "write", "but", "adds", "success", "coloring", "if", "enabled", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/io.py#L136-L148
248,976
xtrementl/focus
focus/environment/io.py
IOStream.error
def error(self, buf, newline=True): """ Similar to `write`, except it writes buffer to error stream. If coloring enabled, adds error coloring. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ buf = buf or '' if self._colored: buf = self.ESCAPE_RED + buf + self.ESCAPE_CLEAR if newline: buf += os.linesep try: self._error.write(buf) if hasattr(self._error, 'flush'): self._error.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise
python
def error(self, buf, newline=True): """ Similar to `write`, except it writes buffer to error stream. If coloring enabled, adds error coloring. `buf` Data buffer to write. `newline` Append newline character to buffer before writing. """ buf = buf or '' if self._colored: buf = self.ESCAPE_RED + buf + self.ESCAPE_CLEAR if newline: buf += os.linesep try: self._error.write(buf) if hasattr(self._error, 'flush'): self._error.flush() except IOError as exc: if exc.errno != errno.EPIPE: # silence EPIPE errors raise
[ "def", "error", "(", "self", ",", "buf", ",", "newline", "=", "True", ")", ":", "buf", "=", "buf", "or", "''", "if", "self", ".", "_colored", ":", "buf", "=", "self", ".", "ESCAPE_RED", "+", "buf", "+", "self", ".", "ESCAPE_CLEAR", "if", "newline", ":", "buf", "+=", "os", ".", "linesep", "try", ":", "self", ".", "_error", ".", "write", "(", "buf", ")", "if", "hasattr", "(", "self", ".", "_error", ",", "'flush'", ")", ":", "self", ".", "_error", ".", "flush", "(", ")", "except", "IOError", "as", "exc", ":", "if", "exc", ".", "errno", "!=", "errno", ".", "EPIPE", ":", "# silence EPIPE errors", "raise" ]
Similar to `write`, except it writes buffer to error stream. If coloring enabled, adds error coloring. `buf` Data buffer to write. `newline` Append newline character to buffer before writing.
[ "Similar", "to", "write", "except", "it", "writes", "buffer", "to", "error", "stream", ".", "If", "coloring", "enabled", "adds", "error", "coloring", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/environment/io.py#L150-L175
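The ESCAPE_* class attributes are not shown in these records; assuming typical ANSI codes, the coloring logic shared by `success` and `error` reduces to something like:

```python
import os
import sys

# Assumed ANSI values -- the real IOStream constants may differ.
ESCAPE_RED = '\033[31m'
ESCAPE_GREEN = '\033[32m'
ESCAPE_CLEAR = '\033[0m'

def colorize(buf, escape, colored=True):
    # Wrap the text in an escape sequence only when coloring is enabled.
    return escape + buf + ESCAPE_CLEAR if colored else buf

sys.stderr.write(colorize('something failed', ESCAPE_RED) + os.linesep)
sys.stdout.write(colorize('all good', ESCAPE_GREEN) + os.linesep)
```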
248,977
fprimex/plac_ini
plac_ini.py
config_conf
def config_conf(obj): "Extracts the configuration of the underlying ConfigParser from obj" # If we ever want to add some default options this is where to do that cfg = {} for name in dir(obj): if name in CONFIG_PARSER_CFG: # argument of ConfigParser cfg[name] = getattr(obj, name) return cfg
python
def config_conf(obj): "Extracts the configuration of the underlying ConfigParser from obj" # If we ever want to add some default options this is where to do that cfg = {} for name in dir(obj): if name in CONFIG_PARSER_CFG: # argument of ConfigParser cfg[name] = getattr(obj, name) return cfg
[ "def", "config_conf", "(", "obj", ")", ":", "# If we ever want to add some default options this is where to do that", "cfg", "=", "{", "}", "for", "name", "in", "dir", "(", "obj", ")", ":", "if", "name", "in", "CONFIG_PARSER_CFG", ":", "# argument of ConfigParser", "cfg", "[", "name", "]", "=", "getattr", "(", "obj", ",", "name", ")", "return", "cfg" ]
Extracts the configuration of the underlying ConfigParser from obj
[ "Extracts", "the", "configuration", "of", "the", "underlying", "ConfigParser", "from", "obj" ]
3f343d88326df9e5faf3a77fcffdd4ca3f5bec04
https://github.com/fprimex/plac_ini/blob/3f343d88326df9e5faf3a77fcffdd4ca3f5bec04/plac_ini.py#L30-L37
248,978
fprimex/plac_ini
plac_ini.py
add_gnu_argument
def add_gnu_argument(self, *args, **kwargs): "Prevent the addition of any single hyphen, multiple letter args" gnu_args = [] for arg in args: # Fix if we have more than 3 chars where the first is a hyphen # and the second is not a hyphen (e.g. -out becomes --out) if len(arg) > 3 and arg[0] == '-' and arg[1] != '-': gnu_args.append('-' + arg) else: gnu_args.append(arg) argparse.ArgumentParser.add_argument(self, *gnu_args, **kwargs)
python
def add_gnu_argument(self, *args, **kwargs): "Prevent the addition of any single hyphen, multiple letter args" gnu_args = [] for arg in args: # Fix if we have more than 3 chars where the first is a hyphen # and the second is not a hyphen (e.g. -out becomes --out) if len(arg) > 3 and arg[0] == '-' and arg[1] != '-': gnu_args.append('-' + arg) else: gnu_args.append(arg) argparse.ArgumentParser.add_argument(self, *gnu_args, **kwargs)
[ "def", "add_gnu_argument", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "gnu_args", "=", "[", "]", "for", "arg", "in", "args", ":", "# Fix if we have more than 3 chars where the first is a hyphen", "# and the second is not a hyphen (e.g. -out becomes --out)", "if", "len", "(", "arg", ")", ">", "3", "and", "arg", "[", "0", "]", "==", "'-'", "and", "arg", "[", "1", "]", "!=", "'-'", ":", "gnu_args", ".", "append", "(", "'-'", "+", "arg", ")", "else", ":", "gnu_args", ".", "append", "(", "arg", ")", "argparse", ".", "ArgumentParser", ".", "add_argument", "(", "self", ",", "*", "gnu_args", ",", "*", "*", "kwargs", ")" ]
Prevent the addition of any single hyphen, multiple letter args
[ "Prevent", "the", "addition", "of", "any", "single", "hyphen", "multiple", "letter", "args" ]
3f343d88326df9e5faf3a77fcffdd4ca3f5bec04
https://github.com/fprimex/plac_ini/blob/3f343d88326df9e5faf3a77fcffdd4ca3f5bec04/plac_ini.py#L81-L94
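Since the guard is `len(arg) > 3`, an option needs a hyphen plus at least three letters before it is rewritten; `-op` itself is left alone. A self-contained sketch of the same rewrite, subclassing a plain ArgumentParser (the class name is illustrative):

```python
import argparse

class GnuArgumentParser(argparse.ArgumentParser):
    def add_argument(self, *args, **kwargs):
        # Rewrite single-hyphen multi-letter options as GNU-style long
        # options, mirroring add_gnu_argument above (-output -> --output).
        fixed = ['-' + arg if len(arg) > 3 and arg[0] == '-' and arg[1] != '-'
                 else arg
                 for arg in args]
        return super().add_argument(*fixed, **kwargs)

parser = GnuArgumentParser()
parser.add_argument('-output')                    # registered as --output
print(parser.parse_args(['--output', 'x.txt']))   # Namespace(output='x.txt')
```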
248,979
ipconfiger/result2
example.py
get_valid_user_by_email
def get_valid_user_by_email(email): """ Return user instance """ user = get_user(email) if user: if user.valid is False: return Err("user not valid") return Ok(user) return Err("user not exists")
python
def get_valid_user_by_email(email): """ Return user instance """ user = get_user(email) if user: if user.valid is False: return Err("user not valid") return Ok(user) return Err("user not exists")
[ "def", "get_valid_user_by_email", "(", "email", ")", ":", "user", "=", "get_user", "(", "email", ")", "if", "user", ":", "if", "user", ".", "valid", "is", "False", ":", "return", "Err", "(", "\"user not valid\"", ")", "return", "Ok", "(", "user", ")", "return", "Err", "(", "\"user not exists\"", ")" ]
Return user instance
[ "Return", "user", "instance" ]
7e05054cefd051bd5ae3d3199348c988af4bac7c
https://github.com/ipconfiger/result2/blob/7e05054cefd051bd5ae3d3199348c988af4bac7c/example.py#L6-L15
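This is the Ok/Err result pattern; a self-contained sketch with toy stand-ins (the real result2 types may expose a richer API than the tuple shape assumed here, and `get_user` is invented for the demo):

```python
class Ok(tuple):
    """Toy success wrapper: unpacks as (True, value)."""
    def __new__(cls, value):
        return super().__new__(cls, (True, value))

class Err(tuple):
    """Toy failure wrapper: unpacks as (False, reason)."""
    def __new__(cls, reason):
        return super().__new__(cls, (False, reason))

def get_user(email):
    # Hypothetical lookup standing in for the example's get_user().
    return {'email': email, 'valid': '@' in email} if email else None

def get_valid_user_by_email(email):
    user = get_user(email)
    if user:
        if user['valid'] is False:
            return Err('user not valid')
        return Ok(user)
    return Err('user not exists')

ok, payload = get_valid_user_by_email('a@example.com')
print(ok, payload)   # True {'email': 'a@example.com', 'valid': True}
```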
248,980
tBaxter/django-fretboard
fretboard/templatetags/truncatechars.py
truncatechars
def truncatechars(value,arg=50): ''' Takes a string and truncates it to the requested amount, by inserting an ellipsis into the middle. ''' arg = int(arg) if arg < len(value): half = (arg-3)/2 return "%s...%s" % (value[:half],value[-half:]) return value
python
def truncatechars(value,arg=50): ''' Takes a string and truncates it to the requested amount, by inserting an ellipsis into the middle. ''' arg = int(arg) if arg < len(value): half = (arg-3)/2 return "%s...%s" % (value[:half],value[-half:]) return value
[ "def", "truncatechars", "(", "value", ",", "arg", "=", "50", ")", ":", "arg", "=", "int", "(", "arg", ")", "if", "arg", "<", "len", "(", "value", ")", ":", "half", "=", "(", "arg", "-", "3", ")", "/", "2", "return", "\"%s...%s\"", "%", "(", "value", "[", ":", "half", "]", ",", "value", "[", "-", "half", ":", "]", ")", "return", "value" ]
Takes a string and truncates it to the requested amount, by inserting an ellipsis into the middle.
[ "Takes", "a", "string", "and", "truncates", "it", "to", "the", "requested", "amount", "by", "inserting", "an", "ellipsis", "into", "the", "middle", "." ]
3c3f9557089821283f315a07f3e5a57a2725ab3b
https://github.com/tBaxter/django-fretboard/blob/3c3f9557089821283f315a07f3e5a57a2725ab3b/fretboard/templatetags/truncatechars.py#L5-L13
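One caveat: under Python 3 the division `(arg-3)/2` yields a float, and float slice indices raise TypeError. A Python-3-safe variant of the same middle-truncation (a sketch, not the library's code):

```python
def truncatechars(value, arg=50):
    # Floor division keeps the slice indices integral on Python 3.
    arg = int(arg)
    if arg < len(value):
        half = (arg - 3) // 2
        return "%s...%s" % (value[:half], value[-half:])
    return value

assert truncatechars('abcdefghijklmnop', 9) == 'abc...nop'
assert truncatechars('short') == 'short'
```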
248,981
insilicolife/micti
build/lib/MICTI/GM.py
GM.logpdf_diagonal_gaussian
def logpdf_diagonal_gaussian(self, x, mean, cov): ''' Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x. A multivariate Gaussian distribution with a diagonal covariance is equivalent to a collection of independent Gaussian random variables. x should be a sparse matrix. The logpdf will be computed for each row of x. mean and cov should be given as 1D numpy arrays mean[i] : mean of i-th variable cov[i] : variance of i-th variable''' n = x.shape[0] dim = x.shape[1] assert(dim == len(mean) and dim == len(cov)) # multiply each i-th column of x by (1/(2*sigma_i)), where sigma_i is sqrt of variance of i-th variable. scaled_x = x.dot(self.diag(1./(2*np.sqrt(cov))) ) # multiply each i-th entry of mean by (1/(2*sigma_i)) scaled_mean = mean/(2*np.sqrt(cov)) # sum of pairwise squared Euclidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)] return -np.sum(np.log(np.sqrt(2*np.pi*cov))) - pairwise_distances(scaled_x, [scaled_mean], 'euclidean').flatten()**2
python
def logpdf_diagonal_gaussian(self, x, mean, cov): ''' Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x. A multivariate Gaussian distribution with a diagonal covariance is equivalent to a collection of independent Gaussian random variables. x should be a sparse matrix. The logpdf will be computed for each row of x. mean and cov should be given as 1D numpy arrays mean[i] : mean of i-th variable cov[i] : variance of i-th variable''' n = x.shape[0] dim = x.shape[1] assert(dim == len(mean) and dim == len(cov)) # multiply each i-th column of x by (1/(2*sigma_i)), where sigma_i is sqrt of variance of i-th variable. scaled_x = x.dot(self.diag(1./(2*np.sqrt(cov))) ) # multiply each i-th entry of mean by (1/(2*sigma_i)) scaled_mean = mean/(2*np.sqrt(cov)) # sum of pairwise squared Euclidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)] return -np.sum(np.log(np.sqrt(2*np.pi*cov))) - pairwise_distances(scaled_x, [scaled_mean], 'euclidean').flatten()**2
[ "def", "logpdf_diagonal_gaussian", "(", "self", ",", "x", ",", "mean", ",", "cov", ")", ":", "n", "=", "x", ".", "shape", "[", "0", "]", "dim", "=", "x", ".", "shape", "[", "1", "]", "assert", "(", "dim", "==", "len", "(", "mean", ")", "and", "dim", "==", "len", "(", "cov", ")", ")", "# multiply each i-th column of x by (1/(2*sigma_i)), where sigma_i is sqrt of variance of i-th variable.", "scaled_x", "=", "x", ".", "dot", "(", "self", ".", "diag", "(", "1.", "/", "(", "2", "*", "np", ".", "sqrt", "(", "cov", ")", ")", ")", ")", "# multiply each i-th entry of mean by (1/(2*sigma_i))", "scaled_mean", "=", "mean", "/", "(", "2", "*", "np", ".", "sqrt", "(", "cov", ")", ")", "# sum of pairwise squared Euclidean distances gives SUM[(x_i - mean_i)^2/(2*sigma_i^2)]", "return", "-", "np", ".", "sum", "(", "np", ".", "log", "(", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", "*", "cov", ")", ")", ")", "-", "pairwise_distances", "(", "scaled_x", ",", "[", "scaled_mean", "]", ",", "'euclidean'", ")", ".", "flatten", "(", ")", "**", "2" ]
Compute logpdf of a multivariate Gaussian distribution with diagonal covariance at a given point x. A multivariate Gaussian distribution with a diagonal covariance is equivalent to a collection of independent Gaussian random variables. x should be a sparse matrix. The logpdf will be computed for each row of x. mean and cov should be given as 1D numpy arrays mean[i] : mean of i-th variable cov[i] : variance of i-th variable
[ "Compute", "logpdf", "of", "a", "multivariate", "Gaussian", "distribution", "with", "diagonal", "covariance", "at", "a", "given", "point", "x", ".", "A", "multivariate", "Gaussian", "distribution", "with", "a", "diagonal", "covariance", "is", "equivalent", "to", "a", "collection", "of", "independent", "Gaussian", "random", "variables", "." ]
f12f46724295b57c4859e6acf7eab580fc355eb1
https://github.com/insilicolife/micti/blob/f12f46724295b57c4859e6acf7eab580fc355eb1/build/lib/MICTI/GM.py#L33-L54
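Per row, the quantity computed is `sum_i [-0.5*log(2*pi*cov[i]) - (x[i]-mean[i])**2 / (2*cov[i])]`; a dense NumPy re-derivation to sanity-check the sparse version against (a sketch, not the MICTI code):

```python
import numpy as np

def logpdf_diag_dense(x, mean, cov):
    # Log-density of a diagonal-covariance Gaussian: a sum of independent
    # one-dimensional Gaussian log-densities, evaluated per row of x.
    x, mean, cov = np.asarray(x), np.asarray(mean), np.asarray(cov)
    return (-0.5 * np.sum(np.log(2 * np.pi * cov))
            - np.sum((x - mean) ** 2 / (2 * cov), axis=-1))

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))
mean = np.array([0.0, 1.0, -1.0])
cov = np.array([1.0, 2.0, 0.5])
print(logpdf_diag_dense(x, mean, cov))   # one log-density per row
```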
248,982
insilicolife/micti
build/lib/MICTI/GM.py
GM.log_sum_exp
def log_sum_exp(self,x, axis): '''Compute the log of a sum of exponentials''' x_max = np.max(x, axis=axis) if axis == 1: return x_max + np.log( np.sum(np.exp(x-x_max[:,np.newaxis]), axis=1) ) else: return x_max + np.log( np.sum(np.exp(x-x_max), axis=0) )
python
def log_sum_exp(self,x, axis): '''Compute the log of a sum of exponentials''' x_max = np.max(x, axis=axis) if axis == 1: return x_max + np.log( np.sum(np.exp(x-x_max[:,np.newaxis]), axis=1) ) else: return x_max + np.log( np.sum(np.exp(x-x_max), axis=0) )
[ "def", "log_sum_exp", "(", "self", ",", "x", ",", "axis", ")", ":", "x_max", "=", "np", ".", "max", "(", "x", ",", "axis", "=", "axis", ")", "if", "axis", "==", "1", ":", "return", "x_max", "+", "np", ".", "log", "(", "np", ".", "sum", "(", "np", ".", "exp", "(", "x", "-", "x_max", "[", ":", ",", "np", ".", "newaxis", "]", ")", ",", "axis", "=", "1", ")", ")", "else", ":", "return", "x_max", "+", "np", ".", "log", "(", "np", ".", "sum", "(", "np", ".", "exp", "(", "x", "-", "x_max", ")", ",", "axis", "=", "0", ")", ")" ]
Compute the log of a sum of exponentials
[ "Compute", "the", "log", "of", "a", "sum", "of", "exponentials" ]
f12f46724295b57c4859e6acf7eab580fc355eb1
https://github.com/insilicolife/micti/blob/f12f46724295b57c4859e6acf7eab580fc355eb1/build/lib/MICTI/GM.py#L100-L106
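Subtracting the maximum before exponentiating is the standard log-sum-exp trick; the payoff is easy to see on inputs large enough to overflow the naive form:

```python
import numpy as np

x = np.array([[1000.0, 1000.0], [0.0, 1.0]])

# Naive form overflows: exp(1000) is inf, so the result is inf.
with np.errstate(over='ignore'):
    naive = np.log(np.sum(np.exp(x), axis=1))

# Shifted form stays finite: log(sum exp(x)) = max + log(sum exp(x - max)).
x_max = np.max(x, axis=1)
stable = x_max + np.log(np.sum(np.exp(x - x_max[:, np.newaxis]), axis=1))

print(naive)    # [       inf 1.31326169]
print(stable)   # [1000.69314718    1.31326169]
```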
248,983
neuroticnerd/armory
armory/phone/cli.py
lookup
def lookup(ctx, number, comment, cache): """Get the carrier and country code for a phone number""" phone = PhoneNumber(number, comment=comment) info('{0} | {1}'.format(phone.number, ctx.obj['config']['lookups'].keys())) if phone.number in ctx.obj['config']['lookups']: info('{0} is already cached:'.format(phone.number)) info(jsonify(ctx.obj['config']['lookups'][phone.number])) return data = phone.lookup() info('carrier = {0}'.format(phone.carrier)) info('type = {0}'.format(phone.type)) info('cache = {0}'.format(cache)) if cache: ctx.obj['config']['lookups'][phone.number] = phone.raw with open(CONFIG_FILE, 'wb') as cfg: cfg.write(json.dumps(ctx.obj['config'], indent=2))
python
def lookup(ctx, number, comment, cache): """Get the carrier and country code for a phone number""" phone = PhoneNumber(number, comment=comment) info('{0} | {1}'.format(phone.number, ctx.obj['config']['lookups'].keys())) if phone.number in ctx.obj['config']['lookups']: info('{0} is already cached:'.format(phone.number)) info(jsonify(ctx.obj['config']['lookups'][phone.number])) return data = phone.lookup() info('carrier = {0}'.format(phone.carrier)) info('type = {0}'.format(phone.type)) info('cache = {0}'.format(cache)) if cache: ctx.obj['config']['lookups'][phone.number] = phone.raw with open(CONFIG_FILE, 'wb') as cfg: cfg.write(json.dumps(ctx.obj['config'], indent=2))
[ "def", "lookup", "(", "ctx", ",", "number", ",", "comment", ",", "cache", ")", ":", "phone", "=", "PhoneNumber", "(", "number", ",", "comment", "=", "comment", ")", "info", "(", "'{0} | {1}'", ".", "format", "(", "phone", ".", "number", ",", "ctx", ".", "obj", "[", "'config'", "]", "[", "'lookups'", "]", ".", "keys", "(", ")", ")", ")", "if", "phone", ".", "number", "in", "ctx", ".", "obj", "[", "'config'", "]", "[", "'lookups'", "]", ":", "info", "(", "'{0} is already cached:'", ".", "format", "(", "phone", ".", "number", ")", ")", "info", "(", "jsonify", "(", "ctx", ".", "obj", "[", "'config'", "]", "[", "'lookups'", "]", "[", "phone", ".", "number", "]", ")", ")", "return", "data", "=", "phone", ".", "lookup", "(", ")", "info", "(", "'carrier = {0}'", ".", "format", "(", "phone", ".", "carrier", ")", ")", "info", "(", "'type = {0}'", ".", "format", "(", "phone", ".", "type", ")", ")", "info", "(", "'cache = {0}'", ".", "format", "(", "cache", ")", ")", "if", "cache", ":", "ctx", ".", "obj", "[", "'config'", "]", "[", "'lookups'", "]", "[", "phone", ".", "number", "]", "=", "phone", ".", "raw", "with", "open", "(", "CONFIG_FILE", ",", "'wb'", ")", "as", "cfg", ":", "cfg", ".", "write", "(", "json", ".", "dumps", "(", "ctx", ".", "obj", "[", "'config'", "]", ",", "indent", "=", "2", ")", ")" ]
Get the carrier and country code for a phone number
[ "Get", "the", "carrier", "and", "country", "code", "for", "a", "phone", "number" ]
d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1
https://github.com/neuroticnerd/armory/blob/d37c5ca1dbdd60dddb968e35f0bbe4bc1299dca1/armory/phone/cli.py#L149-L164
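A portability note: `json.dumps` returns `str`, so the `open(CONFIG_FILE, 'wb')` cache write above works on Python 2 but raises TypeError on Python 3. A text-mode sketch of the same cache save (the function name and paths are illustrative):

```python
import json
import os
import tempfile

def save_cache(config, path):
    # Text mode ('w'), since json.dump produces str, not bytes.
    with open(path, 'w') as cfg:
        json.dump(config, cfg, indent=2)

path = os.path.join(tempfile.gettempdir(), 'lookup_cache.json')
save_cache({'lookups': {'+15551234567': {'carrier': 'Example'}}}, path)
print(open(path).read())
```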
248,984
dlancer/django-appcore
appcore/views/decorators.py
anonymous_required
def anonymous_required(function): """Redirect to user profile if user is already logged-in""" def wrapper(*args, **kwargs): if args[0].user.is_authenticated(): url = settings.ANONYMOUS_REQUIRED_REDIRECT_URL return HttpResponseRedirect(reverse(url)) return function(*args, **kwargs) return wrapper
python
def anonymous_required(function): """Redirect to user profile if user is already logged-in""" def wrapper(*args, **kwargs): if args[0].user.is_authenticated(): url = settings.ANONYMOUS_REQUIRED_REDIRECT_URL return HttpResponseRedirect(reverse(url)) return function(*args, **kwargs) return wrapper
[ "def", "anonymous_required", "(", "function", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", "[", "0", "]", ".", "user", ".", "is_authenticated", "(", ")", ":", "url", "=", "settings", ".", "ANONYMOUS_REQUIRED_REDIRECT_URL", "return", "HttpResponseRedirect", "(", "reverse", "(", "url", ")", ")", "return", "function", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Redirect to user profile if user is already logged-in
[ "Redirect", "to", "user", "profile", "if", "user", "is", "already", "logged", "-", "in" ]
8ba82a9268f23afd451e6ffd0367c54509108348
https://github.com/dlancer/django-appcore/blob/8ba82a9268f23afd451e6ffd0367c54509108348/appcore/views/decorators.py#L7-L16
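A self-contained sketch of the same guard with a fake request object (Django's settings/reverse machinery is replaced by a hard-coded redirect string; note also that `is_authenticated` became a property rather than a method in modern Django):

```python
def anonymous_required(function):
    def wrapper(request, *args, **kwargs):
        if request.user_is_authenticated:
            return 'redirect: /profile/'   # stands in for HttpResponseRedirect
        return function(request, *args, **kwargs)
    return wrapper

class FakeRequest:
    def __init__(self, authenticated):
        self.user_is_authenticated = authenticated

@anonymous_required
def signup(request):
    return 'signup page'

print(signup(FakeRequest(False)))   # signup page
print(signup(FakeRequest(True)))    # redirect: /profile/
```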
248,985
minhhoit/yacms
yacms/generic/templatetags/disqus_tags.py
disqus_sso_script
def disqus_sso_script(context): """ Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified. """ settings = context["settings"] public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "") secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "") user = context["request"].user if public_key and secret_key and user.is_authenticated(): context["public_key"] = public_key context["sso_data"] = _get_disqus_sso(user, public_key, secret_key) return context
python
def disqus_sso_script(context): """ Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified. """ settings = context["settings"] public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "") secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "") user = context["request"].user if public_key and secret_key and user.is_authenticated(): context["public_key"] = public_key context["sso_data"] = _get_disqus_sso(user, public_key, secret_key) return context
[ "def", "disqus_sso_script", "(", "context", ")", ":", "settings", "=", "context", "[", "\"settings\"", "]", "public_key", "=", "getattr", "(", "settings", ",", "\"COMMENTS_DISQUS_API_PUBLIC_KEY\"", ",", "\"\"", ")", "secret_key", "=", "getattr", "(", "settings", ",", "\"COMMENTS_DISQUS_API_SECRET_KEY\"", ",", "\"\"", ")", "user", "=", "context", "[", "\"request\"", "]", ".", "user", "if", "public_key", "and", "secret_key", "and", "user", ".", "is_authenticated", "(", ")", ":", "context", "[", "\"public_key\"", "]", "=", "public_key", "context", "[", "\"sso_data\"", "]", "=", "_get_disqus_sso", "(", "user", ",", "public_key", ",", "secret_key", ")", "return", "context" ]
Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
[ "Provides", "a", "generic", "context", "variable", "which", "adds", "single", "-", "sign", "-", "on", "support", "to", "DISQUS", "if", "COMMENTS_DISQUS_API_PUBLIC_KEY", "and", "COMMENTS_DISQUS_API_SECRET_KEY", "are", "specified", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/templatetags/disqus_tags.py#L26-L39
248,986
coghost/izen
izen/prettify.py
Prettify.log_random_sleep
def log_random_sleep(self, minimum=3.0, scale=1.0, hints=None): """wrap random sleep. - log it for debug purpose only """ hints = '{} slept'.format(hints) if hints else 'slept' st = time.time() helper.random_sleep(minimum, scale) log.debug('{} {} {}s'.format( self.symbols.get('sleep', ''), hints, self.color_log(time.time() - st)))
python
def log_random_sleep(self, minimum=3.0, scale=1.0, hints=None): """wrap random sleep. - log it for debug purpose only """ hints = '{} slept'.format(hints) if hints else 'slept' st = time.time() helper.random_sleep(minimum, scale) log.debug('{} {} {}s'.format( self.symbols.get('sleep', ''), hints, self.color_log(time.time() - st)))
[ "def", "log_random_sleep", "(", "self", ",", "minimum", "=", "3.0", ",", "scale", "=", "1.0", ",", "hints", "=", "None", ")", ":", "hints", "=", "'{} slept'", ".", "format", "(", "hints", ")", "if", "hints", "else", "'slept'", "st", "=", "time", ".", "time", "(", ")", "helper", ".", "random_sleep", "(", "minimum", ",", "scale", ")", "log", ".", "debug", "(", "'{} {} {}s'", ".", "format", "(", "self", ".", "symbols", ".", "get", "(", "'sleep'", ",", "''", ")", ",", "hints", ",", "self", ".", "color_log", "(", "time", ".", "time", "(", ")", "-", "st", ")", ")", ")" ]
wrap random sleep. - log it for debug purpose only
[ "wrap", "random", "sleep", "." ]
432db017f99dd2ba809e1ba1792145ab6510263d
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/prettify.py#L93-L102
248,987
klorenz/python-argdeco
argdeco/config.py
config_factory
def config_factory(ConfigClass=dict, prefix=None, config_file=None ): '''return a class, which implements the compiler_factory API :param ConfigClass: defaults to dict. A simple factory (without parameter) for a dictionary-like object, which implements __setitem__() method. Additionally you can implement following methods: :``init_args``: A method to be called to initialize the config object by passing :py:class:`~argparse.Namespace` object resulting from :py:class:`~argparse.ArgumentParser.parseargs` method. You could load data from a configuration file here. :``compile_args``: A method, which can return the same like a ``compile`` function does. If there is no such method, a tuple with a ConfigClass instance as single element is returned. :param prefix: Add this prefix to config_name. (e.g. if prefix="foo" and you have config_name="x.y" final config_path results in "foo.x.y") :param config_file: An :py:class:`~argdeco.arguments.arg` to provide a config file. If you provide this argument, you can implement one of the following methods in your ``ConfigClass`` to load data from the configfile: :``load``: If you pass ``config_file`` argument, this method can be implemented to load configuration data from resulting stream. If config_file is '-', stdin stream is passed. :``load_from_file``: If you prefer to open the file yourself, you can do this, by implementing ``load_from_file`` instead which has the filename as its single argument. :``update``: method like :py:meth:`dict.update`. If neither of ``load`` or ``load_from_file`` is present, but ``update`` is, it is assumed, that config_file is of type YAML (or JSON) and configuration is updated by calling ``update`` with the parsed data as parameter. If you implement neither of these, it is assumed, that configuration file is of type YAML (or plain JSON, as YAML is a superset of it). Data is loaded from file and will update configuration object using dict-like :py:meth:`dict.update` method. :type config_file: argdeco.arguments.arg :returns: ConfigFactory class, which implements compiler_factory API. ''' config_factory = ConfigClass class ConfigFactory: def __init__(self, command): self.command = command if config_file: from .arguments import arg assert isinstance(config_file, arg), "config_file must be of type arg" try: self.command.add_argument(config_file) except: pass def __call__(self, args, **opts): cfg = ConfigClass() if hasattr(cfg, 'init_args'): cfg.init_args(args) if config_file is not None: if hasattr(args, config_file.dest): fn = getattr(args, config_file.dest) if fn is not None: if hasattr(cfg, 'load'): if config_file.dest == '-': cfg.load(sys.stdin) else: with open(fn, 'r') as f: cfg.load(f) elif hasattr(cfg, 'load_from_file'): cfg.load_from_file(fn) elif hasattr(cfg, 'update'): # assume yaml file import yaml with open(fn, 'r') as f: data = yaml.load(f) cfg.update(data) for k,v in opts.items(): config_name = self.command.get_config_name(args.action, k) if config_name is None: continue if prefix is not None: config_name = '.'.join([prefix, config_name]) cfg[config_name] = v if hasattr(cfg, 'compile_args'): return cfg.compile_args() else: return (cfg,) return ConfigFactory
python
def config_factory(ConfigClass=dict, prefix=None, config_file=None ): '''return a class, which implements the compiler_factory API :param ConfigClass: defaults to dict. A simple factory (without parameter) for a dictionary-like object, which implements __setitem__() method. Additionally you can implement following methods: :``init_args``: A method to be called to initialize the config object by passing :py:class:`~argparse.Namespace` object resulting from :py:class:`~argparse.ArgumentParser.parseargs` method. You could load data from a configuration file here. :``compile_args``: A method, which can return the same like a ``compile`` function does. If there is no such method, a tuple with a ConfigClass instance as single element is returned. :param prefix: Add this prefix to config_name. (e.g. if prefix="foo" and you have config_name="x.y" final config_path results in "foo.x.y") :param config_file: An :py:class:`~argdeco.arguments.arg` to provide a config file. If you provide this argument, you can implement one of the following methods in your ``ConfigClass`` to load data from the configfile: :``load``: If you pass ``config_file`` argument, this method can be implemented to load configuration data from resulting stream. If config_file is '-', stdin stream is passed. :``load_from_file``: If you prefer to open the file yourself, you can do this, by implementing ``load_from_file`` instead which has the filename as its single argument. :``update``: method like :py:meth:`dict.update`. If neither of ``load`` or ``load_from_file`` is present, but ``update`` is, it is assumed, that config_file is of type YAML (or JSON) and configuration is updated by calling ``update`` with the parsed data as parameter. If you implement neither of these, it is assumed, that configuration file is of type YAML (or plain JSON, as YAML is a superset of it). Data is loaded from file and will update configuration object using dict-like :py:meth:`dict.update` method. :type config_file: argdeco.arguments.arg :returns: ConfigFactory class, which implements compiler_factory API. ''' config_factory = ConfigClass class ConfigFactory: def __init__(self, command): self.command = command if config_file: from .arguments import arg assert isinstance(config_file, arg), "config_file must be of type arg" try: self.command.add_argument(config_file) except: pass def __call__(self, args, **opts): cfg = ConfigClass() if hasattr(cfg, 'init_args'): cfg.init_args(args) if config_file is not None: if hasattr(args, config_file.dest): fn = getattr(args, config_file.dest) if fn is not None: if hasattr(cfg, 'load'): if config_file.dest == '-': cfg.load(sys.stdin) else: with open(fn, 'r') as f: cfg.load(f) elif hasattr(cfg, 'load_from_file'): cfg.load_from_file(fn) elif hasattr(cfg, 'update'): # assume yaml file import yaml with open(fn, 'r') as f: data = yaml.load(f) cfg.update(data) for k,v in opts.items(): config_name = self.command.get_config_name(args.action, k) if config_name is None: continue if prefix is not None: config_name = '.'.join([prefix, config_name]) cfg[config_name] = v if hasattr(cfg, 'compile_args'): return cfg.compile_args() else: return (cfg,) return ConfigFactory
[ "def", "config_factory", "(", "ConfigClass", "=", "dict", ",", "prefix", "=", "None", ",", "config_file", "=", "None", ")", ":", "config_factory", "=", "ConfigClass", "class", "ConfigFactory", ":", "def", "__init__", "(", "self", ",", "command", ")", ":", "self", ".", "command", "=", "command", "if", "config_file", ":", "from", ".", "arguments", "import", "arg", "assert", "isinstance", "(", "config_file", ",", "arg", ")", ",", "\"config_file must be of type arg\"", "try", ":", "self", ".", "command", ".", "add_argument", "(", "config_file", ")", "except", ":", "pass", "def", "__call__", "(", "self", ",", "args", ",", "*", "*", "opts", ")", ":", "cfg", "=", "ConfigClass", "(", ")", "if", "hasattr", "(", "cfg", ",", "'init_args'", ")", ":", "cfg", ".", "init_args", "(", "args", ")", "if", "config_file", "is", "not", "None", ":", "if", "hasattr", "(", "args", ",", "config_file", ".", "dest", ")", ":", "fn", "=", "getattr", "(", "args", ",", "config_file", ".", "dest", ")", "if", "fn", "is", "not", "None", ":", "if", "hasattr", "(", "cfg", ",", "'load'", ")", ":", "if", "config_file", ".", "dest", "==", "'-'", ":", "cfg", ".", "load", "(", "sys", ".", "stdin", ")", "else", ":", "with", "open", "(", "fn", ",", "'r'", ")", "as", "f", ":", "cfg", ".", "load", "(", "f", ")", "elif", "hasattr", "(", "cfg", ",", "'load_from_file'", ")", ":", "cfg", ".", "load_from_file", "(", "fn", ")", "elif", "hasattr", "(", "cfg", ",", "'update'", ")", ":", "# assume yaml file", "import", "yaml", "with", "open", "(", "fn", ",", "'r'", ")", "as", "f", ":", "data", "=", "yaml", ".", "load", "(", "f", ")", "cfg", ".", "update", "(", "data", ")", "for", "k", ",", "v", "in", "opts", ".", "items", "(", ")", ":", "config_name", "=", "self", ".", "command", ".", "get_config_name", "(", "args", ".", "action", ",", "k", ")", "if", "config_name", "is", "None", ":", "continue", "if", "prefix", "is", "not", "None", ":", "config_name", "=", "'.'", ".", "join", "(", "[", "prefix", ",", "config_name", "]", ")", "cfg", "[", "config_name", "]", "=", "v", "if", "hasattr", "(", "cfg", ",", "'compile_args'", ")", ":", "return", "cfg", ".", "compile_args", "(", ")", "else", ":", "return", "(", "cfg", ",", ")", "return", "ConfigFactory" ]
return a class, which implements the compiler_factory API :param ConfigClass: defaults to dict. A simple factory (without parameter) for a dictionary-like object, which implements __setitem__() method. Additionally you can implement following methods: :``init_args``: A method to be called to initialize the config object by passing the :py:class:`~argparse.Namespace` object resulting from the :py:meth:`~argparse.ArgumentParser.parse_args` method. You could load data from a configuration file here. :``compile_args``: A method, which can return the same as a ``compile`` function does. If there is no such method, a tuple with a ConfigClass instance as single element is returned. :param prefix: Add this prefix to config_name. (e.g. if prefix="foo" and you have config_name="x.y" final config_path results in "foo.x.y") :param config_file: An :py:class:`~argdeco.arguments.arg` to provide a config file. If you provide this argument, you can implement one of the following methods in your ``ConfigClass`` to load data from the configfile: :``load``: If you pass the ``config_file`` argument, this method can be implemented to load configuration data from the resulting stream. If config_file is '-', the stdin stream is passed. :``load_from_file``: If you prefer to open the file yourself, you can do this by implementing ``load_from_file`` instead, which has the filename as its single argument. :``update``: method like :py:meth:`dict.update`. If neither ``load`` nor ``load_from_file`` is present, but ``update`` is, it is assumed that config_file is of type YAML (or JSON) and configuration is updated by calling ``update`` with the parsed data as parameter. If you implement none of these, it is assumed that the configuration file is of type YAML (or plain JSON, as YAML is a superset of it). Data is loaded from the file and will update the configuration object using the dict-like :py:meth:`dict.update` method. :type config_file: argdeco.arguments.arg :returns: ConfigFactory class, which implements the compiler_factory API.
[ "return", "a", "class", "which", "implements", "the", "compiler_factory", "API" ]
8d01acef8c19d6883873689d017b14857876412d
https://github.com/klorenz/python-argdeco/blob/8d01acef8c19d6883873689d017b14857876412d/argdeco/config.py#L167-L277
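A minimal ConfigClass sketch implementing two of the optional hooks the docstring describes (a dict subclass; the key names and the `key = value` file format are invented for illustration):

```python
import argparse
import io

class MyConfig(dict):
    """Sketch of a ConfigClass with the init_args and load hooks."""
    def init_args(self, args):
        # Seed the config from the parsed argparse Namespace.
        self['cli.verbose'] = getattr(args, 'verbose', False)

    def load(self, stream):
        # Invented "key = value" format; a real ConfigClass might parse
        # YAML or INI here instead.
        for line in stream:
            line = line.strip()
            if line and not line.startswith('#') and '=' in line:
                key, value = line.split('=', 1)
                self[key.strip()] = value.strip()

cfg = MyConfig()
cfg.init_args(argparse.Namespace(verbose=True))
cfg.load(io.StringIO('db.host = localhost\ndb.port = 5432\n'))
print(cfg)   # {'cli.verbose': True, 'db.host': 'localhost', 'db.port': '5432'}
```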
248,988
klorenz/python-argdeco
argdeco/config.py
ConfigDict.flatten
def flatten(self, D): '''flatten a nested dictionary D to a flat dictionary nested keys are separated by '.' ''' if not isinstance(D, dict): return D result = {} for k,v in D.items(): if isinstance(v, dict): for _k,_v in self.flatten(v).items(): result['.'.join([k,_k])] = _v else: result[k] = v return result
python
def flatten(self, D): '''flatten a nested dictionary D to a flat dictionary nested keys are separated by '.' ''' if not isinstance(D, dict): return D result = {} for k,v in D.items(): if isinstance(v, dict): for _k,_v in self.flatten(v).items(): result['.'.join([k,_k])] = _v else: result[k] = v return result
[ "def", "flatten", "(", "self", ",", "D", ")", ":", "if", "not", "isinstance", "(", "D", ",", "dict", ")", ":", "return", "D", "result", "=", "{", "}", "for", "k", ",", "v", "in", "D", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "for", "_k", ",", "_v", "in", "self", ".", "flatten", "(", "v", ")", ".", "items", "(", ")", ":", "result", "[", "'.'", ".", "join", "(", "[", "k", ",", "_k", "]", ")", "]", "=", "_v", "else", ":", "result", "[", "k", "]", "=", "v", "return", "result" ]
flatten a nested dictionary D to a flat dictionary nested keys are separated by '.'
[ "flatten", "a", "nested", "dictionary", "D", "to", "a", "flat", "dictionary" ]
8d01acef8c19d6883873689d017b14857876412d
https://github.com/klorenz/python-argdeco/blob/8d01acef8c19d6883873689d017b14857876412d/argdeco/config.py#L115-L131
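A standalone version of the same recursion, with a worked example of the dotted-key output:

```python
def flatten(D):
    # Recursively collapse nested dicts into dotted key paths.
    if not isinstance(D, dict):
        return D
    result = {}
    for k, v in D.items():
        if isinstance(v, dict):
            for inner_k, inner_v in flatten(v).items():
                result['.'.join([k, inner_k])] = inner_v
        else:
            result[k] = v
    return result

print(flatten({'foo': {'bar': 'glork', 'baz': {'deep': 1}}, 'top': 2}))
# {'foo.bar': 'glork', 'foo.baz.deep': 1, 'top': 2}
```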
248,989
klorenz/python-argdeco
argdeco/config.py
ConfigDict.update
def update(self, E=None, **F): '''flatten nested dictionaries to update pathwise >>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}}) {'foo': {'bar': 'glork', 'blub': 'bla'} In contrast to: >>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}}) {'foo: {'blub': 'bla'}'} ''' def _update(D): for k,v in D.items(): if super(ConfigDict, self).__contains__(k): if isinstance(self[k], ConfigDict): self[k].update(v) else: self[k] = self.assimilate(v) else: self[k] = self.assimilate(v) if E is not None: if not hasattr(E, 'keys'): E = self.assimilate(dict(E)) _update(E) _update(F) return self
python
def update(self, E=None, **F): '''flatten nested dictionaries to update pathwise >>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}}) {'foo': {'bar': 'glork', 'blub': 'bla'} In contrast to: >>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}}) {'foo: {'blub': 'bla'}'} ''' def _update(D): for k,v in D.items(): if super(ConfigDict, self).__contains__(k): if isinstance(self[k], ConfigDict): self[k].update(v) else: self[k] = self.assimilate(v) else: self[k] = self.assimilate(v) if E is not None: if not hasattr(E, 'keys'): E = self.assimilate(dict(E)) _update(E) _update(F) return self
[ "def", "update", "(", "self", ",", "E", "=", "None", ",", "*", "*", "F", ")", ":", "def", "_update", "(", "D", ")", ":", "for", "k", ",", "v", "in", "D", ".", "items", "(", ")", ":", "if", "super", "(", "ConfigDict", ",", "self", ")", ".", "__contains__", "(", "k", ")", ":", "if", "isinstance", "(", "self", "[", "k", "]", ",", "ConfigDict", ")", ":", "self", "[", "k", "]", ".", "update", "(", "v", ")", "else", ":", "self", "[", "k", "]", "=", "self", ".", "assimilate", "(", "v", ")", "else", ":", "self", "[", "k", "]", "=", "self", ".", "assimilate", "(", "v", ")", "if", "E", "is", "not", "None", ":", "if", "not", "hasattr", "(", "E", ",", "'keys'", ")", ":", "E", "=", "self", ".", "assimilate", "(", "dict", "(", "E", ")", ")", "_update", "(", "E", ")", "_update", "(", "F", ")", "return", "self" ]
flatten nested dictionaries to update pathwise >>> Config({'foo': {'bar': 'glork'}}).update({'foo': {'blub': 'bla'}}) {'foo': {'bar': 'glork', 'blub': 'bla'}} In contrast to: >>> {'foo': {'bar': 'glork'}}.update({'foo': {'blub': 'bla'}}) {'foo': {'blub': 'bla'}}
[ "flatten", "nested", "dictionaries", "to", "update", "pathwise" ]
8d01acef8c19d6883873689d017b14857876412d
https://github.com/klorenz/python-argdeco/blob/8d01acef8c19d6883873689d017b14857876412d/argdeco/config.py#L134-L163
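The contrast with plain `dict.update` (which replaces nested dicts wholesale, and returns None rather than the dict, so the doctests above are illustrative rather than literal) can be reproduced with a standalone deep-merge:

```python
plain = {'foo': {'bar': 'glork'}}
plain.update({'foo': {'blub': 'bla'}})
print(plain)   # {'foo': {'blub': 'bla'}} -- the inner dict is replaced

def deep_update(dst, src):
    # Merge pathwise instead of replacing nested dicts wholesale.
    for k, v in src.items():
        if isinstance(v, dict) and isinstance(dst.get(k), dict):
            deep_update(dst[k], v)
        else:
            dst[k] = v
    return dst

print(deep_update({'foo': {'bar': 'glork'}}, {'foo': {'blub': 'bla'}}))
# {'foo': {'bar': 'glork', 'blub': 'bla'}}
```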
248,990
hackthefed/govtrack2csv
govtrack2csv/__init__.py
import_legislators
def import_legislators(src): """ Read the legislators from the csv files into a single Dataframe. Intended for importing new data. """ logger.info("Importing Legislators From: {0}".format(src)) current = pd.read_csv("{0}/{1}/legislators-current.csv".format( src, LEGISLATOR_DIR)) historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format( src, LEGISLATOR_DIR)) legislators = current.append(historic) return legislators
python
def import_legislators(src): """ Read the legislators from the csv files into a single Dataframe. Intended for importing new data. """ logger.info("Importing Legislators From: {0}".format(src)) current = pd.read_csv("{0}/{1}/legislators-current.csv".format( src, LEGISLATOR_DIR)) historic = pd.read_csv("{0}/{1}/legislators-historic.csv".format( src, LEGISLATOR_DIR)) legislators = current.append(historic) return legislators
[ "def", "import_legislators", "(", "src", ")", ":", "logger", ".", "info", "(", "\"Importing Legislators From: {0}\"", ".", "format", "(", "src", ")", ")", "current", "=", "pd", ".", "read_csv", "(", "\"{0}/{1}/legislators-current.csv\"", ".", "format", "(", "src", ",", "LEGISLATOR_DIR", ")", ")", "historic", "=", "pd", ".", "read_csv", "(", "\"{0}/{1}/legislators-historic.csv\"", ".", "format", "(", "src", ",", "LEGISLATOR_DIR", ")", ")", "legislators", "=", "current", ".", "append", "(", "historic", ")", "return", "legislators" ]
Read the legislators from the csv files into a single Dataframe. Intended for importing new data.
[ "Read", "the", "legislators", "from", "the", "csv", "files", "into", "a", "single", "Dataframe", ".", "Intended", "for", "importing", "new", "data", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L38-L50
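`DataFrame.append` was deprecated in pandas 1.4 and removed in 2.0; the same concatenation with the current API (toy frames stand in for the CSV loads):

```python
import pandas as pd

current = pd.DataFrame({'last_name': ['Doe'], 'state': ['CA']})
historic = pd.DataFrame({'last_name': ['Roe'], 'state': ['NY']})

# pd.concat is the drop-in replacement for current.append(historic).
legislators = pd.concat([current, historic], ignore_index=True)
print(legislators)
```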
248,991
hackthefed/govtrack2csv
govtrack2csv/__init__.py
save_legislators
def save_legislators(legislators, destination): """ Output legislators dataframe to csv. """ logger.info("Saving Legislators To: {0}".format(destination)) legislators.to_csv("{0}/legislators.csv".format(destination), encoding='utf-8')
python
def save_legislators(legislators, destination): """ Output legislators dataframe to csv. """ logger.info("Saving Legislators To: {0}".format(destination)) legislators.to_csv("{0}/legislators.csv".format(destination), encoding='utf-8')
[ "def", "save_legislators", "(", "legislators", ",", "destination", ")", ":", "logger", ".", "info", "(", "\"Saving Legislators To: {0}\"", ".", "format", "(", "destination", ")", ")", "legislators", ".", "to_csv", "(", "\"{0}/legislators.csv\"", ".", "format", "(", "destination", ")", ",", "encoding", "=", "'utf-8'", ")" ]
Output legislators dataframe to csv.
[ "Output", "legislators", "dataframe", "to", "csv", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L53-L59
248,992
hackthefed/govtrack2csv
govtrack2csv/__init__.py
import_committees
def import_committees(src): """ Read the committees from the csv files into a single Dataframe. Intended for importing new data. """ committees = [] subcommittees = [] with open("{0}/{1}/committees-current.yaml".format(src, LEGISLATOR_DIR), 'r') as stream: committees += yaml.load(stream) with open("{0}/{1}/committees-historical.yaml".format(src, LEGISLATOR_DIR), 'r') as stream: committees += yaml.load(stream) # Sub Committees are not Committees # And unfortunately the good folk at thomas thought modeling data with duplicate id's was a good idea. # you can have two subcommittees with the ID 12. Makes a simple membership map impossible. for com in committees: com['committee_id'] = com['thomas_id'] if 'subcommittees' in com: # process sub committees into separate DataFrame for subcom in com.get('subcommittees'): subcom['committee_id'] = com[ 'thomas_id' ] # we use committee_id so we can easily merge dataframes subcom['subcommittee_id'] = "{0}-{1}".format( subcom['committee_id'], subcom['thomas_id']) subcommittees.append(subcom) del com['subcommittees'] committees_df = pd.DataFrame(committees) subcommittees_df = pd.DataFrame(subcommittees) return [committees_df, subcommittees_df]
python
def import_committees(src): """ Read the committees from the csv files into a single Dataframe. Intended for importing new data. """ committees = [] subcommittees = [] with open("{0}/{1}/committees-current.yaml".format(src, LEGISLATOR_DIR), 'r') as stream: committees += yaml.load(stream) with open("{0}/{1}/committees-historical.yaml".format(src, LEGISLATOR_DIR), 'r') as stream: committees += yaml.load(stream) # Sub Committees are not Committees # And unfortunately the good folk at thomas thought modeling data with duplicate id's was a good idea. # you can have two subcommittees with the ID 12. Makes a simple membership map impossible. for com in committees: com['committee_id'] = com['thomas_id'] if 'subcommittees' in com: # process sub committees into separate DataFrame for subcom in com.get('subcommittees'): subcom['committee_id'] = com[ 'thomas_id' ] # we use committee_id so we can easily merge dataframes subcom['subcommittee_id'] = "{0}-{1}".format( subcom['committee_id'], subcom['thomas_id']) subcommittees.append(subcom) del com['subcommittees'] committees_df = pd.DataFrame(committees) subcommittees_df = pd.DataFrame(subcommittees) return [committees_df, subcommittees_df]
[ "def", "import_committees", "(", "src", ")", ":", "committees", "=", "[", "]", "subcommittees", "=", "[", "]", "with", "open", "(", "\"{0}/{1}/committees-current.yaml\"", ".", "format", "(", "src", ",", "LEGISLATOR_DIR", ")", ",", "'r'", ")", "as", "stream", ":", "committees", "+=", "yaml", ".", "load", "(", "stream", ")", "with", "open", "(", "\"{0}/{1}/committees-historical.yaml\"", ".", "format", "(", "src", ",", "LEGISLATOR_DIR", ")", ",", "'r'", ")", "as", "stream", ":", "committees", "+=", "yaml", ".", "load", "(", "stream", ")", "# Sub Committees are not Committees", "# And unfortunately the good folk at thomas thought modeling data with duplicate id's was a good idea.", "# you can have two subcommittees with the ID 12. Makes a simple membership map impossible.", "for", "com", "in", "committees", ":", "com", "[", "'committee_id'", "]", "=", "com", "[", "'thomas_id'", "]", "if", "'subcommittees'", "in", "com", ":", "# process sub committees into separate DataFrame", "for", "subcom", "in", "com", ".", "get", "(", "'subcommittees'", ")", ":", "subcom", "[", "'committee_id'", "]", "=", "com", "[", "'thomas_id'", "]", "# we use committee_id so we can easily merge dataframes", "subcom", "[", "'subcommittee_id'", "]", "=", "\"{0}-{1}\"", ".", "format", "(", "subcom", "[", "'committee_id'", "]", ",", "subcom", "[", "'thomas_id'", "]", ")", "subcommittees", ".", "append", "(", "subcom", ")", "del", "com", "[", "'subcommittees'", "]", "committees_df", "=", "pd", ".", "DataFrame", "(", "committees", ")", "subcommittees_df", "=", "pd", ".", "DataFrame", "(", "subcommittees", ")", "return", "[", "committees_df", ",", "subcommittees_df", "]" ]
Read the committees from the yaml files into a single Dataframe. Intended for importing new data.
[ "Read", "the", "committees", "from", "the", "yaml", "files", "into", "a", "single", "Dataframe", ".", "Intended", "for", "importing", "new", "data", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L73-L108
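Two notes on the YAML handling above: `yaml.load` without an explicit Loader warns on PyYAML 5.1+ (`yaml.safe_load` is the usual replacement), and the subcommittee-ID scheme is easiest to see on a tiny invented document:

```python
import yaml

doc = """
- thomas_id: HSAG
  name: House Agriculture
  subcommittees:
    - thomas_id: '15'
      name: Conservation and Forestry
"""
committees = yaml.safe_load(doc)   # safe replacement for yaml.load(stream)

com = committees[0]
sub = com['subcommittees'][0]
# Duplicate subcommittee thomas_ids are disambiguated by prefixing the
# parent committee's ID, as in import_committees above.
print('{0}-{1}'.format(com['thomas_id'], sub['thomas_id']))   # HSAG-15
```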
248,993
hackthefed/govtrack2csv
govtrack2csv/__init__.py
move_committees
def move_committees(src, dest): """ Import stupid yaml files, convert to something useful. """ comm, sub_comm = import_committees(src) save_committees(comm, dest) save_subcommittees(sub_comm, dest)
python
def move_committees(src, dest): """ Import stupid yaml files, convert to something useful. """ comm, sub_comm = import_committees(src) save_committees(comm, dest) save_subcommittees(sub_comm, dest)
[ "def", "move_committees", "(", "src", ",", "dest", ")", ":", "comm", ",", "sub_comm", "=", "import_committees", "(", "src", ")", "save_committees", "(", "comm", ",", "dest", ")", "save_subcommittees", "(", "sub_comm", ",", "dest", ")" ]
Import stupid yaml files, convert to something useful.
[ "Import", "stupid", "yaml", "files", "convert", "to", "something", "useful", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L126-L132
248,994
hackthefed/govtrack2csv
govtrack2csv/__init__.py
make_congress_dir
def make_congress_dir(congress, dest): """ If the directory for a given congress does not exist, make it. """ congress_dir = "{0}/{1}".format(dest, congress) path = os.path.dirname(congress_dir) logger.debug("CSV DIR: {}".format(path)) if not os.path.exists(congress_dir): logger.info("Created: {0}".format(congress_dir)) os.mkdir(congress_dir) return congress_dir
python
def make_congress_dir(congress, dest): """ If the directory for a given congress does not exist, make it. """ congress_dir = "{0}/{1}".format(dest, congress) path = os.path.dirname(congress_dir) logger.debug("CSV DIR: {}".format(path)) if not os.path.exists(congress_dir): logger.info("Created: {0}".format(congress_dir)) os.mkdir(congress_dir) return congress_dir
[ "def", "make_congress_dir", "(", "congress", ",", "dest", ")", ":", "congress_dir", "=", "\"{0}/{1}\"", ".", "format", "(", "dest", ",", "congress", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "congress_dir", ")", "logger", ".", "debug", "(", "\"CSV DIR: {}\"", ".", "format", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "congress_dir", ")", ":", "logger", ".", "info", "(", "\"Created: {0}\"", ".", "format", "(", "congress_dir", ")", ")", "os", ".", "mkdir", "(", "congress_dir", ")", "return", "congress_dir" ]
If the directory for a given congress does not exist, make it.
[ "If", "the", "directory", "for", "a", "given", "congress", "does", "not", "exist", "make", "it", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L135-L146
248,995
hackthefed/govtrack2csv
govtrack2csv/__init__.py
save_congress
def save_congress(congress, dest): """ Takes a congress object with legislation, sponser, cosponsor, commities and subjects attributes and saves each item to it's own csv file. """ try: logger.debug(congress.name) logger.debug(dest) congress_dir = make_congress_dir(congress.name, dest) congress.legislation.to_csv("{0}/legislation.csv".format(congress_dir), encoding='utf-8') logger.debug(congress_dir) congress.sponsors.to_csv("{0}/sponsor_map.csv".format(congress_dir), encoding='utf-8') congress.cosponsors.to_csv( "{0}/cosponsor_map.csv".format(congress_dir), encoding='utf-8') congress.events.to_csv("{0}/events.csv".format(congress_dir), encoding='utf-8') congress.committees.to_csv( "{0}/committees_map.csv".format(congress_dir), encoding='utf-8') congress.subjects.to_csv("{0}/subjects_map.csv".format(congress_dir), encoding='utf-8') congress.votes.to_csv("{0}/votes.csv".format(congress_dir), encoding='utf-8') congress.votes_people.to_csv( "{0}/votes_people.csv".format(congress_dir), encoding='utf-8') if hasattr(congress, 'amendments'): congress.amendments.to_csv( "{0}/amendments.csv".format(congress_dir), encoding='utf-8') except Exception: logger.error("############################################shoot me") exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logger.error(exc_type, fname, exc_tb.tb_lineno)
python
def save_congress(congress, dest): """ Takes a congress object with legislation, sponser, cosponsor, commities and subjects attributes and saves each item to it's own csv file. """ try: logger.debug(congress.name) logger.debug(dest) congress_dir = make_congress_dir(congress.name, dest) congress.legislation.to_csv("{0}/legislation.csv".format(congress_dir), encoding='utf-8') logger.debug(congress_dir) congress.sponsors.to_csv("{0}/sponsor_map.csv".format(congress_dir), encoding='utf-8') congress.cosponsors.to_csv( "{0}/cosponsor_map.csv".format(congress_dir), encoding='utf-8') congress.events.to_csv("{0}/events.csv".format(congress_dir), encoding='utf-8') congress.committees.to_csv( "{0}/committees_map.csv".format(congress_dir), encoding='utf-8') congress.subjects.to_csv("{0}/subjects_map.csv".format(congress_dir), encoding='utf-8') congress.votes.to_csv("{0}/votes.csv".format(congress_dir), encoding='utf-8') congress.votes_people.to_csv( "{0}/votes_people.csv".format(congress_dir), encoding='utf-8') if hasattr(congress, 'amendments'): congress.amendments.to_csv( "{0}/amendments.csv".format(congress_dir), encoding='utf-8') except Exception: logger.error("############################################shoot me") exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logger.error(exc_type, fname, exc_tb.tb_lineno)
[ "def", "save_congress", "(", "congress", ",", "dest", ")", ":", "try", ":", "logger", ".", "debug", "(", "congress", ".", "name", ")", "logger", ".", "debug", "(", "dest", ")", "congress_dir", "=", "make_congress_dir", "(", "congress", ".", "name", ",", "dest", ")", "congress", ".", "legislation", ".", "to_csv", "(", "\"{0}/legislation.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "logger", ".", "debug", "(", "congress_dir", ")", "congress", ".", "sponsors", ".", "to_csv", "(", "\"{0}/sponsor_map.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "cosponsors", ".", "to_csv", "(", "\"{0}/cosponsor_map.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "events", ".", "to_csv", "(", "\"{0}/events.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "committees", ".", "to_csv", "(", "\"{0}/committees_map.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "subjects", ".", "to_csv", "(", "\"{0}/subjects_map.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "votes", ".", "to_csv", "(", "\"{0}/votes.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "congress", ".", "votes_people", ".", "to_csv", "(", "\"{0}/votes_people.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "if", "hasattr", "(", "congress", ",", "'amendments'", ")", ":", "congress", ".", "amendments", ".", "to_csv", "(", "\"{0}/amendments.csv\"", ".", "format", "(", "congress_dir", ")", ",", "encoding", "=", "'utf-8'", ")", "except", "Exception", ":", "logger", ".", "error", "(", "\"############################################shoot me\"", ")", "exc_type", ",", "exc_obj", ",", "exc_tb", "=", "sys", ".", "exc_info", "(", ")", "fname", "=", "os", ".", "path", ".", "split", "(", "exc_tb", ".", "tb_frame", ".", "f_code", ".", "co_filename", ")", "[", "1", "]", "logger", ".", "error", "(", "exc_type", ",", "fname", ",", "exc_tb", ".", "tb_lineno", ")" ]
Takes a congress object with legislation, sponsor, cosponsor, committees and subjects attributes and saves each item to its own csv file.
[ "Takes", "a", "congress", "object", "with", "legislation", "sponsor", "cosponsor", "committees", "and", "subjects", "attributes", "and", "saves", "each", "item", "to", "its", "own", "csv", "file", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L158-L195
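In the except block above, `logger.error(exc_type, fname, exc_tb.tb_lineno)` treats its first argument as a %-format string, which a type object is not; `logger.exception` records the message plus the exception type, file, and line number correctly:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

try:
    raise ValueError('demo failure')
except Exception:
    # Logs the message plus the full traceback (type, file, line number).
    logger.exception('failed to save congress data')
```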
248,996
hackthefed/govtrack2csv
govtrack2csv/__init__.py
extract_sponsor
def extract_sponsor(bill): """ Return a list of the fields we need to map a sponsor to a bill """ logger.debug("Extracting Sponsor") sponsor_map = [] sponsor = bill.get('sponsor', None) if sponsor: sponsor_map.append(sponsor.get('type')) sponsor_map.append(sponsor.get('thomas_id')) sponsor_map.append(bill.get('bill_id')) sponsor_map.append(sponsor.get('district')) sponsor_map.append(sponsor.get('state')) logger.debug("END Extracting Sponsor") return sponsor_map if sponsor_map else None
python
def extract_sponsor(bill): """ Return a list of the fields we need to map a sponsor to a bill """ logger.debug("Extracting Sponsor") sponsor_map = [] sponsor = bill.get('sponsor', None) if sponsor: sponsor_map.append(sponsor.get('type')) sponsor_map.append(sponsor.get('thomas_id')) sponsor_map.append(bill.get('bill_id')) sponsor_map.append(sponsor.get('district')) sponsor_map.append(sponsor.get('state')) logger.debug("END Extracting Sponsor") return sponsor_map if sponsor_map else None
[ "def", "extract_sponsor", "(", "bill", ")", ":", "logger", ".", "debug", "(", "\"Extracting Sponsor\"", ")", "sponsor_map", "=", "[", "]", "sponsor", "=", "bill", ".", "get", "(", "'sponsor'", ",", "None", ")", "if", "sponsor", ":", "sponsor_map", ".", "append", "(", "sponsor", ".", "get", "(", "'type'", ")", ")", "sponsor_map", ".", "append", "(", "sponsor", ".", "get", "(", "'thomas_id'", ")", ")", "sponsor_map", ".", "append", "(", "bill", ".", "get", "(", "'bill_id'", ")", ")", "sponsor_map", ".", "append", "(", "sponsor", ".", "get", "(", "'district'", ")", ")", "sponsor_map", ".", "append", "(", "sponsor", ".", "get", "(", "'state'", ")", ")", "logger", ".", "debug", "(", "\"END Extracting Sponsor\"", ")", "return", "sponsor_map", "if", "sponsor_map", "else", "None" ]
Return a list of the fields we need to map a sponsor to a bill
[ "Return", "a", "list", "of", "the", "fields", "we", "need", "to", "map", "a", "sponsor", "to", "a", "bill" ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L246-L260
248,997
hackthefed/govtrack2csv
govtrack2csv/__init__.py
extract_cosponsors
def extract_cosponsors(bill): """ Return a list of lists relating cosponsors to legislation. """ logger.debug("Extracting Cosponsors") cosponsor_map = [] cosponsors = bill.get('cosponsors', []) bill_id = bill.get('bill_id', None) for co in cosponsors: co_list = [] co_list.append(co.get('thomas_id')) co_list.append(bill_id) co_list.append(co.get('district')) co_list.append(co.get('state')) cosponsor_map.append(co_list) logger.debug("End Extracting Cosponsors") return cosponsor_map
python
def extract_cosponsors(bill): """ Return a list of lists relating cosponsors to legislation. """ logger.debug("Extracting Cosponsors") cosponsor_map = [] cosponsors = bill.get('cosponsors', []) bill_id = bill.get('bill_id', None) for co in cosponsors: co_list = [] co_list.append(co.get('thomas_id')) co_list.append(bill_id) co_list.append(co.get('district')) co_list.append(co.get('state')) cosponsor_map.append(co_list) logger.debug("End Extracting Cosponsors") return cosponsor_map
[ "def", "extract_cosponsors", "(", "bill", ")", ":", "logger", ".", "debug", "(", "\"Extracting Cosponsors\"", ")", "cosponsor_map", "=", "[", "]", "cosponsors", "=", "bill", ".", "get", "(", "'cosponsors'", ",", "[", "]", ")", "bill_id", "=", "bill", ".", "get", "(", "'bill_id'", ",", "None", ")", "for", "co", "in", "cosponsors", ":", "co_list", "=", "[", "]", "co_list", ".", "append", "(", "co", ".", "get", "(", "'thomas_id'", ")", ")", "co_list", ".", "append", "(", "bill_id", ")", "co_list", ".", "append", "(", "co", ".", "get", "(", "'district'", ")", ")", "co_list", ".", "append", "(", "co", ".", "get", "(", "'state'", ")", ")", "cosponsor_map", ".", "append", "(", "co_list", ")", "logger", ".", "debug", "(", "\"End Extracting Cosponsors\"", ")", "return", "cosponsor_map" ]
Return a list of lists relating cosponsors to legislation.
[ "Return", "a", "list", "of", "lists", "relating", "cosponsors", "to", "legislation", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L263-L282
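As a sketch, extract_cosponsors flattens each cosponsor into its own row keyed by bill_id; the values below are invented for illustration.

from govtrack2csv import extract_cosponsors

# Illustrative bill dict with two cosponsors.
bill = {
    'bill_id': 'hr1-113',
    'cosponsors': [
        {'thomas_id': '01234', 'district': '3', 'state': 'CA'},
        {'thomas_id': '04321', 'district': None, 'state': 'VT'},
    ],
}
for row in extract_cosponsors(bill):
    print(row)
# ['01234', 'hr1-113', '3', 'CA']
# ['04321', 'hr1-113', None, 'VT']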
248,998
hackthefed/govtrack2csv
govtrack2csv/__init__.py
extract_subjects
def extract_subjects(bill): """ Return a list of subjects for legislation. """ logger.debug("Extracting Subjects") subject_map = [] subjects = bill.get('subjects', []) bill_id = bill.get('bill_id', None) bill_type = bill.get('bill_type', None) for sub in subjects: subject_map.append((bill_id, bill_type, sub)) logger.debug("End Extracting Subjects") return subject_map
python
def extract_subjects(bill): """ Return a list of subjects for legislation. """ logger.debug("Extracting Subjects") subject_map = [] subjects = bill.get('subjects', []) bill_id = bill.get('bill_id', None) bill_type = bill.get('bill_type', None) for sub in subjects: subject_map.append((bill_id, bill_type, sub)) logger.debug("End Extracting Subjects") return subject_map
[ "def", "extract_subjects", "(", "bill", ")", ":", "logger", ".", "debug", "(", "\"Extracting Subjects\"", ")", "subject_map", "=", "[", "]", "subjects", "=", "bill", ".", "get", "(", "'subjects'", ",", "[", "]", ")", "bill_id", "=", "bill", ".", "get", "(", "'bill_id'", ",", "None", ")", "bill_type", "=", "bill", ".", "get", "(", "'bill_type'", ",", "None", ")", "for", "sub", "in", "subjects", ":", "subject_map", ".", "append", "(", "(", "bill_id", ",", "bill_type", ",", "sub", ")", ")", "logger", ".", "debug", "(", "\"End Extracting Subjects\"", ")", "return", "subject_map" ]
Return a list of subjects for legislation.
[ "Return", "a", "list", "of", "subjects", "for", "legislation", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L285-L300
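A sketch of the (bill_id, bill_type, subject) tuples extract_subjects emits, with invented inputs.

from govtrack2csv import extract_subjects

# Illustrative bill dict with two subject terms.
bill = {'bill_id': 'hr1-113', 'bill_type': 'hr',
        'subjects': ['Health', 'Taxation']}
print(extract_subjects(bill))
# [('hr1-113', 'hr', 'Health'), ('hr1-113', 'hr', 'Taxation')]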
248,999
hackthefed/govtrack2csv
govtrack2csv/__init__.py
extract_committees
def extract_committees(bill): """ Returns committee associations from a bill. """ bill_id = bill.get('bill_id', None) logger.debug("Extracting Committees for {0}".format(bill_id)) committees = bill.get('committees', []) committee_map = [] for c in committees: logger.debug("Processing committee {0}".format(c.get('committee_id'))) c_list = [] sub = c.get('subcommittee_id') if sub: logger.debug("is subcommittee") c_list.append('subcommittee') # type c_list.append(c.get('subcommittee')) sub_id = "{0}-{1}".format( c.get('committee_id'), c.get('subcommittee_id')) logger.debug("Processing subcommittee {0}".format(sub_id)) c_list.append(sub_id) else: c_list.append('committee') c_list.append(c.get('committee')) c_list.append(c.get('committee_id')) c_list.append(bill_id) committee_map.append(c_list) return committee_map
python
def extract_committees(bill): """ Returns committee associations from a bill. """ bill_id = bill.get('bill_id', None) logger.debug("Extracting Committees for {0}".format(bill_id)) committees = bill.get('committees', []) committee_map = [] for c in committees: logger.debug("Processing committee {0}".format(c.get('committee_id'))) c_list = [] sub = c.get('subcommittee_id') if sub: logger.debug("is subcommittee") c_list.append('subcommittee') # type c_list.append(c.get('subcommittee')) sub_id = "{0}-{1}".format( c.get('committee_id'), c.get('subcommittee_id')) logger.debug("Processing subcommittee {0}".format(sub_id)) c_list.append(sub_id) else: c_list.append('committee') c_list.append(c.get('committee')) c_list.append(c.get('committee_id')) c_list.append(bill_id) committee_map.append(c_list) return committee_map
[ "def", "extract_committees", "(", "bill", ")", ":", "bill_id", "=", "bill", ".", "get", "(", "'bill_id'", ",", "None", ")", "logger", ".", "debug", "(", "\"Extracting Committees for {0}\"", ".", "format", "(", "bill_id", ")", ")", "committees", "=", "bill", ".", "get", "(", "'committees'", ",", "[", "]", ")", "committee_map", "=", "[", "]", "for", "c", "in", "committees", ":", "logger", ".", "debug", "(", "\"Processing committee {0}\"", ".", "format", "(", "c", ".", "get", "(", "'committee_id'", ")", ")", ")", "c_list", "=", "[", "]", "sub", "=", "c", ".", "get", "(", "'subcommittee_id'", ")", "if", "sub", ":", "logger", ".", "debug", "(", "\"is subcommittee\"", ")", "c_list", ".", "append", "(", "'subcommittee'", ")", "# type", "c_list", ".", "append", "(", "c", ".", "get", "(", "'subcommittee'", ")", ")", "sub_id", "=", "\"{0}-{1}\"", ".", "format", "(", "c", ".", "get", "(", "'committee_id'", ")", ",", "c", ".", "get", "(", "'subcommittee_id'", ")", ")", "logger", ".", "debug", "(", "\"Processing subcommittee {0}\"", ".", "format", "(", "sub_id", ")", ")", "c_list", ".", "append", "(", "sub_id", ")", "else", ":", "c_list", ".", "append", "(", "'committee'", ")", "c_list", ".", "append", "(", "c", ".", "get", "(", "'committee'", ")", ")", "c_list", ".", "append", "(", "c", ".", "get", "(", "'committee_id'", ")", ")", "c_list", ".", "append", "(", "bill_id", ")", "committee_map", ".", "append", "(", "c_list", ")", "return", "committee_map" ]
Returns committee associations from a bill.
[ "Returns", "committee", "associations", "from", "a", "bill", "." ]
db991f5fcd3dfda6e6d51fadd286cba983f493e5
https://github.com/hackthefed/govtrack2csv/blob/db991f5fcd3dfda6e6d51fadd286cba983f493e5/govtrack2csv/__init__.py#L303-L331
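A sketch showing both branches of extract_committees: a plain committee row, and a subcommittee row whose id is the committee_id joined to the subcommittee_id. The ids and names below are illustrative, not taken from the dataset.

from govtrack2csv import extract_committees

# Illustrative bill dict with one committee and one subcommittee entry.
bill = {
    'bill_id': 'hr1-113',
    'committees': [
        {'committee': 'House Ways and Means', 'committee_id': 'HSWM'},
        {'committee': 'House Ways and Means', 'committee_id': 'HSWM',
         'subcommittee': 'Health', 'subcommittee_id': '02'},
    ],
}
for row in extract_committees(bill):
    print(row)
# ['committee', 'House Ways and Means', 'HSWM', 'hr1-113']
# ['subcommittee', 'Health', 'HSWM-02', 'hr1-113']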