Dataset schema (per-column type and value range):

  repo              stringlengths   7 to 55
  path              stringlengths   4 to 223
  url               stringlengths   87 to 315
  code              stringlengths   75 to 104k
  code_tokens       list
  docstring         stringlengths   1 to 46.9k
  docstring_tokens  list
  language          stringclasses   1 value
  partition         stringclasses   3 values
  avg_line_len      float64         7.91 to 980
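Each record below is one row of this dataset: a function extracted from the named repo, its token list, the extracted docstring with its tokens, plus the language, partition split, and average line length of the source. As a minimal sketch of iterating over such rows, assuming the split has been exported to a JSON-lines file (the filename codesearch_rows.jsonl is hypothetical, and recomputing avg_line_len from the code field is an assumption about how that column is defined):

import json

# Iterate over exported records and recompute an average line length
# from the raw code field; the file name is a hypothetical export.
with open("codesearch_rows.jsonl", "r", encoding="utf-8") as handle:
    for raw in handle:
        row = json.loads(raw)
        lines = row["code"].splitlines() or [""]
        avg_line_len = sum(len(line) for line in lines) / len(lines)
        print(row["repo"], row["path"], row["partition"], round(avg_line_len, 6))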
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L57-L84
def jaccardIndex(s1, s2, stranded=False):
    """
    Compute the Jaccard index for two collections of genomic intervals

    :param s1: the first set of genomic intervals
    :param s2: the second set of genomic intervals
    :param stranded: if True, treat regions on different strands as not
                     intersecting each other, even if they occupy the same
                     genomic region.
    :return: Jaccard index
    """
    def count(s):
        """ sum the size of regions in s. """
        tot = 0
        for r in s:
            tot += len(r)
        return tot

    if stranded:
        raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " +
                                   "index hasn't been implemented yet.")

    s1 = collapseRegions(s1)
    s2 = collapseRegions(s2)

    intersection = regionsIntersection(s1, s2)
    c_i = count(intersection)
    return c_i / float(count(s1) + count(s2) - c_i)
[ "def", "jaccardIndex", "(", "s1", ",", "s2", ",", "stranded", "=", "False", ")", ":", "def", "count", "(", "s", ")", ":", "\"\"\" sum the size of regions in s. \"\"\"", "tot", "=", "0", "for", "r", "in", "s", ":", "tot", "+=", "len", "(", "r", ")", "return", "tot", "if", "stranded", ":", "raise", "GenomicIntervalError", "(", "\"Sorry, stranded mode for computing Jaccard \"", "+", "\"index hasn't been implemented yet.\"", ")", "s1", "=", "collapseRegions", "(", "s1", ")", "s2", "=", "collapseRegions", "(", "s2", ")", "intersection", "=", "regionsIntersection", "(", "s1", ",", "s2", ")", "c_i", "=", "count", "(", "intersection", ")", "return", "c_i", "/", "float", "(", "count", "(", "s1", ")", "+", "count", "(", "s2", ")", "-", "c_i", ")" ]
Compute the Jaccard index for two collections of genomic intervals :param s1: the first set of genomic intervals :param s2: the second set of genomic intervals :param stranded: if True, treat regions on different strands as not intersecting each other, even if they occupy the same genomic region. :return: Jaccard index
[ "Compute", "the", "Jaccard", "index", "for", "two", "collections", "of", "genomic", "intervals" ]
python
train
30.714286
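The function above relies on pyokit's own interval types and helpers (collapseRegions, regionsIntersection). As a rough standalone sketch of the same calculation over plain (start, end) tuples, ignoring chromosomes and strands, and with illustrative helper names (merge, covered, jaccard) that are not part of pyokit:

# Illustrative only: Jaccard index over half-open intervals (start, end).
def merge(intervals):
    # Collapse overlapping or touching intervals into a sorted, disjoint set.
    merged = []
    for s, e in sorted(intervals):
        if merged and s <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], e))
        else:
            merged.append((s, e))
    return merged

def covered(intervals):
    # Total number of bases covered by a disjoint set of intervals.
    return sum(e - s for s, e in intervals)

def jaccard(a, b):
    a, b = merge(a), merge(b)
    inter = []
    for s1, e1 in a:
        for s2, e2 in b:
            lo, hi = max(s1, s2), min(e1, e2)
            if lo < hi:
                inter.append((lo, hi))
    c_i = covered(merge(inter))
    return c_i / float(covered(a) + covered(b) - c_i)

# e.g. jaccard([(0, 10)], [(5, 15)]) == 5 / 15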
edeposit/marcxml2mods
src/marcxml2mods/transformators.py
https://github.com/edeposit/marcxml2mods/blob/7b44157e859b4d2a372f79598ddbf77e43d39812/src/marcxml2mods/transformators.py#L83-L109
def transform_to_mods_multimono(marc_xml, uuid, url):
    """
    Convert `marc_xml` to multimonograph MODS data format.

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\\n`` in case
                 of filename.
        uuid (str): UUID string giving the package ID.
        url (str): URL of the publication (public or not).

    Returns:
        list: Collection of transformed xml strings.
    """
    marc_xml = _read_content_or_path(marc_xml)

    transformed = xslt_transformation(
        marc_xml,
        _absolute_template_path("MARC21toMultiMonographTitle.xsl")
    )

    return _apply_postprocessing(
        marc_xml=marc_xml,
        xml=transformed,
        func=mods_postprocessor.postprocess_multi_mono,
        uuid=uuid,
        url=url,
    )
[ "def", "transform_to_mods_multimono", "(", "marc_xml", ",", "uuid", ",", "url", ")", ":", "marc_xml", "=", "_read_content_or_path", "(", "marc_xml", ")", "transformed", "=", "xslt_transformation", "(", "marc_xml", ",", "_absolute_template_path", "(", "\"MARC21toMultiMonographTitle.xsl\"", ")", ")", "return", "_apply_postprocessing", "(", "marc_xml", "=", "marc_xml", ",", "xml", "=", "transformed", ",", "func", "=", "mods_postprocessor", ".", "postprocess_multi_mono", ",", "uuid", "=", "uuid", ",", "url", "=", "url", ",", ")" ]
Convert `marc_xml` to multimonograph MODS data format. Args: marc_xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. uuid (str): UUID string giving the package ID. url (str): URL of the publication (public or not). Returns: list: Collection of transformed xml strings.
[ "Convert", "marc_xml", "to", "multimonograph", "MODS", "data", "format", "." ]
python
train
28.444444
andrewramsay/sk8-drivers
pysk8/pysk8/core.py
https://github.com/andrewramsay/sk8-drivers/blob/67347a71762fb421f5ae65a595def5c7879e8b0c/pysk8/pysk8/core.py#L270-L298
def load_calibration(self, calibration_file=None):
    """Load calibration data for IMU(s) connected to this SK8.

    This method attempts to load a set of calibration data from a .ini file
    produced by the sk8_calibration_gui application (TODO link!).

    By default, it will look for a file name "sk8calib.ini" in the current
    working directory. This can be overridden using the `calibration_file`
    parameter.

    Args:
        calibration_file (str): Path to a user-specified calibration file
            (ini format).

    Returns:
        True if any calibration data was loaded, False if none was. Note that
        True will be returned even if for example only 1 IMU had any
        calibration data available.
    """
    logger.debug('Loading calibration for {}'.format(self.addr))
    calibration_data = ConfigParser()
    path = calibration_file or os.path.join(os.getcwd(), 'sk8calib.ini')
    logger.debug('Attempting to load calibration from {}'.format(path))
    calibration_data.read(path)
    success = False
    for i in range(MAX_IMUS):
        s = '{}_IMU{}'.format(self.name, i)
        if s in calibration_data.sections():
            logger.debug('Calibration data for device {} was detected, extracting...'.format(s))
            success = success or self.imus[i]._load_calibration(calibration_data[s])

    return success
[ "def", "load_calibration", "(", "self", ",", "calibration_file", "=", "None", ")", ":", "logger", ".", "debug", "(", "'Loading calibration for {}'", ".", "format", "(", "self", ".", "addr", ")", ")", "calibration_data", "=", "ConfigParser", "(", ")", "path", "=", "calibration_file", "or", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'sk8calib.ini'", ")", "logger", ".", "debug", "(", "'Attempting to load calibration from {}'", ".", "format", "(", "path", ")", ")", "calibration_data", ".", "read", "(", "path", ")", "success", "=", "False", "for", "i", "in", "range", "(", "MAX_IMUS", ")", ":", "s", "=", "'{}_IMU{}'", ".", "format", "(", "self", ".", "name", ",", "i", ")", "if", "s", "in", "calibration_data", ".", "sections", "(", ")", ":", "logger", ".", "debug", "(", "'Calibration data for device {} was detected, extracting...'", ".", "format", "(", "s", ")", ")", "success", "=", "success", "or", "self", ".", "imus", "[", "i", "]", ".", "_load_calibration", "(", "calibration_data", "[", "s", "]", ")", "return", "success" ]
Load calibration data for IMU(s) connected to this SK8. This method attempts to load a set of calibration data from a .ini file produced by the sk8_calibration_gui application (TODO link!). By default, it will look for a file name "sk8calib.ini" in the current working directory. This can be overridden using the `calibration_file` parameter. Args: calibration_file (str): Path to a user-specified calibration file (ini format). Returns: True if any calibration data was loaded, False if none was. Note that True will be returned even if for example only 1 IMU had any calibration data available.
[ "Load", "calibration", "data", "for", "IMU", "(", "s", ")", "connected", "to", "this", "SK8", "." ]
python
train
48.275862
hosford42/xcs
xcs/framework.py
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/framework.py#L940-L986
def add(self, rule):
    """Add a new classifier rule to the classifier set. Return a list
    containing zero or more rules that were deleted from the classifier
    by the algorithm in order to make room for the new rule. The rule
    argument should be a ClassifierRule instance. The behavior of this
    method depends on whether the rule already exists in the classifier
    set. When a rule is already present, the rule's numerosity is added
    to that of the version of the rule already present in the population.
    Otherwise, the new rule is captured. Note that this means that for
    rules already present in the classifier set, the metadata of the
    existing rule is not overwritten by that of the one passed in as an
    argument.

    Usage:
        displaced_rules = model.add(rule)

    Arguments:
        rule: A ClassifierRule instance which is to be added to this
            classifier set.
    Return:
        A possibly empty list of ClassifierRule instances which were
        removed altogether from the classifier set (as opposed to simply
        having their numerosities decremented) in order to make room for
        the newly added rule.
    """
    assert isinstance(rule, ClassifierRule)

    condition = rule.condition
    action = rule.action

    # If the rule already exists in the population, then we virtually
    # add the rule by incrementing the existing rule's numerosity. This
    # prevents redundancy in the rule set. Otherwise we capture the
    # new rule.
    if condition not in self._population:
        self._population[condition] = {}

    if action in self._population[condition]:
        existing_rule = self._population[condition][action]
        existing_rule.numerosity += rule.numerosity
    else:
        self._population[condition][action] = rule

    # Any time we add a rule, we need to call this to keep the
    # population size under control.
    return self._algorithm.prune(self)
[ "def", "add", "(", "self", ",", "rule", ")", ":", "assert", "isinstance", "(", "rule", ",", "ClassifierRule", ")", "condition", "=", "rule", ".", "condition", "action", "=", "rule", ".", "action", "# If the rule already exists in the population, then we virtually", "# add the rule by incrementing the existing rule's numerosity. This", "# prevents redundancy in the rule set. Otherwise we capture the", "# new rule.", "if", "condition", "not", "in", "self", ".", "_population", ":", "self", ".", "_population", "[", "condition", "]", "=", "{", "}", "if", "action", "in", "self", ".", "_population", "[", "condition", "]", ":", "existing_rule", "=", "self", ".", "_population", "[", "condition", "]", "[", "action", "]", "existing_rule", ".", "numerosity", "+=", "rule", ".", "numerosity", "else", ":", "self", ".", "_population", "[", "condition", "]", "[", "action", "]", "=", "rule", "# Any time we add a rule, we need to call this to keep the", "# population size under control.", "return", "self", ".", "_algorithm", ".", "prune", "(", "self", ")" ]
Add a new classifier rule to the classifier set. Return a list containing zero or more rules that were deleted from the classifier by the algorithm in order to make room for the new rule. The rule argument should be a ClassifierRule instance. The behavior of this method depends on whether the rule already exists in the classifier set. When a rule is already present, the rule's numerosity is added to that of the version of the rule already present in the population. Otherwise, the new rule is captured. Note that this means that for rules already present in the classifier set, the metadata of the existing rule is not overwritten by that of the one passed in as an argument. Usage: displaced_rules = model.add(rule) Arguments: rule: A ClassifierRule instance which is to be added to this classifier set. Return: A possibly empty list of ClassifierRule instances which were removed altogether from the classifier set (as opposed to simply having their numerosities decremented) in order to make room for the newly added rule.
[ "Add", "a", "new", "classifier", "rule", "to", "the", "classifier", "set", ".", "Return", "a", "list", "containing", "zero", "or", "more", "rules", "that", "were", "deleted", "from", "the", "classifier", "by", "the", "algorithm", "in", "order", "to", "make", "room", "for", "the", "new", "rule", ".", "The", "rule", "argument", "should", "be", "a", "ClassifierRule", "instance", ".", "The", "behavior", "of", "this", "method", "depends", "on", "whether", "the", "rule", "already", "exists", "in", "the", "classifier", "set", ".", "When", "a", "rule", "is", "already", "present", "the", "rule", "s", "numerosity", "is", "added", "to", "that", "of", "the", "version", "of", "the", "rule", "already", "present", "in", "the", "population", ".", "Otherwise", "the", "new", "rule", "is", "captured", ".", "Note", "that", "this", "means", "that", "for", "rules", "already", "present", "in", "the", "classifier", "set", "the", "metadata", "of", "the", "existing", "rule", "is", "not", "overwritten", "by", "that", "of", "the", "one", "passed", "in", "as", "an", "argument", "." ]
python
train
43.851064
zhexiao/ezhost
ezhost/ServerCommon.py
https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L365-L374
def reset_server_env(self, server_name, configure):
    """
    reset server env to server-name
    :param server_name:
    :param configure:
    :return:
    """
    env.host_string = configure[server_name]['host']
    env.user = configure[server_name]['user']
    env.password = configure[server_name]['passwd']
[ "def", "reset_server_env", "(", "self", ",", "server_name", ",", "configure", ")", ":", "env", ".", "host_string", "=", "configure", "[", "server_name", "]", "[", "'host'", "]", "env", ".", "user", "=", "configure", "[", "server_name", "]", "[", "'user'", "]", "env", ".", "password", "=", "configure", "[", "server_name", "]", "[", "'passwd'", "]" ]
reset server env to server-name :param server_name: :param configure: :return:
[ "reset", "server", "env", "to", "server", "-", "name", ":", "param", "server_name", ":", ":", "param", "configure", ":", ":", "return", ":" ]
python
train
34
iotile/coretools
iotileemulate/iotile/emulate/internal/rpc_queue.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/internal/rpc_queue.py#L152-L163
async def stop(self):
    """Stop the rpc queue from inside the event loop."""

    if self._rpc_task is not None:
        self._rpc_task.cancel()

        try:
            await self._rpc_task
        except asyncio.CancelledError:
            pass

        self._rpc_task = None
[ "async", "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_rpc_task", "is", "not", "None", ":", "self", ".", "_rpc_task", ".", "cancel", "(", ")", "try", ":", "await", "self", ".", "_rpc_task", "except", "asyncio", ".", "CancelledError", ":", "pass", "self", ".", "_rpc_task", "=", "None" ]
Stop the rpc queue from inside the event loop.
[ "Stop", "the", "rpc", "queue", "from", "inside", "the", "event", "loop", "." ]
python
train
23.416667
gbiggs/rtsprofile
rtsprofile/message_sending.py
https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/message_sending.py#L282-L296
def parse_xml_node(self, node):
    '''Parse an xml.dom Node object representing a condition into this
    object.

    '''
    self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
    c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
    if c.length != 1:
        raise InvalidParticipantNodeError
    self.target_component = TargetExecutionContext().parse_xml_node(c[0])
    for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,
                                           local_name='Properties'):
        name, value = parse_properties_xml(c)
        self._properties[name] = value
    return self
[ "def", "parse_xml_node", "(", "self", ",", "node", ")", ":", "self", ".", "sequence", "=", "int", "(", "node", ".", "getAttributeNS", "(", "RTS_NS", ",", "'sequence'", ")", ")", "c", "=", "node", ".", "getElementsByTagNameNS", "(", "RTS_NS", ",", "'TargetComponent'", ")", "if", "c", ".", "length", "!=", "1", ":", "raise", "InvalidParticipantNodeError", "self", ".", "target_component", "=", "TargetExecutionContext", "(", ")", ".", "parse_xml_node", "(", "c", "[", "0", "]", ")", "for", "c", "in", "get_direct_child_elements_xml", "(", "node", ",", "prefix", "=", "RTS_EXT_NS", ",", "local_name", "=", "'Properties'", ")", ":", "name", ",", "value", "=", "parse_properties_xml", "(", "c", ")", "self", ".", "_properties", "[", "name", "]", "=", "value", "return", "self" ]
Parse an xml.dom Node object representing a condition into this object.
[ "Parse", "an", "xml", ".", "dom", "Node", "object", "representing", "a", "condition", "into", "this", "object", "." ]
python
train
44.333333
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L357-L364
def Power(base: vertex_constructor_param_types, exponent: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Raises a vertex to the power of another

    :param base: the base vertex
    :param exponent: the exponent vertex
    """
    return Double(context.jvm_view().PowerVertex, label, cast_to_double_vertex(base), cast_to_double_vertex(exponent))
[ "def", "Power", "(", "base", ":", "vertex_constructor_param_types", ",", "exponent", ":", "vertex_constructor_param_types", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Double", "(", "context", ".", "jvm_view", "(", ")", ".", "PowerVertex", ",", "label", ",", "cast_to_double_vertex", "(", "base", ")", ",", "cast_to_double_vertex", "(", "exponent", ")", ")" ]
Raises a vertex to the power of another :param base: the base vertex :param exponent: the exponent vertex
[ "Raises", "a", "vertex", "to", "the", "power", "of", "another", ":", "param", "base", ":", "the", "base", "vertex", ":", "param", "exponent", ":", "the", "exponent", "vertex" ]
python
train
47.25
fabioz/PyDev.Debugger
_pydev_bundle/pydev_versioncheck.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_bundle/pydev_versioncheck.py#L3-L15
def versionok_for_gui():
    ''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration '''
    # We require Python 2.6+ ...
    if sys.hexversion < 0x02060000:
        return False
    # Or Python 3.2+
    if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
        return False
    # Not supported under Jython nor IronPython
    if sys.platform.startswith("java") or sys.platform.startswith('cli'):
        return False

    return True
[ "def", "versionok_for_gui", "(", ")", ":", "# We require Python 2.6+ ...", "if", "sys", ".", "hexversion", "<", "0x02060000", ":", "return", "False", "# Or Python 3.2+", "if", "sys", ".", "hexversion", ">=", "0x03000000", "and", "sys", ".", "hexversion", "<", "0x03020000", ":", "return", "False", "# Not supported under Jython nor IronPython", "if", "sys", ".", "platform", ".", "startswith", "(", "\"java\"", ")", "or", "sys", ".", "platform", ".", "startswith", "(", "'cli'", ")", ":", "return", "False", "return", "True" ]
Return True if running Python is suitable for GUI Event Integration and deeper IPython integration
[ "Return", "True", "if", "running", "Python", "is", "suitable", "for", "GUI", "Event", "Integration", "and", "deeper", "IPython", "integration" ]
python
train
37.230769
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Fortran.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Scanner/Fortran.py#L126-L310
def FortranScan(path_variable="FORTRANPATH"): """Return a prototype Scanner instance for scanning source files for Fortran USE & INCLUDE statements""" # The USE statement regex matches the following: # # USE module_name # USE :: module_name # USE, INTRINSIC :: module_name # USE, NON_INTRINSIC :: module_name # # Limitations # # -- While the regex can handle multiple USE statements on one line, # it cannot properly handle them if they are commented out. # In either of the following cases: # # ! USE mod_a ; USE mod_b [entire line is commented out] # USE mod_a ! ; USE mod_b [in-line comment of second USE statement] # # the second module name (mod_b) will be picked up as a dependency # even though it should be ignored. The only way I can see # to rectify this would be to modify the scanner to eliminate # the call to re.findall, read in the contents of the file, # treating the comment character as an end-of-line character # in addition to the normal linefeed, loop over each line, # weeding out the comments, and looking for the USE statements. # One advantage to this is that the regex passed to the scanner # would no longer need to match a semicolon. # # -- I question whether or not we need to detect dependencies to # INTRINSIC modules because these are built-in to the compiler. # If we consider them a dependency, will SCons look for them, not # find them, and kill the build? Or will we there be standard # compiler-specific directories we will need to point to so the # compiler and SCons can locate the proper object and mod files? # Here is a breakdown of the regex: # # (?i) : regex is case insensitive # ^ : start of line # (?: : group a collection of regex symbols without saving the match as a "group" # ^|; : matches either the start of the line or a semicolon - semicolon # ) : end the unsaved grouping # \s* : any amount of white space # USE : match the string USE, case insensitive # (?: : group a collection of regex symbols without saving the match as a "group" # \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols) # (?: : group a collection of regex symbols without saving the match as a "group" # (?: : establish another unsaved grouping of regex symbols # \s* : any amount of white space # , : match a comma # \s* : any amount of white space # (?:NON_)? : optionally match the prefix NON_, case insensitive # INTRINSIC : match the string INTRINSIC, case insensitive # )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression # \s* : any amount of white space # :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute # ) : end the unsaved grouping # ) : end the unsaved grouping # \s* : match any amount of white space # (\w+) : match the module name that is being USE'd # # use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)" # The INCLUDE statement regex matches the following: # # INCLUDE 'some_Text' # INCLUDE "some_Text" # INCLUDE "some_Text" ; INCLUDE "some_Text" # INCLUDE kind_"some_Text" # INCLUDE kind_'some_Text" # # where some_Text can include any alphanumeric and/or special character # as defined by the Fortran 2003 standard. # # Limitations: # # -- The Fortran standard dictates that a " or ' in the INCLUDE'd # string must be represented as a "" or '', if the quotes that wrap # the entire string are either a ' or ", respectively. 
While the # regular expression below can detect the ' or " characters just fine, # the scanning logic, presently is unable to detect them and reduce # them to a single instance. This probably isn't an issue since, # in practice, ' or " are not generally used in filenames. # # -- This regex will not properly deal with multiple INCLUDE statements # when the entire line has been commented out, ala # # ! INCLUDE 'some_file' ; INCLUDE 'some_file' # # In such cases, it will properly ignore the first INCLUDE file, # but will actually still pick up the second. Interestingly enough, # the regex will properly deal with these cases: # # INCLUDE 'some_file' # INCLUDE 'some_file' !; INCLUDE 'some_file' # # To get around the above limitation, the FORTRAN programmer could # simply comment each INCLUDE statement separately, like this # # ! INCLUDE 'some_file' !; INCLUDE 'some_file' # # The way I see it, the only way to get around this limitation would # be to modify the scanning logic to replace the calls to re.findall # with a custom loop that processes each line separately, throwing # away fully commented out lines before attempting to match against # the INCLUDE syntax. # # Here is a breakdown of the regex: # # (?i) : regex is case insensitive # (?: : begin a non-saving group that matches the following: # ^ : either the start of the line # | : or # ['">]\s*; : a semicolon that follows a single quote, # double quote or greater than symbol (with any # amount of whitespace in between). This will # allow the regex to match multiple INCLUDE # statements per line (although it also requires # the positive lookahead assertion that is # used below). It will even properly deal with # (i.e. ignore) cases in which the additional # INCLUDES are part of an in-line comment, ala # " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' " # ) : end of non-saving group # \s* : any amount of white space # INCLUDE : match the string INCLUDE, case insensitive # \s+ : match one or more white space characters # (?\w+_)? : match the optional "kind-param _" prefix allowed by the standard # [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol # (.+?) : match one or more characters that make up # the included path and file name and save it # in a group. The Fortran standard allows for # any non-control character to be used. The dot # operator will pick up any character, including # control codes, but I can't conceive of anyone # putting control codes in their file names. # The question mark indicates it is non-greedy so # that regex will match only up to the next quote, # double quote, or greater than symbol # (?=["'>]) : positive lookahead assertion to match the include # delimiter - an apostrophe, double quote, or # greater than symbol. 
This level of complexity # is required so that the include delimiter is # not consumed by the match, thus allowing the # sub-regex discussed above to uniquely match a # set of semicolon-separated INCLUDE statements # (as allowed by the F2003 standard) include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" # The MODULE statement regex finds module definitions by matching # the following: # # MODULE module_name # # but *not* the following: # # MODULE PROCEDURE procedure_name # # Here is a breakdown of the regex: # # (?i) : regex is case insensitive # ^\s* : any amount of white space # MODULE : match the string MODULE, case insensitive # \s+ : match one or more white space characters # (?!PROCEDURE) : but *don't* match if the next word matches # PROCEDURE (negative lookahead assertion), # case insensitive # (\w+) : match one or more alphanumeric characters # that make up the defined module name and # save it in a group def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)""" scanner = F90Scanner("FortranScan", "$FORTRANSUFFIXES", path_variable, use_regex, include_regex, def_regex) return scanner
[ "def", "FortranScan", "(", "path_variable", "=", "\"FORTRANPATH\"", ")", ":", "# The USE statement regex matches the following:", "#", "# USE module_name", "# USE :: module_name", "# USE, INTRINSIC :: module_name", "# USE, NON_INTRINSIC :: module_name", "#", "# Limitations", "#", "# -- While the regex can handle multiple USE statements on one line,", "# it cannot properly handle them if they are commented out.", "# In either of the following cases:", "#", "# ! USE mod_a ; USE mod_b [entire line is commented out]", "# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]", "#", "# the second module name (mod_b) will be picked up as a dependency", "# even though it should be ignored. The only way I can see", "# to rectify this would be to modify the scanner to eliminate", "# the call to re.findall, read in the contents of the file,", "# treating the comment character as an end-of-line character", "# in addition to the normal linefeed, loop over each line,", "# weeding out the comments, and looking for the USE statements.", "# One advantage to this is that the regex passed to the scanner", "# would no longer need to match a semicolon.", "#", "# -- I question whether or not we need to detect dependencies to", "# INTRINSIC modules because these are built-in to the compiler.", "# If we consider them a dependency, will SCons look for them, not", "# find them, and kill the build? Or will we there be standard", "# compiler-specific directories we will need to point to so the", "# compiler and SCons can locate the proper object and mod files?", "# Here is a breakdown of the regex:", "#", "# (?i) : regex is case insensitive", "# ^ : start of line", "# (?: : group a collection of regex symbols without saving the match as a \"group\"", "# ^|; : matches either the start of the line or a semicolon - semicolon", "# ) : end the unsaved grouping", "# \\s* : any amount of white space", "# USE : match the string USE, case insensitive", "# (?: : group a collection of regex symbols without saving the match as a \"group\"", "# \\s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)", "# (?: : group a collection of regex symbols without saving the match as a \"group\"", "# (?: : establish another unsaved grouping of regex symbols", "# \\s* : any amount of white space", "# , : match a comma", "# \\s* : any amount of white space", "# (?:NON_)? : optionally match the prefix NON_, case insensitive", "# INTRINSIC : match the string INTRINSIC, case insensitive", "# )? 
: optionally match the \", INTRINSIC/NON_INTRINSIC\" grouped expression", "# \\s* : any amount of white space", "# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute", "# ) : end the unsaved grouping", "# ) : end the unsaved grouping", "# \\s* : match any amount of white space", "# (\\w+) : match the module name that is being USE'd", "#", "#", "use_regex", "=", "\"(?i)(?:^|;)\\s*USE(?:\\s+|(?:(?:\\s*,\\s*(?:NON_)?INTRINSIC)?\\s*::))\\s*(\\w+)\"", "# The INCLUDE statement regex matches the following:", "#", "# INCLUDE 'some_Text'", "# INCLUDE \"some_Text\"", "# INCLUDE \"some_Text\" ; INCLUDE \"some_Text\"", "# INCLUDE kind_\"some_Text\"", "# INCLUDE kind_'some_Text\"", "#", "# where some_Text can include any alphanumeric and/or special character", "# as defined by the Fortran 2003 standard.", "#", "# Limitations:", "#", "# -- The Fortran standard dictates that a \" or ' in the INCLUDE'd", "# string must be represented as a \"\" or '', if the quotes that wrap", "# the entire string are either a ' or \", respectively. While the", "# regular expression below can detect the ' or \" characters just fine,", "# the scanning logic, presently is unable to detect them and reduce", "# them to a single instance. This probably isn't an issue since,", "# in practice, ' or \" are not generally used in filenames.", "#", "# -- This regex will not properly deal with multiple INCLUDE statements", "# when the entire line has been commented out, ala", "#", "# ! INCLUDE 'some_file' ; INCLUDE 'some_file'", "#", "# In such cases, it will properly ignore the first INCLUDE file,", "# but will actually still pick up the second. Interestingly enough,", "# the regex will properly deal with these cases:", "#", "# INCLUDE 'some_file'", "# INCLUDE 'some_file' !; INCLUDE 'some_file'", "#", "# To get around the above limitation, the FORTRAN programmer could", "# simply comment each INCLUDE statement separately, like this", "#", "# ! INCLUDE 'some_file' !; INCLUDE 'some_file'", "#", "# The way I see it, the only way to get around this limitation would", "# be to modify the scanning logic to replace the calls to re.findall", "# with a custom loop that processes each line separately, throwing", "# away fully commented out lines before attempting to match against", "# the INCLUDE syntax.", "#", "# Here is a breakdown of the regex:", "#", "# (?i) : regex is case insensitive", "# (?: : begin a non-saving group that matches the following:", "# ^ : either the start of the line", "# | : or", "# ['\">]\\s*; : a semicolon that follows a single quote,", "# double quote or greater than symbol (with any", "# amount of whitespace in between). This will", "# allow the regex to match multiple INCLUDE", "# statements per line (although it also requires", "# the positive lookahead assertion that is", "# used below). It will even properly deal with", "# (i.e. ignore) cases in which the additional", "# INCLUDES are part of an in-line comment, ala", "# \" INCLUDE 'someFile' ! ; INCLUDE 'someFile2' \"", "# ) : end of non-saving group", "# \\s* : any amount of white space", "# INCLUDE : match the string INCLUDE, case insensitive", "# \\s+ : match one or more white space characters", "# (?\\w+_)? : match the optional \"kind-param _\" prefix allowed by the standard", "# [<\"'] : match the include delimiter - an apostrophe, double quote, or less than symbol", "# (.+?) : match one or more characters that make up", "# the included path and file name and save it", "# in a group. 
The Fortran standard allows for", "# any non-control character to be used. The dot", "# operator will pick up any character, including", "# control codes, but I can't conceive of anyone", "# putting control codes in their file names.", "# The question mark indicates it is non-greedy so", "# that regex will match only up to the next quote,", "# double quote, or greater than symbol", "# (?=[\"'>]) : positive lookahead assertion to match the include", "# delimiter - an apostrophe, double quote, or", "# greater than symbol. This level of complexity", "# is required so that the include delimiter is", "# not consumed by the match, thus allowing the", "# sub-regex discussed above to uniquely match a", "# set of semicolon-separated INCLUDE statements", "# (as allowed by the F2003 standard)", "include_regex", "=", "\"\"\"(?i)(?:^|['\">]\\s*;)\\s*INCLUDE\\s+(?:\\w+_)?[<\"'](.+?)(?=[\"'>])\"\"\"", "# The MODULE statement regex finds module definitions by matching", "# the following:", "#", "# MODULE module_name", "#", "# but *not* the following:", "#", "# MODULE PROCEDURE procedure_name", "#", "# Here is a breakdown of the regex:", "#", "# (?i) : regex is case insensitive", "# ^\\s* : any amount of white space", "# MODULE : match the string MODULE, case insensitive", "# \\s+ : match one or more white space characters", "# (?!PROCEDURE) : but *don't* match if the next word matches", "# PROCEDURE (negative lookahead assertion),", "# case insensitive", "# (\\w+) : match one or more alphanumeric characters", "# that make up the defined module name and", "# save it in a group", "def_regex", "=", "\"\"\"(?i)^\\s*MODULE\\s+(?!PROCEDURE)(\\w+)\"\"\"", "scanner", "=", "F90Scanner", "(", "\"FortranScan\"", ",", "\"$FORTRANSUFFIXES\"", ",", "path_variable", ",", "use_regex", ",", "include_regex", ",", "def_regex", ")", "return", "scanner" ]
Return a prototype Scanner instance for scanning source files for Fortran USE & INCLUDE statements
[ "Return", "a", "prototype", "Scanner", "instance", "for", "scanning", "source", "files", "for", "Fortran", "USE", "&", "INCLUDE", "statements" ]
python
train
48.897297
saltstack/salt
salt/modules/rh_ip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_ip.py#L320-L356
def _parse_settings_bond_1(opts, iface, bond_def):
    '''
    Filters given options and outputs valid settings for bond1.
    If an option has a value that is not expected, this
    function will log what the Interface, Setting and what it was
    expecting.
    '''
    bond = {'mode': '1'}

    for binding in ['miimon', 'downdelay', 'updelay']:
        if binding in opts:
            try:
                int(opts[binding])
                bond.update({binding: opts[binding]})
            except Exception:
                _raise_error_iface(iface, binding, ['integer'])
        else:
            _log_default_iface(iface, binding, bond_def[binding])
            bond.update({binding: bond_def[binding]})

    if 'use_carrier' in opts:
        if opts['use_carrier'] in _CONFIG_TRUE:
            bond.update({'use_carrier': '1'})
        elif opts['use_carrier'] in _CONFIG_FALSE:
            bond.update({'use_carrier': '0'})
        else:
            valid = _CONFIG_TRUE + _CONFIG_FALSE
            _raise_error_iface(iface, 'use_carrier', valid)
    else:
        _log_default_iface(iface, 'use_carrier', bond_def['use_carrier'])
        bond.update({'use_carrier': bond_def['use_carrier']})

    if 'primary' in opts:
        bond.update({'primary': opts['primary']})

    return bond
[ "def", "_parse_settings_bond_1", "(", "opts", ",", "iface", ",", "bond_def", ")", ":", "bond", "=", "{", "'mode'", ":", "'1'", "}", "for", "binding", "in", "[", "'miimon'", ",", "'downdelay'", ",", "'updelay'", "]", ":", "if", "binding", "in", "opts", ":", "try", ":", "int", "(", "opts", "[", "binding", "]", ")", "bond", ".", "update", "(", "{", "binding", ":", "opts", "[", "binding", "]", "}", ")", "except", "Exception", ":", "_raise_error_iface", "(", "iface", ",", "binding", ",", "[", "'integer'", "]", ")", "else", ":", "_log_default_iface", "(", "iface", ",", "binding", ",", "bond_def", "[", "binding", "]", ")", "bond", ".", "update", "(", "{", "binding", ":", "bond_def", "[", "binding", "]", "}", ")", "if", "'use_carrier'", "in", "opts", ":", "if", "opts", "[", "'use_carrier'", "]", "in", "_CONFIG_TRUE", ":", "bond", ".", "update", "(", "{", "'use_carrier'", ":", "'1'", "}", ")", "elif", "opts", "[", "'use_carrier'", "]", "in", "_CONFIG_FALSE", ":", "bond", ".", "update", "(", "{", "'use_carrier'", ":", "'0'", "}", ")", "else", ":", "valid", "=", "_CONFIG_TRUE", "+", "_CONFIG_FALSE", "_raise_error_iface", "(", "iface", ",", "'use_carrier'", ",", "valid", ")", "else", ":", "_log_default_iface", "(", "iface", ",", "'use_carrier'", ",", "bond_def", "[", "'use_carrier'", "]", ")", "bond", ".", "update", "(", "{", "'use_carrier'", ":", "bond_def", "[", "'use_carrier'", "]", "}", ")", "if", "'primary'", "in", "opts", ":", "bond", ".", "update", "(", "{", "'primary'", ":", "opts", "[", "'primary'", "]", "}", ")", "return", "bond" ]
Filters given options and outputs valid settings for bond1. If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting.
[ "Filters", "given", "options", "and", "outputs", "valid", "settings", "for", "bond1", ".", "If", "an", "option", "has", "a", "value", "that", "is", "not", "expected", "this", "function", "will", "log", "what", "the", "Interface", "Setting", "and", "what", "it", "was", "expecting", "." ]
python
train
34.054054
ic-labs/django-icekit
icekit/utils/search/facets.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/utils/search/facets.py#L118-L120
def get_applicable_values(self):
    """Return selected values that will affect the search result"""
    return [v for v in self._values if v.is_active and not v.is_all_results]
[ "def", "get_applicable_values", "(", "self", ")", ":", "return", "[", "v", "for", "v", "in", "self", ".", "_values", "if", "v", ".", "is_active", "and", "not", "v", ".", "is_all_results", "]" ]
Return selected values that will affect the search result
[ "Return", "selected", "values", "that", "will", "affect", "the", "search", "result" ]
python
train
61
BlueBrain/NeuroM
examples/synthesis_json.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/examples/synthesis_json.py#L92-L100
def transform_header(mtype_name):
    '''Add header to json output to wrap around distribution data.
    '''
    head_dict = OrderedDict()
    head_dict["m-type"] = mtype_name
    head_dict["components"] = defaultdict(OrderedDict)

    return head_dict
[ "def", "transform_header", "(", "mtype_name", ")", ":", "head_dict", "=", "OrderedDict", "(", ")", "head_dict", "[", "\"m-type\"", "]", "=", "mtype_name", "head_dict", "[", "\"components\"", "]", "=", "defaultdict", "(", "OrderedDict", ")", "return", "head_dict" ]
Add header to json output to wrap around distribution data.
[ "Add", "header", "to", "json", "output", "to", "wrap", "around", "distribution", "data", "." ]
python
train
27.222222
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/buildconfigurationsets_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/buildconfigurationsets_api.py#L511-L535
def delete_specific(self, id, **kwargs):
    """
    Removes a specific Build Configuration Set
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_specific(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: Build Configuration Set id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.delete_specific_with_http_info(id, **kwargs)
    else:
        (data) = self.delete_specific_with_http_info(id, **kwargs)
        return data
[ "def", "delete_specific", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "delete_specific_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_specific_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Removes a specific Build Configuration Set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build Configuration Set id (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Removes", "a", "specific", "Build", "Configuration", "Set", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "delete_specific", "(", "id", "callback", "=", "callback_function", ")" ]
python
train
40.52
numenta/htmresearch
htmresearch/data/sm_sequences.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/data/sm_sequences.py#L349-L362
def decodeMotorInput(self, motorInputPattern):
    """
    Decode motor command from bit vector.

    @param motorInputPattern (1D numpy.array)
           Encoded motor command.

    @return (1D numpy.array)
            Decoded motor command.
    """
    key = self.motorEncoder.decode(motorInputPattern)[0].keys()[0]
    motorCommand = self.motorEncoder.decode(motorInputPattern)[0][key][1][0]

    return motorCommand
[ "def", "decodeMotorInput", "(", "self", ",", "motorInputPattern", ")", ":", "key", "=", "self", ".", "motorEncoder", ".", "decode", "(", "motorInputPattern", ")", "[", "0", "]", ".", "keys", "(", ")", "[", "0", "]", "motorCommand", "=", "self", ".", "motorEncoder", ".", "decode", "(", "motorInputPattern", ")", "[", "0", "]", "[", "key", "]", "[", "1", "]", "[", "0", "]", "return", "motorCommand" ]
Decode motor command from bit vector. @param motorInputPattern (1D numpy.array) Encoded motor command. @return (1D numpy.array) Decoded motor command.
[ "Decode", "motor", "command", "from", "bit", "vector", "." ]
python
train
29.714286
EventRegistry/event-registry-python
eventregistry/ReturnInfo.py
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L15-L20
def _setFlag(self, name, val, defVal):
    """set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
    if not hasattr(self, "flags"):
        self.flags = {}
    if val != defVal:
        self.flags[name] = val
[ "def", "_setFlag", "(", "self", ",", "name", ",", "val", ",", "defVal", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"flags\"", ")", ":", "self", ".", "flags", "=", "{", "}", "if", "val", "!=", "defVal", ":", "self", ".", "flags", "[", "name", "]", "=", "val" ]
set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal
[ "set", "the", "objects", "property", "propName", "if", "the", "dictKey", "key", "exists", "in", "dict", "and", "it", "is", "not", "the", "same", "as", "default", "value", "defVal" ]
python
train
48.333333
Jajcus/pyxmpp2
pyxmpp2/ext/muc/muccore.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/ext/muc/muccore.py#L512-L526
def clear(self):
    """
    Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.
    """
    if not self.xmlnode.children:
        return
    n=self.xmlnode.children
    while n:
        ns=n.ns()
        if ns and ns.getContent()!=MUC_USER_NS:
            pass
        else:
            n.unlinkNode()
            n.freeNode()
        n=n.next
[ "def", "clear", "(", "self", ")", ":", "if", "not", "self", ".", "xmlnode", ".", "children", ":", "return", "n", "=", "self", ".", "xmlnode", ".", "children", "while", "n", ":", "ns", "=", "n", ".", "ns", "(", ")", "if", "ns", "and", "ns", ".", "getContent", "(", ")", "!=", "MUC_USER_NS", ":", "pass", "else", ":", "n", ".", "unlinkNode", "(", ")", "n", ".", "freeNode", "(", ")", "n", "=", "n", ".", "next" ]
Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.
[ "Clear", "the", "content", "of", "self", ".", "xmlnode", "removing", "all", "<item", "/", ">", "<status", "/", ">", "etc", "." ]
python
valid
27.2
PyCQA/astroid
astroid/bases.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/bases.py#L359-L382
def infer_call_result(self, caller, context):
    """
    The boundnode of the regular context with a function called
    on ``object.__new__`` will be of type ``object``,
    which is incorrect for the argument in general.
    If no context is given the ``object.__new__`` call argument will
    correctly inferred except when inside a call that requires
    the additional context (such as a classmethod) of the boundnode
    to determine which class the method was called from
    """

    # If we're unbound method __new__ of builtin object, the result is an
    # instance of the class given as first argument.
    if (
        self._proxied.name == "__new__"
        and self._proxied.parent.frame().qname() == "%s.object" % BUILTINS
    ):
        if caller.args:
            node_context = context.extra_context.get(caller.args[0])
            infer = caller.args[0].infer(context=node_context)
        else:
            infer = []
        return (Instance(x) if x is not util.Uninferable else x for x in infer)
    return self._proxied.infer_call_result(caller, context)
[ "def", "infer_call_result", "(", "self", ",", "caller", ",", "context", ")", ":", "# If we're unbound method __new__ of builtin object, the result is an", "# instance of the class given as first argument.", "if", "(", "self", ".", "_proxied", ".", "name", "==", "\"__new__\"", "and", "self", ".", "_proxied", ".", "parent", ".", "frame", "(", ")", ".", "qname", "(", ")", "==", "\"%s.object\"", "%", "BUILTINS", ")", ":", "if", "caller", ".", "args", ":", "node_context", "=", "context", ".", "extra_context", ".", "get", "(", "caller", ".", "args", "[", "0", "]", ")", "infer", "=", "caller", ".", "args", "[", "0", "]", ".", "infer", "(", "context", "=", "node_context", ")", "else", ":", "infer", "=", "[", "]", "return", "(", "Instance", "(", "x", ")", "if", "x", "is", "not", "util", ".", "Uninferable", "else", "x", "for", "x", "in", "infer", ")", "return", "self", ".", "_proxied", ".", "infer_call_result", "(", "caller", ",", "context", ")" ]
The boundnode of the regular context with a function called on ``object.__new__`` will be of type ``object``, which is incorrect for the argument in general. If no context is given the ``object.__new__`` call argument will correctly inferred except when inside a call that requires the additional context (such as a classmethod) of the boundnode to determine which class the method was called from
[ "The", "boundnode", "of", "the", "regular", "context", "with", "a", "function", "called", "on", "object", ".", "__new__", "will", "be", "of", "type", "object", "which", "is", "incorrect", "for", "the", "argument", "in", "general", ".", "If", "no", "context", "is", "given", "the", "object", ".", "__new__", "call", "argument", "will", "correctly", "inferred", "except", "when", "inside", "a", "call", "that", "requires", "the", "additional", "context", "(", "such", "as", "a", "classmethod", ")", "of", "the", "boundnode", "to", "determine", "which", "class", "the", "method", "was", "called", "from" ]
python
train
47.666667
explosion/thinc
thinc/rates.py
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/rates.py#L26-L40
def compounding(start, stop, compound, t=0.0):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate.

    EXAMPLE:
      >>> sizes = compounding(1., 10., 1.5)
      >>> assert next(sizes) == 1.
      >>> assert next(sizes) == 1 * 1.5
      >>> assert next(sizes) == 1.5 * 1.5
    """
    curr = float(start)
    while True:
        yield _clip(curr, start, stop)
        curr *= compound
[ "def", "compounding", "(", "start", ",", "stop", ",", "compound", ",", "t", "=", "0.0", ")", ":", "curr", "=", "float", "(", "start", ")", "while", "True", ":", "yield", "_clip", "(", "curr", ",", "start", ",", "stop", ")", "curr", "*=", "compound" ]
Yield an infinite series of compounding values. Each time the generator is called, a value is produced by multiplying the previous value by the compound rate. EXAMPLE: >>> sizes = compounding(1., 10., 1.5) >>> assert next(sizes) == 1. >>> assert next(sizes) == 1 * 1.5 >>> assert next(sizes) == 1.5 * 1.5
[ "Yield", "an", "infinite", "series", "of", "compounding", "values", ".", "Each", "time", "the", "generator", "is", "called", "a", "value", "is", "produced", "by", "multiplying", "the", "previous", "value", "by", "the", "compound", "rate", "." ]
python
train
32.866667
inasafe/inasafe
safe/gui/widgets/dock.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L811-L864
def show_print_dialog(self):
    """Open the print dialog"""
    if not self.impact_function:
        # Now try to read the keywords and show them in the dock
        try:
            active_layer = self.iface.activeLayer()
            keywords = self.keyword_io.read_keywords(active_layer)
            provenances = keywords.get('provenance_data', {})
            extra_keywords = keywords.get('extra_keywords', {})
            is_multi_exposure = (
                extra_keywords.get(extra_keyword_analysis_type['key']) == (
                    MULTI_EXPOSURE_ANALYSIS_FLAG))
            if provenances and is_multi_exposure:
                self.impact_function = (
                    MultiExposureImpactFunction.load_from_output_metadata(
                        keywords))
            else:
                self.impact_function = (
                    ImpactFunction.load_from_output_metadata(keywords))

        except (KeywordNotFoundError,
                HashNotFoundError,
                InvalidParameterError,
                NoKeywordsFoundError,
                MetadataReadError,
                # AttributeError This is hiding some real error. ET
                ) as e:
            # Added this check in 3.2 for #1861
            active_layer = self.iface.activeLayer()
            LOGGER.debug(e)
            if active_layer is None:
                if self.conflicting_plugin_detected:
                    send_static_message(self, conflicting_plugin_message())
                else:
                    send_static_message(self, getting_started_message())
            else:
                show_no_keywords_message(self)
        except Exception as e:  # pylint: disable=broad-except
            error_message = get_error_message(e)
            send_error_message(self, error_message)

    if self.impact_function:
        dialog = PrintReportDialog(
            self.impact_function, self.iface, dock=self, parent=self)
        dialog.show()
    else:
        display_critical_message_bar(
            "InaSAFE",
            self.tr('Please select a valid layer before printing. '
                    'No Impact Function found.'),
            iface_object=self
        )
[ "def", "show_print_dialog", "(", "self", ")", ":", "if", "not", "self", ".", "impact_function", ":", "# Now try to read the keywords and show them in the dock", "try", ":", "active_layer", "=", "self", ".", "iface", ".", "activeLayer", "(", ")", "keywords", "=", "self", ".", "keyword_io", ".", "read_keywords", "(", "active_layer", ")", "provenances", "=", "keywords", ".", "get", "(", "'provenance_data'", ",", "{", "}", ")", "extra_keywords", "=", "keywords", ".", "get", "(", "'extra_keywords'", ",", "{", "}", ")", "is_multi_exposure", "=", "(", "extra_keywords", ".", "get", "(", "extra_keyword_analysis_type", "[", "'key'", "]", ")", "==", "(", "MULTI_EXPOSURE_ANALYSIS_FLAG", ")", ")", "if", "provenances", "and", "is_multi_exposure", ":", "self", ".", "impact_function", "=", "(", "MultiExposureImpactFunction", ".", "load_from_output_metadata", "(", "keywords", ")", ")", "else", ":", "self", ".", "impact_function", "=", "(", "ImpactFunction", ".", "load_from_output_metadata", "(", "keywords", ")", ")", "except", "(", "KeywordNotFoundError", ",", "HashNotFoundError", ",", "InvalidParameterError", ",", "NoKeywordsFoundError", ",", "MetadataReadError", ",", "# AttributeError This is hiding some real error. ET", ")", "as", "e", ":", "# Added this check in 3.2 for #1861", "active_layer", "=", "self", ".", "iface", ".", "activeLayer", "(", ")", "LOGGER", ".", "debug", "(", "e", ")", "if", "active_layer", "is", "None", ":", "if", "self", ".", "conflicting_plugin_detected", ":", "send_static_message", "(", "self", ",", "conflicting_plugin_message", "(", ")", ")", "else", ":", "send_static_message", "(", "self", ",", "getting_started_message", "(", ")", ")", "else", ":", "show_no_keywords_message", "(", "self", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "error_message", "=", "get_error_message", "(", "e", ")", "send_error_message", "(", "self", ",", "error_message", ")", "if", "self", ".", "impact_function", ":", "dialog", "=", "PrintReportDialog", "(", "self", ".", "impact_function", ",", "self", ".", "iface", ",", "dock", "=", "self", ",", "parent", "=", "self", ")", "dialog", ".", "show", "(", ")", "else", ":", "display_critical_message_bar", "(", "\"InaSAFE\"", ",", "self", ".", "tr", "(", "'Please select a valid layer before printing. '", "'No Impact Function found.'", ")", ",", "iface_object", "=", "self", ")" ]
Open the print dialog
[ "Open", "the", "print", "dialog" ]
python
train
43.296296
coops/r53
src/r53/r53.py
https://github.com/coops/r53/blob/3c4e7242ad65b0e1ad4ba6b4ac893c7d501ceb0a/src/r53/r53.py#L26-L37
def lookup_zone(conn, zone):
    """Look up a zone ID for a zone string.

    Args:
      conn: boto.route53.Route53Connection
      zone: string eg. foursquare.com

    Returns:
      zone ID eg. ZE2DYFZDWGSL4.

    Raises:
      ZoneNotFoundError if zone not found."""
    all_zones = conn.get_all_hosted_zones()
    for resp in all_zones['ListHostedZonesResponse']['HostedZones']:
        if resp['Name'].rstrip('.') == zone.rstrip('.'):
            return resp['Id'].replace('/hostedzone/', '')
    raise ZoneNotFoundError('zone %s not found in response' % zone)
[ "def", "lookup_zone", "(", "conn", ",", "zone", ")", ":", "all_zones", "=", "conn", ".", "get_all_hosted_zones", "(", ")", "for", "resp", "in", "all_zones", "[", "'ListHostedZonesResponse'", "]", "[", "'HostedZones'", "]", ":", "if", "resp", "[", "'Name'", "]", ".", "rstrip", "(", "'.'", ")", "==", "zone", ".", "rstrip", "(", "'.'", ")", ":", "return", "resp", "[", "'Id'", "]", ".", "replace", "(", "'/hostedzone/'", ",", "''", ")", "raise", "ZoneNotFoundError", "(", "'zone %s not found in response'", "%", "zone", ")" ]
Look up a zone ID for a zone string. Args: conn: boto.route53.Route53Connection zone: string eg. foursquare.com Returns: zone ID eg. ZE2DYFZDWGSL4. Raises: ZoneNotFoundError if zone not found.
[ "Look", "up", "a", "zone", "ID", "for", "a", "zone", "string", "." ]
python
test
42.75
summa-tx/riemann
riemann/simple.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/simple.py#L173-L192
def p2sh_input_and_witness(outpoint, stack_script, redeem_script, sequence=None):
    '''
    OutPoint, str, str, int -> (TxIn, InputWitness)
    Create a signed legacy TxIn from a p2pkh prevout
    Create an empty InputWitness for it
    Useful for transactions spending some witness and some legacy prevouts
    '''
    if sequence is None:
        sequence = guess_sequence(redeem_script)

    stack_script = script_ser.serialize(stack_script)
    redeem_script = script_ser.hex_serialize(redeem_script)
    redeem_script = script_ser.serialize(redeem_script)

    return tb.make_legacy_input_and_empty_witness(
        outpoint=outpoint,
        stack_script=stack_script,
        redeem_script=redeem_script,
        sequence=sequence)
[ "def", "p2sh_input_and_witness", "(", "outpoint", ",", "stack_script", ",", "redeem_script", ",", "sequence", "=", "None", ")", ":", "if", "sequence", "is", "None", ":", "sequence", "=", "guess_sequence", "(", "redeem_script", ")", "stack_script", "=", "script_ser", ".", "serialize", "(", "stack_script", ")", "redeem_script", "=", "script_ser", ".", "hex_serialize", "(", "redeem_script", ")", "redeem_script", "=", "script_ser", ".", "serialize", "(", "redeem_script", ")", "return", "tb", ".", "make_legacy_input_and_empty_witness", "(", "outpoint", "=", "outpoint", ",", "stack_script", "=", "stack_script", ",", "redeem_script", "=", "redeem_script", ",", "sequence", "=", "sequence", ")" ]
OutPoint, str, str, int -> (TxIn, InputWitness) Create a signed legacy TxIn from a p2pkh prevout Create an empty InputWitness for it Useful for transactions spending some witness and some legacy prevouts
[ "OutPoint", "str", "str", "int", "-", ">", "(", "TxIn", "InputWitness", ")", "Create", "a", "signed", "legacy", "TxIn", "from", "a", "p2pkh", "prevout", "Create", "an", "empty", "InputWitness", "for", "it", "Useful", "for", "transactions", "spending", "some", "witness", "and", "some", "legacy", "prevouts" ]
python
train
37.4
bitcraze/crazyflie-lib-python
cflib/crtp/radiodriver.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crtp/radiodriver.py#L434-L451
def _send_packet_safe(self, cr, packet): """ Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet) """ # packet = bytearray(packet) packet[0] &= 0xF3 packet[0] |= self._curr_up << 3 | self._curr_down << 2 resp = cr.send_packet(packet) if resp and resp.ack and len(resp.data) and \ (resp.data[0] & 0x04) == (self._curr_down << 2): self._curr_down = 1 - self._curr_down if resp and resp.ack: self._curr_up = 1 - self._curr_up return resp
[ "def", "_send_packet_safe", "(", "self", ",", "cr", ",", "packet", ")", ":", "# packet = bytearray(packet)", "packet", "[", "0", "]", "&=", "0xF3", "packet", "[", "0", "]", "|=", "self", ".", "_curr_up", "<<", "3", "|", "self", ".", "_curr_down", "<<", "2", "resp", "=", "cr", ".", "send_packet", "(", "packet", ")", "if", "resp", "and", "resp", ".", "ack", "and", "len", "(", "resp", ".", "data", ")", "and", "(", "resp", ".", "data", "[", "0", "]", "&", "0x04", ")", "==", "(", "self", ".", "_curr_down", "<<", "2", ")", ":", "self", ".", "_curr_down", "=", "1", "-", "self", ".", "_curr_down", "if", "resp", "and", "resp", ".", "ack", ":", "self", ".", "_curr_up", "=", "1", "-", "self", ".", "_curr_up", "return", "resp" ]
Adds 1bit counter to CRTP header to guarantee that no ack (downlink) payload are lost and no uplink packet are duplicated. The caller should resend packet if not acked (ie. same as with a direct call to crazyradio.send_packet)
[ "Adds", "1bit", "counter", "to", "CRTP", "header", "to", "guarantee", "that", "no", "ack", "(", "downlink", ")", "payload", "are", "lost", "and", "no", "uplink", "packet", "are", "duplicated", ".", "The", "caller", "should", "resend", "packet", "if", "not", "acked", "(", "ie", ".", "same", "as", "with", "a", "direct", "call", "to", "crazyradio", ".", "send_packet", ")" ]
python
train
40.611111
cisco-sas/kitty
kitty/model/high_level/base.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/high_level/base.py#L190-L202
def get_stages(self): ''' :return: dictionary of information regarding the stages in the fuzzing session .. note:: structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}} ''' sequence = self.get_sequence() return { 'current': [e.dst.get_name() for e in sequence], 'stages': {e.src.get_name(): [e.dst.get_name()] for e in sequence} }
[ "def", "get_stages", "(", "self", ")", ":", "sequence", "=", "self", ".", "get_sequence", "(", ")", "return", "{", "'current'", ":", "[", "e", ".", "dst", ".", "get_name", "(", ")", "for", "e", "in", "sequence", "]", ",", "'stages'", ":", "{", "e", ".", "src", ".", "get_name", "(", ")", ":", "[", "e", ".", "dst", ".", "get_name", "(", ")", "]", "for", "e", "in", "sequence", "}", "}" ]
:return: dictionary of information regarding the stages in the fuzzing session .. note:: structure: { current: ['stage1', 'stage2', 'stage3'], 'stages': {'source1': ['dest1', 'dest2'], 'source2': ['dest1', 'dest3']}}
[ ":", "return", ":", "dictionary", "of", "information", "regarding", "the", "stages", "in", "the", "fuzzing", "session" ]
python
train
37.384615
christophertbrown/bioscripts
ctbBio/sam2fastq.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sam2fastq.py#L13-L24
def print_single(line, rev): """ print single reads to stderr """ if rev is True: seq = rc(['', line[9]])[1] qual = line[10][::-1] else: seq = line[9] qual = line[10] fq = ['@%s' % line[0], seq, '+%s' % line[0], qual] print('\n'.join(fq), file = sys.stderr)
[ "def", "print_single", "(", "line", ",", "rev", ")", ":", "if", "rev", "is", "True", ":", "seq", "=", "rc", "(", "[", "''", ",", "line", "[", "9", "]", "]", ")", "[", "1", "]", "qual", "=", "line", "[", "10", "]", "[", ":", ":", "-", "1", "]", "else", ":", "seq", "=", "line", "[", "9", "]", "qual", "=", "line", "[", "10", "]", "fq", "=", "[", "'@%s'", "%", "line", "[", "0", "]", ",", "seq", ",", "'+%s'", "%", "line", "[", "0", "]", ",", "qual", "]", "print", "(", "'\\n'", ".", "join", "(", "fq", ")", ",", "file", "=", "sys", ".", "stderr", ")" ]
print single reads to stderr
[ "print", "single", "reads", "to", "stderr" ]
python
train
25.5
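A small illustration of print_single above, assuming a SAM line that has already been split into its tab-separated fields (all values are made up):

    sam_fields = ['read1', '0', 'chr1', '100', '60', '5M', '*', '0', '0', 'ACGTA', 'IIIII']
    print_single(sam_fields, rev=False)   # writes a 4-line FASTQ record for 'read1' to stderr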
bsolomon1124/pyfinance
pyfinance/returns.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/returns.py#L585-L599
def gain_to_loss_ratio(self): """Gain-to-loss ratio, ratio of positive to negative returns. Formula: (n pos. / n neg.) * (avg. up-month return / avg. down-month return) [Source: CFA Institute] Returns ------- float """ gt = self > 0 lt = self < 0 return (nansum(gt) / nansum(lt)) * (self[gt].mean() / self[lt].mean())
[ "def", "gain_to_loss_ratio", "(", "self", ")", ":", "gt", "=", "self", ">", "0", "lt", "=", "self", "<", "0", "return", "(", "nansum", "(", "gt", ")", "/", "nansum", "(", "lt", ")", ")", "*", "(", "self", "[", "gt", "]", ".", "mean", "(", ")", "/", "self", "[", "lt", "]", ".", "mean", "(", ")", ")" ]
Gain-to-loss ratio, ratio of positive to negative returns. Formula: (n pos. / n neg.) * (avg. up-month return / avg. down-month return) [Source: CFA Institute] Returns ------- float
[ "Gain", "-", "to", "-", "loss", "ratio", "ratio", "of", "positive", "to", "negative", "returns", "." ]
python
train
26.266667
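The gain_to_loss_ratio method above operates on the returns object itself; a worked sketch of the same formula on a plain NumPy array, with made-up monthly returns, illustrates the arithmetic:

    import numpy as np

    r = np.array([0.02, -0.01, 0.03, -0.02, 0.01])
    gt, lt = r > 0, r < 0
    ratio = (np.nansum(gt) / np.nansum(lt)) * (r[gt].mean() / r[lt].mean())
    print(ratio)   # (3 / 2) * (0.02 / -0.015) = -2.0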
eqcorrscan/EQcorrscan
eqcorrscan/utils/mag_calc.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L474-L810
def amp_pick_event(event, st, respdir, chans=['Z'], var_wintype=True, winlen=0.9, pre_pick=0.2, pre_filt=True, lowcut=1.0, highcut=20.0, corners=4, min_snr=1.0, plot=False, remove_old=False, ps_multiplier=0.34, velocity=False): """ Pick amplitudes for local magnitude for a single event. Looks for maximum peak-to-trough amplitude for a channel in a stream, and picks this amplitude and period. There are a few things it does internally to stabilise the result: 1. Applies a given filter to the data - very necessary for small magnitude earthquakes; 2. Keeps track of the poles and zeros of this filter and removes them from the picked amplitude; 3. Picks the peak-to-trough amplitude, but records half of this: the specification for the local magnitude is to use a peak amplitude on a horizontal, however, with modern digital seismometers, the peak amplitude often has an additional, DC-shift applied to it, to stabilise this, and to remove possible issues with de-meaning data recorded during the wave-train of an event (e.g. the mean may not be the same as it would be for longer durations), we use half the peak-to-trough amplitude; 4. Despite the original definition of local magnitude requiring the use of a horizontal channel, more recent work has shown that the vertical channels give more consistent magnitude estimations between stations, due to a reduction in site-amplification effects, we therefore use the vertical channels by default, but allow the user to chose which channels they deem appropriate; 5. We do not specify that the maximum amplitude should be the S-phase: The original definition holds that the maximum body-wave amplitude should be used - while this is often the S-phase, we do not discriminate against the P-phase. We do note that, unless the user takes care when assigning winlen and filters, they may end up with amplitude picks for surface waves; 6. We use a variable window-length by default that takes into account P-S times if available, this is in an effort to include only the body waves. When P-S times are not available we us the ps_multiplier variable, which defaults to 0.34 x hypocentral distance. :type event: obspy.core.event.event.Event :param event: Event to pick :type st: obspy.core.stream.Stream :param st: Stream associated with event :type respdir: str :param respdir: Path to the response information directory :type chans: list :param chans: List of the channels to pick on, defaults to ['Z'] - should just be the orientations, e.g. Z, 1, 2, N, E :type var_wintype: bool :param var_wintype: If True, the winlen will be multiplied by the P-S time if both P and S picks are available, otherwise it will be multiplied by the hypocentral distance*ps_multiplier, defaults to True :type winlen: float :param winlen: Length of window, see above parameter, if var_wintype is False then this will be in seconds, otherwise it is the multiplier to the p-s time, defaults to 0.9. :type pre_pick: float :param pre_pick: Time before the s-pick to start the cut window, defaults to 0.2. :type pre_filt: bool :param pre_filt: To apply a pre-filter or not, defaults to True :type lowcut: float :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0 :type highcut: float :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0 :type corners: int :param corners: Number of corners to use in the pre-filter :type min_snr: float :param min_snr: Minimum signal-to-noise ratio to allow a pick - see note below on signal-to-noise ratio calculation. 
:type plot: bool :param plot: Turn plotting on or off. :type remove_old: bool :param remove_old: If True, will remove old amplitude picks from event and overwrite with new picks. Defaults to False. :type ps_multiplier: float :param ps_multiplier: A p-s time multiplier of hypocentral distance - defaults to 0.34, based on p-s ratio of 1.68 and an S-velocity 0f 1.5km/s, deliberately chosen to be quite slow. :type velocity: bool :param velocity: Whether to make the pick in velocity space or not. Original definition of local magnitude used displacement of Wood-Anderson, MLv in seiscomp and Antelope uses a velocity measurement. :returns: Picked event :rtype: :class:`obspy.core.event.Event` .. Note:: Signal-to-noise ratio is calculated using the filtered data by dividing the maximum amplitude in the signal window (pick window) by the normalized noise amplitude (taken from the whole window supplied). .. Warning:: Works in place on data - will filter and remove response from data, you are recommended to give this function a copy of the data if you are using it in a loop. """ # Convert these picks into a lists stations = [] # List of stations channels = [] # List of channels picktimes = [] # List of pick times picktypes = [] # List of pick types picks_out = [] try: depth = _get_origin(event).depth except MatchFilterError: depth = 0 if remove_old and event.amplitudes: for amp in event.amplitudes: # Find the pick and remove it too pick = [p for p in event.picks if p.resource_id == amp.pick_id][0] event.picks.remove(pick) event.amplitudes = [] for pick in event.picks: if pick.phase_hint in ['P', 'S']: picks_out.append(pick) # Need to be able to remove this if there # isn't data for a station! stations.append(pick.waveform_id.station_code) channels.append(pick.waveform_id.channel_code) picktimes.append(pick.time) picktypes.append(pick.phase_hint) if len(picktypes) == 0: warnings.warn('No P or S picks found') st.merge() # merge the data, just in case! 
# For each station cut the window uniq_stas = list(set(stations)) for sta in uniq_stas: for chan in chans: print('Working on ' + sta + ' ' + chan) tr = st.select(station=sta, channel='*' + chan) if not tr: warnings.warn( 'There is no station and channel match in the wavefile!') continue else: tr = tr[0] # Apply the pre-filter if pre_filt: try: tr.split().detrend('simple').merge(fill_value=0) except: print('Some issue splitting this one') dummy = tr.split() dummy.detrend('simple') tr = dummy.merge(fill_value=0) try: tr.filter('bandpass', freqmin=lowcut, freqmax=highcut, corners=corners) except NotImplementedError: print('For some reason trace is not continuous:') print(tr) continue # Find the response information resp_info = _find_resp( tr.stats.station, tr.stats.channel, tr.stats.network, tr.stats.starttime, tr.stats.delta, respdir) PAZ = [] seedresp = [] if resp_info and 'gain' in resp_info: PAZ = resp_info elif resp_info: seedresp = resp_info # Simulate a Wood Anderson Seismograph if PAZ and len(tr.data) > 10: # Set ten data points to be the minimum to pass tr = _sim_WA(tr, PAZ, None, 10, velocity=velocity) elif seedresp and len(tr.data) > 10: tr = _sim_WA(tr, None, seedresp, 10, velocity=velocity) elif len(tr.data) > 10: warnings.warn('No PAZ for ' + tr.stats.station + ' ' + tr.stats.channel + ' at time: ' + str(tr.stats.starttime)) continue sta_picks = [i for i in range(len(stations)) if stations[i] == sta] pick_id = event.picks[sta_picks[0]].resource_id arrival = [arrival for arrival in event.origins[0].arrivals if arrival.pick_id == pick_id][0] hypo_dist = np.sqrt( np.square(degrees2kilometers(arrival.distance)) + np.square(depth / 1000)) if var_wintype and hypo_dist: if 'S' in [picktypes[i] for i in sta_picks] and\ 'P' in [picktypes[i] for i in sta_picks]: # If there is an S-pick we can use this :D s_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'S'] s_pick = min(s_pick) p_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'P'] p_pick = min(p_pick) try: tr.trim(starttime=s_pick - pre_pick, endtime=s_pick + (s_pick - p_pick) * winlen) except ValueError: continue elif 'S' in [picktypes[i] for i in sta_picks]: s_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'S'] s_pick = min(s_pick) p_modelled = s_pick - (hypo_dist * ps_multiplier) try: tr.trim(starttime=s_pick - pre_pick, endtime=s_pick + (s_pick - p_modelled) * winlen) except ValueError: continue else: # In this case we only have a P pick p_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'P'] p_pick = min(p_pick) s_modelled = p_pick + (hypo_dist * ps_multiplier) print('P_pick=%s' % str(p_pick)) print('hypo_dist: %s' % str(hypo_dist)) print('S modelled=%s' % str(s_modelled)) try: tr.trim(starttime=s_modelled - pre_pick, endtime=s_modelled + (s_modelled - p_pick) * winlen) print(tr) except ValueError: continue # Work out the window length based on p-s time or distance elif 'S' in [picktypes[i] for i in sta_picks]: # If the window is fixed we still need to find the start time, # which can be based either on the S-pick (this elif), or # on the hypocentral distance and the P-pick # Take the minimum S-pick time if more than one S-pick is # available s_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'S'] s_pick = min(s_pick) try: tr.trim(starttime=s_pick - pre_pick, endtime=s_pick + winlen) except ValueError: continue else: # In this case, there is no S-pick and the window length is # fixed we need to calculate an expected S_pick based on the # hypocentral distance, this will be quite 
hand-wavey as we # are not using any kind of velocity model. p_pick = [picktimes[i] for i in sta_picks if picktypes[i] == 'P'] print(picktimes) p_pick = min(p_pick) s_modelled = p_pick + hypo_dist * ps_multiplier try: tr.trim(starttime=s_modelled - pre_pick, endtime=s_modelled + winlen) except ValueError: continue if len(tr.data) <= 10: warnings.warn('No data found for: ' + tr.stats.station) continue # Get the amplitude try: amplitude, period, delay = _max_p2t(tr.data, tr.stats.delta) except ValueError: print('No amplitude picked for tr %s' % str(tr)) continue # Calculate the normalized noise amplitude noise_amplitude = np.sqrt(np.mean(np.square(tr.data))) if amplitude == 0.0: continue if amplitude / noise_amplitude < min_snr: print('Signal to noise ratio of %s is below threshold.' % (amplitude / noise_amplitude)) continue if plot: plt.plot(np.arange(len(tr.data)), tr.data, 'k') plt.scatter(tr.stats.sampling_rate * delay, amplitude / 2) plt.scatter(tr.stats.sampling_rate * (delay + period), -amplitude / 2) plt.show() print('Amplitude picked: ' + str(amplitude)) print('Signal-to-noise ratio is: %s' % (amplitude / noise_amplitude)) # Note, amplitude should be in meters at the moment! # Remove the pre-filter response if pre_filt: # Generate poles and zeros for the filter we used earlier: this # is how the filter is designed in the convenience methods of # filtering in obspy. z, p, k = iirfilter( corners, [lowcut / (0.5 * tr.stats.sampling_rate), highcut / (0.5 * tr.stats.sampling_rate)], btype='band', ftype='butter', output='zpk') filt_paz = {'poles': list(p), 'zeros': list(z), 'gain': k, 'sensitivity': 1.0} amplitude /= (paz_2_amplitude_value_of_freq_resp( filt_paz, 1 / period) * filt_paz['sensitivity']) if PAZ: amplitude /= 1000 if seedresp: # Seedresp method returns mm amplitude *= 1000000 # Write out the half amplitude, approximately the peak amplitude as # used directly in magnitude calculations amplitude *= 0.5 # Append an amplitude reading to the event _waveform_id = WaveformStreamID( station_code=tr.stats.station, channel_code=tr.stats.channel, network_code=tr.stats.network) pick_ind = len(event.picks) event.picks.append(Pick( waveform_id=_waveform_id, phase_hint='IAML', polarity='undecidable', time=tr.stats.starttime + delay, evaluation_mode='automatic')) if not velocity: event.amplitudes.append(Amplitude( generic_amplitude=amplitude / 1e9, period=period, pick_id=event.picks[pick_ind].resource_id, waveform_id=event.picks[pick_ind].waveform_id, unit='m', magnitude_hint='ML', type='AML', category='point')) else: event.amplitudes.append(Amplitude( generic_amplitude=amplitude / 1e9, period=period, pick_id=event.picks[pick_ind].resource_id, waveform_id=event.picks[pick_ind].waveform_id, unit='m/s', magnitude_hint='ML', type='AML', category='point')) return event
[ "def", "amp_pick_event", "(", "event", ",", "st", ",", "respdir", ",", "chans", "=", "[", "'Z'", "]", ",", "var_wintype", "=", "True", ",", "winlen", "=", "0.9", ",", "pre_pick", "=", "0.2", ",", "pre_filt", "=", "True", ",", "lowcut", "=", "1.0", ",", "highcut", "=", "20.0", ",", "corners", "=", "4", ",", "min_snr", "=", "1.0", ",", "plot", "=", "False", ",", "remove_old", "=", "False", ",", "ps_multiplier", "=", "0.34", ",", "velocity", "=", "False", ")", ":", "# Convert these picks into a lists", "stations", "=", "[", "]", "# List of stations", "channels", "=", "[", "]", "# List of channels", "picktimes", "=", "[", "]", "# List of pick times", "picktypes", "=", "[", "]", "# List of pick types", "picks_out", "=", "[", "]", "try", ":", "depth", "=", "_get_origin", "(", "event", ")", ".", "depth", "except", "MatchFilterError", ":", "depth", "=", "0", "if", "remove_old", "and", "event", ".", "amplitudes", ":", "for", "amp", "in", "event", ".", "amplitudes", ":", "# Find the pick and remove it too", "pick", "=", "[", "p", "for", "p", "in", "event", ".", "picks", "if", "p", ".", "resource_id", "==", "amp", ".", "pick_id", "]", "[", "0", "]", "event", ".", "picks", ".", "remove", "(", "pick", ")", "event", ".", "amplitudes", "=", "[", "]", "for", "pick", "in", "event", ".", "picks", ":", "if", "pick", ".", "phase_hint", "in", "[", "'P'", ",", "'S'", "]", ":", "picks_out", ".", "append", "(", "pick", ")", "# Need to be able to remove this if there", "# isn't data for a station!", "stations", ".", "append", "(", "pick", ".", "waveform_id", ".", "station_code", ")", "channels", ".", "append", "(", "pick", ".", "waveform_id", ".", "channel_code", ")", "picktimes", ".", "append", "(", "pick", ".", "time", ")", "picktypes", ".", "append", "(", "pick", ".", "phase_hint", ")", "if", "len", "(", "picktypes", ")", "==", "0", ":", "warnings", ".", "warn", "(", "'No P or S picks found'", ")", "st", ".", "merge", "(", ")", "# merge the data, just in case!", "# For each station cut the window", "uniq_stas", "=", "list", "(", "set", "(", "stations", ")", ")", "for", "sta", "in", "uniq_stas", ":", "for", "chan", "in", "chans", ":", "print", "(", "'Working on '", "+", "sta", "+", "' '", "+", "chan", ")", "tr", "=", "st", ".", "select", "(", "station", "=", "sta", ",", "channel", "=", "'*'", "+", "chan", ")", "if", "not", "tr", ":", "warnings", ".", "warn", "(", "'There is no station and channel match in the wavefile!'", ")", "continue", "else", ":", "tr", "=", "tr", "[", "0", "]", "# Apply the pre-filter", "if", "pre_filt", ":", "try", ":", "tr", ".", "split", "(", ")", ".", "detrend", "(", "'simple'", ")", ".", "merge", "(", "fill_value", "=", "0", ")", "except", ":", "print", "(", "'Some issue splitting this one'", ")", "dummy", "=", "tr", ".", "split", "(", ")", "dummy", ".", "detrend", "(", "'simple'", ")", "tr", "=", "dummy", ".", "merge", "(", "fill_value", "=", "0", ")", "try", ":", "tr", ".", "filter", "(", "'bandpass'", ",", "freqmin", "=", "lowcut", ",", "freqmax", "=", "highcut", ",", "corners", "=", "corners", ")", "except", "NotImplementedError", ":", "print", "(", "'For some reason trace is not continuous:'", ")", "print", "(", "tr", ")", "continue", "# Find the response information", "resp_info", "=", "_find_resp", "(", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "channel", ",", "tr", ".", "stats", ".", "network", ",", "tr", ".", "stats", ".", "starttime", ",", "tr", ".", "stats", ".", "delta", ",", "respdir", ")", "PAZ", "=", "[", "]", "seedresp", "=", "[", "]", "if", 
"resp_info", "and", "'gain'", "in", "resp_info", ":", "PAZ", "=", "resp_info", "elif", "resp_info", ":", "seedresp", "=", "resp_info", "# Simulate a Wood Anderson Seismograph", "if", "PAZ", "and", "len", "(", "tr", ".", "data", ")", ">", "10", ":", "# Set ten data points to be the minimum to pass", "tr", "=", "_sim_WA", "(", "tr", ",", "PAZ", ",", "None", ",", "10", ",", "velocity", "=", "velocity", ")", "elif", "seedresp", "and", "len", "(", "tr", ".", "data", ")", ">", "10", ":", "tr", "=", "_sim_WA", "(", "tr", ",", "None", ",", "seedresp", ",", "10", ",", "velocity", "=", "velocity", ")", "elif", "len", "(", "tr", ".", "data", ")", ">", "10", ":", "warnings", ".", "warn", "(", "'No PAZ for '", "+", "tr", ".", "stats", ".", "station", "+", "' '", "+", "tr", ".", "stats", ".", "channel", "+", "' at time: '", "+", "str", "(", "tr", ".", "stats", ".", "starttime", ")", ")", "continue", "sta_picks", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "stations", ")", ")", "if", "stations", "[", "i", "]", "==", "sta", "]", "pick_id", "=", "event", ".", "picks", "[", "sta_picks", "[", "0", "]", "]", ".", "resource_id", "arrival", "=", "[", "arrival", "for", "arrival", "in", "event", ".", "origins", "[", "0", "]", ".", "arrivals", "if", "arrival", ".", "pick_id", "==", "pick_id", "]", "[", "0", "]", "hypo_dist", "=", "np", ".", "sqrt", "(", "np", ".", "square", "(", "degrees2kilometers", "(", "arrival", ".", "distance", ")", ")", "+", "np", ".", "square", "(", "depth", "/", "1000", ")", ")", "if", "var_wintype", "and", "hypo_dist", ":", "if", "'S'", "in", "[", "picktypes", "[", "i", "]", "for", "i", "in", "sta_picks", "]", "and", "'P'", "in", "[", "picktypes", "[", "i", "]", "for", "i", "in", "sta_picks", "]", ":", "# If there is an S-pick we can use this :D", "s_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'S'", "]", "s_pick", "=", "min", "(", "s_pick", ")", "p_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'P'", "]", "p_pick", "=", "min", "(", "p_pick", ")", "try", ":", "tr", ".", "trim", "(", "starttime", "=", "s_pick", "-", "pre_pick", ",", "endtime", "=", "s_pick", "+", "(", "s_pick", "-", "p_pick", ")", "*", "winlen", ")", "except", "ValueError", ":", "continue", "elif", "'S'", "in", "[", "picktypes", "[", "i", "]", "for", "i", "in", "sta_picks", "]", ":", "s_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'S'", "]", "s_pick", "=", "min", "(", "s_pick", ")", "p_modelled", "=", "s_pick", "-", "(", "hypo_dist", "*", "ps_multiplier", ")", "try", ":", "tr", ".", "trim", "(", "starttime", "=", "s_pick", "-", "pre_pick", ",", "endtime", "=", "s_pick", "+", "(", "s_pick", "-", "p_modelled", ")", "*", "winlen", ")", "except", "ValueError", ":", "continue", "else", ":", "# In this case we only have a P pick", "p_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'P'", "]", "p_pick", "=", "min", "(", "p_pick", ")", "s_modelled", "=", "p_pick", "+", "(", "hypo_dist", "*", "ps_multiplier", ")", "print", "(", "'P_pick=%s'", "%", "str", "(", "p_pick", ")", ")", "print", "(", "'hypo_dist: %s'", "%", "str", "(", "hypo_dist", ")", ")", "print", "(", "'S modelled=%s'", "%", "str", "(", "s_modelled", ")", ")", "try", ":", "tr", ".", "trim", "(", "starttime", "=", "s_modelled", "-", "pre_pick", ",", "endtime", "=", "s_modelled", 
"+", "(", "s_modelled", "-", "p_pick", ")", "*", "winlen", ")", "print", "(", "tr", ")", "except", "ValueError", ":", "continue", "# Work out the window length based on p-s time or distance", "elif", "'S'", "in", "[", "picktypes", "[", "i", "]", "for", "i", "in", "sta_picks", "]", ":", "# If the window is fixed we still need to find the start time,", "# which can be based either on the S-pick (this elif), or", "# on the hypocentral distance and the P-pick", "# Take the minimum S-pick time if more than one S-pick is", "# available", "s_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'S'", "]", "s_pick", "=", "min", "(", "s_pick", ")", "try", ":", "tr", ".", "trim", "(", "starttime", "=", "s_pick", "-", "pre_pick", ",", "endtime", "=", "s_pick", "+", "winlen", ")", "except", "ValueError", ":", "continue", "else", ":", "# In this case, there is no S-pick and the window length is", "# fixed we need to calculate an expected S_pick based on the", "# hypocentral distance, this will be quite hand-wavey as we", "# are not using any kind of velocity model.", "p_pick", "=", "[", "picktimes", "[", "i", "]", "for", "i", "in", "sta_picks", "if", "picktypes", "[", "i", "]", "==", "'P'", "]", "print", "(", "picktimes", ")", "p_pick", "=", "min", "(", "p_pick", ")", "s_modelled", "=", "p_pick", "+", "hypo_dist", "*", "ps_multiplier", "try", ":", "tr", ".", "trim", "(", "starttime", "=", "s_modelled", "-", "pre_pick", ",", "endtime", "=", "s_modelled", "+", "winlen", ")", "except", "ValueError", ":", "continue", "if", "len", "(", "tr", ".", "data", ")", "<=", "10", ":", "warnings", ".", "warn", "(", "'No data found for: '", "+", "tr", ".", "stats", ".", "station", ")", "continue", "# Get the amplitude", "try", ":", "amplitude", ",", "period", ",", "delay", "=", "_max_p2t", "(", "tr", ".", "data", ",", "tr", ".", "stats", ".", "delta", ")", "except", "ValueError", ":", "print", "(", "'No amplitude picked for tr %s'", "%", "str", "(", "tr", ")", ")", "continue", "# Calculate the normalized noise amplitude", "noise_amplitude", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "tr", ".", "data", ")", ")", ")", "if", "amplitude", "==", "0.0", ":", "continue", "if", "amplitude", "/", "noise_amplitude", "<", "min_snr", ":", "print", "(", "'Signal to noise ratio of %s is below threshold.'", "%", "(", "amplitude", "/", "noise_amplitude", ")", ")", "continue", "if", "plot", ":", "plt", ".", "plot", "(", "np", ".", "arange", "(", "len", "(", "tr", ".", "data", ")", ")", ",", "tr", ".", "data", ",", "'k'", ")", "plt", ".", "scatter", "(", "tr", ".", "stats", ".", "sampling_rate", "*", "delay", ",", "amplitude", "/", "2", ")", "plt", ".", "scatter", "(", "tr", ".", "stats", ".", "sampling_rate", "*", "(", "delay", "+", "period", ")", ",", "-", "amplitude", "/", "2", ")", "plt", ".", "show", "(", ")", "print", "(", "'Amplitude picked: '", "+", "str", "(", "amplitude", ")", ")", "print", "(", "'Signal-to-noise ratio is: %s'", "%", "(", "amplitude", "/", "noise_amplitude", ")", ")", "# Note, amplitude should be in meters at the moment!", "# Remove the pre-filter response", "if", "pre_filt", ":", "# Generate poles and zeros for the filter we used earlier: this", "# is how the filter is designed in the convenience methods of", "# filtering in obspy.", "z", ",", "p", ",", "k", "=", "iirfilter", "(", "corners", ",", "[", "lowcut", "/", "(", "0.5", "*", "tr", ".", "stats", ".", "sampling_rate", ")", ",", "highcut", "/", "(", "0.5", 
"*", "tr", ".", "stats", ".", "sampling_rate", ")", "]", ",", "btype", "=", "'band'", ",", "ftype", "=", "'butter'", ",", "output", "=", "'zpk'", ")", "filt_paz", "=", "{", "'poles'", ":", "list", "(", "p", ")", ",", "'zeros'", ":", "list", "(", "z", ")", ",", "'gain'", ":", "k", ",", "'sensitivity'", ":", "1.0", "}", "amplitude", "/=", "(", "paz_2_amplitude_value_of_freq_resp", "(", "filt_paz", ",", "1", "/", "period", ")", "*", "filt_paz", "[", "'sensitivity'", "]", ")", "if", "PAZ", ":", "amplitude", "/=", "1000", "if", "seedresp", ":", "# Seedresp method returns mm", "amplitude", "*=", "1000000", "# Write out the half amplitude, approximately the peak amplitude as", "# used directly in magnitude calculations", "amplitude", "*=", "0.5", "# Append an amplitude reading to the event", "_waveform_id", "=", "WaveformStreamID", "(", "station_code", "=", "tr", ".", "stats", ".", "station", ",", "channel_code", "=", "tr", ".", "stats", ".", "channel", ",", "network_code", "=", "tr", ".", "stats", ".", "network", ")", "pick_ind", "=", "len", "(", "event", ".", "picks", ")", "event", ".", "picks", ".", "append", "(", "Pick", "(", "waveform_id", "=", "_waveform_id", ",", "phase_hint", "=", "'IAML'", ",", "polarity", "=", "'undecidable'", ",", "time", "=", "tr", ".", "stats", ".", "starttime", "+", "delay", ",", "evaluation_mode", "=", "'automatic'", ")", ")", "if", "not", "velocity", ":", "event", ".", "amplitudes", ".", "append", "(", "Amplitude", "(", "generic_amplitude", "=", "amplitude", "/", "1e9", ",", "period", "=", "period", ",", "pick_id", "=", "event", ".", "picks", "[", "pick_ind", "]", ".", "resource_id", ",", "waveform_id", "=", "event", ".", "picks", "[", "pick_ind", "]", ".", "waveform_id", ",", "unit", "=", "'m'", ",", "magnitude_hint", "=", "'ML'", ",", "type", "=", "'AML'", ",", "category", "=", "'point'", ")", ")", "else", ":", "event", ".", "amplitudes", ".", "append", "(", "Amplitude", "(", "generic_amplitude", "=", "amplitude", "/", "1e9", ",", "period", "=", "period", ",", "pick_id", "=", "event", ".", "picks", "[", "pick_ind", "]", ".", "resource_id", ",", "waveform_id", "=", "event", ".", "picks", "[", "pick_ind", "]", ".", "waveform_id", ",", "unit", "=", "'m/s'", ",", "magnitude_hint", "=", "'ML'", ",", "type", "=", "'AML'", ",", "category", "=", "'point'", ")", ")", "return", "event" ]
Pick amplitudes for local magnitude for a single event. Looks for maximum peak-to-trough amplitude for a channel in a stream, and picks this amplitude and period. There are a few things it does internally to stabilise the result: 1. Applies a given filter to the data - very necessary for small magnitude earthquakes; 2. Keeps track of the poles and zeros of this filter and removes them from the picked amplitude; 3. Picks the peak-to-trough amplitude, but records half of this: the specification for the local magnitude is to use a peak amplitude on a horizontal, however, with modern digital seismometers, the peak amplitude often has an additional, DC-shift applied to it, to stabilise this, and to remove possible issues with de-meaning data recorded during the wave-train of an event (e.g. the mean may not be the same as it would be for longer durations), we use half the peak-to-trough amplitude; 4. Despite the original definition of local magnitude requiring the use of a horizontal channel, more recent work has shown that the vertical channels give more consistent magnitude estimations between stations, due to a reduction in site-amplification effects, we therefore use the vertical channels by default, but allow the user to chose which channels they deem appropriate; 5. We do not specify that the maximum amplitude should be the S-phase: The original definition holds that the maximum body-wave amplitude should be used - while this is often the S-phase, we do not discriminate against the P-phase. We do note that, unless the user takes care when assigning winlen and filters, they may end up with amplitude picks for surface waves; 6. We use a variable window-length by default that takes into account P-S times if available, this is in an effort to include only the body waves. When P-S times are not available we us the ps_multiplier variable, which defaults to 0.34 x hypocentral distance. :type event: obspy.core.event.event.Event :param event: Event to pick :type st: obspy.core.stream.Stream :param st: Stream associated with event :type respdir: str :param respdir: Path to the response information directory :type chans: list :param chans: List of the channels to pick on, defaults to ['Z'] - should just be the orientations, e.g. Z, 1, 2, N, E :type var_wintype: bool :param var_wintype: If True, the winlen will be multiplied by the P-S time if both P and S picks are available, otherwise it will be multiplied by the hypocentral distance*ps_multiplier, defaults to True :type winlen: float :param winlen: Length of window, see above parameter, if var_wintype is False then this will be in seconds, otherwise it is the multiplier to the p-s time, defaults to 0.9. :type pre_pick: float :param pre_pick: Time before the s-pick to start the cut window, defaults to 0.2. :type pre_filt: bool :param pre_filt: To apply a pre-filter or not, defaults to True :type lowcut: float :param lowcut: Lowcut in Hz for the pre-filter, defaults to 1.0 :type highcut: float :param highcut: Highcut in Hz for the pre-filter, defaults to 20.0 :type corners: int :param corners: Number of corners to use in the pre-filter :type min_snr: float :param min_snr: Minimum signal-to-noise ratio to allow a pick - see note below on signal-to-noise ratio calculation. :type plot: bool :param plot: Turn plotting on or off. :type remove_old: bool :param remove_old: If True, will remove old amplitude picks from event and overwrite with new picks. Defaults to False. 
:type ps_multiplier: float :param ps_multiplier: A p-s time multiplier of hypocentral distance - defaults to 0.34, based on p-s ratio of 1.68 and an S-velocity 0f 1.5km/s, deliberately chosen to be quite slow. :type velocity: bool :param velocity: Whether to make the pick in velocity space or not. Original definition of local magnitude used displacement of Wood-Anderson, MLv in seiscomp and Antelope uses a velocity measurement. :returns: Picked event :rtype: :class:`obspy.core.event.Event` .. Note:: Signal-to-noise ratio is calculated using the filtered data by dividing the maximum amplitude in the signal window (pick window) by the normalized noise amplitude (taken from the whole window supplied). .. Warning:: Works in place on data - will filter and remove response from data, you are recommended to give this function a copy of the data if you are using it in a loop.
[ "Pick", "amplitudes", "for", "local", "magnitude", "for", "a", "single", "event", "." ]
python
train
46.985163
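A hedged usage sketch for amp_pick_event above, assuming an ObsPy event, a matching waveform stream, and a directory of response files are available (all file names are hypothetical); a copy of the stream is passed because the function filters and removes responses in place:

    from obspy import read, read_events

    event = read_events('event.xml')[0]          # hypothetical QuakeML file
    st = read('waveforms.mseed')                 # hypothetical waveform file
    picked = amp_pick_event(event, st.copy(), respdir='responses/', chans=['Z'], min_snr=1.5)
    print(len(picked.amplitudes), 'amplitude readings picked')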
HazyResearch/pdftotree
pdftotree/utils/display_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/display_utils.py#L65-L74
def pdf_to_img(pdf_file, page_num, page_width, page_height): """ Converts pdf file into image :param pdf_file: path to the pdf file :param page_num: page number to convert (index starting at 1) :return: wand image object """ img = Image(filename="{}[{}]".format(pdf_file, page_num - 1)) img.resize(page_width, page_height) return img
[ "def", "pdf_to_img", "(", "pdf_file", ",", "page_num", ",", "page_width", ",", "page_height", ")", ":", "img", "=", "Image", "(", "filename", "=", "\"{}[{}]\"", ".", "format", "(", "pdf_file", ",", "page_num", "-", "1", ")", ")", "img", ".", "resize", "(", "page_width", ",", "page_height", ")", "return", "img" ]
Converts pdf file into image :param pdf_file: path to the pdf file :param page_num: page number to convert (index starting at 1) :return: wand image object
[ "Converts", "pdf", "file", "into", "image", ":", "param", "pdf_file", ":", "path", "to", "the", "pdf", "file", ":", "param", "page_num", ":", "page", "number", "to", "convert", "(", "index", "starting", "at", "1", ")", ":", "return", ":", "wand", "image", "object" ]
python
train
36
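A minimal usage sketch for pdf_to_img above, assuming ImageMagick/Wand can read the PDF (the file name and pixel dimensions are made up):

    img = pdf_to_img('report.pdf', page_num=1, page_width=612, page_height=792)
    img.save(filename='report_page1.png')   # wand images can be written back out with save()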
frostming/marko
marko/inline.py
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/inline.py#L39-L43
def find(cls, text): """This method should return an iterable containing matches of this element.""" if isinstance(cls.pattern, string_types): cls.pattern = re.compile(cls.pattern) return cls.pattern.finditer(text)
[ "def", "find", "(", "cls", ",", "text", ")", ":", "if", "isinstance", "(", "cls", ".", "pattern", ",", "string_types", ")", ":", "cls", ".", "pattern", "=", "re", ".", "compile", "(", "cls", ".", "pattern", ")", "return", "cls", ".", "pattern", ".", "finditer", "(", "text", ")" ]
This method should return an iterable containing matches of this element.
[ "This", "method", "should", "return", "an", "iterable", "containing", "matches", "of", "this", "element", "." ]
python
train
49.2
ethpm/py-ethpm
ethpm/utils/uri.py
https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/utils/uri.py#L55-L79
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool: """ Return a bool indicating whether or not the URI fulfills the following specs Valid Github URIs *must*: - Have 'https' scheme - Have 'api.github.com' authority - Have a path that contains all "expected_path_terms" """ if not is_text(uri): return False parsed = parse.urlparse(uri) path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc if not all((path, scheme, authority)): return False if any(term for term in expected_path_terms if term not in path): return False if scheme != "https": return False if authority != GITHUB_API_AUTHORITY: return False return True
[ "def", "is_valid_github_uri", "(", "uri", ":", "URI", ",", "expected_path_terms", ":", "Tuple", "[", "str", ",", "...", "]", ")", "->", "bool", ":", "if", "not", "is_text", "(", "uri", ")", ":", "return", "False", "parsed", "=", "parse", ".", "urlparse", "(", "uri", ")", "path", ",", "scheme", ",", "authority", "=", "parsed", ".", "path", ",", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", "if", "not", "all", "(", "(", "path", ",", "scheme", ",", "authority", ")", ")", ":", "return", "False", "if", "any", "(", "term", "for", "term", "in", "expected_path_terms", "if", "term", "not", "in", "path", ")", ":", "return", "False", "if", "scheme", "!=", "\"https\"", ":", "return", "False", "if", "authority", "!=", "GITHUB_API_AUTHORITY", ":", "return", "False", "return", "True" ]
Return a bool indicating whether or not the URI fulfills the following specs Valid Github URIs *must*: - Have 'https' scheme - Have 'api.github.com' authority - Have a path that contains all "expected_path_terms"
[ "Return", "a", "bool", "indicating", "whether", "or", "not", "the", "URI", "fulfills", "the", "following", "specs", "Valid", "Github", "URIs", "*", "must", "*", ":", "-", "Have", "https", "scheme", "-", "Have", "api", ".", "github", ".", "com", "authority", "-", "Have", "a", "path", "that", "contains", "all", "expected_path_terms" ]
python
train
29.64
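A small sketch of how is_valid_github_uri above behaves, assuming GITHUB_API_AUTHORITY is 'api.github.com' (both URIs are hypothetical):

    good = 'https://api.github.com/repos/ethpm/py-ethpm/git/blobs/abc123'
    bad = 'http://github.com/ethpm/py-ethpm'
    print(is_valid_github_uri(good, ('repos', 'blobs')))   # True
    print(is_valid_github_uri(bad, ('repos', 'blobs')))    # False: wrong scheme, authority and path terms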
Azure/azure-cosmos-table-python
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L202-L248
def generate_account_shared_access_signature(self, resource_types, permission, expiry, start=None, ip=None, protocol=None): ''' Generates a shared access signature for the table service. Use the returned signature with the sas_token parameter of TableService. :param ResourceTypes resource_types: Specifies the resource types that are accessible with the account SAS. :param AccountPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: datetime or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: datetime or str :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. :return: A Shared Access Signature (sas) token. :rtype: str ''' _validate_not_none('self.account_name', self.account_name) _validate_not_none('self.account_key', self.account_key) sas = TableSharedAccessSignature(self.account_name, self.account_key) return sas.generate_account(TableServices(), resource_types, permission, expiry, start=start, ip=ip, protocol=protocol)
[ "def", "generate_account_shared_access_signature", "(", "self", ",", "resource_types", ",", "permission", ",", "expiry", ",", "start", "=", "None", ",", "ip", "=", "None", ",", "protocol", "=", "None", ")", ":", "_validate_not_none", "(", "'self.account_name'", ",", "self", ".", "account_name", ")", "_validate_not_none", "(", "'self.account_key'", ",", "self", ".", "account_key", ")", "sas", "=", "TableSharedAccessSignature", "(", "self", ".", "account_name", ",", "self", ".", "account_key", ")", "return", "sas", ".", "generate_account", "(", "TableServices", "(", ")", ",", "resource_types", ",", "permission", ",", "expiry", ",", "start", "=", "start", ",", "ip", "=", "ip", ",", "protocol", "=", "protocol", ")" ]
Generates a shared access signature for the table service. Use the returned signature with the sas_token parameter of TableService. :param ResourceTypes resource_types: Specifies the resource types that are accessible with the account SAS. :param AccountPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: datetime or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: datetime or str :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. :return: A Shared Access Signature (sas) token. :rtype: str
[ "Generates", "a", "shared", "access", "signature", "for", "the", "table", "service", ".", "Use", "the", "returned", "signature", "with", "the", "sas_token", "parameter", "of", "TableService", "." ]
python
train
60.893617
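A hedged usage sketch for generate_account_shared_access_signature above, assuming the ResourceTypes and AccountPermissions models from azure.storage.common are available (the account name and key are placeholders):

    from datetime import datetime, timedelta
    from azure.cosmosdb.table import TableService
    from azure.storage.common.models import ResourceTypes, AccountPermissions

    table_service = TableService(account_name='myaccount', account_key='<key>')
    sas_token = table_service.generate_account_shared_access_signature(
        ResourceTypes(object=True),
        AccountPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1))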
funilrys/PyFunceble
PyFunceble/mining.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/mining.py#L243-L252
def _backup(self): """ Backup the mined informations. """ if PyFunceble.CONFIGURATION["mining"]: # The mining is activated. # We backup our mined informations. Dict(PyFunceble.INTERN["mined"]).to_json(self.file)
[ "def", "_backup", "(", "self", ")", ":", "if", "PyFunceble", ".", "CONFIGURATION", "[", "\"mining\"", "]", ":", "# The mining is activated.", "# We backup our mined informations.", "Dict", "(", "PyFunceble", ".", "INTERN", "[", "\"mined\"", "]", ")", ".", "to_json", "(", "self", ".", "file", ")" ]
Backup the mined informations.
[ "Backup", "the", "mined", "informations", "." ]
python
test
27.2
magrathealabs/feito
feito/messages.py
https://github.com/magrathealabs/feito/blob/4179e40233ccf6e5a6c9892e528595690ce9ef43/feito/messages.py#L10-L24
def commit_format(self): """ Formats the analysis into a simpler dictionary with the line, file and message values to be commented on a commit. Returns a list of dictionaries """ formatted_analyses = [] for analyze in self.analysis['messages']: formatted_analyses.append({ 'message': f"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}", 'file': analyze['location']['path'], 'line': analyze['location']['line'], }) return formatted_analyses
[ "def", "commit_format", "(", "self", ")", ":", "formatted_analyses", "=", "[", "]", "for", "analyze", "in", "self", ".", "analysis", "[", "'messages'", "]", ":", "formatted_analyses", ".", "append", "(", "{", "'message'", ":", "f\"{analyze['source']}: {analyze['message']}. Code: {analyze['code']}\"", ",", "'file'", ":", "analyze", "[", "'location'", "]", "[", "'path'", "]", ",", "'line'", ":", "analyze", "[", "'location'", "]", "[", "'line'", "]", ",", "}", ")", "return", "formatted_analyses" ]
Formats the analysis into a simpler dictionary with the line, file and message values to be commented on a commit. Returns a list of dictionaries
[ "Formats", "the", "analysis", "into", "a", "simpler", "dictionary", "with", "the", "line", "file", "and", "message", "values", "to", "be", "commented", "on", "a", "commit", ".", "Returns", "a", "list", "of", "dictionaries" ]
python
train
38.933333
linnarsson-lab/loompy
loompy/loom_view.py
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loom_view.py#L45-L68
def permute(self, ordering: np.ndarray, *, axis: int) -> None: """ Permute the view, by permuting its layers, attributes and graphs Args: ordering (np.ndarray): The desired ordering along the axis axis (int): 0, permute rows; 1, permute columns """ if axis not in (0, 1): raise ValueError("Axis must be 0 (rows) or 1 (columns)") for layer in self.layers.values(): layer._permute(ordering, axis=axis) if axis == 0: if self.row_graphs is not None: for g in self.row_graphs.values(): g._permute(ordering) for a in self.row_attrs.values(): a._permute(ordering) elif axis == 1: if self.col_graphs is not None: for g in self.col_graphs.values(): g._permute(ordering) for a in self.col_attrs.values(): a._permute(ordering)
[ "def", "permute", "(", "self", ",", "ordering", ":", "np", ".", "ndarray", ",", "*", ",", "axis", ":", "int", ")", "->", "None", ":", "if", "axis", "not", "in", "(", "0", ",", "1", ")", ":", "raise", "ValueError", "(", "\"Axis must be 0 (rows) or 1 (columns)\"", ")", "for", "layer", "in", "self", ".", "layers", ".", "values", "(", ")", ":", "layer", ".", "_permute", "(", "ordering", ",", "axis", "=", "axis", ")", "if", "axis", "==", "0", ":", "if", "self", ".", "row_graphs", "is", "not", "None", ":", "for", "g", "in", "self", ".", "row_graphs", ".", "values", "(", ")", ":", "g", ".", "_permute", "(", "ordering", ")", "for", "a", "in", "self", ".", "row_attrs", ".", "values", "(", ")", ":", "a", ".", "_permute", "(", "ordering", ")", "elif", "axis", "==", "1", ":", "if", "self", ".", "col_graphs", "is", "not", "None", ":", "for", "g", "in", "self", ".", "col_graphs", ".", "values", "(", ")", ":", "g", ".", "_permute", "(", "ordering", ")", "for", "a", "in", "self", ".", "col_attrs", ".", "values", "(", ")", ":", "a", ".", "_permute", "(", "ordering", ")" ]
Permute the view, by permuting its layers, attributes and graphs Args: ordering (np.ndarray): The desired ordering along the axis axis (int): 0, permute rows; 1, permute columns
[ "Permute", "the", "view", "by", "permuting", "its", "layers", "attributes", "and", "graphs" ]
python
train
31.75
zimeon/iiif
iiif/info.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L296-L310
def level(self): """Extract level number from compliance profile URI. Returns integer level number or raises IIIFInfoError """ m = re.match( self.compliance_prefix + r'(\d)' + self.compliance_suffix + r'$', self.compliance) if (m): return int(m.group(1)) raise IIIFInfoError( "Bad compliance profile URI, failed to extract level number")
[ "def", "level", "(", "self", ")", ":", "m", "=", "re", ".", "match", "(", "self", ".", "compliance_prefix", "+", "r'(\\d)'", "+", "self", ".", "compliance_suffix", "+", "r'$'", ",", "self", ".", "compliance", ")", "if", "(", "m", ")", ":", "return", "int", "(", "m", ".", "group", "(", "1", ")", ")", "raise", "IIIFInfoError", "(", "\"Bad compliance profile URI, failed to extract level number\"", ")" ]
Extract level number from compliance profile URI. Returns integer level number or raises IIIFInfoError
[ "Extract", "level", "number", "from", "compliance", "profile", "URI", "." ]
python
train
30.4
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L477-L520
def keepLastValue(requestContext, seriesList, limit=INF): """ Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over. Continues the line with the last received value when gaps ('None' values) appear in your data, rather than breaking your line. Example:: &target=keepLastValue(Server01.connections.handled) &target=keepLastValue(Server01.connections.handled, 10) """ for series in seriesList: series.name = "keepLastValue(%s)" % (series.name) series.pathExpression = series.name consecutiveNones = 0 for i, value in enumerate(series): series[i] = value # No 'keeping' can be done on the first value because we have no # idea what came before it. if i == 0: continue if value is None: consecutiveNones += 1 else: if 0 < consecutiveNones <= limit: # If a non-None value is seen before the limit of Nones is # hit, backfill all the missing datapoints with the last # known value. for index in range(i - consecutiveNones, i): series[index] = series[i - consecutiveNones - 1] consecutiveNones = 0 # If the series ends with some None values, try to backfill a bit to # cover it. if 0 < consecutiveNones <= limit: for index in range(len(series) - consecutiveNones, len(series)): series[index] = series[len(series) - consecutiveNones - 1] return seriesList
[ "def", "keepLastValue", "(", "requestContext", ",", "seriesList", ",", "limit", "=", "INF", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"keepLastValue(%s)\"", "%", "(", "series", ".", "name", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "consecutiveNones", "=", "0", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "value", "# No 'keeping' can be done on the first value because we have no", "# idea what came before it.", "if", "i", "==", "0", ":", "continue", "if", "value", "is", "None", ":", "consecutiveNones", "+=", "1", "else", ":", "if", "0", "<", "consecutiveNones", "<=", "limit", ":", "# If a non-None value is seen before the limit of Nones is", "# hit, backfill all the missing datapoints with the last", "# known value.", "for", "index", "in", "range", "(", "i", "-", "consecutiveNones", ",", "i", ")", ":", "series", "[", "index", "]", "=", "series", "[", "i", "-", "consecutiveNones", "-", "1", "]", "consecutiveNones", "=", "0", "# If the series ends with some None values, try to backfill a bit to", "# cover it.", "if", "0", "<", "consecutiveNones", "<=", "limit", ":", "for", "index", "in", "range", "(", "len", "(", "series", ")", "-", "consecutiveNones", ",", "len", "(", "series", ")", ")", ":", "series", "[", "index", "]", "=", "series", "[", "len", "(", "series", ")", "-", "consecutiveNones", "-", "1", "]", "return", "seriesList" ]
Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over. Continues the line with the last received value when gaps ('None' values) appear in your data, rather than breaking your line. Example:: &target=keepLastValue(Server01.connections.handled) &target=keepLastValue(Server01.connections.handled, 10)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "optionally", "a", "limit", "to", "the", "number", "of", "None", "values", "to", "skip", "over", ".", "Continues", "the", "line", "with", "the", "last", "received", "value", "when", "gaps", "(", "None", "values", ")", "appear", "in", "your", "data", "rather", "than", "breaking", "your", "line", "." ]
python
train
37.363636
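keepLastValue above operates on graphite TimeSeries objects inside a render request; a standalone sketch of the same gap-filling idea on a plain list may help illustrate it (simplified: the real function leaves a run of Nones untouched when it is longer than the limit, and also renames each series):

    def fill_gaps(values, limit=float('inf')):
        # Carry the last seen value across short runs of None.
        out, last, nones = [], None, 0
        for v in values:
            if v is None and last is not None and nones < limit:
                out.append(last)
                nones += 1
            else:
                out.append(v)
                if v is not None:
                    last, nones = v, 0
        return out

    print(fill_gaps([1, None, None, 4, None], limit=10))   # [1, 1, 1, 4, 4]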
pantsbuild/pants
src/python/pants/option/arg_splitter.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/option/arg_splitter.py#L214-L230
def _consume_scope(self): """Returns a pair (scope, list of flags encountered in that scope). Note that the flag may be explicitly scoped, and therefore not actually belong to this scope. For example, in: ./pants --compile-java-partition-size-hint=100 compile <target> --compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100 in the compile.java scope. """ if not self._at_scope(): return None, [] scope = self._unconsumed_args.pop() flags = self._consume_flags() return scope, flags
[ "def", "_consume_scope", "(", "self", ")", ":", "if", "not", "self", ".", "_at_scope", "(", ")", ":", "return", "None", ",", "[", "]", "scope", "=", "self", ".", "_unconsumed_args", ".", "pop", "(", ")", "flags", "=", "self", ".", "_consume_flags", "(", ")", "return", "scope", ",", "flags" ]
Returns a pair (scope, list of flags encountered in that scope). Note that the flag may be explicitly scoped, and therefore not actually belong to this scope. For example, in: ./pants --compile-java-partition-size-hint=100 compile <target> --compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100 in the compile.java scope.
[ "Returns", "a", "pair", "(", "scope", "list", "of", "flags", "encountered", "in", "that", "scope", ")", "." ]
python
train
32.764706
craigahobbs/chisel
src/chisel/util.py
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L149-L165
def import_submodules(package, parent_package=None, exclude_submodules=None): """ Generator which imports all submodules of a module, recursively, including subpackages :param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided :type package: str :param parent_package: parent package name (e.g 'chisel') :type package: str :rtype: iterator of modules """ exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules package = importlib.import_module(package, parent_package) for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'): if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)): continue yield importlib.import_module(name)
[ "def", "import_submodules", "(", "package", ",", "parent_package", "=", "None", ",", "exclude_submodules", "=", "None", ")", ":", "exclude_submodules_dot", "=", "[", "x", "+", "'.'", "for", "x", "in", "exclude_submodules", "]", "if", "exclude_submodules", "else", "exclude_submodules", "package", "=", "importlib", ".", "import_module", "(", "package", ",", "parent_package", ")", "for", "_", ",", "name", ",", "_", "in", "pkgutil", ".", "walk_packages", "(", "package", ".", "__path__", ",", "package", ".", "__name__", "+", "'.'", ")", ":", "if", "exclude_submodules", "and", "(", "name", "in", "exclude_submodules", "or", "any", "(", "name", ".", "startswith", "(", "x", ")", "for", "x", "in", "exclude_submodules_dot", ")", ")", ":", "continue", "yield", "importlib", ".", "import_module", "(", "name", ")" ]
Generator which imports all submodules of a module, recursively, including subpackages :param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided :type package: str :param parent_package: parent package name (e.g 'chisel') :type package: str :rtype: iterator of modules
[ "Generator", "which", "imports", "all", "submodules", "of", "a", "module", "recursively", "including", "subpackages" ]
python
train
50.647059
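A minimal usage sketch of the generator in the chisel.util record above, restated locally so it runs on its own; the target package 'email' from the standard library and the excluded subpackage are chosen purely for illustration.

import importlib
import pkgutil

def import_submodules(package, parent_package=None, exclude_submodules=None):
    # Simplified re-statement of the generator above, for a runnable demo.
    exclude_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules
    package = importlib.import_module(package, parent_package)
    for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
        if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_dot)):
            continue
        yield importlib.import_module(name)

# Import every submodule of the standard-library 'email' package, skipping the mime subpackage.
for module in import_submodules('email', exclude_submodules=['email.mime']):
    print(module.__name__)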
rbuffat/pyepw
pyepw/epw.py
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L7391-L7418
def _create_datadict(cls, internal_name): """Creates an object depending on `internal_name` Args: internal_name (str): IDD name Raises: ValueError: if `internal_name` cannot be matched to a data dictionary object """ if internal_name == "LOCATION": return Location() if internal_name == "DESIGN CONDITIONS": return DesignConditions() if internal_name == "TYPICAL/EXTREME PERIODS": return TypicalOrExtremePeriods() if internal_name == "GROUND TEMPERATURES": return GroundTemperatures() if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS": return HolidaysOrDaylightSavings() if internal_name == "COMMENTS 1": return Comments1() if internal_name == "COMMENTS 2": return Comments2() if internal_name == "DATA PERIODS": return DataPeriods() raise ValueError( "No DataDictionary known for {}".format(internal_name))
[ "def", "_create_datadict", "(", "cls", ",", "internal_name", ")", ":", "if", "internal_name", "==", "\"LOCATION\"", ":", "return", "Location", "(", ")", "if", "internal_name", "==", "\"DESIGN CONDITIONS\"", ":", "return", "DesignConditions", "(", ")", "if", "internal_name", "==", "\"TYPICAL/EXTREME PERIODS\"", ":", "return", "TypicalOrExtremePeriods", "(", ")", "if", "internal_name", "==", "\"GROUND TEMPERATURES\"", ":", "return", "GroundTemperatures", "(", ")", "if", "internal_name", "==", "\"HOLIDAYS/DAYLIGHT SAVINGS\"", ":", "return", "HolidaysOrDaylightSavings", "(", ")", "if", "internal_name", "==", "\"COMMENTS 1\"", ":", "return", "Comments1", "(", ")", "if", "internal_name", "==", "\"COMMENTS 2\"", ":", "return", "Comments2", "(", ")", "if", "internal_name", "==", "\"DATA PERIODS\"", ":", "return", "DataPeriods", "(", ")", "raise", "ValueError", "(", "\"No DataDictionary known for {}\"", ".", "format", "(", "internal_name", ")", ")" ]
Creates an object depending on `internal_name` Args: internal_name (str): IDD name Raises: ValueError: if `internal_name` cannot be matched to a data dictionary object
[ "Creates", "an", "object", "depending", "on", "internal_name" ]
python
train
36.321429
mlperf/training
image_classification/tensorflow/official/resnet/resnet_model.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/resnet_model.py#L67-L91
def fixed_padding(inputs, kernel_size, data_format): """Pads the input along the spatial dimensions independently of input size. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs
[ "def", "fixed_padding", "(", "inputs", ",", "kernel_size", ",", "data_format", ")", ":", "pad_total", "=", "kernel_size", "-", "1", "pad_beg", "=", "pad_total", "//", "2", "pad_end", "=", "pad_total", "-", "pad_beg", "if", "data_format", "==", "'channels_first'", ":", "padded_inputs", "=", "tf", ".", "pad", "(", "inputs", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "0", "]", ",", "[", "pad_beg", ",", "pad_end", "]", ",", "[", "pad_beg", ",", "pad_end", "]", "]", ")", "else", ":", "padded_inputs", "=", "tf", ".", "pad", "(", "inputs", ",", "[", "[", "0", ",", "0", "]", ",", "[", "pad_beg", ",", "pad_end", "]", ",", "[", "pad_beg", ",", "pad_end", "]", ",", "[", "0", ",", "0", "]", "]", ")", "return", "padded_inputs" ]
Pads the input along the spatial dimensions independently of input size. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1).
[ "Pads", "the", "input", "along", "the", "spatial", "dimensions", "independently", "of", "input", "size", "." ]
python
train
40.92
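The padding arithmetic in the fixed_padding record above can be checked without TensorFlow; this sketch mirrors the same logic with NumPy (an assumption, not part of the original model code) and shows that a kernel size of 3 adds one row and column of zeros on each spatial side for channels_last data.

import numpy as np

def fixed_padding_np(inputs, kernel_size, data_format='channels_last'):
    # Mirror of the TF logic above: distribute (kernel_size - 1) zeros across both sides.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == 'channels_first':
        pad_width = [(0, 0), (0, 0), (pad_beg, pad_end), (pad_beg, pad_end)]
    else:
        pad_width = [(0, 0), (pad_beg, pad_end), (pad_beg, pad_end), (0, 0)]
    return np.pad(inputs, pad_width, mode='constant')

x = np.ones((1, 4, 4, 3))                           # [batch, height, width, channels]
print(fixed_padding_np(x, kernel_size=3).shape)     # (1, 6, 6, 3)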
tanghaibao/goatools
goatools/semantic.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/semantic.py#L111-L117
def get_term_freq(self, go_id): ''' Returns the frequency at which a particular GO term has been observed in the annotations. ''' num_ns = float(self.get_total_count(self.go2obj[go_id].namespace)) return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0
[ "def", "get_term_freq", "(", "self", ",", "go_id", ")", ":", "num_ns", "=", "float", "(", "self", ".", "get_total_count", "(", "self", ".", "go2obj", "[", "go_id", "]", ".", "namespace", ")", ")", "return", "float", "(", "self", ".", "get_count", "(", "go_id", ")", ")", "/", "num_ns", "if", "num_ns", "!=", "0", "else", "0" ]
Returns the frequency at which a particular GO term has been observed in the annotations.
[ "Returns", "the", "frequency", "at", "which", "a", "particular", "GO", "term", "has", "been", "observed", "in", "the", "annotations", "." ]
python
train
44.428571
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L746-L763
def _dict_to_bson(doc, check_keys, opts, top_level=True): """Encode a document to BSON.""" if _raw_document_class(doc): return doc.raw try: elements = [] if top_level and "_id" in doc: elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for (key, value) in iteritems(doc): if not top_level or key != "_id": elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
[ "def", "_dict_to_bson", "(", "doc", ",", "check_keys", ",", "opts", ",", "top_level", "=", "True", ")", ":", "if", "_raw_document_class", "(", "doc", ")", ":", "return", "doc", ".", "raw", "try", ":", "elements", "=", "[", "]", "if", "top_level", "and", "\"_id\"", "in", "doc", ":", "elements", ".", "append", "(", "_name_value_to_bson", "(", "b\"_id\\x00\"", ",", "doc", "[", "\"_id\"", "]", ",", "check_keys", ",", "opts", ")", ")", "for", "(", "key", ",", "value", ")", "in", "iteritems", "(", "doc", ")", ":", "if", "not", "top_level", "or", "key", "!=", "\"_id\"", ":", "elements", ".", "append", "(", "_element_to_bson", "(", "key", ",", "value", ",", "check_keys", ",", "opts", ")", ")", "except", "AttributeError", ":", "raise", "TypeError", "(", "\"encoder expected a mapping type but got: %r\"", "%", "(", "doc", ",", ")", ")", "encoded", "=", "b\"\"", ".", "join", "(", "elements", ")", "return", "_PACK_INT", "(", "len", "(", "encoded", ")", "+", "5", ")", "+", "encoded", "+", "b\"\\x00\"" ]
Encode a document to BSON.
[ "Encode", "a", "document", "to", "BSON", "." ]
python
train
42.277778
skorch-dev/skorch
skorch/history.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/history.py#L169-L179
def from_file(cls, f): """Load the history of a ``NeuralNet`` from a json file. Parameters ---------- f : file-like object or str """ with open_file_like(f, 'r') as fp: return cls(json.load(fp))
[ "def", "from_file", "(", "cls", ",", "f", ")", ":", "with", "open_file_like", "(", "f", ",", "'r'", ")", "as", "fp", ":", "return", "cls", "(", "json", ".", "load", "(", "fp", ")", ")" ]
Load the history of a ``NeuralNet`` from a json file. Parameters ---------- f : file-like object or str
[ "Load", "the", "history", "of", "a", "NeuralNet", "from", "a", "json", "file", "." ]
python
train
22.454545
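A rough sketch of the round trip the skorch History.from_file classmethod relies on, using only the standard json and tempfile modules; the shape of the history entries is invented for the example.

import json
import tempfile

# A history is just a JSON-serialisable list of per-epoch dicts.
history = [{'epoch': 1, 'train_loss': 0.9}, {'epoch': 2, 'train_loss': 0.7}]

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fp:
    json.dump(history, fp)
    path = fp.name

with open(path, 'r') as fp:
    loaded = json.load(fp)
print(loaded[-1]['train_loss'])   # 0.7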
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L1640-L1667
def connect_generators(self, debug=False): """ Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District Args ---- debug: bool, defaults to False If True, information is printed during process. """ for mv_grid_district in self.mv_grid_districts(): mv_grid_district.mv_grid.connect_generators(debug=debug) # get predefined random seed and initialize random generator seed = int(cfg_ding0.get('random', 'seed')) random.seed(a=seed) for load_area in mv_grid_district.lv_load_areas(): if not load_area.is_aggregated: for lv_grid_district in load_area.lv_grid_districts(): lv_grid_district.lv_grid.connect_generators(debug=debug) if debug: lv_grid_district.lv_grid.graph_draw(mode='LV') else: logger.info( '{} is of type aggregated. LV generators are not connected to LV grids.'.format(repr(load_area))) logger.info('=====> Generators connected')
[ "def", "connect_generators", "(", "self", ",", "debug", "=", "False", ")", ":", "for", "mv_grid_district", "in", "self", ".", "mv_grid_districts", "(", ")", ":", "mv_grid_district", ".", "mv_grid", ".", "connect_generators", "(", "debug", "=", "debug", ")", "# get predefined random seed and initialize random generator", "seed", "=", "int", "(", "cfg_ding0", ".", "get", "(", "'random'", ",", "'seed'", ")", ")", "random", ".", "seed", "(", "a", "=", "seed", ")", "for", "load_area", "in", "mv_grid_district", ".", "lv_load_areas", "(", ")", ":", "if", "not", "load_area", ".", "is_aggregated", ":", "for", "lv_grid_district", "in", "load_area", ".", "lv_grid_districts", "(", ")", ":", "lv_grid_district", ".", "lv_grid", ".", "connect_generators", "(", "debug", "=", "debug", ")", "if", "debug", ":", "lv_grid_district", ".", "lv_grid", ".", "graph_draw", "(", "mode", "=", "'LV'", ")", "else", ":", "logger", ".", "info", "(", "'{} is of type aggregated. LV generators are not connected to LV grids.'", ".", "format", "(", "repr", "(", "load_area", ")", ")", ")", "logger", ".", "info", "(", "'=====> Generators connected'", ")" ]
Connects generators (graph nodes) to grid (graph) for every MV and LV Grid District Args ---- debug: bool, defaults to False If True, information is printed during process.
[ "Connects", "generators", "(", "graph", "nodes", ")", "to", "grid", "(", "graph", ")", "for", "every", "MV", "and", "LV", "Grid", "District" ]
python
train
40.928571
mitsei/dlkit
dlkit/records/adaptive/magic_parts/assessment_part_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/adaptive/magic_parts/assessment_part_records.py#L125-L150
def get_parts(self, parts=None, reference_level=0): """Recursively returns a depth-first list of all known magic parts""" if parts is None: parts = list() new_reference_level = reference_level else: self._level_in_section = self._level + reference_level new_reference_level = self._level_in_section parts.append(self.my_osid_object) if self._child_parts is None: if self.has_magic_children(): self.generate_children() else: return parts for part in self._child_parts: part.get_parts(parts, new_reference_level) # Don't need to append here, because parts is passed by reference # so appending is redundant # child_parts = part.get_parts(parts, new_reference_level) # known_part_ids = [str(part.ident) for part in parts] # # for child_part in child_parts: # if str(child_part.ident) not in known_part_ids: # parts.append(child_part) # known_part_ids.append(str(child_part.ident)) return parts
[ "def", "get_parts", "(", "self", ",", "parts", "=", "None", ",", "reference_level", "=", "0", ")", ":", "if", "parts", "is", "None", ":", "parts", "=", "list", "(", ")", "new_reference_level", "=", "reference_level", "else", ":", "self", ".", "_level_in_section", "=", "self", ".", "_level", "+", "reference_level", "new_reference_level", "=", "self", ".", "_level_in_section", "parts", ".", "append", "(", "self", ".", "my_osid_object", ")", "if", "self", ".", "_child_parts", "is", "None", ":", "if", "self", ".", "has_magic_children", "(", ")", ":", "self", ".", "generate_children", "(", ")", "else", ":", "return", "parts", "for", "part", "in", "self", ".", "_child_parts", ":", "part", ".", "get_parts", "(", "parts", ",", "new_reference_level", ")", "# Don't need to append here, because parts is passed by reference", "# so appending is redundant", "# child_parts = part.get_parts(parts, new_reference_level)", "# known_part_ids = [str(part.ident) for part in parts]", "#", "# for child_part in child_parts:", "# if str(child_part.ident) not in known_part_ids:", "# parts.append(child_part)", "# known_part_ids.append(str(child_part.ident))", "return", "parts" ]
Recursively returns a depth-first list of all known magic parts
[ "Recursively", "returns", "a", "depth", "-", "first", "list", "of", "all", "known", "magic", "parts" ]
python
train
44.961538
fermiPy/fermipy
fermipy/diffuse/model_component.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/model_component.py#L161-L172
def add_component_info(self, compinfo): """Add sub-component specific information to a particular data selection Parameters ---------- compinfo : `ModelComponentInfo` object Sub-component being added """ if self.components is None: self.components = {} self.components[compinfo.comp_key] = compinfo
[ "def", "add_component_info", "(", "self", ",", "compinfo", ")", ":", "if", "self", ".", "components", "is", "None", ":", "self", ".", "components", "=", "{", "}", "self", ".", "components", "[", "compinfo", ".", "comp_key", "]", "=", "compinfo" ]
Add sub-component specific information to a particular data selection Parameters ---------- compinfo : `ModelComponentInfo` object Sub-component being added
[ "Add", "sub", "-", "component", "specific", "information", "to", "a", "particular", "data", "selection" ]
python
train
30.75
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/input_controller.py#L158-L178
def get_el(el): """ Get value of given `el` tag element. Automatically choose proper method to set the `value` based on the type of the `el`. Args: el (obj): Element reference to the input you want to convert to typeahead. Returns: str: Value of the object. """ tag_name = el.elt.tagName.lower() if tag_name in {"input", "textarea", "select"}: return el.value else: raise ValueError( "Getter for %s (%s) not implemented!" % (tag_name, el.id) )
[ "def", "get_el", "(", "el", ")", ":", "tag_name", "=", "el", ".", "elt", ".", "tagName", ".", "lower", "(", ")", "if", "tag_name", "in", "{", "\"input\"", ",", "\"textarea\"", ",", "\"select\"", "}", ":", "return", "el", ".", "value", "else", ":", "raise", "ValueError", "(", "\"Getter for %s (%s) not implemented!\"", "%", "(", "tag_name", ",", "el", ".", "id", ")", ")" ]
Get value of given `el` tag element. Automatically choose proper method to set the `value` based on the type of the `el`. Args: el (obj): Element reference to the input you want to convert to typeahead. Returns: str: Value of the object.
[ "Get", "value", "of", "given", "el", "tag", "element", "." ]
python
train
28.47619
TheHive-Project/Cortex-Analyzers
analyzers/MISP/mispclient.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MISP/mispclient.py#L238-L244
def search_url(self, searchterm): """Search for URLs :type searchterm: str :rtype: list """ return self.__search(type_attribute=self.__mispurltypes(), value=searchterm)
[ "def", "search_url", "(", "self", ",", "searchterm", ")", ":", "return", "self", ".", "__search", "(", "type_attribute", "=", "self", ".", "__mispurltypes", "(", ")", ",", "value", "=", "searchterm", ")" ]
Search for URLs :type searchterm: str :rtype: list
[ "Search", "for", "URLs", ":", "type", "searchterm", ":", "str", ":", "rtype", ":", "list" ]
python
train
30.142857
saltstack/salt
salt/modules/acme.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/acme.py#L92-L104
def _renew_by(name, window=None): ''' Date before a certificate should be renewed :param name: Common Name of the certificate (DNS name of certificate) :param window: days before expiry date to renew :return datetime object of first renewal date ''' expiry = _expires(name) if window is not None: expiry = expiry - datetime.timedelta(days=window) return expiry
[ "def", "_renew_by", "(", "name", ",", "window", "=", "None", ")", ":", "expiry", "=", "_expires", "(", "name", ")", "if", "window", "is", "not", "None", ":", "expiry", "=", "expiry", "-", "datetime", ".", "timedelta", "(", "days", "=", "window", ")", "return", "expiry" ]
Date before a certificate should be renewed :param name: Common Name of the certificate (DNS name of certificate) :param window: days before expiry date to renew :return datetime object of first renewal date
[ "Date", "before", "a", "certificate", "should", "be", "renewed" ]
python
train
30.307692
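The renewal-window arithmetic in the acme _renew_by record above reduces to a timedelta subtraction; this hedged sketch replaces the module's _expires lookup with a hard-coded expiry date purely for illustration.

import datetime

def renew_by(expiry, window=None):
    # Subtract the renewal window (in days) from the expiry date, if one is given.
    if window is not None:
        return expiry - datetime.timedelta(days=window)
    return expiry

expiry = datetime.datetime(2024, 6, 30)
print(renew_by(expiry, window=14))   # 2024-06-16 00:00:00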
saltstack/salt
salt/states/rdp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rdp.py#L15-L38
def enabled(name): ''' Enable the RDP service and make sure access to the RDP port is allowed in the firewall configuration ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} stat = __salt__['rdp.status']() if not stat: if __opts__['test']: ret['result'] = None ret['comment'] = 'RDP will be enabled' return ret ret['result'] = __salt__['rdp.enable']() ret['changes'] = {'RDP was enabled': True} return ret ret['comment'] = 'RDP is enabled' return ret
[ "def", "enabled", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "stat", "=", "__salt__", "[", "'rdp.status'", "]", "(", ")", "if", "not", "stat", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'RDP will be enabled'", "return", "ret", "ret", "[", "'result'", "]", "=", "__salt__", "[", "'rdp.enable'", "]", "(", ")", "ret", "[", "'changes'", "]", "=", "{", "'RDP was enabled'", ":", "True", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'RDP is enabled'", "return", "ret" ]
Enable the RDP service and make sure access to the RDP port is allowed in the firewall configuration
[ "Enable", "the", "RDP", "service", "and", "make", "sure", "access", "to", "the", "RDP", "port", "is", "allowed", "in", "the", "firewall", "configuration" ]
python
train
24.541667
KnowledgeLinks/rdfframework
rdfframework/utilities/baseutilities.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/utilities/baseutilities.py#L170-L202
def nz(value, none_value, strict=True): ''' This function is named after an old VBA function. It returns a default value if the passed in value is None. If strict is False it will treat an empty string as None as well. example: x = None nz(x,"hello") --> "hello" nz(x,"") --> "" y = "" nz(y,"hello") --> "" nz(y,"hello", False) --> "hello" ''' if not DEBUG: debug = False else: debug = False if debug: print("START nz frameworkutilities.py ----------------------\n") if value is None and strict: return_val = none_value elif strict and value is not None: return_val = value elif not strict and not is_not_null(value): return_val = none_value else: return_val = value if debug: print("value: %s | none_value: %s | return_val: %s" % (value, none_value, return_val)) if debug: print("END nz frameworkutilities.py ----------------------\n") return return_val
[ "def", "nz", "(", "value", ",", "none_value", ",", "strict", "=", "True", ")", ":", "if", "not", "DEBUG", ":", "debug", "=", "False", "else", ":", "debug", "=", "False", "if", "debug", ":", "print", "(", "\"START nz frameworkutilities.py ----------------------\\n\"", ")", "if", "value", "is", "None", "and", "strict", ":", "return_val", "=", "none_value", "elif", "strict", "and", "value", "is", "not", "None", ":", "return_val", "=", "value", "elif", "not", "strict", "and", "not", "is_not_null", "(", "value", ")", ":", "return_val", "=", "none_value", "else", ":", "return_val", "=", "value", "if", "debug", ":", "print", "(", "\"value: %s | none_value: %s | return_val: %s\"", "%", "(", "value", ",", "none_value", ",", "return_val", ")", ")", "if", "debug", ":", "print", "(", "\"END nz frameworkutilities.py ----------------------\\n\"", ")", "return", "return_val" ]
This function is named after an old VBA function. It returns a default value if the passed in value is None. If strict is False it will treat an empty string as None as well. example: x = None nz(x,"hello") --> "hello" nz(x,"") --> "" y = "" nz(y,"hello") --> "" nz(y,"hello", False) --> "hello"
[ "This", "function", "is", "named", "after", "an", "old", "VBA", "function", ".", "It", "returns", "a", "default", "value", "if", "the", "passed", "in", "value", "is", "None", ".", "If", "strict", "is", "False", "it", "will", "treat", "an", "empty", "string", "as", "None", "as", "well", "." ]
python
train
31.121212
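A condensed sketch of the null-coalescing behaviour described in the nz record above, with the module's is_not_null helper approximated by a simple membership check (an assumption; the real helper may treat more values as null).

def nz(value, none_value, strict=True):
    # Strict mode only substitutes for None; non-strict mode also substitutes for empty values.
    if strict:
        return none_value if value is None else value
    return value if value not in (None, '') else none_value

print(nz(None, 'hello'))          # 'hello'
print(nz('', 'hello'))            # ''
print(nz('', 'hello', False))     # 'hello'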
bachya/py17track
py17track/profile.py
https://github.com/bachya/py17track/blob/e6e64f2a79571433df7ee702cb4ebc4127b7ad6d/py17track/profile.py#L22-L45
async def login(self, email: str, password: str) -> bool: """Login to the profile.""" login_resp = await self._request( 'post', API_URL_USER, json={ 'version': '1.0', 'method': 'Signin', 'param': { 'Email': email, 'Password': password, 'CaptchaCode': '' }, 'sourcetype': 0 }) _LOGGER.debug('Login response: %s', login_resp) if login_resp.get('Code') != 0: return False self.account_id = login_resp['Json']['gid'] return True
[ "async", "def", "login", "(", "self", ",", "email", ":", "str", ",", "password", ":", "str", ")", "->", "bool", ":", "login_resp", "=", "await", "self", ".", "_request", "(", "'post'", ",", "API_URL_USER", ",", "json", "=", "{", "'version'", ":", "'1.0'", ",", "'method'", ":", "'Signin'", ",", "'param'", ":", "{", "'Email'", ":", "email", ",", "'Password'", ":", "password", ",", "'CaptchaCode'", ":", "''", "}", ",", "'sourcetype'", ":", "0", "}", ")", "_LOGGER", ".", "debug", "(", "'Login response: %s'", ",", "login_resp", ")", "if", "login_resp", ".", "get", "(", "'Code'", ")", "!=", "0", ":", "return", "False", "self", ".", "account_id", "=", "login_resp", "[", "'Json'", "]", "[", "'gid'", "]", "return", "True" ]
Login to the profile.
[ "Login", "to", "the", "profile", "." ]
python
train
27.208333
mdickinson/bigfloat
bigfloat/context.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/context.py#L296-L327
def _apply_function_in_context(cls, f, args, context): """ Apply an MPFR function 'f' to the given arguments 'args', rounding to the given context. Returns a new Mpfr object with precision taken from the current context. """ rounding = context.rounding bf = mpfr.Mpfr_t.__new__(cls) mpfr.mpfr_init2(bf, context.precision) args = (bf,) + args + (rounding,) ternary = f(*args) with _temporary_exponent_bounds(context.emin, context.emax): ternary = mpfr.mpfr_check_range(bf, ternary, rounding) if context.subnormalize: # mpfr_subnormalize doesn't set underflow and # subnormal flags, so we do that ourselves. We choose # to set the underflow flag for *all* cases where the # 'after rounding' result is smaller than the smallest # normal number, even if that result is exact. # if bf is zero but ternary is nonzero, the underflow # flag will already have been set by mpfr_check_range; underflow = ( mpfr.mpfr_number_p(bf) and not mpfr.mpfr_zero_p(bf) and mpfr.mpfr_get_exp(bf) < context.precision - 1 + context.emin) if underflow: mpfr.mpfr_set_underflow() ternary = mpfr.mpfr_subnormalize(bf, ternary, rounding) if ternary: mpfr.mpfr_set_inexflag() return bf
[ "def", "_apply_function_in_context", "(", "cls", ",", "f", ",", "args", ",", "context", ")", ":", "rounding", "=", "context", ".", "rounding", "bf", "=", "mpfr", ".", "Mpfr_t", ".", "__new__", "(", "cls", ")", "mpfr", ".", "mpfr_init2", "(", "bf", ",", "context", ".", "precision", ")", "args", "=", "(", "bf", ",", ")", "+", "args", "+", "(", "rounding", ",", ")", "ternary", "=", "f", "(", "*", "args", ")", "with", "_temporary_exponent_bounds", "(", "context", ".", "emin", ",", "context", ".", "emax", ")", ":", "ternary", "=", "mpfr", ".", "mpfr_check_range", "(", "bf", ",", "ternary", ",", "rounding", ")", "if", "context", ".", "subnormalize", ":", "# mpfr_subnormalize doesn't set underflow and", "# subnormal flags, so we do that ourselves. We choose", "# to set the underflow flag for *all* cases where the", "# 'after rounding' result is smaller than the smallest", "# normal number, even if that result is exact.", "# if bf is zero but ternary is nonzero, the underflow", "# flag will already have been set by mpfr_check_range;", "underflow", "=", "(", "mpfr", ".", "mpfr_number_p", "(", "bf", ")", "and", "not", "mpfr", ".", "mpfr_zero_p", "(", "bf", ")", "and", "mpfr", ".", "mpfr_get_exp", "(", "bf", ")", "<", "context", ".", "precision", "-", "1", "+", "context", ".", "emin", ")", "if", "underflow", ":", "mpfr", ".", "mpfr_set_underflow", "(", ")", "ternary", "=", "mpfr", ".", "mpfr_subnormalize", "(", "bf", ",", "ternary", ",", "rounding", ")", "if", "ternary", ":", "mpfr", ".", "mpfr_set_inexflag", "(", ")", "return", "bf" ]
Apply an MPFR function 'f' to the given arguments 'args', rounding to the given context. Returns a new Mpfr object with precision taken from the current context.
[ "Apply", "an", "MPFR", "function", "f", "to", "the", "given", "arguments", "args", "rounding", "to", "the", "given", "context", ".", "Returns", "a", "new", "Mpfr", "object", "with", "precision", "taken", "from", "the", "current", "context", "." ]
python
train
43.75
zhanglab/psamm
psamm/datasource/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/sbml.py#L1309-L1334
def create_convert_sbml_id_function( compartment_prefix='C_', reaction_prefix='R_', compound_prefix='M_', decode_id=entry_id_from_cobra_encoding): """Create function for converting SBML IDs. The returned function will strip prefixes, decode the ID using the provided function. These prefixes are common on IDs in SBML models because the IDs live in a global namespace. """ def convert_sbml_id(entry): if isinstance(entry, BaseCompartmentEntry): prefix = compartment_prefix elif isinstance(entry, BaseReactionEntry): prefix = reaction_prefix elif isinstance(entry, BaseCompoundEntry): prefix = compound_prefix new_id = entry.id if decode_id is not None: new_id = decode_id(new_id) if prefix is not None and new_id.startswith(prefix): new_id = new_id[len(prefix):] return new_id return convert_sbml_id
[ "def", "create_convert_sbml_id_function", "(", "compartment_prefix", "=", "'C_'", ",", "reaction_prefix", "=", "'R_'", ",", "compound_prefix", "=", "'M_'", ",", "decode_id", "=", "entry_id_from_cobra_encoding", ")", ":", "def", "convert_sbml_id", "(", "entry", ")", ":", "if", "isinstance", "(", "entry", ",", "BaseCompartmentEntry", ")", ":", "prefix", "=", "compartment_prefix", "elif", "isinstance", "(", "entry", ",", "BaseReactionEntry", ")", ":", "prefix", "=", "reaction_prefix", "elif", "isinstance", "(", "entry", ",", "BaseCompoundEntry", ")", ":", "prefix", "=", "compound_prefix", "new_id", "=", "entry", ".", "id", "if", "decode_id", "is", "not", "None", ":", "new_id", "=", "decode_id", "(", "new_id", ")", "if", "prefix", "is", "not", "None", "and", "new_id", ".", "startswith", "(", "prefix", ")", ":", "new_id", "=", "new_id", "[", "len", "(", "prefix", ")", ":", "]", "return", "new_id", "return", "convert_sbml_id" ]
Create function for converting SBML IDs. The returned function will strip prefixes, decode the ID using the provided function. These prefixes are common on IDs in SBML models because the IDs live in a global namespace.
[ "Create", "function", "for", "converting", "SBML", "IDs", "." ]
python
train
36.076923
Neurita/boyle
boyle/nifti/utils.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/utils.py#L95-L127
def thr_img(img, thr=2., mode='+'): """ Use the given magic function name `func` to threshold with value `thr` the data of `img` and return a new nibabel.Nifti1Image. Parameters ---------- img: img-like thr: float or int The threshold value. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- thr_img: nibabel.Nifti1Image Thresholded image """ vol = read_img(img).get_data() if mode == '+': mask = vol > thr elif mode == '+-' or mode == '-+': mask = np.abs(vol) > thr elif mode == '-': mask = vol < -thr else: raise ValueError("Expected `mode` to be one of ('+', '+-', '-+', '-'), " "got {}.".format(mode)) return vol * mask
[ "def", "thr_img", "(", "img", ",", "thr", "=", "2.", ",", "mode", "=", "'+'", ")", ":", "vol", "=", "read_img", "(", "img", ")", ".", "get_data", "(", ")", "if", "mode", "==", "'+'", ":", "mask", "=", "vol", ">", "thr", "elif", "mode", "==", "'+-'", "or", "mode", "==", "'-+'", ":", "mask", "=", "np", ".", "abs", "(", "vol", ")", ">", "thr", "elif", "mode", "==", "'-'", ":", "mask", "=", "vol", "<", "-", "thr", "else", ":", "raise", "ValueError", "(", "\"Expected `mode` to be one of ('+', '+-', '-+', '-'), \"", "\"got {}.\"", ".", "format", "(", "mode", ")", ")", "return", "vol", "*", "mask" ]
Use the given magic function name `func` to threshold with value `thr` the data of `img` and return a new nibabel.Nifti1Image. Parameters ---------- img: img-like thr: float or int The threshold value. mode: str Choices: '+' for positive threshold, '+-' for positive and negative threshold and '-' for negative threshold. Returns ------- thr_img: nibabel.Nifti1Image Thresholded image
[ "Use", "the", "given", "magic", "function", "name", "func", "to", "threshold", "with", "value", "thr", "the", "data", "of", "img", "and", "return", "a", "new", "nibabel", ".", "Nifti1Image", ".", "Parameters", "----------", "img", ":", "img", "-", "like" ]
python
valid
26.242424
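The masking step from the thr_img record above can be demonstrated on a plain NumPy array; this sketch assumes only NumPy and skips the nibabel image reading done by the original function.

import numpy as np

def threshold(vol, thr=2.0, mode='+'):
    # Keep voxels above thr ('+'), below -thr ('-'), or beyond either bound ('+-').
    if mode == '+':
        mask = vol > thr
    elif mode in ('+-', '-+'):
        mask = np.abs(vol) > thr
    elif mode == '-':
        mask = vol < -thr
    else:
        raise ValueError("mode must be one of '+', '+-', '-+', '-'")
    return vol * mask

vol = np.array([-3.0, -1.0, 0.5, 2.5])
print(threshold(vol, thr=2.0, mode='+-'))   # [-3. -0.  0.  2.5]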
MacHu-GWU/angora-project
angora/bot/macro.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/macro.py#L165-L171
def Alt_Fn(self, n, dl = 0): """Alt + Fn1~12 组合键 """ self.Delay(dl) self.keyboard.press_key(self.keyboard.alt_key) self.keyboard.tap_key(self.keyboard.function_keys[n]) self.keyboard.release_key(self.keyboard.alt_key)
[ "def", "Alt_Fn", "(", "self", ",", "n", ",", "dl", "=", "0", ")", ":", "self", ".", "Delay", "(", "dl", ")", "self", ".", "keyboard", ".", "press_key", "(", "self", ".", "keyboard", ".", "alt_key", ")", "self", ".", "keyboard", ".", "tap_key", "(", "self", ".", "keyboard", ".", "function_keys", "[", "n", "]", ")", "self", ".", "keyboard", ".", "release_key", "(", "self", ".", "keyboard", ".", "alt_key", ")" ]
Alt + Fn1~12 key combination
[ "Alt", "+", "Fn1~12", "key", "combination" ]
python
train
37
santoshphilip/eppy
eppy/iddgaps.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/iddgaps.py#L152-L170
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afiled %s'): """This is an object list where thre is no first field name to give a hint of what the first field name should be""" afield = 'afield %s' for key_txt in objectlist: key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] if block: blk = block[key_i] for i, cmt in enumerate(comm): if cmt == {}: first_i = i break for i, cmt in enumerate(comm): if i >= first_i: if block: comm[i]['field'] = ['%s' % (blk[i])] else: comm[i]['field'] = [afield % (i - first_i + 1,),]
[ "def", "missingkeys_nonstandard", "(", "block", ",", "commdct", ",", "dtls", ",", "objectlist", ",", "afield", "=", "'afiled %s'", ")", ":", "afield", "=", "'afield %s'", "for", "key_txt", "in", "objectlist", ":", "key_i", "=", "dtls", ".", "index", "(", "key_txt", ".", "upper", "(", ")", ")", "comm", "=", "commdct", "[", "key_i", "]", "if", "block", ":", "blk", "=", "block", "[", "key_i", "]", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", ")", ":", "if", "cmt", "==", "{", "}", ":", "first_i", "=", "i", "break", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", ")", ":", "if", "i", ">=", "first_i", ":", "if", "block", ":", "comm", "[", "i", "]", "[", "'field'", "]", "=", "[", "'%s'", "%", "(", "blk", "[", "i", "]", ")", "]", "else", ":", "comm", "[", "i", "]", "[", "'field'", "]", "=", "[", "afield", "%", "(", "i", "-", "first_i", "+", "1", ",", ")", ",", "]" ]
This is an object list where there is no first field name to give a hint of what the first field name should be
[ "This", "is", "an", "object", "list", "where", "there", "is", "no", "first", "field", "name", "to", "give", "a", "hint", "of", "what", "the", "first", "field", "name", "should", "be" ]
python
train
38.210526
zetaops/zengine
zengine/messaging/views.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/views.py#L305-L342
def unread_count(current): """ Number of unread messages for current user .. code-block:: python # request: { 'view':'_zops_unread_count', } # response: { 'status': 'OK', 'code': 200, 'notifications': int, 'messages': int, } """ unread_ntf = 0 unread_msg = 0 for sbs in current.user.subscriptions.objects.filter(is_visible=True): try: if sbs.channel.key == current.user.prv_exchange: unread_ntf += sbs.unread_count() else: unread_msg += sbs.unread_count() except ObjectDoesNotExist: # FIXME: This should not happen, log.exception("MULTIPLE PRV EXCHANGES!!!!") sbs.delete() current.output = { 'status': 'OK', 'code': 200, 'notifications': unread_ntf, 'messages': unread_msg }
[ "def", "unread_count", "(", "current", ")", ":", "unread_ntf", "=", "0", "unread_msg", "=", "0", "for", "sbs", "in", "current", ".", "user", ".", "subscriptions", ".", "objects", ".", "filter", "(", "is_visible", "=", "True", ")", ":", "try", ":", "if", "sbs", ".", "channel", ".", "key", "==", "current", ".", "user", ".", "prv_exchange", ":", "unread_ntf", "+=", "sbs", ".", "unread_count", "(", ")", "else", ":", "unread_msg", "+=", "sbs", ".", "unread_count", "(", ")", "except", "ObjectDoesNotExist", ":", "# FIXME: This should not happen,", "log", ".", "exception", "(", "\"MULTIPLE PRV EXCHANGES!!!!\"", ")", "sbs", ".", "delete", "(", ")", "current", ".", "output", "=", "{", "'status'", ":", "'OK'", ",", "'code'", ":", "200", ",", "'notifications'", ":", "unread_ntf", ",", "'messages'", ":", "unread_msg", "}" ]
Number of unread messages for current user .. code-block:: python # request: { 'view':'_zops_unread_count', } # response: { 'status': 'OK', 'code': 200, 'notifications': int, 'messages': int, }
[ "Number", "of", "unread", "messages", "for", "current", "user" ]
python
train
26.289474
jamesturk/scrapelib
scrapelib/__init__.py
https://github.com/jamesturk/scrapelib/blob/dcae9fa86f1fdcc4b4e90dbca12c8063bcb36525/scrapelib/__init__.py#L295-L333
def urlretrieve(self, url, filename=None, method='GET', body=None, dir=None, **kwargs): """ Save result of a request to a file, similarly to :func:`urllib.urlretrieve`. If an error is encountered may raise any of the scrapelib `exceptions`_. A filename may be provided or :meth:`urlretrieve` will safely create a temporary file. If a directory is provided, a file will be given a random name within the specified directory. Either way, it is the responsibility of the caller to ensure that the temporary file is deleted when it is no longer needed. :param url: URL for request :param filename: optional name for file :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param dir: optional directory to place file in :returns filename, response: tuple with filename for saved response (will be same as given filename if one was given, otherwise will be a temp file in the OS temp directory) and a :class:`Response` object that can be used to inspect the response headers. """ result = self.request(method, url, data=body, **kwargs) result.code = result.status_code # backwards compat if not filename: fd, filename = tempfile.mkstemp(dir=dir) f = os.fdopen(fd, 'wb') else: f = open(filename, 'wb') f.write(result.content) f.close() return filename, result
[ "def", "urlretrieve", "(", "self", ",", "url", ",", "filename", "=", "None", ",", "method", "=", "'GET'", ",", "body", "=", "None", ",", "dir", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "request", "(", "method", ",", "url", ",", "data", "=", "body", ",", "*", "*", "kwargs", ")", "result", ".", "code", "=", "result", ".", "status_code", "# backwards compat", "if", "not", "filename", ":", "fd", ",", "filename", "=", "tempfile", ".", "mkstemp", "(", "dir", "=", "dir", ")", "f", "=", "os", ".", "fdopen", "(", "fd", ",", "'wb'", ")", "else", ":", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "result", ".", "content", ")", "f", ".", "close", "(", ")", "return", "filename", ",", "result" ]
Save result of a request to a file, similarly to :func:`urllib.urlretrieve`. If an error is encountered may raise any of the scrapelib `exceptions`_. A filename may be provided or :meth:`urlretrieve` will safely create a temporary file. If a directory is provided, a file will be given a random name within the specified directory. Either way, it is the responsibility of the caller to ensure that the temporary file is deleted when it is no longer needed. :param url: URL for request :param filename: optional name for file :param method: any valid HTTP method, but generally GET or POST :param body: optional body for request, to turn parameters into an appropriate string use :func:`urllib.urlencode()` :param dir: optional directory to place file in :returns filename, response: tuple with filename for saved response (will be same as given filename if one was given, otherwise will be a temp file in the OS temp directory) and a :class:`Response` object that can be used to inspect the response headers.
[ "Save", "result", "of", "a", "request", "to", "a", "file", "similarly", "to", ":", "func", ":", "urllib", ".", "urlretrieve", "." ]
python
train
42.102564
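A stripped-down sketch of the retrieve-to-temp-file pattern from the scrapelib urlretrieve record above, using requests and tempfile directly; the URL is a placeholder and none of scrapelib's retry or throttling behaviour is reproduced.

import os
import tempfile

import requests

def urlretrieve(url, filename=None, dir=None):
    # Fetch the URL and write the body into either the given file or a fresh temp file.
    response = requests.get(url)
    if not filename:
        fd, filename = tempfile.mkstemp(dir=dir)
        f = os.fdopen(fd, 'wb')
    else:
        f = open(filename, 'wb')
    f.write(response.content)
    f.close()
    return filename, response

# filename, response = urlretrieve('https://example.com/')   # placeholder URL, needs network access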
macbre/sql-metadata
sql_metadata.py
https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L269-L306
def generalize_sql(sql): """ Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql str|None :rtype: str """ if sql is None: return None # multiple spaces sql = re.sub(r'\s{2,}', ' ', sql) # MW comments # e.g. /* CategoryDataService::getMostVisited N.N.N.N */ sql = remove_comments_from_sql(sql) # handle LIKE statements sql = normalize_likes(sql) sql = re.sub(r"\\\\", '', sql) sql = re.sub(r"\\'", '', sql) sql = re.sub(r'\\"', '', sql) sql = re.sub(r"'[^\']*'", 'X', sql) sql = re.sub(r'"[^\"]*"', 'X', sql) # All newlines, tabs, etc replaced by single space sql = re.sub(r'\s+', ' ', sql) # All numbers => N sql = re.sub(r'-?[0-9]+', 'N', sql) # WHERE foo IN ('880987','882618','708228','522330') sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE) return sql.strip()
[ "def", "generalize_sql", "(", "sql", ")", ":", "if", "sql", "is", "None", ":", "return", "None", "# multiple spaces", "sql", "=", "re", ".", "sub", "(", "r'\\s{2,}'", ",", "' '", ",", "sql", ")", "# MW comments", "# e.g. /* CategoryDataService::getMostVisited N.N.N.N */", "sql", "=", "remove_comments_from_sql", "(", "sql", ")", "# handle LIKE statements", "sql", "=", "normalize_likes", "(", "sql", ")", "sql", "=", "re", ".", "sub", "(", "r\"\\\\\\\\\"", ",", "''", ",", "sql", ")", "sql", "=", "re", ".", "sub", "(", "r\"\\\\'\"", ",", "''", ",", "sql", ")", "sql", "=", "re", ".", "sub", "(", "r'\\\\\"'", ",", "''", ",", "sql", ")", "sql", "=", "re", ".", "sub", "(", "r\"'[^\\']*'\"", ",", "'X'", ",", "sql", ")", "sql", "=", "re", ".", "sub", "(", "r'\"[^\\\"]*\"'", ",", "'X'", ",", "sql", ")", "# All newlines, tabs, etc replaced by single space", "sql", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "sql", ")", "# All numbers => N", "sql", "=", "re", ".", "sub", "(", "r'-?[0-9]+'", ",", "'N'", ",", "sql", ")", "# WHERE foo IN ('880987','882618','708228','522330')", "sql", "=", "re", ".", "sub", "(", "r' (IN|VALUES)\\s*\\([^,]+,[^)]+\\)'", ",", "' \\\\1 (XYZ)'", ",", "sql", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "return", "sql", ".", "strip", "(", ")" ]
Removes most variables from an SQL query and replaces them with X or N for numbers. Based on Mediawiki's DatabaseBase::generalizeSQL :type sql str|None :rtype: str
[ "Removes", "most", "variables", "from", "an", "SQL", "query", "and", "replaces", "them", "with", "X", "or", "N", "for", "numbers", "." ]
python
train
25.789474
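A cut-down sketch of the normalisation idea in the generalize_sql record above: string literals become X and numbers become N. It covers only a subset of the original rules (no comment or LIKE handling) and is meant purely as an illustration.

import re

def generalize(sql):
    sql = re.sub(r"'[^']*'", 'X', sql)        # single-quoted literals -> X
    sql = re.sub(r'"[^"]*"', 'X', sql)        # double-quoted literals -> X
    sql = re.sub(r'\s+', ' ', sql)            # collapse whitespace
    sql = re.sub(r'-?\b\d+\b', 'N', sql)      # numbers -> N
    return sql.strip()

print(generalize("SELECT * FROM page WHERE page_id = 12345 AND title = 'Main Page'"))
# SELECT * FROM page WHERE page_id = N AND title = X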
openpermissions/koi
koi/utils.py
https://github.com/openpermissions/koi/blob/d721f8e1dfa8f07ad265d9dec32e8aaf80a9f281/koi/utils.py#L36-L49
def listify(*args): """ Convert args to a list, unless there's one arg and it's a function, then acts a decorator. """ if (len(args) == 1) and callable(args[0]): func = args[0] @wraps(func) def _inner(*args, **kwargs): return list(func(*args, **kwargs)) return _inner else: return list(args)
[ "def", "listify", "(", "*", "args", ")", ":", "if", "(", "len", "(", "args", ")", "==", "1", ")", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "func", "=", "args", "[", "0", "]", "@", "wraps", "(", "func", ")", "def", "_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "list", "(", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "_inner", "else", ":", "return", "list", "(", "args", ")" ]
Convert args to a list, unless there's one arg and it's a function, in which case it acts as a decorator.
[ "Convert", "args", "to", "a", "list", "unless", "there", "s", "one", "arg", "and", "it", "s", "a", "function", "in", "which", "case", "it", "acts", "as", "a", "decorator", "." ]
python
train
25.357143
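A small demonstration of the dual behaviour described in the listify record above, restated locally so it runs on its own: called with plain arguments it builds a list, and used bare as a decorator it converts a generator's output to a list.

from functools import wraps

def listify(*args):
    # With a single callable argument, act as a decorator; otherwise just collect the args.
    if len(args) == 1 and callable(args[0]):
        func = args[0]

        @wraps(func)
        def _inner(*a, **kw):
            return list(func(*a, **kw))
        return _inner
    return list(args)

print(listify(1, 2, 3))        # [1, 2, 3]

@listify
def squares(n):
    for i in range(n):
        yield i * i

print(squares(4))              # [0, 1, 4, 9]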
philipsoutham/py-mysql2pgsql
mysql2pgsql/lib/postgres_db_writer.py
https://github.com/philipsoutham/py-mysql2pgsql/blob/66dc2a3a3119263b3fe77300fb636346509787ef/mysql2pgsql/lib/postgres_db_writer.py#L196-L206
def write_contents(self, table, reader): """Write the contents of `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source. Returns None """ f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose) self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns])
[ "def", "write_contents", "(", "self", ",", "table", ",", "reader", ")", ":", "f", "=", "self", ".", "FileObjFaker", "(", "table", ",", "reader", ".", "read", "(", "table", ")", ",", "self", ".", "process_row", ",", "self", ".", "verbose", ")", "self", ".", "copy_from", "(", "f", ",", "'\"%s\"'", "%", "table", ".", "name", ",", "[", "'\"%s\"'", "%", "c", "[", "'name'", "]", "for", "c", "in", "table", ".", "columns", "]", ")" ]
Write the contents of `table` :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. - `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source. Returns None
[ "Write", "the", "contents", "of", "table" ]
python
test
54
etcher-be/emiz
emiz/mission_time.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/mission_time.py#L51-L77
def from_string(input_str) -> 'MissionTime': # noinspection SpellCheckingInspection """ Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance """ match = RE_INPUT_STRING.match(input_str) if not match: raise ValueError(f'badly formatted date/time: {input_str}') return MissionTime( datetime.datetime( int(match.group('year')), int(match.group('month')), int(match.group('day')), int(match.group('hour')), int(match.group('minute')), int(match.group('second')), ) )
[ "def", "from_string", "(", "input_str", ")", "->", "'MissionTime'", ":", "# noinspection SpellCheckingInspection", "match", "=", "RE_INPUT_STRING", ".", "match", "(", "input_str", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "f'badly formatted date/time: {input_str}'", ")", "return", "MissionTime", "(", "datetime", ".", "datetime", "(", "int", "(", "match", ".", "group", "(", "'year'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'month'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'day'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'hour'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'minute'", ")", ")", ",", "int", "(", "match", ".", "group", "(", "'second'", ")", ")", ",", ")", ")" ]
Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance
[ "Creates", "a", "MissionTime", "instance", "from", "a", "string" ]
python
train
27.814815
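The YYYYMMDDHHMMSS parsing in the MissionTime.from_string record above can equally be done with datetime.strptime; this sketch shows that equivalent, leaving out the MissionTime wrapper class and the named-group regex.

import datetime

def parse_mission_time(input_str):
    # 'YYYYMMDDHHMMSS' -> datetime; raises ValueError on badly formatted input.
    return datetime.datetime.strptime(input_str, '%Y%m%d%H%M%S')

print(parse_mission_time('20240630143000'))   # 2024-06-30 14:30:00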
HolmesNL/confidence
confidence/io.py
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/io.py#L273-L304
def load_name(*names, load_order=DEFAULT_LOAD_ORDER, extension='yaml', missing=Missing.silent): """ Read a `.Configuration` instance by name, trying to read from files in increasing significance. The default load order is `.system`, `.user`, `.application`, `.environment`. Multiple names are combined with multiple loaders using names as the 'inner loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml`` before ``./name1.yaml`` and ``./name2.yaml``. :param names: application or configuration set names, in increasing significance :param load_order: ordered list of name templates or `callable`s, in increasing order of significance :param extension: file extension to be used :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instances providing values loaded from *names* in *load_order* ordering """ def generate_sources(): # argument order for product matters, for names "foo" and "bar": # /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml for source, name in product(load_order, names): if callable(source): yield source(name, extension) else: # expand user to turn ~/.name.yaml into /home/user/.name.yaml candidate = path.expanduser(source.format(name=name, extension=extension)) yield loadf(candidate, default=NotConfigured) return Configuration(*generate_sources(), missing=missing)
[ "def", "load_name", "(", "*", "names", ",", "load_order", "=", "DEFAULT_LOAD_ORDER", ",", "extension", "=", "'yaml'", ",", "missing", "=", "Missing", ".", "silent", ")", ":", "def", "generate_sources", "(", ")", ":", "# argument order for product matters, for names \"foo\" and \"bar\":", "# /etc/foo.yaml before /etc/bar.yaml, but both of them before ~/.foo.yaml and ~/.bar.yaml", "for", "source", ",", "name", "in", "product", "(", "load_order", ",", "names", ")", ":", "if", "callable", "(", "source", ")", ":", "yield", "source", "(", "name", ",", "extension", ")", "else", ":", "# expand user to turn ~/.name.yaml into /home/user/.name.yaml", "candidate", "=", "path", ".", "expanduser", "(", "source", ".", "format", "(", "name", "=", "name", ",", "extension", "=", "extension", ")", ")", "yield", "loadf", "(", "candidate", ",", "default", "=", "NotConfigured", ")", "return", "Configuration", "(", "*", "generate_sources", "(", ")", ",", "missing", "=", "missing", ")" ]
Read a `.Configuration` instance by name, trying to read from files in increasing significance. The default load order is `.system`, `.user`, `.application`, `.environment`. Multiple names are combined with multiple loaders using names as the 'inner loop / selector', loading ``/etc/name1.yaml`` and ``/etc/name2.yaml`` before ``./name1.yaml`` and ``./name2.yaml``. :param names: application or configuration set names, in increasing significance :param load_order: ordered list of name templates or `callable`s, in increasing order of significance :param extension: file extension to be used :param missing: policy to be used when a configured key is missing, either as a `.Missing` instance or a default value :return: a `.Configuration` instances providing values loaded from *names* in *load_order* ordering
[ "Read", "a", ".", "Configuration", "instance", "by", "name", "trying", "to", "read", "from", "files", "in", "increasing", "significance", ".", "The", "default", "load", "order", "is", ".", "system", ".", "user", ".", "application", ".", "environment", "." ]
python
train
50.53125
kennethreitz/legit
legit/utils.py
https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L8-L20
def status_log(func, message, *args, **kwargs): """Emits header message, executes a callable, and echoes the return strings.""" click.echo(message) log = func(*args, **kwargs) if log: out = [] for line in log.split('\n'): if not line.startswith('#'): out.append(line) click.echo(black('\n'.join(out)))
[ "def", "status_log", "(", "func", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "click", ".", "echo", "(", "message", ")", "log", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "log", ":", "out", "=", "[", "]", "for", "line", "in", "log", ".", "split", "(", "'\\n'", ")", ":", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "out", ".", "append", "(", "line", ")", "click", ".", "echo", "(", "black", "(", "'\\n'", ".", "join", "(", "out", ")", ")", ")" ]
Emits header message, executes a callable, and echoes the return strings.
[ "Emits", "header", "message", "executes", "a", "callable", "and", "echoes", "the", "return", "strings", "." ]
python
train
27.692308
instana/python-sensor
instana/fsm.py
https://github.com/instana/python-sensor/blob/58aecb90924c48bafcbc4f93bd9b7190980918bc/instana/fsm.py#L168-L193
def __get_real_pid(self): """ Attempts to determine the true process ID by querying the /proc/<pid>/sched file. This works on systems with a proc filesystem. Otherwise default to os default. """ pid = None if os.path.exists("/proc/"): sched_file = "/proc/%d/sched" % os.getpid() if os.path.isfile(sched_file): try: file = open(sched_file) line = file.readline() g = re.search(r'\((\d+),', line) if len(g.groups()) == 1: pid = int(g.groups()[0]) except Exception: logger.debug("parsing sched file failed", exc_info=True) pass if pid is None: pid = os.getpid() return pid
[ "def", "__get_real_pid", "(", "self", ")", ":", "pid", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "\"/proc/\"", ")", ":", "sched_file", "=", "\"/proc/%d/sched\"", "%", "os", ".", "getpid", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "sched_file", ")", ":", "try", ":", "file", "=", "open", "(", "sched_file", ")", "line", "=", "file", ".", "readline", "(", ")", "g", "=", "re", ".", "search", "(", "r'\\((\\d+),'", ",", "line", ")", "if", "len", "(", "g", ".", "groups", "(", ")", ")", "==", "1", ":", "pid", "=", "int", "(", "g", ".", "groups", "(", ")", "[", "0", "]", ")", "except", "Exception", ":", "logger", ".", "debug", "(", "\"parsing sched file failed\"", ",", "exc_info", "=", "True", ")", "pass", "if", "pid", "is", "None", ":", "pid", "=", "os", ".", "getpid", "(", ")", "return", "pid" ]
Attempts to determine the true process ID by querying the /proc/<pid>/sched file. This works on systems with a proc filesystem. Otherwise default to os default.
[ "Attempts", "to", "determine", "the", "true", "process", "ID", "by", "querying", "the", "/", "proc", "/", "<pid", ">", "/", "sched", "file", ".", "This", "works", "on", "systems", "with", "a", "proc", "filesystem", ".", "Otherwise", "default", "to", "os", "default", "." ]
python
train
32.038462
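The regex used against /proc/<pid>/sched in the __get_real_pid record above can be exercised on a sample first line without touching the proc filesystem; the sample line is invented for the example.

import re

sample_first_line = 'gunicorn (4021, #threads: 1)'   # hypothetical first line of /proc/<pid>/sched

match = re.search(r'\((\d+),', sample_first_line)
real_pid = int(match.group(1)) if match else None
print(real_pid)   # 4021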
shaiguitar/snowclient.py
snowclient/api.py
https://github.com/shaiguitar/snowclient.py/blob/6bb513576d3b37612a7a4da225140d134f3e1c82/snowclient/api.py#L80-L89
def req(self, meth, url, http_data=''): """ sugar that wraps the 'requests' module with basic auth and some headers. """ self.logger.debug("Making request: %s %s\nBody:%s" % (meth, url, http_data)) req_method = getattr(requests, meth) return (req_method(url, auth=(self.__username, self.__password), data=http_data, headers=({'user-agent': self.user_agent(), 'Accept': 'application/json'})))
[ "def", "req", "(", "self", ",", "meth", ",", "url", ",", "http_data", "=", "''", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Making request: %s %s\\nBody:%s\"", "%", "(", "meth", ",", "url", ",", "http_data", ")", ")", "req_method", "=", "getattr", "(", "requests", ",", "meth", ")", "return", "(", "req_method", "(", "url", ",", "auth", "=", "(", "self", ".", "__username", ",", "self", ".", "__password", ")", ",", "data", "=", "http_data", ",", "headers", "=", "(", "{", "'user-agent'", ":", "self", ".", "user_agent", "(", ")", ",", "'Accept'", ":", "'application/json'", "}", ")", ")", ")" ]
sugar that wraps the 'requests' module with basic auth and some headers.
[ "sugar", "that", "wraps", "the", "requests", "module", "with", "basic", "auth", "and", "some", "headers", "." ]
python
train
51.1
PmagPy/PmagPy
programs/orientation_magic.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/orientation_magic.py#L8-L111
def main(): """ NAME orientation_magic.py DESCRIPTION takes tab delimited field notebook information and converts to MagIC formatted tables SYNTAX orientation_magic.py [command line options] OPTIONS -f FILE: specify input file, default is: orient.txt -Fsa FILE: specify output file, default is: er_samples.txt -Fsi FILE: specify output site location file, default is: er_sites.txt -app append/update these data in existing er_samples.txt, er_sites.txt files -ocn OCON: specify orientation convention, default is #1 below -dcn DCON [DEC]: specify declination convention, default is #1 below if DCON = 2, you must supply the declination correction -BCN don't correct bedding_dip_dir for magnetic declination -already corrected -ncn NCON: specify naming convention: default is #1 below -a: averages all bedding poles and uses average for all samples: default is NO -gmt HRS: specify hours to subtract from local time to get GMT: default is 0 -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM] FS-FD field sampling done with a drill FS-H field sampling done with hand samples FS-LOC-GPS field location done with GPS FS-LOC-MAP field location done with map SO-POM a Pomeroy orientation device was used SO-ASC an ASC orientation device was used -DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format. Orientation convention: Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below. [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down) of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth; lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade. [2] Field arrow is the strike of the plane orthogonal to the drill direction, Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90 Lab arrow dip = -field_dip [3] Lab arrow is the same as the drill direction; hade was measured in the field. Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too [5] Same as AZDIP convention explained below - azimuth and inclination of the drill direction are mag_azimuth and field_dip; lab arrow is as in [1] above. lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90 [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip [7] all others you will have to either customize your self or e-mail [email protected] for help. Magnetic declination convention: [1] Use the IGRF value at the lat/long and date supplied [default] [2] Will supply declination correction [3] mag_az is already corrected in file [4] Correct mag_az but not bedding_dip_dir Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY NB: all others you will have to either customize your self or e-mail [email protected] for help. OUTPUT output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0) - this will overwrite any existing files """ args = sys.argv if "-h" in args: print(main.__doc__) sys.exit() else: info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'], ['app', False, False], ['ocn', False, 1], ['dcn', False, 1], ['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0], ['mcd', False, ''], ['a', False, False], ['DM', False, 3]] #output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding # leave off -Fsa, -Fsi b/c defaults in command_line_extractor dataframe = extractor.command_line_dataframe(info) checked_args = extractor.extract_and_check_args(args, dataframe) output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args) if input_dir_path == '.': input_dir_path = output_dir_path if not isinstance(dec_correction_con, int): if len(dec_correction_con) > 1: dec_correction = int(dec_correction_con.split()[1]) dec_correction_con = int(dec_correction_con.split()[0]) else: dec_correction = 0 else: dec_correction = 0 ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, orient_file, samp_file, site_file, output_dir_path, input_dir_path, append, data_model)
[ "def", "main", "(", ")", ":", "args", "=", "sys", ".", "argv", "if", "\"-h\"", "in", "args", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "else", ":", "info", "=", "[", "[", "'WD'", ",", "False", ",", "'.'", "]", ",", "[", "'ID'", ",", "False", ",", "''", "]", ",", "[", "'f'", ",", "False", ",", "'orient.txt'", "]", ",", "[", "'app'", ",", "False", ",", "False", "]", ",", "[", "'ocn'", ",", "False", ",", "1", "]", ",", "[", "'dcn'", ",", "False", ",", "1", "]", ",", "[", "'BCN'", ",", "False", ",", "True", "]", ",", "[", "'ncn'", ",", "False", ",", "'1'", "]", ",", "[", "'gmt'", ",", "False", ",", "0", "]", ",", "[", "'mcd'", ",", "False", ",", "''", "]", ",", "[", "'a'", ",", "False", ",", "False", "]", ",", "[", "'DM'", ",", "False", ",", "3", "]", "]", "#output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding", "# leave off -Fsa, -Fsi b/c defaults in command_line_extractor", "dataframe", "=", "extractor", ".", "command_line_dataframe", "(", "info", ")", "checked_args", "=", "extractor", ".", "extract_and_check_args", "(", "args", ",", "dataframe", ")", "output_dir_path", ",", "input_dir_path", ",", "orient_file", ",", "append", ",", "or_con", ",", "dec_correction_con", ",", "bed_correction", ",", "samp_con", ",", "hours_from_gmt", ",", "method_codes", ",", "average_bedding", ",", "samp_file", ",", "site_file", ",", "data_model", "=", "extractor", ".", "get_vars", "(", "[", "'WD'", ",", "'ID'", ",", "'f'", ",", "'app'", ",", "'ocn'", ",", "'dcn'", ",", "'BCN'", ",", "'ncn'", ",", "'gmt'", ",", "'mcd'", ",", "'a'", ",", "'Fsa'", ",", "'Fsi'", ",", "'DM'", "]", ",", "checked_args", ")", "if", "input_dir_path", "==", "'.'", ":", "input_dir_path", "=", "output_dir_path", "if", "not", "isinstance", "(", "dec_correction_con", ",", "int", ")", ":", "if", "len", "(", "dec_correction_con", ")", ">", "1", ":", "dec_correction", "=", "int", "(", "dec_correction_con", ".", "split", "(", ")", "[", "1", "]", ")", "dec_correction_con", "=", "int", "(", "dec_correction_con", ".", "split", "(", ")", "[", "0", "]", ")", "else", ":", "dec_correction", "=", "0", "else", ":", "dec_correction", "=", "0", "ipmag", ".", "orientation_magic", "(", "or_con", ",", "dec_correction_con", ",", "dec_correction", ",", "bed_correction", ",", "samp_con", ",", "hours_from_gmt", ",", "method_codes", ",", "average_bedding", ",", "orient_file", ",", "samp_file", ",", "site_file", ",", "output_dir_path", ",", "input_dir_path", ",", "append", ",", "data_model", ")" ]
NAME orientation_magic.py DESCRIPTION takes tab delimited field notebook information and converts to MagIC formatted tables SYNTAX orientation_magic.py [command line options] OPTIONS -f FILE: specify input file, default is: orient.txt -Fsa FILE: specify output file, default is: er_samples.txt -Fsi FILE: specify output site location file, default is: er_sites.txt -app append/update these data in existing er_samples.txt, er_sites.txt files -ocn OCON: specify orientation convention, default is #1 below -dcn DCON [DEC]: specify declination convention, default is #1 below if DCON = 2, you must supply the declination correction -BCN don't correct bedding_dip_dir for magnetic declination -already corrected -ncn NCON: specify naming convention: default is #1 below -a: averages all bedding poles and uses average for all samples: default is NO -gmt HRS: specify hours to subtract from local time to get GMT: default is 0 -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM] FS-FD field sampling done with a drill FS-H field sampling done with hand samples FS-LOC-GPS field location done with GPS FS-LOC-MAP field location done with map SO-POM a Pomeroy orientation device was used SO-ASC an ASC orientation device was used -DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format. Orientation convention: Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below. [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down) of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth; lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade. [2] Field arrow is the strike of the plane orthogonal to the drill direction, Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90 Lab arrow dip = -field_dip [3] Lab arrow is the same as the drill direction; hade was measured in the field. Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too [5] Same as AZDIP convention explained below - azimuth and inclination of the drill direction are mag_azimuth and field_dip; lab arrow is as in [1] above. lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90 [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip [7] all others you will have to either customize your self or e-mail [email protected] for help. Magnetic declination convention: [1] Use the IGRF value at the lat/long and date supplied [default] [2] Will supply declination correction [3] mag_az is already corrected in file [4] Correct mag_az but not bedding_dip_dir Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. 
[default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY NB: all others you will have to either customize your self or e-mail [email protected] for help. OUTPUT output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0) - this will overwrite any existing files
[ "NAME", "orientation_magic", ".", "py" ]
python
train
63.548077
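The orientation conventions listed in the record above all reduce to small arithmetic rules on the field-arrow pair (mag_azimuth, field_dip). A minimal sketch of conventions 1-6 follows; the helper name is hypothetical and is not part of PmagPy's API.

def field_to_lab_arrow(mag_azimuth, field_dip, or_con=1):
    # Hypothetical helper restating orientation conventions 1-6 from the
    # orientation_magic docstring; returns (lab_azimuth, lab_dip).
    if or_con == 1:   # Pomeroy: hade measured, lab dip is minus the hade
        return mag_azimuth, -field_dip
    if or_con == 2:   # field arrow is strike of plane orthogonal to drill dir
        return mag_azimuth - 90.0, -field_dip
    if or_con == 3:   # lab arrow equals drill direction, hade measured
        return mag_azimuth, 90.0 - field_dip
    if or_con == 4:   # already the lab azimuth and dip
        return mag_azimuth, field_dip
    if or_con == 5:   # AZDIP-style: inclination of drill direction given
        return mag_azimuth, field_dip - 90.0
    if or_con == 6:
        return mag_azimuth - 90.0, 90.0 - field_dip
    raise ValueError("conventions 7 and above must be customized")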
mastro35/tyler
tyler.py
https://github.com/mastro35/tyler/blob/9f26ca4db45308a006f7848fa58079ca28eb9873/tyler.py#L139-L158
def main(): '''Entry point''' if len(sys.argv) == 1: print("Usage: tyler [filename]") sys.exit(0) filename = sys.argv[1] if not os.path.isfile(filename): print("Specified file does not exists") sys.exit(8) my_tyler = Tyler(filename=filename) while True: try: for line in my_tyler: print(line) time.sleep(1) except KeyboardInterrupt: print("Quit signal received") sys.exit(0)
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "==", "1", ":", "print", "(", "\"Usage: tyler [filename]\"", ")", "sys", ".", "exit", "(", "0", ")", "filename", "=", "sys", ".", "argv", "[", "1", "]", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "print", "(", "\"Specified file does not exists\"", ")", "sys", ".", "exit", "(", "8", ")", "my_tyler", "=", "Tyler", "(", "filename", "=", "filename", ")", "while", "True", ":", "try", ":", "for", "line", "in", "my_tyler", ":", "print", "(", "line", ")", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "print", "(", "\"Quit signal received\"", ")", "sys", ".", "exit", "(", "0", ")" ]
Entry point
[ "Entry", "point" ]
python
train
24.65
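Assuming the script above is invoked directly, a typical call tails a growing log file until interrupted; the path is only an example:

    python tyler.py /var/log/syslog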
django-import-export/django-import-export
import_export/resources.py
https://github.com/django-import-export/django-import-export/blob/127f00d03fd0ad282615b064b7f444a639e6ff0c/import_export/resources.py#L297-L310
def save_instance(self, instance, using_transactions=True, dry_run=False): """ Takes care of saving the object to the database. Keep in mind that this is done by calling ``instance.save()``, so objects are not created in bulk! """ self.before_save_instance(instance, using_transactions, dry_run) if not using_transactions and dry_run: # we don't have transactions and we want to do a dry_run pass else: instance.save() self.after_save_instance(instance, using_transactions, dry_run)
[ "def", "save_instance", "(", "self", ",", "instance", ",", "using_transactions", "=", "True", ",", "dry_run", "=", "False", ")", ":", "self", ".", "before_save_instance", "(", "instance", ",", "using_transactions", ",", "dry_run", ")", "if", "not", "using_transactions", "and", "dry_run", ":", "# we don't have transactions and we want to do a dry_run", "pass", "else", ":", "instance", ".", "save", "(", ")", "self", ".", "after_save_instance", "(", "instance", ",", "using_transactions", ",", "dry_run", ")" ]
Takes care of saving the object to the database. Keep in mind that this is done by calling ``instance.save()``, so objects are not created in bulk!
[ "Takes", "care", "of", "saving", "the", "object", "to", "the", "database", "." ]
python
train
41.285714
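Because save_instance() brackets every save with before_save_instance()/after_save_instance(), per-row tweaks are usually done by overriding those hooks. A hedged sketch, assuming a hypothetical Book model in a hypothetical myapp package:

from import_export import resources
from myapp.models import Book   # hypothetical app and model

class BookResource(resources.ModelResource):
    class Meta:
        model = Book

    def before_save_instance(self, instance, using_transactions, dry_run):
        # runs right before save_instance() calls instance.save()
        instance.title = instance.title.strip()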
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/__init__.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/__init__.py#L61-L76
def _final_frame_length(header, final_frame_bytes): """Calculates the length of a final ciphertext frame, given a complete header and the number of bytes of ciphertext in the final frame. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :param int final_frame_bytes: Bytes of ciphertext in the final frame :rtype: int """ final_frame_length = 4 # Sequence Number End final_frame_length += 4 # Sequence Number final_frame_length += header.algorithm.iv_len # IV final_frame_length += 4 # Encrypted Content Length final_frame_length += final_frame_bytes # Encrypted Content final_frame_length += header.algorithm.auth_len # Authentication Tag return final_frame_length
[ "def", "_final_frame_length", "(", "header", ",", "final_frame_bytes", ")", ":", "final_frame_length", "=", "4", "# Sequence Number End", "final_frame_length", "+=", "4", "# Sequence Number", "final_frame_length", "+=", "header", ".", "algorithm", ".", "iv_len", "# IV", "final_frame_length", "+=", "4", "# Encrypted Content Length", "final_frame_length", "+=", "final_frame_bytes", "# Encrypted Content", "final_frame_length", "+=", "header", ".", "algorithm", ".", "auth_len", "# Authentication Tag", "return", "final_frame_length" ]
Calculates the length of a final ciphertext frame, given a complete header and the number of bytes of ciphertext in the final frame. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :param int final_frame_bytes: Bytes of ciphertext in the final frame :rtype: int
[ "Calculates", "the", "length", "of", "a", "final", "ciphertext", "frame", "given", "a", "complete", "header", "and", "the", "number", "of", "bytes", "of", "ciphertext", "in", "the", "final", "frame", "." ]
python
train
48
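The final-frame length is a plain sum of fixed-size fields plus the ciphertext. A worked example with illustrative sizes (12-byte IV, 16-byte auth tag, 500 bytes of ciphertext; not taken from any particular algorithm suite):

# 4 (sequence number end) + 4 (sequence number) + 12 (IV)
# + 4 (encrypted content length) + 500 (encrypted content) + 16 (auth tag)
final_frame_length = 4 + 4 + 12 + 4 + 500 + 16
assert final_frame_length == 540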
brosner/django-timezones
timezones/utils.py
https://github.com/brosner/django-timezones/blob/43b437c39533e1832562a2c69247b89ae1af169e/timezones/utils.py#L16-L27
def adjust_datetime_to_timezone(value, from_tz, to_tz=None): """ Given a ``datetime`` object adjust it according to the from_tz timezone string into the to_tz timezone string. """ if to_tz is None: to_tz = settings.TIME_ZONE if value.tzinfo is None: if not hasattr(from_tz, "localize"): from_tz = pytz.timezone(smart_str(from_tz)) value = from_tz.localize(value) return value.astimezone(pytz.timezone(smart_str(to_tz)))
[ "def", "adjust_datetime_to_timezone", "(", "value", ",", "from_tz", ",", "to_tz", "=", "None", ")", ":", "if", "to_tz", "is", "None", ":", "to_tz", "=", "settings", ".", "TIME_ZONE", "if", "value", ".", "tzinfo", "is", "None", ":", "if", "not", "hasattr", "(", "from_tz", ",", "\"localize\"", ")", ":", "from_tz", "=", "pytz", ".", "timezone", "(", "smart_str", "(", "from_tz", ")", ")", "value", "=", "from_tz", ".", "localize", "(", "value", ")", "return", "value", ".", "astimezone", "(", "pytz", ".", "timezone", "(", "smart_str", "(", "to_tz", ")", ")", ")" ]
Given a ``datetime`` object adjust it according to the from_tz timezone string into the to_tz timezone string.
[ "Given", "a", "datetime", "object", "adjust", "it", "according", "to", "the", "from_tz", "timezone", "string", "into", "the", "to_tz", "timezone", "string", "." ]
python
train
39.333333
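A minimal usage sketch, assuming the function is imported from timezones.utils: a naive datetime is interpreted as US/Eastern and converted to UTC (an aware value would skip the localize step).

import datetime

from timezones.utils import adjust_datetime_to_timezone

naive = datetime.datetime(2019, 7, 1, 12, 0)
utc_value = adjust_datetime_to_timezone(naive, "US/Eastern", "UTC")
print(utc_value)   # 2019-07-01 16:00:00+00:00 (EDT is UTC-4 in July)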
yyuu/botornado
botornado/s3/bucket.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/s3/bucket.py#L142-L173
def get_all_keys(self, headers=None, callback=None, **params): """ A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested """ return self._get_all([('Contents', self.key_class), ('CommonPrefixes', boto.s3.prefix.Prefix)], '', headers, callback=callback, **params)
[ "def", "get_all_keys", "(", "self", ",", "headers", "=", "None", ",", "callback", "=", "None", ",", "*", "*", "params", ")", ":", "return", "self", ".", "_get_all", "(", "[", "(", "'Contents'", ",", "self", ".", "key_class", ")", ",", "(", "'CommonPrefixes'", ",", "boto", ".", "s3", ".", "prefix", ".", "Prefix", ")", "]", ",", "''", ",", "headers", ",", "callback", "=", "callback", ",", "*", "*", "params", ")" ]
A lower-level method for listing contents of a bucket. This closely models the actual S3 API and requires you to manually handle the paging of results. For a higher-level method that handles the details of paging for you, you can use the list method. :type max_keys: int :param max_keys: The maximum number of keys to retrieve :type prefix: string :param prefix: The prefix of the keys you want to retrieve :type marker: string :param marker: The "marker" of where you are in the result set :type delimiter: string :param delimiter: If this optional, Unicode string parameter is included with your request, then keys that contain the same string between the prefix and the first occurrence of the delimiter will be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. :rtype: ResultSet :return: The result from S3 listing the keys requested
[ "A", "lower", "-", "level", "method", "for", "listing", "contents", "of", "a", "bucket", ".", "This", "closely", "models", "the", "actual", "S3", "API", "and", "requires", "you", "to", "manually", "handle", "the", "paging", "of", "results", ".", "For", "a", "higher", "-", "level", "method", "that", "handles", "the", "details", "of", "paging", "for", "you", "you", "can", "use", "the", "list", "method", ".", ":", "type", "max_keys", ":", "int", ":", "param", "max_keys", ":", "The", "maximum", "number", "of", "keys", "to", "retrieve", ":", "type", "prefix", ":", "string", ":", "param", "prefix", ":", "The", "prefix", "of", "the", "keys", "you", "want", "to", "retrieve", ":", "type", "marker", ":", "string", ":", "param", "marker", ":", "The", "marker", "of", "where", "you", "are", "in", "the", "result", "set", ":", "type", "delimiter", ":", "string", ":", "param", "delimiter", ":", "If", "this", "optional", "Unicode", "string", "parameter", "is", "included", "with", "your", "request", "then", "keys", "that", "contain", "the", "same", "string", "between", "the", "prefix", "and", "the", "first", "occurrence", "of", "the", "delimiter", "will", "be", "rolled", "up", "into", "a", "single", "result", "element", "in", "the", "CommonPrefixes", "collection", ".", "These", "rolled", "-", "up", "keys", "are", "not", "returned", "elsewhere", "in", "the", "response", "." ]
python
train
46.96875
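A hedged usage sketch: the keyword arguments are the listing parameters described in the docstring, and the callback receives the resulting ResultSet in botornado's asynchronous style (the bucket object and handler name are hypothetical):

def on_keys(result_set):
    for key in result_set:
        print(key.name)

bucket.get_all_keys(prefix="logs/2019/", delimiter="/", max_keys=100,
                    callback=on_keys)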
getpelican/pelican-plugins
github_activity/github_activity.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/github_activity/github_activity.py#L63-L72
def register(): """ Plugin registration """ try: signals.article_generator_init.connect(feed_parser_initialization) signals.article_generator_context.connect(fetch_github_activity) except ImportError: logger.warning('`github_activity` failed to load dependency `feedparser`.' '`github_activity` plugin not loaded.')
[ "def", "register", "(", ")", ":", "try", ":", "signals", ".", "article_generator_init", ".", "connect", "(", "feed_parser_initialization", ")", "signals", ".", "article_generator_context", ".", "connect", "(", "fetch_github_activity", ")", "except", "ImportError", ":", "logger", ".", "warning", "(", "'`github_activity` failed to load dependency `feedparser`.'", "'`github_activity` plugin not loaded.'", ")" ]
Plugin registration
[ "Plugin", "registration" ]
python
train
37.7
ic-labs/django-icekit
icekit/fields.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/fields.py#L54-L63
def register_model_once(cls, ModelClass, **kwargs):
        """
        Tweaked version of `AnyUrlField.register_model` that only registers
        the given model after checking that it is not already registered.
        """
        if cls._static_registry.get_for_model(ModelClass) is not None:
            logger.warn("Model is already registered with {0}: '{1}'"
                        .format(cls, ModelClass))
        else:
            cls.register_model.register(ModelClass, **kwargs)
[ "def", "register_model_once", "(", "cls", ",", "ModelClass", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "_static_registry", ".", "get_for_model", "(", "ModelClass", ")", "is", "None", ":", "logger", ".", "warn", "(", "\"Model is already registered with {0}: '{1}'\"", ".", "format", "(", "cls", ",", "ModelClass", ")", ")", "else", ":", "cls", ".", "register_model", ".", "register", "(", "ModelClass", ",", "*", "*", "kwargs", ")" ]
Tweaked version of `AnyUrlField.register_model` that only registers the given model after checking that it is not already registered.
[ "Tweaked", "version", "of", "AnyUrlField", ".", "register_model", "that", "only", "registers", "the", "given", "model", "after", "checking", "that", "it", "is", "not", "already", "registered", "." ]
python
train
47.9
sloria/textblob-aptagger
textblob_aptagger/taggers.py
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L108-L124
def _normalize(self, word): '''Normalization used in pre-processing. - All words are lower cased - Digits in the range 1800-2100 are represented as !YEAR; - Other digits are represented as !DIGITS :rtype: str ''' if '-' in word and word[0] != '-': return '!HYPHEN' elif word.isdigit() and len(word) == 4: return '!YEAR' elif word[0].isdigit(): return '!DIGITS' else: return word.lower()
[ "def", "_normalize", "(", "self", ",", "word", ")", ":", "if", "'-'", "in", "word", "and", "word", "[", "0", "]", "!=", "'-'", ":", "return", "'!HYPHEN'", "elif", "word", ".", "isdigit", "(", ")", "and", "len", "(", "word", ")", "==", "4", ":", "return", "'!YEAR'", "elif", "word", "[", "0", "]", ".", "isdigit", "(", ")", ":", "return", "'!DIGITS'", "else", ":", "return", "word", ".", "lower", "(", ")" ]
Normalization used in pre-processing. - All words are lower cased - Digits in the range 1800-2100 are represented as !YEAR; - Other digits are represented as !DIGITS :rtype: str
[ "Normalization", "used", "in", "pre", "-", "processing", "." ]
python
train
29.411765
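The normalization rules above are easiest to see on a few tokens (tagger is a hypothetical instance of the class this method belongs to):

for word in ["Don't", "1984", "35", "well-off"]:
    print(tagger._normalize(word))
# don't      - plain lower-casing
# !YEAR      - any four-digit number
# !DIGITS    - token starting with a digit
# !HYPHEN    - internal hyphen, token does not start with '-'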
wandb/client
wandb/vendor/prompt_toolkit/buffer.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/buffer.py#L1332-L1346
def indent(buffer, from_row, to_row, count=1): """ Indent text of a :class:`.Buffer` object. """ current_row = buffer.document.cursor_position_row line_range = range(from_row, to_row) # Apply transformation. new_text = buffer.transform_lines(line_range, lambda l: ' ' * count + l) buffer.document = Document( new_text, Document(new_text).translate_row_col_to_index(current_row, 0)) # Go to the start of the line. buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
[ "def", "indent", "(", "buffer", ",", "from_row", ",", "to_row", ",", "count", "=", "1", ")", ":", "current_row", "=", "buffer", ".", "document", ".", "cursor_position_row", "line_range", "=", "range", "(", "from_row", ",", "to_row", ")", "# Apply transformation.", "new_text", "=", "buffer", ".", "transform_lines", "(", "line_range", ",", "lambda", "l", ":", "' '", "*", "count", "+", "l", ")", "buffer", ".", "document", "=", "Document", "(", "new_text", ",", "Document", "(", "new_text", ")", ".", "translate_row_col_to_index", "(", "current_row", ",", "0", ")", ")", "# Go to the start of the line.", "buffer", ".", "cursor_position", "+=", "buffer", ".", "document", ".", "get_start_of_line_position", "(", "after_whitespace", "=", "True", ")" ]
Indent text of a :class:`.Buffer` object.
[ "Indent", "text", "of", "a", ":", "class", ":", ".", "Buffer", "object", "." ]
python
train
36.733333
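Stripped of the prompt_toolkit Buffer/Document machinery, the transformation applied by indent() is just a per-line prefix over the half-open row range. A plain-Python sketch (helper name hypothetical):

def indent_lines(text, from_row, to_row, count=1):
    # Prefix every line in [from_row, to_row) with count * 4 spaces,
    # mirroring the lambda passed to Buffer.transform_lines above.
    lines = text.split("\n")
    for i in range(from_row, min(to_row, len(lines))):
        lines[i] = "    " * count + lines[i]
    return "\n".join(lines)

print(indent_lines("def f():\npass", 1, 2))   # indents only the second line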
Cue/scales
src/greplin/scales/__init__.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L415-L418
def update(self, instance, oldValue, newValue): """Updates the aggregate based on a change in the child value.""" self.__set__(instance, self.__get__(instance, None) + newValue - (oldValue or 0))
[ "def", "update", "(", "self", ",", "instance", ",", "oldValue", ",", "newValue", ")", ":", "self", ".", "__set__", "(", "instance", ",", "self", ".", "__get__", "(", "instance", ",", "None", ")", "+", "newValue", "-", "(", "oldValue", "or", "0", ")", ")" ]
Updates the aggregate based on a change in the child value.
[ "Updates", "the", "aggregate", "based", "on", "a", "change", "in", "the", "child", "value", "." ]
python
train
54.25
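The aggregate bookkeeping in update() amounts to adding the delta between the new and old child values, with a missing old value treated as zero:

aggregate = 10
old_value, new_value = None, 7
aggregate = aggregate + new_value - (old_value or 0)   # 17: first report adds 7
old_value, new_value = 7, 3
aggregate = aggregate + new_value - (old_value or 0)   # 13: child value dropped by 4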
jobovy/galpy
galpy/orbit/planarOrbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/planarOrbit.py#L567-L663
def _integrateOrbit(vxvv,pot,t,method,dt): """ NAME: _integrateOrbit PURPOSE: integrate an orbit in a Phi(R) potential in the (R,phi)-plane INPUT: vxvv - array with the initial conditions stacked like [R,vR,vT,phi]; vR outward! pot - Potential instance t - list of times at which to output (0 has to be in this!) method - 'odeint' or 'leapfrog' dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: [:,4] array of [R,vR,vT,phi] at each t HISTORY: 2010-07-20 - Written - Bovy (NYU) """ #First check that the potential has C if '_c' in method: if not ext_loaded or not _check_c(pot): if ('leapfrog' in method or 'symplec' in method): method= 'leapfrog' else: method= 'odeint' if not ext_loaded: # pragma: no cover warnings.warn("Cannot use C integration because C extension not loaded (using %s instead)" % (method), galpyWarning) else: warnings.warn("Cannot use C integration because some of the potentials are not implemented in C (using %s instead)" % (method), galpyWarning) if method.lower() == 'leapfrog': #go to the rectangular frame this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]), vxvv[0]*nu.sin(vxvv[3]), vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]), vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])]) #integrate tmp_out= symplecticode.leapfrog(_rectForce,this_vxvv, t,args=(pot,),rtol=10.**-8) #go back to the cylindrical frame R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.) phi= nu.arccos(tmp_out[:,0]/R) phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)] vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi) vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi) out= nu.zeros((len(t),4)) out[:,0]= R out[:,1]= vR out[:,2]= vT out[:,3]= phi msg= 0 elif ext_loaded and \ (method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \ or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \ or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c' \ or method.lower() == 'dop853_c'): warnings.warn("Using C implementation to integrate orbits",galpyWarningVerbose) #go to the rectangular frame this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[3]), vxvv[0]*nu.sin(vxvv[3]), vxvv[1]*nu.cos(vxvv[3])-vxvv[2]*nu.sin(vxvv[3]), vxvv[2]*nu.cos(vxvv[3])+vxvv[1]*nu.sin(vxvv[3])]) #integrate tmp_out, msg= integratePlanarOrbit_c(pot,this_vxvv, t,method,dt=dt) #go back to the cylindrical frame R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.) phi= nu.arccos(tmp_out[:,0]/R) phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)] vR= tmp_out[:,2]*nu.cos(phi)+tmp_out[:,3]*nu.sin(phi) vT= tmp_out[:,3]*nu.cos(phi)-tmp_out[:,2]*nu.sin(phi) out= nu.zeros((len(t),4)) out[:,0]= R out[:,1]= vR out[:,2]= vT out[:,3]= phi elif method.lower() == 'odeint' or method.lower() == 'dop853' or not ext_loaded: vphi= vxvv[2]/vxvv[0] init= [vxvv[0],vxvv[1],vxvv[3],vphi] if method == 'dop853': intOut = dop853(_EOM, init, t, args=(pot,)) else: intOut= integrate.odeint(_EOM,init,t,args=(pot,), rtol=10.**-8.)#,mxstep=100000000) out= nu.zeros((len(t),4)) out[:,0]= intOut[:,0] out[:,1]= intOut[:,1] out[:,3]= intOut[:,2] out[:,2]= out[:,0]*intOut[:,3] msg= 0 else: raise NotImplementedError("requested integration method does not exist") #post-process to remove negative radii neg_radii= (out[:,0] < 0.) out[neg_radii,0]= -out[neg_radii,0] out[neg_radii,3]+= m.pi _parse_warnmessage(msg) return (out,msg)
[ "def", "_integrateOrbit", "(", "vxvv", ",", "pot", ",", "t", ",", "method", ",", "dt", ")", ":", "#First check that the potential has C", "if", "'_c'", "in", "method", ":", "if", "not", "ext_loaded", "or", "not", "_check_c", "(", "pot", ")", ":", "if", "(", "'leapfrog'", "in", "method", "or", "'symplec'", "in", "method", ")", ":", "method", "=", "'leapfrog'", "else", ":", "method", "=", "'odeint'", "if", "not", "ext_loaded", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "\"Cannot use C integration because C extension not loaded (using %s instead)\"", "%", "(", "method", ")", ",", "galpyWarning", ")", "else", ":", "warnings", ".", "warn", "(", "\"Cannot use C integration because some of the potentials are not implemented in C (using %s instead)\"", "%", "(", "method", ")", ",", "galpyWarning", ")", "if", "method", ".", "lower", "(", ")", "==", "'leapfrog'", ":", "#go to the rectangular frame", "this_vxvv", "=", "nu", ".", "array", "(", "[", "vxvv", "[", "0", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", ",", "vxvv", "[", "0", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", ",", "vxvv", "[", "1", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", "-", "vxvv", "[", "2", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", ",", "vxvv", "[", "2", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", "+", "vxvv", "[", "1", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", "]", ")", "#integrate", "tmp_out", "=", "symplecticode", ".", "leapfrog", "(", "_rectForce", ",", "this_vxvv", ",", "t", ",", "args", "=", "(", "pot", ",", ")", ",", "rtol", "=", "10.", "**", "-", "8", ")", "#go back to the cylindrical frame", "R", "=", "nu", ".", "sqrt", "(", "tmp_out", "[", ":", ",", "0", "]", "**", "2.", "+", "tmp_out", "[", ":", ",", "1", "]", "**", "2.", ")", "phi", "=", "nu", ".", "arccos", "(", "tmp_out", "[", ":", ",", "0", "]", "/", "R", ")", "phi", "[", "(", "tmp_out", "[", ":", ",", "1", "]", "<", "0.", ")", "]", "=", "2.", "*", "nu", ".", "pi", "-", "phi", "[", "(", "tmp_out", "[", ":", ",", "1", "]", "<", "0.", ")", "]", "vR", "=", "tmp_out", "[", ":", ",", "2", "]", "*", "nu", ".", "cos", "(", "phi", ")", "+", "tmp_out", "[", ":", ",", "3", "]", "*", "nu", ".", "sin", "(", "phi", ")", "vT", "=", "tmp_out", "[", ":", ",", "3", "]", "*", "nu", ".", "cos", "(", "phi", ")", "-", "tmp_out", "[", ":", ",", "2", "]", "*", "nu", ".", "sin", "(", "phi", ")", "out", "=", "nu", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "4", ")", ")", "out", "[", ":", ",", "0", "]", "=", "R", "out", "[", ":", ",", "1", "]", "=", "vR", "out", "[", ":", ",", "2", "]", "=", "vT", "out", "[", ":", ",", "3", "]", "=", "phi", "msg", "=", "0", "elif", "ext_loaded", "and", "(", "method", ".", "lower", "(", ")", "==", "'leapfrog_c'", "or", "method", ".", "lower", "(", ")", "==", "'rk4_c'", "or", "method", ".", "lower", "(", ")", "==", "'rk6_c'", "or", "method", ".", "lower", "(", ")", "==", "'symplec4_c'", "or", "method", ".", "lower", "(", ")", "==", "'symplec6_c'", "or", "method", ".", "lower", "(", ")", "==", "'dopr54_c'", "or", "method", ".", "lower", "(", ")", "==", "'dop853_c'", ")", ":", "warnings", ".", "warn", "(", "\"Using C implementation to integrate orbits\"", ",", "galpyWarningVerbose", ")", "#go to the rectangular frame", "this_vxvv", "=", "nu", ".", "array", "(", "[", "vxvv", "[", "0", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", ",", "vxvv", "[", "0", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", ",", 
"vxvv", "[", "1", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", "-", "vxvv", "[", "2", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", ",", "vxvv", "[", "2", "]", "*", "nu", ".", "cos", "(", "vxvv", "[", "3", "]", ")", "+", "vxvv", "[", "1", "]", "*", "nu", ".", "sin", "(", "vxvv", "[", "3", "]", ")", "]", ")", "#integrate", "tmp_out", ",", "msg", "=", "integratePlanarOrbit_c", "(", "pot", ",", "this_vxvv", ",", "t", ",", "method", ",", "dt", "=", "dt", ")", "#go back to the cylindrical frame", "R", "=", "nu", ".", "sqrt", "(", "tmp_out", "[", ":", ",", "0", "]", "**", "2.", "+", "tmp_out", "[", ":", ",", "1", "]", "**", "2.", ")", "phi", "=", "nu", ".", "arccos", "(", "tmp_out", "[", ":", ",", "0", "]", "/", "R", ")", "phi", "[", "(", "tmp_out", "[", ":", ",", "1", "]", "<", "0.", ")", "]", "=", "2.", "*", "nu", ".", "pi", "-", "phi", "[", "(", "tmp_out", "[", ":", ",", "1", "]", "<", "0.", ")", "]", "vR", "=", "tmp_out", "[", ":", ",", "2", "]", "*", "nu", ".", "cos", "(", "phi", ")", "+", "tmp_out", "[", ":", ",", "3", "]", "*", "nu", ".", "sin", "(", "phi", ")", "vT", "=", "tmp_out", "[", ":", ",", "3", "]", "*", "nu", ".", "cos", "(", "phi", ")", "-", "tmp_out", "[", ":", ",", "2", "]", "*", "nu", ".", "sin", "(", "phi", ")", "out", "=", "nu", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "4", ")", ")", "out", "[", ":", ",", "0", "]", "=", "R", "out", "[", ":", ",", "1", "]", "=", "vR", "out", "[", ":", ",", "2", "]", "=", "vT", "out", "[", ":", ",", "3", "]", "=", "phi", "elif", "method", ".", "lower", "(", ")", "==", "'odeint'", "or", "method", ".", "lower", "(", ")", "==", "'dop853'", "or", "not", "ext_loaded", ":", "vphi", "=", "vxvv", "[", "2", "]", "/", "vxvv", "[", "0", "]", "init", "=", "[", "vxvv", "[", "0", "]", ",", "vxvv", "[", "1", "]", ",", "vxvv", "[", "3", "]", ",", "vphi", "]", "if", "method", "==", "'dop853'", ":", "intOut", "=", "dop853", "(", "_EOM", ",", "init", ",", "t", ",", "args", "=", "(", "pot", ",", ")", ")", "else", ":", "intOut", "=", "integrate", ".", "odeint", "(", "_EOM", ",", "init", ",", "t", ",", "args", "=", "(", "pot", ",", ")", ",", "rtol", "=", "10.", "**", "-", "8.", ")", "#,mxstep=100000000)", "out", "=", "nu", ".", "zeros", "(", "(", "len", "(", "t", ")", ",", "4", ")", ")", "out", "[", ":", ",", "0", "]", "=", "intOut", "[", ":", ",", "0", "]", "out", "[", ":", ",", "1", "]", "=", "intOut", "[", ":", ",", "1", "]", "out", "[", ":", ",", "3", "]", "=", "intOut", "[", ":", ",", "2", "]", "out", "[", ":", ",", "2", "]", "=", "out", "[", ":", ",", "0", "]", "*", "intOut", "[", ":", ",", "3", "]", "msg", "=", "0", "else", ":", "raise", "NotImplementedError", "(", "\"requested integration method does not exist\"", ")", "#post-process to remove negative radii", "neg_radii", "=", "(", "out", "[", ":", ",", "0", "]", "<", "0.", ")", "out", "[", "neg_radii", ",", "0", "]", "=", "-", "out", "[", "neg_radii", ",", "0", "]", "out", "[", "neg_radii", ",", "3", "]", "+=", "m", ".", "pi", "_parse_warnmessage", "(", "msg", ")", "return", "(", "out", ",", "msg", ")" ]
NAME: _integrateOrbit PURPOSE: integrate an orbit in a Phi(R) potential in the (R,phi)-plane INPUT: vxvv - array with the initial conditions stacked like [R,vR,vT,phi]; vR outward! pot - Potential instance t - list of times at which to output (0 has to be in this!) method - 'odeint' or 'leapfrog' dt- if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize OUTPUT: [:,4] array of [R,vR,vT,phi] at each t HISTORY: 2010-07-20 - Written - Bovy (NYU)
[ "NAME", ":", "_integrateOrbit", "PURPOSE", ":", "integrate", "an", "orbit", "in", "a", "Phi", "(", "R", ")", "potential", "in", "the", "(", "R", "phi", ")", "-", "plane", "INPUT", ":", "vxvv", "-", "array", "with", "the", "initial", "conditions", "stacked", "like", "[", "R", "vR", "vT", "phi", "]", ";", "vR", "outward!", "pot", "-", "Potential", "instance", "t", "-", "list", "of", "times", "at", "which", "to", "output", "(", "0", "has", "to", "be", "in", "this!", ")", "method", "-", "odeint", "or", "leapfrog", "dt", "-", "if", "set", "force", "the", "integrator", "to", "use", "this", "basic", "stepsize", ";", "must", "be", "an", "integer", "divisor", "of", "output", "stepsize", "OUTPUT", ":", "[", ":", "4", "]", "array", "of", "[", "R", "vR", "vT", "phi", "]", "at", "each", "t", "HISTORY", ":", "2010", "-", "07", "-", "20", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
43.876289
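Both the leapfrog and the C branches above integrate in rectangular coordinates and convert back afterwards. A sketch of the two frame conversions used in the record above (function names are not part of galpy):

import numpy as np

def cyl_to_rect(R, vR, vT, phi):
    # (R, vR, vT, phi) -> (x, y, vx, vy), as built before integration
    return (R * np.cos(phi), R * np.sin(phi),
            vR * np.cos(phi) - vT * np.sin(phi),
            vT * np.cos(phi) + vR * np.sin(phi))

def rect_to_cyl(x, y, vx, vy):
    # inverse transform applied to the integrator output
    R = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(y, x) % (2.0 * np.pi)
    vR = vx * np.cos(phi) + vy * np.sin(phi)
    vT = vy * np.cos(phi) - vx * np.sin(phi)
    return R, vR, vT, phi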
tensorflow/tensor2tensor
tensor2tensor/layers/latent_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/latent_layers.py#L102-L130
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams): """Latent prediction and loss. Args: latents_pred: Tensor of shape [..., depth]. latents_discrete_hot: Tensor of shape [..., vocab_size]. vocab_size: an int representing the vocab size. hparams: HParams. Returns: sample: Tensor of shape [...], a sample from a multinomial distribution. loss: Tensor of shape [...], the softmax cross-entropy. """ with tf.variable_scope("latent_logits"): latents_logits = tf.layers.dense(latents_pred, vocab_size, name="logits_dense") if hparams.logit_normalization: latents_logits *= tf.rsqrt(1e-8 + tf.reduce_mean(tf.square(latents_logits))) loss = tf.nn.softmax_cross_entropy_with_logits_v2( labels=latents_discrete_hot, logits=latents_logits) # TODO(trandustin): tease this out from ae_latent_softmax. # we use just the loss portion to anchor prior / encoder on text. sample = multinomial_sample(latents_logits, vocab_size, hparams.sampling_method, hparams.sampling_temp) return sample, loss
[ "def", "ae_latent_softmax", "(", "latents_pred", ",", "latents_discrete_hot", ",", "vocab_size", ",", "hparams", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"latent_logits\"", ")", ":", "latents_logits", "=", "tf", ".", "layers", ".", "dense", "(", "latents_pred", ",", "vocab_size", ",", "name", "=", "\"logits_dense\"", ")", "if", "hparams", ".", "logit_normalization", ":", "latents_logits", "*=", "tf", ".", "rsqrt", "(", "1e-8", "+", "tf", ".", "reduce_mean", "(", "tf", ".", "square", "(", "latents_logits", ")", ")", ")", "loss", "=", "tf", ".", "nn", ".", "softmax_cross_entropy_with_logits_v2", "(", "labels", "=", "latents_discrete_hot", ",", "logits", "=", "latents_logits", ")", "# TODO(trandustin): tease this out from ae_latent_softmax.", "# we use just the loss portion to anchor prior / encoder on text.", "sample", "=", "multinomial_sample", "(", "latents_logits", ",", "vocab_size", ",", "hparams", ".", "sampling_method", ",", "hparams", ".", "sampling_temp", ")", "return", "sample", ",", "loss" ]
Latent prediction and loss. Args: latents_pred: Tensor of shape [..., depth]. latents_discrete_hot: Tensor of shape [..., vocab_size]. vocab_size: an int representing the vocab size. hparams: HParams. Returns: sample: Tensor of shape [...], a sample from a multinomial distribution. loss: Tensor of shape [...], the softmax cross-entropy.
[ "Latent", "prediction", "and", "loss", "." ]
python
train
42.206897
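For one latent position with one-hot target y and logits z over a vocabulary of size V, the cross-entropy computed by softmax_cross_entropy_with_logits_v2 above is

\mathcal{L} = -\sum_{k=1}^{V} y_k \log \frac{e^{z_k}}{\sum_{j=1}^{V} e^{z_j}},

and when hparams.logit_normalization is set the logits are first rescaled by \left(10^{-8} + \overline{z^2}\right)^{-1/2}, the reciprocal square root of their mean square.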
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2015_04_05/file/fileservice.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/file/fileservice.py#L342-L442
def generate_file_shared_access_signature(self, share_name, directory_name=None, file_name=None, permission=None, expiry=None, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None): ''' Generates a shared access signature for the file. Use the returned signature with the sas_token parameter of FileService. :param str share_name: Name of share. :param str directory_name: Name of directory. SAS tokens cannot be created for directories, so this parameter should only be present if file_name is provided. :param str file_name: Name of file. :param FilePermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Permissions must be ordered read, create, write, delete, list. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: date or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: date or str :param str id: A unique value up to 64 characters in length that correlates to a stored access policy. To create a stored access policy, use set_file_service_properties. :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. Note that HTTP only is not a permitted value. :param str cache_control: Response header value for Cache-Control when resource is accessed using this shared access signature. :param str content_disposition: Response header value for Content-Disposition when resource is accessed using this shared access signature. :param str content_encoding: Response header value for Content-Encoding when resource is accessed using this shared access signature. :param str content_language: Response header value for Content-Language when resource is accessed using this shared access signature. :param str content_type: Response header value for Content-Type when resource is accessed using this shared access signature. :return: A Shared Access Signature (sas) token. 
:rtype: str ''' _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('self.account_name', self.account_name) _validate_not_none('self.account_key', self.account_key) sas = SharedAccessSignature(self.account_name, self.account_key) return sas.generate_file( share_name, directory_name, file_name, permission, expiry, start=start, id=id, ip=ip, protocol=protocol, cache_control=cache_control, content_disposition=content_disposition, content_encoding=content_encoding, content_language=content_language, content_type=content_type, )
[ "def", "generate_file_shared_access_signature", "(", "self", ",", "share_name", ",", "directory_name", "=", "None", ",", "file_name", "=", "None", ",", "permission", "=", "None", ",", "expiry", "=", "None", ",", "start", "=", "None", ",", "id", "=", "None", ",", "ip", "=", "None", ",", "protocol", "=", "None", ",", "cache_control", "=", "None", ",", "content_disposition", "=", "None", ",", "content_encoding", "=", "None", ",", "content_language", "=", "None", ",", "content_type", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'file_name'", ",", "file_name", ")", "_validate_not_none", "(", "'self.account_name'", ",", "self", ".", "account_name", ")", "_validate_not_none", "(", "'self.account_key'", ",", "self", ".", "account_key", ")", "sas", "=", "SharedAccessSignature", "(", "self", ".", "account_name", ",", "self", ".", "account_key", ")", "return", "sas", ".", "generate_file", "(", "share_name", ",", "directory_name", ",", "file_name", ",", "permission", ",", "expiry", ",", "start", "=", "start", ",", "id", "=", "id", ",", "ip", "=", "ip", ",", "protocol", "=", "protocol", ",", "cache_control", "=", "cache_control", ",", "content_disposition", "=", "content_disposition", ",", "content_encoding", "=", "content_encoding", ",", "content_language", "=", "content_language", ",", "content_type", "=", "content_type", ",", ")" ]
Generates a shared access signature for the file. Use the returned signature with the sas_token parameter of FileService. :param str share_name: Name of share. :param str directory_name: Name of directory. SAS tokens cannot be created for directories, so this parameter should only be present if file_name is provided. :param str file_name: Name of file. :param FilePermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Permissions must be ordered read, create, write, delete, list. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: date or str :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type start: date or str :param str id: A unique value up to 64 characters in length that correlates to a stored access policy. To create a stored access policy, use set_file_service_properties. :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. Note that HTTP only is not a permitted value. :param str cache_control: Response header value for Cache-Control when resource is accessed using this shared access signature. :param str content_disposition: Response header value for Content-Disposition when resource is accessed using this shared access signature. :param str content_encoding: Response header value for Content-Encoding when resource is accessed using this shared access signature. :param str content_language: Response header value for Content-Language when resource is accessed using this shared access signature. :param str content_type: Response header value for Content-Type when resource is accessed using this shared access signature. :return: A Shared Access Signature (sas) token. :rtype: str
[ "Generates", "a", "shared", "access", "signature", "for", "the", "file", ".", "Use", "the", "returned", "signature", "with", "the", "sas_token", "parameter", "of", "FileService", "." ]
python
train
51.564356
SmokinCaterpillar/pypet
pypet/trajectory.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L968-L1072
def f_expand(self, build_dict, fail_safe=True): """Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge already completed trajectories. Please ensure before usage, that all explored parameters are loaded! :param build_dict: Dictionary containing the expansion :param fail_safe: If old ranges should be **deep-copied** in order to allow to restore the original exploration if something fails during expansion. Set to `False` if deep-copying your parameter ranges causes errors. :raises: TypeError: If not all explored parameters are enlarged AttributeError: If keys of dictionary cannot be found in the trajectory NotUniqueNodeError: If dictionary keys do not unambiguously map to single parameters ValueError: If not all explored parameter ranges are of the same length """ if len(self._explored_parameters) == 0: self._logger.info('Your trajectory has not been explored, yet. ' 'I will call `f_explore` instead.') return self.f_explore(build_dict) enlarge_set = set([self.f_get(key).v_full_name for key in build_dict.keys()]) # Check if all explored parameters will be enlarged, otherwise # We cannot enlarge the trajectory if not set(self._explored_parameters.keys()) == enlarge_set: raise TypeError('You have to enlarge dimensions you have explored before! Currently' ' explored parameters are not the ones you specified in your building' ' dictionary, i.e. %s != %s' % (str(set(self._explored_parameters.keys())), str(set(build_dict.keys())))) if any(x is None for x in self._explored_parameters.values()): raise TypeError('At least one of your explored parameters is not fully loaded, ' 'please load it.') old_ranges = None if fail_safe: old_ranges = {} for param_name in self._explored_parameters: old_ranges[param_name] = self._explored_parameters[param_name].f_get_range() try: old_ranges = cp.deepcopy(old_ranges) except Exception: self._logger.error('Cannot deepcopy old parameter ranges, if ' 'something fails during `f_expand` I cannot revert the ' 'trajectory to old settings.') old_ranges = None try: count = 0 length = None for key, builditerable in build_dict.items(): act_param = self.f_get(key) act_param.f_unlock() act_param._expand(builditerable) name = act_param.v_full_name self._explored_parameters[name] = act_param # Compare the length of two consecutive parameters in the `build_dict` if count == 0: length = act_param.f_get_range_length() elif not length == act_param.f_get_range_length(): raise ValueError('The parameters to explore have not the same size!') count += 1 original_length = len(self) for irun in range(original_length, length): self._add_run_info(irun) self._test_run_addition(length) # We need to update the explored parameters in case they were stored: self._remove_exploration() except Exception: if old_ranges is not None: # Try to restore the original parameter exploration for param_name in old_ranges: param_range = old_ranges[param_name] param = self._explored_parameters[param_name] param.f_unlock() try: param._shrink() except Exception as exc: self._logger.error('Could not shrink parameter `%s` ' 'because of:`%s`' % (param_name, repr(exc))) param._explore(param_range) param._explored = True raise
[ "def", "f_expand", "(", "self", ",", "build_dict", ",", "fail_safe", "=", "True", ")", ":", "if", "len", "(", "self", ".", "_explored_parameters", ")", "==", "0", ":", "self", ".", "_logger", ".", "info", "(", "'Your trajectory has not been explored, yet. '", "'I will call `f_explore` instead.'", ")", "return", "self", ".", "f_explore", "(", "build_dict", ")", "enlarge_set", "=", "set", "(", "[", "self", ".", "f_get", "(", "key", ")", ".", "v_full_name", "for", "key", "in", "build_dict", ".", "keys", "(", ")", "]", ")", "# Check if all explored parameters will be enlarged, otherwise", "# We cannot enlarge the trajectory", "if", "not", "set", "(", "self", ".", "_explored_parameters", ".", "keys", "(", ")", ")", "==", "enlarge_set", ":", "raise", "TypeError", "(", "'You have to enlarge dimensions you have explored before! Currently'", "' explored parameters are not the ones you specified in your building'", "' dictionary, i.e. %s != %s'", "%", "(", "str", "(", "set", "(", "self", ".", "_explored_parameters", ".", "keys", "(", ")", ")", ")", ",", "str", "(", "set", "(", "build_dict", ".", "keys", "(", ")", ")", ")", ")", ")", "if", "any", "(", "x", "is", "None", "for", "x", "in", "self", ".", "_explored_parameters", ".", "values", "(", ")", ")", ":", "raise", "TypeError", "(", "'At least one of your explored parameters is not fully loaded, '", "'please load it.'", ")", "old_ranges", "=", "None", "if", "fail_safe", ":", "old_ranges", "=", "{", "}", "for", "param_name", "in", "self", ".", "_explored_parameters", ":", "old_ranges", "[", "param_name", "]", "=", "self", ".", "_explored_parameters", "[", "param_name", "]", ".", "f_get_range", "(", ")", "try", ":", "old_ranges", "=", "cp", ".", "deepcopy", "(", "old_ranges", ")", "except", "Exception", ":", "self", ".", "_logger", ".", "error", "(", "'Cannot deepcopy old parameter ranges, if '", "'something fails during `f_expand` I cannot revert the '", "'trajectory to old settings.'", ")", "old_ranges", "=", "None", "try", ":", "count", "=", "0", "length", "=", "None", "for", "key", ",", "builditerable", "in", "build_dict", ".", "items", "(", ")", ":", "act_param", "=", "self", ".", "f_get", "(", "key", ")", "act_param", ".", "f_unlock", "(", ")", "act_param", ".", "_expand", "(", "builditerable", ")", "name", "=", "act_param", ".", "v_full_name", "self", ".", "_explored_parameters", "[", "name", "]", "=", "act_param", "# Compare the length of two consecutive parameters in the `build_dict`", "if", "count", "==", "0", ":", "length", "=", "act_param", ".", "f_get_range_length", "(", ")", "elif", "not", "length", "==", "act_param", ".", "f_get_range_length", "(", ")", ":", "raise", "ValueError", "(", "'The parameters to explore have not the same size!'", ")", "count", "+=", "1", "original_length", "=", "len", "(", "self", ")", "for", "irun", "in", "range", "(", "original_length", ",", "length", ")", ":", "self", ".", "_add_run_info", "(", "irun", ")", "self", ".", "_test_run_addition", "(", "length", ")", "# We need to update the explored parameters in case they were stored:", "self", ".", "_remove_exploration", "(", ")", "except", "Exception", ":", "if", "old_ranges", "is", "not", "None", ":", "# Try to restore the original parameter exploration", "for", "param_name", "in", "old_ranges", ":", "param_range", "=", "old_ranges", "[", "param_name", "]", "param", "=", "self", ".", "_explored_parameters", "[", "param_name", "]", "param", ".", "f_unlock", "(", ")", "try", ":", "param", ".", "_shrink", "(", ")", "except", "Exception", "as", "exc", ":", 
"self", ".", "_logger", ".", "error", "(", "'Could not shrink parameter `%s` '", "'because of:`%s`'", "%", "(", "param_name", ",", "repr", "(", "exc", ")", ")", ")", "param", ".", "_explore", "(", "param_range", ")", "param", ".", "_explored", "=", "True", "raise" ]
Similar to :func:`~pypet.trajectory.Trajectory.f_explore`, but can be used to enlarge already completed trajectories. Please ensure before usage, that all explored parameters are loaded! :param build_dict: Dictionary containing the expansion :param fail_safe: If old ranges should be **deep-copied** in order to allow to restore the original exploration if something fails during expansion. Set to `False` if deep-copying your parameter ranges causes errors. :raises: TypeError: If not all explored parameters are enlarged AttributeError: If keys of dictionary cannot be found in the trajectory NotUniqueNodeError: If dictionary keys do not unambiguously map to single parameters ValueError: If not all explored parameter ranges are of the same length
[ "Similar", "to", ":", "func", ":", "~pypet", ".", "trajectory", ".", "Trajectory", ".", "f_explore", "but", "can", "be", "used", "to", "enlarge", "already", "completed", "trajectories", "." ]
python
test
41.590476
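A hedged usage sketch of f_expand(): expanding an already explored trajectory adds runs, and every previously explored parameter must appear in the build dict with ranges of equal length (the trajectory and parameter names below are hypothetical):

traj.f_expand({'parameters.x': [3.0, 4.0],
               'parameters.y': [0.5, 1.5]})   # appends two runs to the trajectory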
pysal/spglm
spglm/family.py
https://github.com/pysal/spglm/blob/1339898adcb7e1638f1da83d57aa37392525f018/spglm/family.py#L866-L898
def loglike(self, endog, mu, freq_weights=1, scale=1.): r""" The log-likelihood function in terms of the fitted mean response. Parameters ---------- endog : array-like Endogenous response variable mu : array-like Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional Not used for the Binomial GLM. Returns ------- llf : float The value of the loglikelihood function evaluated at (endog,mu,freq_weights,scale) as defined below. """ if np.shape(self.n) == () and self.n == 1: return scale * np.sum((endog * np.log(mu/(1 - mu) + 1e-200) + np.log(1 - mu)) * freq_weights) else: y = endog * self.n # convert back to successes return scale * np.sum((special.gammaln(self.n + 1) - special.gammaln(y + 1) - special.gammaln(self.n - y + 1) + y * np.log(mu/(1 - mu)) + self.n * np.log(1 - mu)) * freq_weights)
[ "def", "loglike", "(", "self", ",", "endog", ",", "mu", ",", "freq_weights", "=", "1", ",", "scale", "=", "1.", ")", ":", "if", "np", ".", "shape", "(", "self", ".", "n", ")", "==", "(", ")", "and", "self", ".", "n", "==", "1", ":", "return", "scale", "*", "np", ".", "sum", "(", "(", "endog", "*", "np", ".", "log", "(", "mu", "/", "(", "1", "-", "mu", ")", "+", "1e-200", ")", "+", "np", ".", "log", "(", "1", "-", "mu", ")", ")", "*", "freq_weights", ")", "else", ":", "y", "=", "endog", "*", "self", ".", "n", "# convert back to successes", "return", "scale", "*", "np", ".", "sum", "(", "(", "special", ".", "gammaln", "(", "self", ".", "n", "+", "1", ")", "-", "special", ".", "gammaln", "(", "y", "+", "1", ")", "-", "special", ".", "gammaln", "(", "self", ".", "n", "-", "y", "+", "1", ")", "+", "y", "*", "np", ".", "log", "(", "mu", "/", "(", "1", "-", "mu", ")", ")", "+", "self", ".", "n", "*", "np", ".", "log", "(", "1", "-", "mu", ")", ")", "*", "freq_weights", ")" ]
r""" The log-likelihood function in terms of the fitted mean response. Parameters ---------- endog : array-like Endogenous response variable mu : array-like Fitted mean response variable freq_weights : array-like 1d array of frequency weights. The default is 1. scale : float, optional Not used for the Binomial GLM. Returns ------- llf : float The value of the loglikelihood function evaluated at (endog,mu,freq_weights,scale) as defined below.
[ "r", "The", "log", "-", "likelihood", "function", "in", "terms", "of", "the", "fitted", "mean", "response", "." ]
python
train
37.636364
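As a quick numerical check of the Bernoulli branch above (self.n == 1), note that y*log(mu/(1-mu)) + log(1-mu) equals the familiar y*log(mu) + (1-y)*log(1-mu); a minimal sketch with made-up data:

    import numpy as np

    endog = np.array([1., 0., 1.])   # hypothetical 0/1 responses
    mu = np.array([0.8, 0.3, 0.6])   # fitted means
    llf = np.sum(endog * np.log(mu / (1 - mu) + 1e-200) + np.log(1 - mu))
    print(llf)  # same value as np.sum(endog*np.log(mu) + (1-endog)*np.log(1-mu))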
astropy/photutils
photutils/psf/epsf.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf.py#L673-L755
def _build_epsf_step(self, stars, epsf=None): """ A single iteration of improving an ePSF. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object, optional The initial ePSF model. If not input, then the ePSF will be built from scratch. Returns ------- epsf : `EPSFModel` object The updated ePSF. """ if len(stars) < 1: raise ValueError('stars must contain at least one EPSFStar or ' 'LinkedEPSFStar object.') if epsf is None: # create an initial ePSF (array of zeros) epsf = self._create_initial_epsf(stars) else: # improve the input ePSF epsf = copy.deepcopy(epsf) # compute a 3D stack of 2D residual images residuals = self._resample_residuals(stars, epsf) self._residuals.append(residuals) # compute the sigma-clipped median along the 3D stack with warnings.catch_warnings(): warnings.simplefilter('ignore', category=RuntimeWarning) warnings.simplefilter('ignore', category=AstropyUserWarning) residuals = self.sigclip(residuals, axis=0, masked=False, return_bounds=False) if HAS_BOTTLENECK: residuals = bottleneck.nanmedian(residuals, axis=0) else: residuals = np.nanmedian(residuals, axis=0) self._residuals_sigclip.append(residuals) # interpolate any missing data (np.nan) mask = ~np.isfinite(residuals) if np.any(mask): residuals = _interpolate_missing_data(residuals, mask, method='cubic') # fill any remaining nans (outer points) with zeros residuals[~np.isfinite(residuals)] = 0. self._residuals_interp.append(residuals) # add the residuals to the previous ePSF image new_epsf = epsf.normalized_data + residuals # smooth the ePSF new_epsf = self._smooth_epsf(new_epsf) # recenter the ePSF new_epsf = self._recenter_epsf(new_epsf, epsf, centroid_func=self.recentering_func, box_size=self.recentering_boxsize, maxiters=self.recentering_maxiters, center_accuracy=1.0e-4) # normalize the ePSF data new_epsf /= np.sum(new_epsf, dtype=np.float64) # return the new ePSF object xcenter = (new_epsf.shape[1] - 1) / 2. ycenter = (new_epsf.shape[0] - 1) / 2. epsf_new = EPSFModel(data=new_epsf, origin=(xcenter, ycenter), normalize=False, oversampling=epsf.oversampling) return epsf_new
[ "def", "_build_epsf_step", "(", "self", ",", "stars", ",", "epsf", "=", "None", ")", ":", "if", "len", "(", "stars", ")", "<", "1", ":", "raise", "ValueError", "(", "'stars must contain at least one EPSFStar or '", "'LinkedEPSFStar object.'", ")", "if", "epsf", "is", "None", ":", "# create an initial ePSF (array of zeros)", "epsf", "=", "self", ".", "_create_initial_epsf", "(", "stars", ")", "else", ":", "# improve the input ePSF", "epsf", "=", "copy", ".", "deepcopy", "(", "epsf", ")", "# compute a 3D stack of 2D residual images", "residuals", "=", "self", ".", "_resample_residuals", "(", "stars", ",", "epsf", ")", "self", ".", "_residuals", ".", "append", "(", "residuals", ")", "# compute the sigma-clipped median along the 3D stack", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "category", "=", "RuntimeWarning", ")", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "category", "=", "AstropyUserWarning", ")", "residuals", "=", "self", ".", "sigclip", "(", "residuals", ",", "axis", "=", "0", ",", "masked", "=", "False", ",", "return_bounds", "=", "False", ")", "if", "HAS_BOTTLENECK", ":", "residuals", "=", "bottleneck", ".", "nanmedian", "(", "residuals", ",", "axis", "=", "0", ")", "else", ":", "residuals", "=", "np", ".", "nanmedian", "(", "residuals", ",", "axis", "=", "0", ")", "self", ".", "_residuals_sigclip", ".", "append", "(", "residuals", ")", "# interpolate any missing data (np.nan)", "mask", "=", "~", "np", ".", "isfinite", "(", "residuals", ")", "if", "np", ".", "any", "(", "mask", ")", ":", "residuals", "=", "_interpolate_missing_data", "(", "residuals", ",", "mask", ",", "method", "=", "'cubic'", ")", "# fill any remaining nans (outer points) with zeros", "residuals", "[", "~", "np", ".", "isfinite", "(", "residuals", ")", "]", "=", "0.", "self", ".", "_residuals_interp", ".", "append", "(", "residuals", ")", "# add the residuals to the previous ePSF image", "new_epsf", "=", "epsf", ".", "normalized_data", "+", "residuals", "# smooth the ePSF", "new_epsf", "=", "self", ".", "_smooth_epsf", "(", "new_epsf", ")", "# recenter the ePSF", "new_epsf", "=", "self", ".", "_recenter_epsf", "(", "new_epsf", ",", "epsf", ",", "centroid_func", "=", "self", ".", "recentering_func", ",", "box_size", "=", "self", ".", "recentering_boxsize", ",", "maxiters", "=", "self", ".", "recentering_maxiters", ",", "center_accuracy", "=", "1.0e-4", ")", "# normalize the ePSF data", "new_epsf", "/=", "np", ".", "sum", "(", "new_epsf", ",", "dtype", "=", "np", ".", "float64", ")", "# return the new ePSF object", "xcenter", "=", "(", "new_epsf", ".", "shape", "[", "1", "]", "-", "1", ")", "/", "2.", "ycenter", "=", "(", "new_epsf", ".", "shape", "[", "0", "]", "-", "1", ")", "/", "2.", "epsf_new", "=", "EPSFModel", "(", "data", "=", "new_epsf", ",", "origin", "=", "(", "xcenter", ",", "ycenter", ")", ",", "normalize", "=", "False", ",", "oversampling", "=", "epsf", ".", "oversampling", ")", "return", "epsf_new" ]
A single iteration of improving an ePSF. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object, optional The initial ePSF model. If not input, then the ePSF will be built from scratch. Returns ------- epsf : `EPSFModel` object The updated ePSF.
[ "A", "single", "iteration", "of", "improving", "an", "ePSF", "." ]
python
train
34.963855
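This is an internal step of the ePSF builder; in practice it is driven through `EPSFBuilder`. A hedged usage sketch, assuming `stars` is an `EPSFStars` collection already produced by `photutils.psf.extract_stars`:

    from photutils.psf import EPSFBuilder

    # stars: an EPSFStars object from extract_stars(nddata, stars_tbl, size=25)
    epsf_builder = EPSFBuilder(oversampling=4, maxiters=3, progress_bar=False)
    epsf, fitted_stars = epsf_builder(stars)  # iterates _build_epsf_step internally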
10gen/mongo-orchestration
mongo_orchestration/process.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/process.py#L161-L184
def repair_mongo(name, dbpath): """repair mongodb after usafe shutdown""" log_file = os.path.join(dbpath, 'mongod.log') cmd = [name, "--dbpath", dbpath, "--logpath", log_file, "--logappend", "--repair"] proc = subprocess.Popen( cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) timeout = 45 t_start = time.time() while time.time() - t_start < timeout: line = str(proc.stdout.readline()) logger.info("repair output: %s" % (line,)) return_code = proc.poll() if return_code is not None: if return_code: raise Exception("mongod --repair failed with exit code %s, " "check log file: %s" % (return_code, log_file)) # Success when poll() returns 0 return time.sleep(1) proc.terminate() raise Exception("mongod --repair failed to exit after %s seconds, " "check log file: %s" % (timeout, log_file))
[ "def", "repair_mongo", "(", "name", ",", "dbpath", ")", ":", "log_file", "=", "os", ".", "path", ".", "join", "(", "dbpath", ",", "'mongod.log'", ")", "cmd", "=", "[", "name", ",", "\"--dbpath\"", ",", "dbpath", ",", "\"--logpath\"", ",", "log_file", ",", "\"--logappend\"", ",", "\"--repair\"", "]", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "universal_newlines", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "timeout", "=", "45", "t_start", "=", "time", ".", "time", "(", ")", "while", "time", ".", "time", "(", ")", "-", "t_start", "<", "timeout", ":", "line", "=", "str", "(", "proc", ".", "stdout", ".", "readline", "(", ")", ")", "logger", ".", "info", "(", "\"repair output: %s\"", "%", "(", "line", ",", ")", ")", "return_code", "=", "proc", ".", "poll", "(", ")", "if", "return_code", "is", "not", "None", ":", "if", "return_code", ":", "raise", "Exception", "(", "\"mongod --repair failed with exit code %s, \"", "\"check log file: %s\"", "%", "(", "return_code", ",", "log_file", ")", ")", "# Success when poll() returns 0", "return", "time", ".", "sleep", "(", "1", ")", "proc", ".", "terminate", "(", ")", "raise", "Exception", "(", "\"mongod --repair failed to exit after %s seconds, \"", "\"check log file: %s\"", "%", "(", "timeout", ",", "log_file", ")", ")" ]
repair mongodb after unsafe shutdown
[ "repair", "mongodb", "after", "usafe", "shutdown" ]
python
train
41.833333
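A minimal usage sketch of the function above (the binary name and dbpath are placeholders):

    try:
        repair_mongo('mongod', '/data/db')  # blocks until --repair exits or ~45 s pass
    except Exception as exc:
        print('repair failed: %s' % exc)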
ikegami-yukino/jaconv
jaconv/jaconv.py
https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L77-L102
def kata2hira(text, ignore=''): """Convert Full-width Katakana to Hiragana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. Return ------ str Hiragana string. Examples -------- >>> print(jaconv.kata2hira('巴マミ')) 巴まみ >>> print(jaconv.kata2hira('マミサン', ignore='ン')) まみさン """ if ignore: k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy()) return _convert(text, k2h_map) return _convert(text, K2H_TABLE)
[ "def", "kata2hira", "(", "text", ",", "ignore", "=", "''", ")", ":", "if", "ignore", ":", "k2h_map", "=", "_exclude_ignorechar", "(", "ignore", ",", "K2H_TABLE", ".", "copy", "(", ")", ")", "return", "_convert", "(", "text", ",", "k2h_map", ")", "return", "_convert", "(", "text", ",", "K2H_TABLE", ")" ]
Convert Full-width Katakana to Hiragana Parameters ---------- text : str Full-width Katakana string. ignore : str Characters to be ignored in converting. Return ------ str Hiragana string. Examples -------- >>> print(jaconv.kata2hira('巴マミ')) 巴まみ >>> print(jaconv.kata2hira('マミサン', ignore='ン')) まみさン
[ "Convert", "Full", "-", "width", "Katakana", "to", "Hiragana" ]
python
train
21.307692
twilio/twilio-python
twilio/rest/api/v2010/account/usage/trigger.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/trigger.py#L182-L191
def get(self, sid): """ Constructs a TriggerContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext """ return TriggerContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "TriggerContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a TriggerContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerContext :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerContext
[ "Constructs", "a", "TriggerContext" ]
python
train
39
twilio/twilio-python
twilio/rest/sync/v1/service/sync_list/sync_list_permission.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/sync_list/sync_list_permission.py#L229-L250
def fetch(self): """ Fetch a SyncListPermissionInstance :returns: Fetched SyncListPermissionInstance :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return SyncListPermissionInstance( self._version, payload, service_sid=self._solution['service_sid'], list_sid=self._solution['list_sid'], identity=self._solution['identity'], )
[ "def", "fetch", "(", "self", ")", ":", "params", "=", "values", ".", "of", "(", "{", "}", ")", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "SyncListPermissionInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "list_sid", "=", "self", ".", "_solution", "[", "'list_sid'", "]", ",", "identity", "=", "self", ".", "_solution", "[", "'identity'", "]", ",", ")" ]
Fetch a SyncListPermissionInstance :returns: Fetched SyncListPermissionInstance :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
[ "Fetch", "a", "SyncListPermissionInstance" ]
python
train
28.318182
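Through the generated helper library this fetch is reached via the Sync v1 resource chain; a sketch assuming standard Twilio credentials and placeholder SIDs:

    from twilio.rest import Client

    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    perm = (client.sync
            .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
            .sync_lists('ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
            .sync_list_permissions('alice')
            .fetch())
    print(perm.read, perm.write, perm.manage)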
chaoss/grimoirelab-sigils
src/migration/to_kibana5.py
https://github.com/chaoss/grimoirelab-sigils/blob/33d395195acb316287143a535a2c6e4009bf0528/src/migration/to_kibana5.py#L86-L104
def parse_args(): """Parse arguments from the command line""" parser = argparse.ArgumentParser(description=TO_KIBANA5_DESC_MSG) parser.add_argument('-s', '--source', dest='src_path', \ required=True, help='source directory') parser.add_argument('-d', '--dest', dest='dest_path', \ required=True, help='destination directory') parser.add_argument('-o', '--old-size', dest='old_size', \ default='0', help='aggregation old size') parser.add_argument('-n', '--new-size', dest='new_size', \ default='1000', help='aggregation new size') parser.add_argument('-g', '--debug', dest='debug', action='store_true') return parser.parse_args()
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "TO_KIBANA5_DESC_MSG", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--source'", ",", "dest", "=", "'src_path'", ",", "required", "=", "True", ",", "help", "=", "'source directory'", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--dest'", ",", "dest", "=", "'dest_path'", ",", "required", "=", "True", ",", "help", "=", "'destination directory'", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--old-size'", ",", "dest", "=", "'old_size'", ",", "default", "=", "'0'", ",", "help", "=", "'aggregation old size'", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--new-size'", ",", "dest", "=", "'new_size'", ",", "default", "=", "'1000'", ",", "help", "=", "'aggregation new size'", ")", "parser", ".", "add_argument", "(", "'-g'", ",", "'--debug'", ",", "dest", "=", "'debug'", ",", "action", "=", "'store_true'", ")", "return", "parser", ".", "parse_args", "(", ")" ]
Parse arguments from the command line
[ "Parse", "arguments", "from", "the", "command", "line" ]
python
train
37.052632
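A small sketch exercising the parser programmatically (the directory names are placeholders):

    import sys

    sys.argv = ['to_kibana5.py', '--source', 'dashboards_k4',
                '--dest', 'dashboards_k5', '--new-size', '500', '--debug']
    args = parse_args()
    print(args.src_path, args.dest_path, args.old_size, args.new_size, args.debug)
    # -> dashboards_k4 dashboards_k5 0 500 True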
genialis/resolwe
resolwe/flow/views/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/data.py#L187-L213
def perform_create(self, serializer): """Create a resource.""" process = serializer.validated_data.get('process') if not process.is_active: raise exceptions.ParseError( 'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version) ) with transaction.atomic(): instance = serializer.save() assign_contributor_permissions(instance) # Entity is added to the collection only when it is # created - when it only contains 1 Data object. entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1) # Assign data object to all specified collections. collection_pks = self.request.data.get('collections', []) for collection in Collection.objects.filter(pk__in=collection_pks): collection.data.add(instance) copy_permissions(collection, instance) # Add entities to which data belongs to the collection. for entity in entities: entity.collections.add(collection) copy_permissions(collection, entity)
[ "def", "perform_create", "(", "self", ",", "serializer", ")", ":", "process", "=", "serializer", ".", "validated_data", ".", "get", "(", "'process'", ")", "if", "not", "process", ".", "is_active", ":", "raise", "exceptions", ".", "ParseError", "(", "'Process retired (id: {}, slug: {}/{}).'", ".", "format", "(", "process", ".", "id", ",", "process", ".", "slug", ",", "process", ".", "version", ")", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "instance", "=", "serializer", ".", "save", "(", ")", "assign_contributor_permissions", "(", "instance", ")", "# Entity is added to the collection only when it is", "# created - when it only contains 1 Data object.", "entities", "=", "Entity", ".", "objects", ".", "annotate", "(", "num_data", "=", "Count", "(", "'data'", ")", ")", ".", "filter", "(", "data", "=", "instance", ",", "num_data", "=", "1", ")", "# Assign data object to all specified collections.", "collection_pks", "=", "self", ".", "request", ".", "data", ".", "get", "(", "'collections'", ",", "[", "]", ")", "for", "collection", "in", "Collection", ".", "objects", ".", "filter", "(", "pk__in", "=", "collection_pks", ")", ":", "collection", ".", "data", ".", "add", "(", "instance", ")", "copy_permissions", "(", "collection", ",", "instance", ")", "# Add entities to which data belongs to the collection.", "for", "entity", "in", "entities", ":", "entity", ".", "collections", ".", "add", "(", "collection", ")", "copy_permissions", "(", "collection", ",", "entity", ")" ]
Create a resource.
[ "Create", "a", "resource", "." ]
python
train
44.481481
TadLeonard/tfatool
tfatool/sync.py
https://github.com/TadLeonard/tfatool/blob/12da2807b5fb538c5317ef255d846b32ceb174d0/tfatool/sync.py#L297-L304
def up_by_time(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, count=1): """Sync most recent file by date, time attribues""" remote_files = command.map_files_raw(remote_dir=remote_dir) local_files = list_local_files(*filters, local_dir=local_dir) most_recent = sorted(local_files, key=lambda f: f.datetime) to_sync = most_recent[-count:] _notify_sync(Direction.up, to_sync) up_by_files(to_sync[::-1], remote_dir, remote_files)
[ "def", "up_by_time", "(", "*", "filters", ",", "local_dir", "=", "\".\"", ",", "remote_dir", "=", "DEFAULT_REMOTE_DIR", ",", "count", "=", "1", ")", ":", "remote_files", "=", "command", ".", "map_files_raw", "(", "remote_dir", "=", "remote_dir", ")", "local_files", "=", "list_local_files", "(", "*", "filters", ",", "local_dir", "=", "local_dir", ")", "most_recent", "=", "sorted", "(", "local_files", ",", "key", "=", "lambda", "f", ":", "f", ".", "datetime", ")", "to_sync", "=", "most_recent", "[", "-", "count", ":", "]", "_notify_sync", "(", "Direction", ".", "up", ",", "to_sync", ")", "up_by_files", "(", "to_sync", "[", ":", ":", "-", "1", "]", ",", "remote_dir", ",", "remote_files", ")" ]
Sync most recent file by date, time attributes
[ "Sync", "most", "recent", "file", "by", "date", "time", "attribues" ]
python
train
56.875
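A hedged usage sketch (assumes a FlashAir card reachable at the library's default address and the default remote directory; the local directory is a placeholder):

    from tfatool import sync

    # upload the 3 most recently taken files from ./photos to the card
    sync.up_by_time(local_dir='photos', count=3)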
VisTrails/tej
tej/submission.py
https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L243-L277
def _call(self, cmd, get_output): """Calls a command through the SSH connection. Remote stderr gets printed to this program's stderr. Output is captured and may be returned. """ server_err = self.server_logger() chan = self.get_client().get_transport().open_session() try: logger.debug("Invoking %r%s", cmd, " (stdout)" if get_output else "") chan.exec_command('/bin/sh -c %s' % shell_escape(cmd)) output = b'' while True: r, w, e = select.select([chan], [], []) if chan not in r: continue # pragma: no cover recvd = False while chan.recv_stderr_ready(): data = chan.recv_stderr(1024) server_err.append(data) recvd = True while chan.recv_ready(): data = chan.recv(1024) if get_output: output += data recvd = True if not recvd and chan.exit_status_ready(): break output = output.rstrip(b'\r\n') return chan.recv_exit_status(), output finally: server_err.done() chan.close()
[ "def", "_call", "(", "self", ",", "cmd", ",", "get_output", ")", ":", "server_err", "=", "self", ".", "server_logger", "(", ")", "chan", "=", "self", ".", "get_client", "(", ")", ".", "get_transport", "(", ")", ".", "open_session", "(", ")", "try", ":", "logger", ".", "debug", "(", "\"Invoking %r%s\"", ",", "cmd", ",", "\" (stdout)\"", "if", "get_output", "else", "\"\"", ")", "chan", ".", "exec_command", "(", "'/bin/sh -c %s'", "%", "shell_escape", "(", "cmd", ")", ")", "output", "=", "b''", "while", "True", ":", "r", ",", "w", ",", "e", "=", "select", ".", "select", "(", "[", "chan", "]", ",", "[", "]", ",", "[", "]", ")", "if", "chan", "not", "in", "r", ":", "continue", "# pragma: no cover", "recvd", "=", "False", "while", "chan", ".", "recv_stderr_ready", "(", ")", ":", "data", "=", "chan", ".", "recv_stderr", "(", "1024", ")", "server_err", ".", "append", "(", "data", ")", "recvd", "=", "True", "while", "chan", ".", "recv_ready", "(", ")", ":", "data", "=", "chan", ".", "recv", "(", "1024", ")", "if", "get_output", ":", "output", "+=", "data", "recvd", "=", "True", "if", "not", "recvd", "and", "chan", ".", "exit_status_ready", "(", ")", ":", "break", "output", "=", "output", ".", "rstrip", "(", "b'\\r\\n'", ")", "return", "chan", ".", "recv_exit_status", "(", ")", ",", "output", "finally", ":", "server_err", ".", "done", "(", ")", "chan", ".", "close", "(", ")" ]
Calls a command through the SSH connection. Remote stderr gets printed to this program's stderr. Output is captured and may be returned.
[ "Calls", "a", "command", "through", "the", "SSH", "connection", "." ]
python
train
37.314286
fitnr/convertdate
convertdate/french_republican.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/french_republican.py#L291-L300
def _from_jd_equinox(jd): '''Calculate the FR day using the equinox as day 1''' jd = trunc(jd) + 0.5 equinoxe = premier_da_la_annee(jd) an = gregorian.from_jd(equinoxe)[0] - YEAR_EPOCH mois = trunc((jd - equinoxe) / 30.) + 1 jour = int((jd - equinoxe) % 30) + 1 return (an, mois, jour)
[ "def", "_from_jd_equinox", "(", "jd", ")", ":", "jd", "=", "trunc", "(", "jd", ")", "+", "0.5", "equinoxe", "=", "premier_da_la_annee", "(", "jd", ")", "an", "=", "gregorian", ".", "from_jd", "(", "equinoxe", ")", "[", "0", "]", "-", "YEAR_EPOCH", "mois", "=", "trunc", "(", "(", "jd", "-", "equinoxe", ")", "/", "30.", ")", "+", "1", "jour", "=", "int", "(", "(", "jd", "-", "equinoxe", ")", "%", "30", ")", "+", "1", "return", "(", "an", ",", "mois", ",", "jour", ")" ]
Calculate the FR day using the equinox as day 1
[ "Calculate", "the", "FR", "day", "using", "the", "equinox", "as", "day", "1" ]
python
train
30.6
svasilev94/GraphLibrary
graphlibrary/first_search.py
https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L35-L51
def BFS_Tree(G, start): """ Return an oriented tree constructed from bfs starting at 'start'. """ if start not in G.vertices: raise GraphInsertError("Vertex %s doesn't exist." % (start,)) pred = BFS(G, start) T = digraph.DiGraph() queue = Queue() queue.put(start) while queue.qsize() > 0: current = queue.get() for element in pred: if pred[element] == current: T.add_edge(current, element) queue.put(element) return T
[ "def", "BFS_Tree", "(", "G", ",", "start", ")", ":", "if", "start", "not", "in", "G", ".", "vertices", ":", "raise", "GraphInsertError", "(", "\"Vertex %s doesn't exist.\"", "%", "(", "start", ",", ")", ")", "pred", "=", "BFS", "(", "G", ",", "start", ")", "T", "=", "digraph", ".", "DiGraph", "(", ")", "queue", "=", "Queue", "(", ")", "queue", ".", "put", "(", "start", ")", "while", "queue", ".", "qsize", "(", ")", ">", "0", ":", "current", "=", "queue", ".", "get", "(", ")", "for", "element", "in", "pred", ":", "if", "pred", "[", "element", "]", "==", "current", ":", "T", ".", "add_edge", "(", "current", ",", "element", ")", "queue", ".", "put", "(", "element", ")", "return", "T" ]
Return an oriented tree constructed from bfs starting at 'start'.
[ "Return", "an", "oriented", "tree", "constructed", "from", "bfs", "starting", "at", "start", "." ]
python
train
30.882353
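A hedged call sketch; `G` is assumed to be a graph built with this package's graph class, whose `add_edge`/`vertices` interface the search code above already relies on:

    # G: a previously built graph, e.g. via repeated G.add_edge(u, v)
    T = BFS_Tree(G, 'a')   # oriented tree with edges pointing away from root 'a'
    print(T.vertices)      # the DiGraph result exposes .vertices like the input graph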
moliware/dicts
dicts/sorteddict.py
https://github.com/moliware/dicts/blob/0e8258cc3dc00fe929685cae9cda062222722715/dicts/sorteddict.py#L35-L40
def iteritems(self): """ Sort and then iterate the dictionary """ sorted_data = sorted(self.data.iteritems(), self.cmp, self.key, self.reverse) for k,v in sorted_data: yield k,v
[ "def", "iteritems", "(", "self", ")", ":", "sorted_data", "=", "sorted", "(", "self", ".", "data", ".", "iteritems", "(", ")", ",", "self", ".", "cmp", ",", "self", ".", "key", ",", "self", ".", "reverse", ")", "for", "k", ",", "v", "in", "sorted_data", ":", "yield", "k", ",", "v" ]
Sort and then iterate the dictionary
[ "Sort", "and", "then", "iterate", "the", "dictionary" ]
python
train
39.5
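The snippet targets Python 2 (`dict.iteritems` and the four-argument form of `sorted` with `cmp`); the same sort-then-iterate idea in Python 3 looks like:

    data = {'b': 2, 'a': 1, 'c': 3}
    for k, v in sorted(data.items(), key=lambda kv: kv[0], reverse=False):
        print(k, v)   # a 1, then b 2, then c 3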
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/param_types.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/param_types.py#L57-L68
def Struct(fields): # pylint: disable=invalid-name """Construct a struct parameter type description protobuf. :type fields: list of :class:`type_pb2.StructType.Field` :param fields: the fields of the struct :rtype: :class:`type_pb2.Type` :returns: the appropriate struct-type protobuf """ return type_pb2.Type( code=type_pb2.STRUCT, struct_type=type_pb2.StructType(fields=fields) )
[ "def", "Struct", "(", "fields", ")", ":", "# pylint: disable=invalid-name", "return", "type_pb2", ".", "Type", "(", "code", "=", "type_pb2", ".", "STRUCT", ",", "struct_type", "=", "type_pb2", ".", "StructType", "(", "fields", "=", "fields", ")", ")" ]
Construct a struct parameter type description protobuf. :type fields: list of :class:`type_pb2.StructType.Field` :param fields: the fields of the struct :rtype: :class:`type_pb2.Type` :returns: the appropriate struct-type protobuf
[ "Construct", "a", "struct", "parameter", "type", "description", "protobuf", "." ]
python
train
34.416667
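A usage sketch building a struct type with the companion `StructField` helper from the same module (the field names are placeholders):

    from google.cloud.spanner_v1 import param_types

    record_type = param_types.Struct([
        param_types.StructField('name', param_types.STRING),
        param_types.StructField('age', param_types.INT64),
    ])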
raphaelm/python-fints
fints/client.py
https://github.com/raphaelm/python-fints/blob/fee55ae37d3182d0adb40507d4acb98b06057e4a/fints/client.py#L947-L959
def get_data(self) -> bytes: """Return a compressed datablob representing this object. To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`. """ data = { "_class_name": self.__class__.__name__, "version": 1, "segments_bin": SegmentSequence([self.command_seg, self.tan_request]).render_bytes(), "resume_method": self.resume_method, "tan_request_structured": self.tan_request_structured, } return compress_datablob(DATA_BLOB_MAGIC_RETRY, 1, data)
[ "def", "get_data", "(", "self", ")", "->", "bytes", ":", "data", "=", "{", "\"_class_name\"", ":", "self", ".", "__class__", ".", "__name__", ",", "\"version\"", ":", "1", ",", "\"segments_bin\"", ":", "SegmentSequence", "(", "[", "self", ".", "command_seg", ",", "self", ".", "tan_request", "]", ")", ".", "render_bytes", "(", ")", ",", "\"resume_method\"", ":", "self", ".", "resume_method", ",", "\"tan_request_structured\"", ":", "self", ".", "tan_request_structured", ",", "}", "return", "compress_datablob", "(", "DATA_BLOB_MAGIC_RETRY", ",", "1", ",", "data", ")" ]
Return a compressed datablob representing this object. To restore the object, use :func:`fints.client.NeedRetryResponse.from_data`.
[ "Return", "a", "compressed", "datablob", "representing", "this", "object", ".", "To", "restore", "the", "object", "use", ":", "func", ":", "fints", ".", "client", ".", "NeedRetryResponse", ".", "from_data", "." ]
python
train
44
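A minimal sketch of the round trip the docstring describes; `response` is assumed to be a `NeedRetryResponse` obtained from an earlier client call:

    from fints.client import NeedRetryResponse

    blob = response.get_data()      # compressed datablob, safe to persist
    # ... store blob, ask the user for the TAN, then in a later session:
    restored = NeedRetryResponse.from_data(blob)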
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1090-L1109
def get_submission(self, url=None, submission_id=None, comment_limit=0, comment_sort=None, params=None): """Return a Submission object for the given url or submission_id. :param comment_limit: The desired number of comments to fetch. If <= 0 fetch the default number for the session's user. If None, fetch the maximum possible. :param comment_sort: The sort order for retrieved comments. When None use the default for the session's user. :param params: Dictionary containing extra GET data to put in the url. """ if bool(url) == bool(submission_id): raise TypeError('One (and only one) of id or url is required!') if submission_id: url = urljoin(self.config['comments'], submission_id) return objects.Submission.from_url(self, url, comment_limit=comment_limit, comment_sort=comment_sort, params=params)
[ "def", "get_submission", "(", "self", ",", "url", "=", "None", ",", "submission_id", "=", "None", ",", "comment_limit", "=", "0", ",", "comment_sort", "=", "None", ",", "params", "=", "None", ")", ":", "if", "bool", "(", "url", ")", "==", "bool", "(", "submission_id", ")", ":", "raise", "TypeError", "(", "'One (and only one) of id or url is required!'", ")", "if", "submission_id", ":", "url", "=", "urljoin", "(", "self", ".", "config", "[", "'comments'", "]", ",", "submission_id", ")", "return", "objects", ".", "Submission", ".", "from_url", "(", "self", ",", "url", ",", "comment_limit", "=", "comment_limit", ",", "comment_sort", "=", "comment_sort", ",", "params", "=", "params", ")" ]
Return a Submission object for the given url or submission_id. :param comment_limit: The desired number of comments to fetch. If <= 0 fetch the default number for the session's user. If None, fetch the maximum possible. :param comment_sort: The sort order for retrieved comments. When None use the default for the session's user. :param params: Dictionary containing extra GET data to put in the url.
[ "Return", "a", "Submission", "object", "for", "the", "given", "url", "or", "submission_id", "." ]
python
train
53.05
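A usage sketch against the praw 3-style session this module targets (the submission id is a placeholder; the vendored copy lives under rtv.packages.praw, a plain praw 3 install exposes the same call):

    import praw

    r = praw.Reddit(user_agent='example-agent')
    submission = r.get_submission(submission_id='92dd8', comment_limit=None)
    print(submission.title)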
BreakingBytes/simkit
simkit/core/outputs.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/outputs.py#L31-L52
def register(self, new_outputs, *args, **kwargs): """ Register outputs and metadata. * ``initial_value`` - used in dynamic calculations * ``size`` - number of elements per timestep * ``uncertainty`` - in percent of nominal value * ``variance`` - dictionary of covariances, diagonal is square of uncertianties, no units * ``jacobian`` - dictionary of sensitivities dxi/dfj * ``isconstant`` - ``True`` if constant, ``False`` if periodic * ``isproperty`` - ``True`` if output stays at last value during thresholds, ``False`` if reverts to initial value * ``timeseries`` - name of corresponding time series output, ``None`` if no time series * ``output_source`` - name :param new_outputs: new outputs to register. """ kwargs.update(zip(self.meta_names, args)) # call super method super(OutputRegistry, self).register(new_outputs, **kwargs)
[ "def", "register", "(", "self", ",", "new_outputs", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method", "super", "(", "OutputRegistry", ",", "self", ")", ".", "register", "(", "new_outputs", ",", "*", "*", "kwargs", ")" ]
Register outputs and metadata. * ``initial_value`` - used in dynamic calculations * ``size`` - number of elements per timestep * ``uncertainty`` - in percent of nominal value * ``variance`` - dictionary of covariances, diagonal is square of uncertainties, no units * ``jacobian`` - dictionary of sensitivities dxi/dfj * ``isconstant`` - ``True`` if constant, ``False`` if periodic * ``isproperty`` - ``True`` if output stays at last value during thresholds, ``False`` if reverts to initial value * ``timeseries`` - name of corresponding time series output, ``None`` if no time series * ``output_source`` - name :param new_outputs: new outputs to register.
[ "Register", "outputs", "and", "metadata", "." ]
python
train
44.272727
ensime/ensime-vim
ensime_shared/protocol.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/protocol.py#L163-L177
def handle_symbol_search(self, call_id, payload): """Handler for symbol search results""" self.log.debug('handle_symbol_search: in %s', Pretty(payload)) syms = payload["syms"] qfList = [] for sym in syms: p = sym.get("pos") if p: item = self.editor.to_quickfix_item(str(p["file"]), p["line"], str(sym["name"]), "info") qfList.append(item) self.editor.write_quickfix_list(qfList, "Symbol Search")
[ "def", "handle_symbol_search", "(", "self", ",", "call_id", ",", "payload", ")", ":", "self", ".", "log", ".", "debug", "(", "'handle_symbol_search: in %s'", ",", "Pretty", "(", "payload", ")", ")", "syms", "=", "payload", "[", "\"syms\"", "]", "qfList", "=", "[", "]", "for", "sym", "in", "syms", ":", "p", "=", "sym", ".", "get", "(", "\"pos\"", ")", "if", "p", ":", "item", "=", "self", ".", "editor", ".", "to_quickfix_item", "(", "str", "(", "p", "[", "\"file\"", "]", ")", ",", "p", "[", "\"line\"", "]", ",", "str", "(", "sym", "[", "\"name\"", "]", ")", ",", "\"info\"", ")", "qfList", ".", "append", "(", "item", ")", "self", ".", "editor", ".", "write_quickfix_list", "(", "qfList", ",", "\"Symbol Search\"", ")" ]
Handler for symbol search results
[ "Handler", "for", "symbol", "search", "results" ]
python
train
42.8