Dataset schema (column name, dtype, observed range):

    repo              string   7–55 chars
    path              string   4–223 chars
    url               string   87–315 chars
    code              string   75–104k chars
    code_tokens       list
    docstring         string   1–46.9k chars
    docstring_tokens  list
    language          string   1 distinct value
    partition         string   3 distinct values
    avg_line_len      float64  7.91–980
Chilipp/psyplot
psyplot/plotter.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/plotter.py#L86-L103
def is_data_dependent(fmto, data): """Check whether a formatoption is data dependent Parameters ---------- fmto: Formatoption The :class:`Formatoption` instance to check data: xarray.DataArray The data array to use if the :attr:`~Formatoption.data_dependent` attribute is a callable Returns ------- bool True, if the formatoption depends on the data""" if callable(fmto.data_dependent): return fmto.data_dependent(data) return fmto.data_dependent
[ "def", "is_data_dependent", "(", "fmto", ",", "data", ")", ":", "if", "callable", "(", "fmto", ".", "data_dependent", ")", ":", "return", "fmto", ".", "data_dependent", "(", "data", ")", "return", "fmto", ".", "data_dependent" ]
Check whether a formatoption is data dependent Parameters ---------- fmto: Formatoption The :class:`Formatoption` instance to check data: xarray.DataArray The data array to use if the :attr:`~Formatoption.data_dependent` attribute is a callable Returns ------- bool True, if the formatoption depends on the data
[ "Check", "whether", "a", "formatoption", "is", "data", "dependent" ]
python
train
28.388889
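A formatoption's `data_dependent` attribute may be either a plain boolean or a predicate over the data array, which is why the helper checks `callable` first. A minimal runnable sketch of that dispatch, using a hypothetical `Fmto` stub in place of psyplot's `Formatoption`:

```python
# Hypothetical stand-in for psyplot's Formatoption; only the
# data_dependent attribute matters here.
class Fmto:
    def __init__(self, data_dependent):
        self.data_dependent = data_dependent

def is_data_dependent(fmto, data):
    if callable(fmto.data_dependent):
        return fmto.data_dependent(data)  # predicate: evaluate against the data
    return fmto.data_dependent            # plain bool: return as-is

print(is_data_dependent(Fmto(True), data=None))                   # True
print(is_data_dependent(Fmto(lambda d: d is not None), data=42))  # True
```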
openego/ding0
ding0/core/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/__init__.py#L1805-L1863
def metadata(self, run_id=None): """Provide metadata on a Ding0 run Parameters ---------- run_id: str, (defaults to current date) Distinguish multiple versions of Ding0 data by a `run_id`. If not set it defaults to current date in the format YYYYMMDDhhmmss Returns ------- dict Metadata """ # Get latest version and/or git commit hash try: version = subprocess.check_output( ["git", "describe", "--tags", "--always"]).decode('utf8') except Exception: version = None # Collect names of database tables used to run Ding0 and data version if self.config['input_data_source']['input_data'] == 'versioned': data_version = self.config['versioned']['version'] database_tables = self.config['versioned'] elif self.config['input_data_source']['input_data'] == 'model_draft': data_version = 'model_draft' database_tables = self.config['model_draft'] else: data_version = 'unknown' database_tables = 'unknown' # Collect assumptions assumptions = {} assumptions.update(self.config['assumptions']) assumptions.update(self.config['mv_connect']) assumptions.update(self.config['mv_routing']) assumptions.update(self.config['mv_routing_tech_constraints']) # Determine run_id if not set if not run_id: run_id = datetime.now().strftime("%Y%m%d%H%M%S") # Set instance attribute run_id if not self._run_id: self._run_id = run_id # Assign data to dict metadata = dict( version=version, mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts], database_tables=database_tables, data_version=data_version, assumptions=assumptions, run_id=self._run_id ) return metadata
[ "def", "metadata", "(", "self", ",", "run_id", "=", "None", ")", ":", "# Get latest version and/or git commit hash", "try", ":", "version", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"describe\"", ",", "\"--tags\"", ",", "\"--always\"", "]", ")", ".", "decode", "(", "'utf8'", ")", "except", ":", "version", "=", "None", "# Collect names of database table used to run Ding0 and data version", "if", "self", ".", "config", "[", "'input_data_source'", "]", "[", "'input_data'", "]", "==", "'versioned'", ":", "data_version", "=", "self", ".", "config", "[", "'versioned'", "]", "[", "'version'", "]", "database_tables", "=", "self", ".", "config", "[", "'versioned'", "]", "elif", "self", ".", "config", "[", "'input_data_source'", "]", "[", "'input_data'", "]", "==", "'model_draft'", ":", "data_version", "=", "'model_draft'", "database_tables", "=", "self", ".", "config", "[", "'model_draft'", "]", "else", ":", "data_version", "=", "'unknown'", "database_tables", "=", "'unknown'", "# Collect assumptions", "assumptions", "=", "{", "}", "assumptions", ".", "update", "(", "self", ".", "config", "[", "'assumptions'", "]", ")", "assumptions", ".", "update", "(", "self", ".", "config", "[", "'mv_connect'", "]", ")", "assumptions", ".", "update", "(", "self", ".", "config", "[", "'mv_routing'", "]", ")", "assumptions", ".", "update", "(", "self", ".", "config", "[", "'mv_routing_tech_constraints'", "]", ")", "# Determine run_id if not set", "if", "not", "run_id", ":", "run_id", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y%m%d%H%M%S\"", ")", "# Set instance attribute run_id", "if", "not", "self", ".", "_run_id", ":", "self", ".", "_run_id", "=", "run_id", "# Assing data to dict", "metadata", "=", "dict", "(", "version", "=", "version", ",", "mv_grid_districts", "=", "[", "int", "(", "_", ".", "id_db", ")", "for", "_", "in", "self", ".", "_mv_grid_districts", "]", ",", "database_tables", "=", "database_tables", ",", "data_version", "=", "data_version", ",", "assumptions", "=", "assumptions", ",", "run_id", "=", "self", ".", "_run_id", ")", "return", "metadata" ]
Provide metadata on a Ding0 run Parameters ---------- run_id: str, (defaults to current date) Distinguish multiple versions of Ding0 data by a `run_id`. If not set it defaults to current date in the format YYYYMMDDhhmmss Returns ------- dict Metadata
[ "Provide", "metadata", "on", "a", "Ding0", "run" ]
python
train
33.389831
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Attitude/MPU6050.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Attitude/MPU6050.py#L533-L555
def getAllData(self, temp = True, accel = True, gyro = True): """! Get all the available data. @param temp: True - Allow to return Temperature data @param accel: True - Allow to return Accelerometer data @param gyro: True - Allow to return Gyroscope data @return a dictionary data @retval {} Did not read any data @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data """ allData = {} if temp: allData["temp"] = self.getTemp() if accel: allData["accel"] = self.getAccelData( raw = False ) if gyro: allData["gyro"] = self.getGyroData() return allData
[ "def", "getAllData", "(", "self", ",", "temp", "=", "True", ",", "accel", "=", "True", ",", "gyro", "=", "True", ")", ":", "allData", "=", "{", "}", "if", "temp", ":", "allData", "[", "\"temp\"", "]", "=", "self", ".", "getTemp", "(", ")", "if", "accel", ":", "allData", "[", "\"accel\"", "]", "=", "self", ".", "getAccelData", "(", "raw", "=", "False", ")", "if", "gyro", ":", "allData", "[", "\"gyro\"", "]", "=", "self", ".", "getGyroData", "(", ")", "return", "allData" ]
! Get all the available data. @param temp: True - Allow to return Temperature data @param accel: True - Allow to return Accelerometer data @param gyro: True - Allow to return Gyroscope data @return a dictionary data @retval {} Did not read any data @retval {"temp":32.3,"accel":{"x":0.45634,"y":0.2124,"z":1.334},"gyro":{"x":0.45634,"y":0.2124,"z":1.334}} Returned all data
[ "!", "Get", "all", "the", "available", "data", "." ]
python
train
33
SeleniumHQ/selenium
py/selenium/webdriver/remote/webdriver.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webdriver.py#L168-L194
def file_detector_context(self, file_detector_class, *args, **kwargs): """ Overrides the current file detector (if necessary) in limited context. Ensures the original file detector is set afterwards. Example: with webdriver.file_detector_context(UselessFileDetector): someinput.send_keys('/etc/hosts') :Args: - file_detector_class - Class of the desired file detector. If the class is different from the current file_detector, then the class is instantiated with args and kwargs and used as a file detector during the duration of the context manager. - args - Optional arguments that get passed to the file detector class during instantiation. - kwargs - Keyword arguments, passed the same way as args. """ last_detector = None if not isinstance(self.file_detector, file_detector_class): last_detector = self.file_detector self.file_detector = file_detector_class(*args, **kwargs) try: yield finally: if last_detector is not None: self.file_detector = last_detector
[ "def", "file_detector_context", "(", "self", ",", "file_detector_class", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "last_detector", "=", "None", "if", "not", "isinstance", "(", "self", ".", "file_detector", ",", "file_detector_class", ")", ":", "last_detector", "=", "self", ".", "file_detector", "self", ".", "file_detector", "=", "file_detector_class", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "yield", "finally", ":", "if", "last_detector", "is", "not", "None", ":", "self", ".", "file_detector", "=", "last_detector" ]
Overrides the current file detector (if necessary) in limited context. Ensures the original file detector is set afterwards. Example: with webdriver.file_detector_context(UselessFileDetector): someinput.send_keys('/etc/hosts') :Args: - file_detector_class - Class of the desired file detector. If the class is different from the current file_detector, then the class is instantiated with args and kwargs and used as a file detector during the duration of the context manager. - args - Optional arguments that get passed to the file detector class during instantiation. - kwargs - Keyword arguments, passed the same way as args.
[ "Overrides", "the", "current", "file", "detector", "(", "if", "necessary", ")", "in", "limited", "context", ".", "Ensures", "the", "original", "file", "detector", "is", "set", "afterwards", "." ]
python
train
43.407407
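The save-and-restore discipline above (stash the old detector, install the new one, restore in `finally`) generalizes to any attribute override. A sketch under that reading; the names here are illustrative, not part of Selenium's API:

```python
from contextlib import contextmanager

@contextmanager
def temporary_attr(obj, name, value):
    # Install `value`, then restore the original on exit, even on error --
    # the same try/finally pattern file_detector_context relies on.
    original = getattr(obj, name)
    setattr(obj, name, value)
    try:
        yield
    finally:
        setattr(obj, name, original)

class Driver:
    file_detector = "default"

d = Driver()
with temporary_attr(d, "file_detector", "useless"):
    print(d.file_detector)  # useless
print(d.file_detector)      # default
```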
darkfeline/animanager
animanager/animecmd.py
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/animecmd.py#L86-L109
def cmdloop(self): """Start CLI REPL.""" while True: cmdline = input(self.prompt) tokens = shlex.split(cmdline) if not tokens: if self.last_cmd: tokens = self.last_cmd else: print('No previous command.') continue if tokens[0] not in self.commands: print('Invalid command') continue command = self.commands[tokens[0]] self.last_cmd = tokens try: if command(self.state, tokens): break except CmdExit: continue except Exception as e: if type(e) not in self.safe_exceptions: logger.exception('Error!')
[ "def", "cmdloop", "(", "self", ")", ":", "while", "True", ":", "cmdline", "=", "input", "(", "self", ".", "prompt", ")", "tokens", "=", "shlex", ".", "split", "(", "cmdline", ")", "if", "not", "tokens", ":", "if", "self", ".", "last_cmd", ":", "tokens", "=", "self", ".", "last_cmd", "else", ":", "print", "(", "'No previous command.'", ")", "continue", "if", "tokens", "[", "0", "]", "not", "in", "self", ".", "commands", ":", "print", "(", "'Invalid command'", ")", "continue", "command", "=", "self", ".", "commands", "[", "tokens", "[", "0", "]", "]", "self", ".", "last_cmd", "=", "tokens", "try", ":", "if", "command", "(", "self", ".", "state", ",", "tokens", ")", ":", "break", "except", "CmdExit", ":", "continue", "except", "Exception", "as", "e", ":", "if", "e", "not", "in", "self", ".", "safe_exceptions", ":", "logger", ".", "exception", "(", "'Error!'", ")" ]
Start CLI REPL.
[ "Start", "CLI", "REPL", "." ]
python
train
33.5
numenta/nupic
src/nupic/swarming/experiment_utils.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/experiment_utils.py#L154-L166
def isTemporal(inferenceType): """ Returns True if the inference type is 'temporal', i.e. requires a temporal memory in the network. """ if InferenceType.__temporalInferenceTypes is None: InferenceType.__temporalInferenceTypes = \ set([InferenceType.TemporalNextStep, InferenceType.TemporalClassification, InferenceType.TemporalAnomaly, InferenceType.TemporalMultiStep, InferenceType.NontemporalMultiStep]) return inferenceType in InferenceType.__temporalInferenceTypes
[ "def", "isTemporal", "(", "inferenceType", ")", ":", "if", "InferenceType", ".", "__temporalInferenceTypes", "is", "None", ":", "InferenceType", ".", "__temporalInferenceTypes", "=", "set", "(", "[", "InferenceType", ".", "TemporalNextStep", ",", "InferenceType", ".", "TemporalClassification", ",", "InferenceType", ".", "TemporalAnomaly", ",", "InferenceType", ".", "TemporalMultiStep", ",", "InferenceType", ".", "NontemporalMultiStep", "]", ")", "return", "inferenceType", "in", "InferenceType", ".", "__temporalInferenceTypes" ]
Returns True if the inference type is 'temporal', i.e. requires a temporal memory in the network.
[ "Returns", "True", "if", "the", "inference", "type", "is", "temporal", "i", ".", "e", ".", "requires", "a", "temporal", "memory", "in", "the", "network", "." ]
python
valid
51.076923
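The pattern here is lazy one-time initialization of a class-level lookup set: the set is built on the first call, then reused for O(1) membership tests. A self-contained sketch with hypothetical type names (not NuPIC's real inference types):

```python
class InferenceType:
    TemporalNextStep = "TemporalNextStep"
    NontemporalClassification = "NontemporalClassification"
    _temporal_types = None  # built lazily on the first isTemporal call

    @staticmethod
    def isTemporal(inference_type):
        if InferenceType._temporal_types is None:
            InferenceType._temporal_types = {InferenceType.TemporalNextStep}
        return inference_type in InferenceType._temporal_types

print(InferenceType.isTemporal("TemporalNextStep"))           # True
print(InferenceType.isTemporal("NontemporalClassification"))  # False
```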
ibis-project/ibis
ibis/config.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/config.py#L703-L723
def is_instance_factory(_type): """ Parameters ---------- `_type` - the type to be checked against Returns ------- validator - a function of a single argument x, which returns True if x is an instance of `_type` """ if isinstance(_type, (tuple, list)): _type = tuple(_type) type_repr = "|".join(map(str, _type)) else: type_repr = "'%s'" % _type def inner(x): if not isinstance(x, _type): raise ValueError("Value must be an instance of %s" % type_repr) return inner
[ "def", "is_instance_factory", "(", "_type", ")", ":", "if", "isinstance", "(", "_type", ",", "(", "tuple", ",", "list", ")", ")", ":", "_type", "=", "tuple", "(", "_type", ")", "type_repr", "=", "\"|\"", ".", "join", "(", "map", "(", "str", ",", "_type", ")", ")", "else", ":", "type_repr", "=", "\"'%s'\"", "%", "_type", "def", "inner", "(", "x", ")", ":", "if", "not", "isinstance", "(", "x", ",", "_type", ")", ":", "raise", "ValueError", "(", "\"Value must be an instance of %s\"", "%", "type_repr", ")", "return", "inner" ]
Parameters ---------- `_type` - the type to be checked against Returns ------- validator - a function of a single argument x, which returns True if x is an instance of `_type`
[ "Parameters", "----------", "_type", "-", "the", "type", "to", "be", "checked", "against", "Returns", "-------", "validator", "-", "a", "function", "of", "a", "single", "argument", "x", "which", "returns", "the", "True", "if", "x", "is", "an", "instance", "of", "_type" ]
python
train
26.619048
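Usage of the validator factory: it closes over `_type` and a readable `type_repr`, and the returned function raises on mismatch and returns `None` on success. Re-implemented locally so the example runs without ibis:

```python
def is_instance_factory(_type):
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
        type_repr = "|".join(map(str, _type))
    else:
        type_repr = "'%s'" % _type

    def inner(x):
        if not isinstance(x, _type):
            raise ValueError("Value must be an instance of %s" % type_repr)

    return inner

validate_number = is_instance_factory((int, float))
validate_number(3.5)  # passes silently
try:
    validate_number("3.5")
except ValueError as e:
    print(e)  # Value must be an instance of <class 'int'>|<class 'float'>
```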
nok/sklearn-porter
sklearn_porter/Porter.py
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/Porter.py#L456-L488
def _get_filename(class_name, language): """ Generate the specific filename. Parameters ---------- :param class_name : str The used class name. :param language : {'c', 'go', 'java', 'js', 'php', 'ruby'} The target programming language. Returns ------- filename : str The generated filename. """ name = str(class_name).strip() lang = str(language) # Name: if language in ['java', 'php']: name = "".join([name[0].upper() + name[1:]]) # Suffix: suffix = { 'c': 'c', 'java': 'java', 'js': 'js', 'go': 'go', 'php': 'php', 'ruby': 'rb' } suffix = suffix.get(lang, lang) # Filename: return '{}.{}'.format(name, suffix)
[ "def", "_get_filename", "(", "class_name", ",", "language", ")", ":", "name", "=", "str", "(", "class_name", ")", ".", "strip", "(", ")", "lang", "=", "str", "(", "language", ")", "# Name:", "if", "language", "in", "[", "'java'", ",", "'php'", "]", ":", "name", "=", "\"\"", ".", "join", "(", "[", "name", "[", "0", "]", ".", "upper", "(", ")", "+", "name", "[", "1", ":", "]", "]", ")", "# Suffix:", "suffix", "=", "{", "'c'", ":", "'c'", ",", "'java'", ":", "'java'", ",", "'js'", ":", "'js'", ",", "'go'", ":", "'go'", ",", "'php'", ":", "'php'", ",", "'ruby'", ":", "'rb'", "}", "suffix", "=", "suffix", ".", "get", "(", "lang", ",", "lang", ")", "# Filename:", "return", "'{}.{}'", ".", "format", "(", "name", ",", "suffix", ")" ]
Generate the specific filename. Parameters ---------- :param class_name : str The used class name. :param language : {'c', 'go', 'java', 'js', 'php', 'ruby'} The target programming language. Returns ------- filename : str The generated filename.
[ "Generate", "the", "specific", "filename", "." ]
python
train
24.878788
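The suffix lookup uses `dict.get(lang, lang)` so any language without an explicit entry falls back to the language string itself as the extension. A quick illustration:

```python
suffix = {'c': 'c', 'java': 'java', 'js': 'js',
          'go': 'go', 'php': 'php', 'ruby': 'rb'}

print(suffix.get('ruby', 'ruby'))      # rb      -- mapped explicitly
print(suffix.get('kotlin', 'kotlin'))  # kotlin  -- no entry, key used as-is
print('{}.{}'.format('Estimator', suffix.get('ruby', 'ruby')))  # Estimator.rb
```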
yandex/yandex-tank
yandextank/plugins/ResourceCheck/plugin.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/ResourceCheck/plugin.py#L72-L79
def __check_mem(self): ''' raise exception on RAM exceeded ''' mem_free = psutil.virtual_memory().available / 2**20 self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit) if mem_free < self.mem_limit: raise RuntimeError( "Not enough resources: free memory less " "than %sMB: %sMB" % (self.mem_limit, mem_free))
[ "def", "__check_mem", "(", "self", ")", ":", "mem_free", "=", "psutil", ".", "virtual_memory", "(", ")", ".", "available", "/", "2", "**", "20", "self", ".", "log", ".", "debug", "(", "\"Memory free: %s/%s\"", ",", "mem_free", ",", "self", ".", "mem_limit", ")", "if", "mem_free", "<", "self", ".", "mem_limit", ":", "raise", "RuntimeError", "(", "\"Not enough resources: free memory less \"", "\"than %sMB: %sMB\"", "%", "(", "self", ".", "mem_limit", ",", "mem_free", ")", ")" ]
raise exception on RAM exceeded
[ "raise", "exception", "on", "RAM", "exceeded" ]
python
test
48.375
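The check converts `psutil`'s byte count to MiB by dividing by `2**20` and raises before the load-testing run can exhaust RAM. A standalone sketch; the 512 MB threshold is illustrative, not yandex-tank's configured default:

```python
import psutil  # third-party; pip install psutil

MEM_LIMIT_MB = 512  # illustrative threshold

mem_free_mb = psutil.virtual_memory().available / 2**20  # bytes -> MiB
if mem_free_mb < MEM_LIMIT_MB:
    raise RuntimeError("Not enough resources: free memory less "
                       "than %sMB: %sMB" % (MEM_LIMIT_MB, mem_free_mb))
print("ok: %.0f MiB free" % mem_free_mb)
```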
matthiask/django-cte-forest
cte_forest/models.py
https://github.com/matthiask/django-cte-forest/blob/7bff29d69eddfcf214e9cf61647c91d28655619c/cte_forest/models.py#L1227-L1252
def as_tree(self, visitor=None, children=None): """ Recursively traverses each tree (starting from each root) in order to generate a dictionary-based tree structure of the entire forest. Each level of the forest/tree is a list of nodes, and each node consists of a dictionary representation, where the entry ``children`` (by default) consists of a list of dictionary representations of its children. See :meth:`CTENodeManager.as_tree` and :meth:`CTENodeManager.node_as_tree` for details on how this method works, as well as its expected arguments. :param visitor: optional function responsible for generating the dictionary representation of a node. :param children: optional function responsible for generating a children key and list for a node. :return: a dictionary representation of the structure of the forest. """ _parameters = {"node": self} if visitor is not None: _parameters["visitor"] = visitor if children is not None: _parameters["children"] = children return self.__class__.objects.node_as_tree(**_parameters)
[ "def", "as_tree", "(", "self", ",", "visitor", "=", "None", ",", "children", "=", "None", ")", ":", "_parameters", "=", "{", "\"node\"", ":", "self", "}", "if", "visitor", "is", "not", "None", ":", "_parameters", "[", "\"visitor\"", "]", "=", "visitor", "if", "children", "is", "not", "None", ":", "_parameters", "[", "\"children\"", "]", "=", "children", "return", "self", ".", "__class__", ".", "objects", ".", "node_as_tree", "(", "*", "*", "_parameters", ")" ]
Recursively traverses each tree (starting from each root) in order to generate a dictionary-based tree structure of the entire forest. Each level of the forest/tree is a list of nodes, and each node consists of a dictionary representation, where the entry ``children`` (by default) consists of a list of dictionary representations of its children. See :meth:`CTENodeManager.as_tree` and :meth:`CTENodeManager.node_as_tree` for details on how this method works, as well as its expected arguments. :param visitor: optional function responsible for generating the dictionary representation of a node. :param children: optional function responsible for generating a children key and list for a node. :return: a dictionary representation of the structure of the forest.
[ "Recursively", "traverses", "each", "tree", "(", "starting", "from", "each", "root", ")", "in", "order", "to", "generate", "a", "dictionary", "-", "based", "tree", "structure", "of", "the", "entire", "forest", ".", "Each", "level", "of", "the", "forest", "/", "tree", "is", "a", "list", "of", "nodes", "and", "each", "node", "consists", "of", "a", "dictionary", "representation", "where", "the", "entry", "children", "(", "by", "default", ")", "consists", "of", "a", "list", "of", "dictionary", "representations", "of", "its", "children", "." ]
python
train
47.653846
timothyb0912/pylogit
pylogit/estimation.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/estimation.py#L399-L421
def convenience_calc_fisher_approx(self, params): """ Calculates the BHHH approximation of the Fisher Information Matrix for this model / dataset. """ shapes, intercepts, betas = self.convenience_split_params(params) args = [betas, self.design, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.choice_vector, self.utility_transform, self.calc_dh_d_shape, self.calc_dh_dv, self.calc_dh_d_alpha, intercepts, shapes, self.ridge, self.weights] return cc.calc_fisher_info_matrix(*args)
[ "def", "convenience_calc_fisher_approx", "(", "self", ",", "params", ")", ":", "shapes", ",", "intercepts", ",", "betas", "=", "self", ".", "convenience_split_params", "(", "params", ")", "args", "=", "[", "betas", ",", "self", ".", "design", ",", "self", ".", "alt_id_vector", ",", "self", ".", "rows_to_obs", ",", "self", ".", "rows_to_alts", ",", "self", ".", "choice_vector", ",", "self", ".", "utility_transform", ",", "self", ".", "calc_dh_d_shape", ",", "self", ".", "calc_dh_dv", ",", "self", ".", "calc_dh_d_alpha", ",", "intercepts", ",", "shapes", ",", "self", ".", "ridge", ",", "self", ".", "weights", "]", "return", "cc", ".", "calc_fisher_info_matrix", "(", "*", "args", ")" ]
Calculates the BHHH approximation of the Fisher Information Matrix for this model / dataset.
[ "Calculates", "the", "BHHH", "approximation", "of", "the", "Fisher", "Information", "Matrix", "for", "this", "model", "/", "dataset", "." ]
python
train
32.043478
jaraco/jaraco.windows
jaraco/windows/environ.py
https://github.com/jaraco/jaraco.windows/blob/51811efed50b46ad08daa25408a1cc806bc8d519/jaraco/windows/environ.py#L172-L253
def enver(*args): """ %prog [<name>=[value]] To show all environment variables, call with no parameters: %prog To Add/Modify/Delete environment variable: %prog <name>=[value] If <name> is PATH or PATHEXT, %prog will by default append the value using a semicolon as a separator. Use -r to disable this behavior or -a to force it for variables other than PATH and PATHEXT. If append is prescribed, but the value doesn't exist, the value will be created. If there is no value, %prog will delete the <name> environment variable. i.e. "PATH=" To remove a specific value or values from a semicolon-separated multi-value variable (such as PATH), use --remove-value. e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path Remove-value matches case-insensitive and also matches any substring so the following would also be sufficient to remove the aforementioned undesirable dir. enver --remove-value PATH=UNWANTED Note that %prog does not affect the current running environment, and can only affect subsequently spawned applications. """ from optparse import OptionParser parser = OptionParser(usage=trim(enver.__doc__)) parser.add_option( '-U', '--user-environment', action='store_const', const=UserRegisteredEnvironment, default=MachineRegisteredEnvironment, dest='class_', help="Use the current user's environment", ) parser.add_option( '-a', '--append', action='store_true', default=False, help="Append the value to any existing value (default for PATH and PATHEXT)", ) parser.add_option( '-r', '--replace', action='store_true', default=False, help="Replace any existing value (used to override default append " "for PATH and PATHEXT)", ) parser.add_option( '--remove-value', action='store_true', default=False, help="Remove any matching values from a semicolon-separated " "multi-value variable", ) parser.add_option( '-e', '--edit', action='store_true', default=False, help="Edit the value in a local editor", ) options, args = parser.parse_args(*args) try: param = args.pop() if args: parser.error("Too many parameters specified") raise SystemExit(1) if '=' not in param and not options.edit: parser.error("Expected <name>= or <name>=<value>") raise SystemExit(2) name, sep, value = param.partition('=') method_name = 'set' if options.remove_value: method_name = 'remove_values' if options.edit: method_name = 'edit' method = getattr(options.class_, method_name) method(name, value, options) except IndexError: options.class_.show()
[ "def", "enver", "(", "*", "args", ")", ":", "from", "optparse", "import", "OptionParser", "parser", "=", "OptionParser", "(", "usage", "=", "trim", "(", "enver", ".", "__doc__", ")", ")", "parser", ".", "add_option", "(", "'-U'", ",", "'--user-environment'", ",", "action", "=", "'store_const'", ",", "const", "=", "UserRegisteredEnvironment", ",", "default", "=", "MachineRegisteredEnvironment", ",", "dest", "=", "'class_'", ",", "help", "=", "\"Use the current user's environment\"", ",", ")", "parser", ".", "add_option", "(", "'-a'", ",", "'--append'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Append the value to any existing value (default for PATH and PATHEXT)\"", ",", ")", "parser", ".", "add_option", "(", "'-r'", ",", "'--replace'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Replace any existing value (used to override default append \"", "\"for PATH and PATHEXT)\"", ",", ")", "parser", ".", "add_option", "(", "'--remove-value'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Remove any matching values from a semicolon-separated \"", "\"multi-value variable\"", ",", ")", "parser", ".", "add_option", "(", "'-e'", ",", "'--edit'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "\"Edit the value in a local editor\"", ",", ")", "options", ",", "args", "=", "parser", ".", "parse_args", "(", "*", "args", ")", "try", ":", "param", "=", "args", ".", "pop", "(", ")", "if", "args", ":", "parser", ".", "error", "(", "\"Too many parameters specified\"", ")", "raise", "SystemExit", "(", "1", ")", "if", "'='", "not", "in", "param", "and", "not", "options", ".", "edit", ":", "parser", ".", "error", "(", "\"Expected <name>= or <name>=<value>\"", ")", "raise", "SystemExit", "(", "2", ")", "name", ",", "sep", ",", "value", "=", "param", ".", "partition", "(", "'='", ")", "method_name", "=", "'set'", "if", "options", ".", "remove_value", ":", "method_name", "=", "'remove_values'", "if", "options", ".", "edit", ":", "method_name", "=", "'edit'", "method", "=", "getattr", "(", "options", ".", "class_", ",", "method_name", ")", "method", "(", "name", ",", "value", ",", "options", ")", "except", "IndexError", ":", "options", ".", "class_", ".", "show", "(", ")" ]
%prog [<name>=[value]] To show all environment variables, call with no parameters: %prog To Add/Modify/Delete environment variable: %prog <name>=[value] If <name> is PATH or PATHEXT, %prog will by default append the value using a semicolon as a separator. Use -r to disable this behavior or -a to force it for variables other than PATH and PATHEXT. If append is prescribed, but the value doesn't exist, the value will be created. If there is no value, %prog will delete the <name> environment variable. i.e. "PATH=" To remove a specific value or values from a semicolon-separated multi-value variable (such as PATH), use --remove-value. e.g. enver --remove-value PATH=C:\\Unwanted\\Dir\\In\\Path Remove-value matches case-insensitive and also matches any substring so the following would also be sufficient to remove the aforementioned undesirable dir. enver --remove-value PATH=UNWANTED Note that %prog does not affect the current running environment, and can only affect subsequently spawned applications.
[ "%prog", "[", "<name", ">", "=", "[", "value", "]]" ]
python
train
30.195122
Opentrons/opentrons
api/src/opentrons/protocol_api/labware.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/labware.py#L667-L682
def save_tip_length(labware: Labware, length: float): """ Function to be used whenever an updated tip length is found for a given tip rack. If an offset file does not exist, create the file using labware id as the filename. If the file does exist, load it and modify the length and the lastModified fields under the "tipLength" key. """ calibration_path = CONFIG['labware_calibration_offsets_dir_v4'] if not calibration_path.exists(): calibration_path.mkdir(parents=True, exist_ok=True) labware_offset_path = calibration_path/'{}.json'.format(labware._id) calibration_data = _helper_tip_length_data_format( str(labware_offset_path), length) with labware_offset_path.open('w') as f: json.dump(calibration_data, f) labware.tip_length = length
[ "def", "save_tip_length", "(", "labware", ":", "Labware", ",", "length", ":", "float", ")", ":", "calibration_path", "=", "CONFIG", "[", "'labware_calibration_offsets_dir_v4'", "]", "if", "not", "calibration_path", ".", "exists", "(", ")", ":", "calibration_path", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "labware_offset_path", "=", "calibration_path", "/", "'{}.json'", ".", "format", "(", "labware", ".", "_id", ")", "calibration_data", "=", "_helper_tip_length_data_format", "(", "str", "(", "labware_offset_path", ")", ",", "length", ")", "with", "labware_offset_path", ".", "open", "(", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "calibration_data", ",", "f", ")", "labware", ".", "tip_length", "=", "length" ]
Function to be used whenever an updated tip length is found for a given tip rack. If an offset file does not exist, create the file using labware id as the filename. If the file does exist, load it and modify the length and the lastModified fields under the "tipLength" key.
[ "Function", "to", "be", "used", "whenever", "an", "updated", "tip", "length", "is", "found", "for", "of", "a", "given", "tip", "rack", ".", "If", "an", "offset", "file", "does", "not", "exist", "create", "the", "file", "using", "labware", "id", "as", "the", "filename", ".", "If", "the", "file", "does", "exist", "load", "it", "and", "modify", "the", "length", "and", "the", "lastModified", "fields", "under", "the", "tipLength", "key", "." ]
python
train
50
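The persistence step is plain `pathlib` + `json`: ensure the directory exists, name the file after the labware id, and dump the calibration payload. A sketch with illustrative paths and payload (the real code derives both from `CONFIG` and `_helper_tip_length_data_format`):

```python
import json
from pathlib import Path
from tempfile import mkdtemp

calibration_path = Path(mkdtemp()) / "labware_offsets"  # illustrative dir
calibration_path.mkdir(parents=True, exist_ok=True)

labware_offset_path = calibration_path / '{}.json'.format("labware-123")
calibration_data = {"tipLength": {"value": 51.7}}  # illustrative payload

with labware_offset_path.open('w') as f:
    json.dump(calibration_data, f)

print(labware_offset_path.read_text())  # {"tipLength": {"value": 51.7}}
```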
OSSOS/MOP
src/ossos/core/scripts/step2.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/scripts/step2.py#L86-L152
def step2(expnums, ccd, version, prefix=None, dry_run=False, default="WCS"): """run the actual step2 on the given exp/ccd combo""" jmp_trans = ['step2ajmp'] jmp_args = ['step2bjmp'] matt_args = ['step2matt_jmp'] idx = 0 for expnum in expnums: jmp_args.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8] ) jmp_trans.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.jmp', prefix=prefix)[0:-8] ) idx += 1 matt_args.append('-f%d' % idx) matt_args.append( storage.get_file(expnum, ccd=ccd, version=version, ext='obj.matt', prefix=prefix)[0:-9] ) logging.info(util.exec_prog(jmp_trans)) if default == "WCS": logging.info(compute_trans(expnums, ccd, version, prefix, default=default)) logging.info(util.exec_prog(jmp_args)) logging.info(util.exec_prog(matt_args)) ## check that the shifts from step2 are rational check_args = ['checktrans'] if os.access('proc-these-files', os.R_OK): os.unlink('proc-these-files') ptf = open('proc-these-files', 'w') ptf.write("# A dummy file that is created so checktrans could run.\n") ptf.write("# Frame FWHM PSF?\n") for expnum in expnums: filename = os.path.splitext(storage.get_image(expnum, ccd, version=version, prefix=prefix))[0] if not os.access(filename + ".bright.psf", os.R_OK): os.link(filename + ".bright.jmp", filename + ".bright.psf") if not os.access(filename + ".obj.psf", os.R_OK): os.link(filename + ".obj.jmp", filename + ".obj.psf") ptf.write("{:>19s}{:>10.1f}{:>5s}\n".format(filename, _FWHM, "NO")) ptf.close() if os.access('BAD_TRANS', os.F_OK): os.unlink('BAD_TRANS') logging.info(util.exec_prog(check_args)) if os.access('BAD_TRANS', os.F_OK): raise OSError(errno.EBADMSG, 'BAD_TRANS') if os.access('proc-these-files', os.F_OK): os.unlink('proc-these-files') if dry_run: return for expnum in expnums: for ext in ['unid.jmp', 'unid.matt', 'trans.jmp']: uri = storage.dbimages_uri(expnum, ccd=ccd, version=version, ext=ext, prefix=prefix) filename = os.path.basename(uri) storage.copy(filename, uri) return
[ "def", "step2", "(", "expnums", ",", "ccd", ",", "version", ",", "prefix", "=", "None", ",", "dry_run", "=", "False", ",", "default", "=", "\"WCS\"", ")", ":", "jmp_trans", "=", "[", "'step2ajmp'", "]", "jmp_args", "=", "[", "'step2bjmp'", "]", "matt_args", "=", "[", "'step2matt_jmp'", "]", "idx", "=", "0", "for", "expnum", "in", "expnums", ":", "jmp_args", ".", "append", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "'obj.jmp'", ",", "prefix", "=", "prefix", ")", "[", "0", ":", "-", "8", "]", ")", "jmp_trans", ".", "append", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "'obj.jmp'", ",", "prefix", "=", "prefix", ")", "[", "0", ":", "-", "8", "]", ")", "idx", "+=", "1", "matt_args", ".", "append", "(", "'-f%d'", "%", "idx", ")", "matt_args", ".", "append", "(", "storage", ".", "get_file", "(", "expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "'obj.matt'", ",", "prefix", "=", "prefix", ")", "[", "0", ":", "-", "9", "]", ")", "logging", ".", "info", "(", "util", ".", "exec_prog", "(", "jmp_trans", ")", ")", "if", "default", "==", "\"WCS\"", ":", "logging", ".", "info", "(", "compute_trans", "(", "expnums", ",", "ccd", ",", "version", ",", "prefix", ",", "default", "=", "default", ")", ")", "logging", ".", "info", "(", "util", ".", "exec_prog", "(", "jmp_args", ")", ")", "logging", ".", "info", "(", "util", ".", "exec_prog", "(", "matt_args", ")", ")", "## check that the shifts from step2 are rational", "check_args", "=", "[", "'checktrans'", "]", "if", "os", ".", "access", "(", "'proc-these-files'", ",", "os", ".", "R_OK", ")", ":", "os", ".", "unlink", "(", "'proc-these-files'", ")", "ptf", "=", "open", "(", "'proc-these-files'", ",", "'w'", ")", "ptf", ".", "write", "(", "\"# A dummy file that is created so checktrans could run.\\n\"", ")", "ptf", ".", "write", "(", "\"# Frame FWHM PSF?\\n\"", ")", "for", "expnum", "in", "expnums", ":", "filename", "=", "os", ".", "path", ".", "splitext", "(", "storage", ".", "get_image", "(", "expnum", ",", "ccd", ",", "version", "=", "version", ",", "prefix", "=", "prefix", ")", ")", "[", "0", "]", "if", "not", "os", ".", "access", "(", "filename", "+", "\".bright.psf\"", ",", "os", ".", "R_OK", ")", ":", "os", ".", "link", "(", "filename", "+", "\".bright.jmp\"", ",", "filename", "+", "\".bright.psf\"", ")", "if", "not", "os", ".", "access", "(", "filename", "+", "\".obj.psf\"", ",", "os", ".", "R_OK", ")", ":", "os", ".", "link", "(", "filename", "+", "\".obj.jmp\"", ",", "filename", "+", "\".obj.psf\"", ")", "ptf", ".", "write", "(", "\"{:>19s}{:>10.1f}{:>5s}\\n\"", ".", "format", "(", "filename", ",", "_FWHM", ",", "\"NO\"", ")", ")", "ptf", ".", "close", "(", ")", "if", "os", ".", "access", "(", "'BAD_TRANS'", ",", "os", ".", "F_OK", ")", ":", "os", ".", "unlink", "(", "'BAD_TRANS'", ")", "logging", ".", "info", "(", "util", ".", "exec_prog", "(", "check_args", ")", ")", "if", "os", ".", "access", "(", "'BAD_TRANS'", ",", "os", ".", "F_OK", ")", ":", "raise", "OSError", "(", "errno", ".", "EBADMSG", ",", "'BAD_TRANS'", ")", "if", "os", ".", "access", "(", "'proc-these-files'", ",", "os", ".", "F_OK", ")", ":", "os", ".", "unlink", "(", "'proc-these-files'", ")", "if", "dry_run", ":", "return", "for", "expnum", "in", "expnums", ":", "for", "ext", "in", "[", "'unid.jmp'", ",", "'unid.matt'", ",", "'trans.jmp'", "]", ":", "uri", "=", "storage", ".", "dbimages_uri", "(", 
"expnum", ",", "ccd", "=", "ccd", ",", "version", "=", "version", ",", "ext", "=", "ext", ",", "prefix", "=", "prefix", ")", "filename", "=", "os", ".", "path", ".", "basename", "(", "uri", ")", "storage", ".", "copy", "(", "filename", ",", "uri", ")", "return" ]
run the actual step2 on the given exp/ccd combo
[ "run", "the", "actual", "step2", "on", "the", "given", "exp", "/", "ccd", "combo" ]
python
train
36.119403
luismasuelli/python-cantrips
cantrips/patterns/broadcast.py
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L88-L95
def broadcast(self, command, *args, **kwargs): """ Notifies each user with a specified command. """ criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL) for index, user in items(self.users()): if criterion(user, command, *args, **kwargs): self.notify(user, command, *args, **kwargs)
[ "def", "broadcast", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "criterion", "=", "kwargs", ".", "pop", "(", "'criterion'", ",", "self", ".", "BROADCAST_FILTER_ALL", ")", "for", "index", ",", "user", "in", "items", "(", "self", ".", "users", "(", ")", ")", ":", "if", "criterion", "(", "user", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "notify", "(", "user", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Notifies each user with a specified command.
[ "Notifies", "each", "user", "with", "a", "specified", "command", "." ]
python
train
44.125
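The broadcast filters through an overridable `criterion` callable popped from kwargs, so callers can target a subset of users without a separate method. A minimal sketch of that filter pattern (list-based, not cantrips' actual user registry):

```python
def broadcast(users, command, criterion=lambda user, command: True):
    # Notify every user the criterion accepts; here "notify" just collects.
    return [user for user in users if criterion(user, command)]

admins_only = lambda user, command: user.endswith("(admin)")
print(broadcast(["ann (admin)", "bob"], "shutdown", admins_only))
# ['ann (admin)']
print(broadcast(["ann (admin)", "bob"], "motd"))
# ['ann (admin)', 'bob']
```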
goldsmith/Wikipedia
wikipedia/wikipedia.py
https://github.com/goldsmith/Wikipedia/blob/2065c568502b19b8634241b47fd96930d1bf948d/wikipedia/wikipedia.py#L254-L280
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False): ''' Get a WikipediaPage object for the page with title `title` or the pageid `pageid` (mutually exclusive). Keyword arguments: * title - the title of the page to load * pageid - the numeric pageid of the page to load * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError * preload - load content, summary, images, references, and links during initialization ''' if title is not None: if auto_suggest: results, suggestion = search(title, results=1, suggestion=True) try: title = suggestion or results[0] except IndexError: # if there is no suggestion or search results, the page doesn't exist raise PageError(title) return WikipediaPage(title, redirect=redirect, preload=preload) elif pageid is not None: return WikipediaPage(pageid=pageid, preload=preload) else: raise ValueError("Either a title or a pageid must be specified")
[ "def", "page", "(", "title", "=", "None", ",", "pageid", "=", "None", ",", "auto_suggest", "=", "True", ",", "redirect", "=", "True", ",", "preload", "=", "False", ")", ":", "if", "title", "is", "not", "None", ":", "if", "auto_suggest", ":", "results", ",", "suggestion", "=", "search", "(", "title", ",", "results", "=", "1", ",", "suggestion", "=", "True", ")", "try", ":", "title", "=", "suggestion", "or", "results", "[", "0", "]", "except", "IndexError", ":", "# if there is no suggestion or search results, the page doesn't exist", "raise", "PageError", "(", "title", ")", "return", "WikipediaPage", "(", "title", ",", "redirect", "=", "redirect", ",", "preload", "=", "preload", ")", "elif", "pageid", "is", "not", "None", ":", "return", "WikipediaPage", "(", "pageid", "=", "pageid", ",", "preload", "=", "preload", ")", "else", ":", "raise", "ValueError", "(", "\"Either a title or a pageid must be specified\"", ")" ]
Get a WikipediaPage object for the page with title `title` or the pageid `pageid` (mutually exclusive). Keyword arguments: * title - the title of the page to load * pageid - the numeric pageid of the page to load * auto_suggest - let Wikipedia find a valid page title for the query * redirect - allow redirection without raising RedirectError * preload - load content, summary, images, references, and links during initialization
[ "Get", "a", "WikipediaPage", "object", "for", "the", "page", "with", "title", "title", "or", "the", "pageid", "pageid", "(", "mutually", "exclusive", ")", "." ]
python
train
38.814815
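Typical usage, assuming the `wikipedia` package is installed and the network is reachable (titles resolve through search suggestions by default):

```python
import wikipedia  # pip install wikipedia; performs live HTTP requests

# Title lookup, with auto_suggest enabled by default:
page = wikipedia.page("New York City")
print(page.title)

# Neither title nor pageid given -> ValueError, per the code above:
try:
    wikipedia.page()
except ValueError as e:
    print(e)  # Either a title or a pageid must be specified
```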
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/nexus_db_v2.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_db_v2.py#L576-L606
def _lookup_vpc_allocs(query_type, session=None, order=None, **bfilter): """Look up 'query_type' Nexus VPC Allocs matching the filter. :param query_type: 'all', 'one' or 'first' :param session: db session :param order: select what field to order data :param bfilter: filter for mappings query :returns: VPCs if query gave a result, else raise NexusVPCAllocNotFound. """ if session is None: session = bc.get_reader_session() if order: query_method = getattr(session.query( nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter).order_by( order), query_type) else: query_method = getattr(session.query( nexus_models_v2.NexusVPCAlloc).filter_by(**bfilter), query_type) try: vpcs = query_method() if vpcs: return vpcs except sa_exc.NoResultFound: pass raise c_exc.NexusVPCAllocNotFound(**bfilter)
[ "def", "_lookup_vpc_allocs", "(", "query_type", ",", "session", "=", "None", ",", "order", "=", "None", ",", "*", "*", "bfilter", ")", ":", "if", "session", "is", "None", ":", "session", "=", "bc", ".", "get_reader_session", "(", ")", "if", "order", ":", "query_method", "=", "getattr", "(", "session", ".", "query", "(", "nexus_models_v2", ".", "NexusVPCAlloc", ")", ".", "filter_by", "(", "*", "*", "bfilter", ")", ".", "order_by", "(", "order", ")", ",", "query_type", ")", "else", ":", "query_method", "=", "getattr", "(", "session", ".", "query", "(", "nexus_models_v2", ".", "NexusVPCAlloc", ")", ".", "filter_by", "(", "*", "*", "bfilter", ")", ",", "query_type", ")", "try", ":", "vpcs", "=", "query_method", "(", ")", "if", "vpcs", ":", "return", "vpcs", "except", "sa_exc", ".", "NoResultFound", ":", "pass", "raise", "c_exc", ".", "NexusVPCAllocNotFound", "(", "*", "*", "bfilter", ")" ]
Look up 'query_type' Nexus VPC Allocs matching the filter. :param query_type: 'all', 'one' or 'first' :param session: db session :param order: select what field to order data :param bfilter: filter for mappings query :returns: VPCs if query gave a result, else raise NexusVPCAllocNotFound.
[ "Look", "up", "query_type", "Nexus", "VPC", "Allocs", "matching", "the", "filter", "." ]
python
train
30.354839
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1213-L1217
def system_find_affiliates(input_params={}, always_retry=True, **kwargs): """ Invokes the /system/findAffiliates API method. """ return DXHTTPRequest('/system/findAffiliates', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_find_affiliates", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/findAffiliates'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /system/findAffiliates API method.
[ "Invokes", "the", "/", "system", "/", "findAffiliates", "API", "method", "." ]
python
train
47.6
dcos/shakedown
shakedown/cli/helpers.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/cli/helpers.py#L153-L178
def echo(text, **kwargs): """ Print results to the console :param text: the text string to print :type text: str :return: a string :rtype: str """ if shakedown.cli.quiet: return if 'n' not in kwargs: kwargs['n'] = True if 'd' in kwargs: text = decorate(text, kwargs['d']) if 'TERM' in os.environ and os.environ['TERM'] == 'velocity': if text: print(text, end="", flush=True) if kwargs.get('n'): print() else: click.echo(text, nl=kwargs.get('n'))
[ "def", "echo", "(", "text", ",", "*", "*", "kwargs", ")", ":", "if", "shakedown", ".", "cli", ".", "quiet", ":", "return", "if", "not", "'n'", "in", "kwargs", ":", "kwargs", "[", "'n'", "]", "=", "True", "if", "'d'", "in", "kwargs", ":", "text", "=", "decorate", "(", "text", ",", "kwargs", "[", "'d'", "]", ")", "if", "'TERM'", "in", "os", ".", "environ", "and", "os", ".", "environ", "[", "'TERM'", "]", "==", "'velocity'", ":", "if", "text", ":", "print", "(", "text", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "if", "kwargs", ".", "get", "(", "'n'", ")", ":", "print", "(", ")", "else", ":", "click", ".", "echo", "(", "text", ",", "nl", "=", "kwargs", ".", "get", "(", "'n'", ")", ")" ]
Print results to the console :param text: the text string to print :type text: str :return: a string :rtype: str
[ "Print", "results", "to", "the", "console" ]
python
train
21.461538
Crunch-io/crunch-cube
src/cr/cube/dimension.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/dimension.py#L160-L173
def _base_type(self): """Return str like 'enum.numeric' representing dimension type. This string is a 'type.subclass' concatenation of the str keys used to identify the dimension type in the cube response JSON. The '.subclass' suffix only appears where a subtype is present. """ type_class = self._dimension_dict["type"]["class"] if type_class == "categorical": return "categorical" if type_class == "enum": subclass = self._dimension_dict["type"]["subtype"]["class"] return "enum.%s" % subclass raise NotImplementedError("unexpected dimension type class '%s'" % type_class)
[ "def", "_base_type", "(", "self", ")", ":", "type_class", "=", "self", ".", "_dimension_dict", "[", "\"type\"", "]", "[", "\"class\"", "]", "if", "type_class", "==", "\"categorical\"", ":", "return", "\"categorical\"", "if", "type_class", "==", "\"enum\"", ":", "subclass", "=", "self", ".", "_dimension_dict", "[", "\"type\"", "]", "[", "\"subtype\"", "]", "[", "\"class\"", "]", "return", "\"enum.%s\"", "%", "subclass", "raise", "NotImplementedError", "(", "\"unexpected dimension type class '%s'\"", "%", "type_class", ")" ]
Return str like 'enum.numeric' representing dimension type. This string is a 'type.subclass' concatenation of the str keys used to identify the dimension type in the cube response JSON. The '.subclass' suffix only appears where a subtype is present.
[ "Return", "str", "like", "enum", ".", "numeric", "representing", "dimension", "type", "." ]
python
train
47.857143
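The property walks two levels of the cube-response dict and splices the subtype into the `type.subclass` string only for enums. A self-contained sketch with illustrative response fragments:

```python
categorical = {"type": {"class": "categorical"}}
enum_numeric = {"type": {"class": "enum", "subtype": {"class": "numeric"}}}

def base_type(dimension_dict):
    type_class = dimension_dict["type"]["class"]
    if type_class == "categorical":
        return "categorical"
    if type_class == "enum":
        return "enum.%s" % dimension_dict["type"]["subtype"]["class"]
    raise NotImplementedError("unexpected dimension type class '%s'" % type_class)

print(base_type(categorical))   # categorical
print(base_type(enum_numeric))  # enum.numeric
```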
franciscogarate/pyliferisk
pyliferisk/__init__.py
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L215-L218
def tpx(mt, x, t): """ tpx : Returns the probability that x will survive within t years """ """ npx : Returns n years survival probability at age x """ return mt.lx[x + t] / mt.lx[x]
[ "def", "tpx", "(", "mt", ",", "x", ",", "t", ")", ":", "\"\"\" npx : Returns n years survival probability at age x \"\"\"", "return", "mt", ".", "lx", "[", "x", "+", "t", "]", "/", "mt", ".", "lx", "[", "x", "]" ]
tpx : Returns the probability that x will survive within t years
[ "tpx", ":", "Returns", "the", "probability", "that", "x", "will", "survive", "within", "t", "years" ]
python
train
47.75
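Numerically, tpx is just a ratio of life-table survivor counts: l(x+t) / l(x). A toy example with hypothetical lx values:

```python
# Hypothetical life-table values: lx[x] = survivors alive at age x.
lx = {60: 1000, 61: 990, 65: 900}

def tpx(lx, x, t):
    # P(a life aged x survives a further t years) = l_{x+t} / l_x
    return lx[x + t] / lx[x]

print(tpx(lx, 60, 1))  # 0.99
print(tpx(lx, 60, 5))  # 0.9
```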
polyaxon/polyaxon
polyaxon/query/parser.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/query/parser.py#L240-L256
def parse_field(field: str) -> Tuple[str, Optional[str]]: """Parses fields with underscores, and return field and suffix. Example: foo => foo, None metric.foo => metric, foo """ _field = field.split('.') _field = [f.strip() for f in _field] if len(_field) == 1 and _field[0]: return _field[0], None elif len(_field) == 2 and _field[0] and _field[1]: return _field[0], _field[1] raise QueryParserException('Query field must be either a single value,' 'possibly with single underscores, ' 'or a prefix double underscore field. ' 'Received `{}`'.format(field))
[ "def", "parse_field", "(", "field", ":", "str", ")", "->", "Tuple", "[", "str", ",", "Optional", "[", "str", "]", "]", ":", "_field", "=", "field", ".", "split", "(", "'.'", ")", "_field", "=", "[", "f", ".", "strip", "(", ")", "for", "f", "in", "_field", "]", "if", "len", "(", "_field", ")", "==", "1", "and", "_field", "[", "0", "]", ":", "return", "_field", "[", "0", "]", ",", "None", "elif", "len", "(", "_field", ")", "==", "2", "and", "_field", "[", "0", "]", "and", "_field", "[", "1", "]", ":", "return", "_field", "[", "0", "]", ",", "_field", "[", "1", "]", "raise", "QueryParserException", "(", "'Query field must be either a single value,'", "'possibly with single underscores, '", "'or a prefix double underscore field. '", "'Received `{}`'", ".", "format", "(", "field", ")", ")" ]
Parses fields with underscores, and return field and suffix. Example: foo => foo, None metric.foo => metric, foo
[ "Parses", "fields", "with", "underscores", "and", "return", "field", "and", "suffix", "." ]
python
train
41.117647
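The split-and-validate logic in action; re-implemented with a plain `ValueError` so it runs without polyaxon's `QueryParserException`:

```python
def parse_field(field):
    parts = [p.strip() for p in field.split('.')]
    if len(parts) == 1 and parts[0]:
        return parts[0], None
    if len(parts) == 2 and parts[0] and parts[1]:
        return parts[0], parts[1]
    raise ValueError('Invalid query field: `{}`'.format(field))

print(parse_field('foo'))         # ('foo', None)
print(parse_field('metric.foo'))  # ('metric', 'foo')
try:
    parse_field('a.b.c')
except ValueError as e:
    print(e)  # Invalid query field: `a.b.c`
```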
rwl/pylon
pylon/opf.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/opf.py#L433-L509
def _pwl_gen_costs(self, generators, base_mva): """ Returns the basin constraints for piece-wise linear gen cost variables. CCV cost formulation expressed as Ay * x <= by. Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. """ ng = len(generators) gpwl = [g for g in generators if g.pcost_model == PW_LINEAR] # nq = len([g for g in gpwl if g.qcost_model is not None]) if self.dc: pgbas = 0 # starting index within x for active sources nq = 0 # number of Qg vars # qgbas = None # index of 1st Qg column in Ay ybas = ng # starting index within x for y variables else: pgbas = 0 nq = ng # qgbas = ng + 1 # index of 1st Qg column in Ay ybas = ng + nq # Number of extra y variables. ny = len(gpwl) if ny == 0: return None, None # Total number of cost points. nc = len([co for gn in gpwl for co in gn.p_cost]) # Ay = lil_matrix((nc - ny, ybas + ny)) # Fill rows and then transpose. Ay = lil_matrix((ybas + ny, nc - ny)) by = array([]) j = 0 k = 0 for i, g in enumerate(gpwl): # Number of cost points: segments = ns-1 ns = len(g.p_cost) p = array([x / base_mva for x, c in g.p_cost]) c = array([c for x, c in g.p_cost]) m = diff(c) / diff(p) # Slopes for Pg (or Qg). if 0.0 in diff(p): logger.error("Bad Pcost data: %s" % p) raise ValueError("Bad Pcost data: %s (%s)" % (p, g.name)) b = m * p[:ns-1] - c[:ns-1] # rhs by = r_[by, b.T] # if i > ng: # sidx = qgbas + (i-ng) - 1 # this was for a q cost # else: # sidx = pgbas + i - 1 # this was for a p cost Ay[pgbas + i, k:k + ns - 1] = m # FIXME: Repeat for Q costs. # Now fill the y rows with -1's Ay[ybas + j, k:k + ns - 1] = -ones(ns-1) k += (ns - 1) j += 1 y = Variable("y", ny) # Transpose Ay since lil_matrix stores in rows. if self.dc: ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "y"]) else: ycon = LinearConstraint("ycon", Ay.T, None, by, ["Pg", "Qg", "y"]) return y, ycon
[ "def", "_pwl_gen_costs", "(", "self", ",", "generators", ",", "base_mva", ")", ":", "ng", "=", "len", "(", "generators", ")", "gpwl", "=", "[", "g", "for", "g", "in", "generators", "if", "g", ".", "pcost_model", "==", "PW_LINEAR", "]", "# nq = len([g for g in gpwl if g.qcost_model is not None])", "if", "self", ".", "dc", ":", "pgbas", "=", "0", "# starting index within x for active sources", "nq", "=", "0", "# number of Qg vars", "# qgbas = None # index of 1st Qg column in Ay", "ybas", "=", "ng", "# starting index within x for y variables", "else", ":", "pgbas", "=", "0", "nq", "=", "ng", "# qgbas = ng + 1 # index of 1st Qg column in Ay", "ybas", "=", "ng", "+", "nq", "# Number of extra y variables.", "ny", "=", "len", "(", "gpwl", ")", "if", "ny", "==", "0", ":", "return", "None", ",", "None", "# Total number of cost points.", "nc", "=", "len", "(", "[", "co", "for", "gn", "in", "gpwl", "for", "co", "in", "gn", ".", "p_cost", "]", ")", "# Ay = lil_matrix((nc - ny, ybas + ny))", "# Fill rows and then transpose.", "Ay", "=", "lil_matrix", "(", "(", "ybas", "+", "ny", ",", "nc", "-", "ny", ")", ")", "by", "=", "array", "(", "[", "]", ")", "j", "=", "0", "k", "=", "0", "for", "i", ",", "g", "in", "enumerate", "(", "gpwl", ")", ":", "# Number of cost points: segments = ns-1", "ns", "=", "len", "(", "g", ".", "p_cost", ")", "p", "=", "array", "(", "[", "x", "/", "base_mva", "for", "x", ",", "c", "in", "g", ".", "p_cost", "]", ")", "c", "=", "array", "(", "[", "c", "for", "x", ",", "c", "in", "g", ".", "p_cost", "]", ")", "m", "=", "diff", "(", "c", ")", "/", "diff", "(", "p", ")", "# Slopes for Pg (or Qg).", "if", "0.0", "in", "diff", "(", "p", ")", ":", "raise", "ValueError", ",", "\"Bad Pcost data: %s (%s)\"", "%", "(", "p", ",", "g", ".", "name", ")", "logger", ".", "error", "(", "\"Bad Pcost data: %s\"", "%", "p", ")", "b", "=", "m", "*", "p", "[", ":", "ns", "-", "1", "]", "-", "c", "[", ":", "ns", "-", "1", "]", "# rhs", "by", "=", "r_", "[", "by", ",", "b", ".", "T", "]", "# if i > ng:", "# sidx = qgbas + (i-ng) - 1 # this was for a q cost", "# else:", "# sidx = pgbas + i - 1 # this was for a p cost", "Ay", "[", "pgbas", "+", "i", ",", "k", ":", "k", "+", "ns", "-", "1", "]", "=", "m", "# FIXME: Repeat for Q costs.", "# Now fill the y rows with -1's", "Ay", "[", "ybas", "+", "j", ",", "k", ":", "k", "+", "ns", "-", "1", "]", "=", "-", "ones", "(", "ns", "-", "1", ")", "k", "+=", "(", "ns", "-", "1", ")", "j", "+=", "1", "y", "=", "Variable", "(", "\"y\"", ",", "ny", ")", "# Transpose Ay since lil_matrix stores in rows.", "if", "self", ".", "dc", ":", "ycon", "=", "LinearConstraint", "(", "\"ycon\"", ",", "Ay", ".", "T", ",", "None", ",", "by", ",", "[", "\"Pg\"", ",", "\"y\"", "]", ")", "else", ":", "ycon", "=", "LinearConstraint", "(", "\"ycon\"", ",", "Ay", ".", "T", ",", "None", ",", "by", ",", "[", "\"Pg\"", ",", "\"Qg\"", ",", "\"y\"", "]", ")", "return", "y", ",", "ycon" ]
Returns the basin constraints for piece-wise linear gen cost variables. CCV cost formulation expressed as Ay * x <= by. Based on makeAy.m from MATPOWER by C. E. Murillo-Sanchez, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information.
[ "Returns", "the", "basin", "constraints", "for", "piece", "-", "wise", "linear", "gen", "cost", "variables", ".", "CCV", "cost", "formulation", "expressed", "as", "Ay", "*", "x", "<", "=", "by", "." ]
python
train
32.883117
fracpete/python-weka-wrapper3
python/weka/flow/control.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/control.py#L195-L209
def index_of(self, name): """ Returns the index of the actor with the given name. :param name: the name of the Actor to find :type name: str :return: the index, -1 if not found :rtype: int """ result = -1 for index, actor in enumerate(self.actors): if actor.name == name: result = index break return result
[ "def", "index_of", "(", "self", ",", "name", ")", ":", "result", "=", "-", "1", "for", "index", ",", "actor", "in", "enumerate", "(", "self", ".", "actors", ")", ":", "if", "actor", ".", "name", "==", "name", ":", "result", "=", "index", "break", "return", "result" ]
Returns the index of the actor with the given name. :param name: the name of the Actor to find :type name: str :return: the index, -1 if not found :rtype: int
[ "Returns", "the", "index", "of", "the", "actor", "with", "the", "given", "name", "." ]
python
train
27.8
googleapis/google-cloud-python
api_core/google/api_core/exceptions.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/exceptions.py#L447-L462
def from_grpc_error(rpc_exc): """Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`. Args: rpc_exc (grpc.RpcError): The gRPC error. Returns: GoogleAPICallError: An instance of the appropriate subclass of :class:`GoogleAPICallError`. """ if isinstance(rpc_exc, grpc.Call): return from_grpc_status( rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc ) else: return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc)
[ "def", "from_grpc_error", "(", "rpc_exc", ")", ":", "if", "isinstance", "(", "rpc_exc", ",", "grpc", ".", "Call", ")", ":", "return", "from_grpc_status", "(", "rpc_exc", ".", "code", "(", ")", ",", "rpc_exc", ".", "details", "(", ")", ",", "errors", "=", "(", "rpc_exc", ",", ")", ",", "response", "=", "rpc_exc", ")", "else", ":", "return", "GoogleAPICallError", "(", "str", "(", "rpc_exc", ")", ",", "errors", "=", "(", "rpc_exc", ",", ")", ",", "response", "=", "rpc_exc", ")" ]
Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`. Args: rpc_exc (grpc.RpcError): The gRPC error. Returns: GoogleAPICallError: An instance of the appropriate subclass of :class:`GoogleAPICallError`.
[ "Create", "a", ":", "class", ":", "GoogleAPICallError", "from", "a", ":", "class", ":", "grpc", ".", "RpcError", "." ]
python
train
33.9375
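A minimal usage sketch, assuming grpcio and google-api-core are installed; stub and request are hypothetical placeholders for a real gRPC call:

import grpc
from google.api_core import exceptions

try:
    response = stub.GetThing(request)  # hypothetical unary call
except grpc.RpcError as rpc_exc:
    exc = exceptions.from_grpc_error(rpc_exc)
    # exc is now e.g. exceptions.NotFound for StatusCode.NOT_FOUND
    raise exc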
bitshares/python-bitshares
bitsharesapi/websocket.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitsharesapi/websocket.py#L346-L354
def rpcexec(self, payload): """ Execute a call by sending the payload :param dict payload: Payload data :raises ValueError: if the server does not respond in proper JSON format :raises RPCError: if the server returns an error """ log.debug(json.dumps(payload)) self.ws.send(json.dumps(payload, ensure_ascii=False).encode("utf8"))
[ "def", "rpcexec", "(", "self", ",", "payload", ")", ":", "log", ".", "debug", "(", "json", ".", "dumps", "(", "payload", ")", ")", "self", ".", "ws", ".", "send", "(", "json", ".", "dumps", "(", "payload", ",", "ensure_ascii", "=", "False", ")", ".", "encode", "(", "\"utf8\"", ")", ")" ]
Execute a call by sending the payload :param dict payload: Payload data :raises ValueError: if the server does not respond in proper JSON format :raises RPCError: if the server returns an error
[ "Execute", "a", "call", "by", "sending", "the", "payload" ]
python
train
43.333333
openstack/pyghmi
pyghmi/ipmi/oem/generic.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/generic.py#L45-L57
def process_event(self, event, ipmicmd, seldata): """Modify an event according to OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions. """ event['oem_handler'] = None evdata = event['event_data_bytes'] if evdata[0] & 0b11000000 == 0b10000000: event['oem_byte2'] = evdata[1] if evdata[0] & 0b110000 == 0b100000: event['oem_byte3'] = evdata[2]
[ "def", "process_event", "(", "self", ",", "event", ",", "ipmicmd", ",", "seldata", ")", ":", "event", "[", "'oem_handler'", "]", "=", "None", "evdata", "=", "event", "[", "'event_data_bytes'", "]", "if", "evdata", "[", "0", "]", "&", "0b11000000", "==", "0b10000000", ":", "event", "[", "'oem_byte2'", "]", "=", "evdata", "[", "1", "]", "if", "evdata", "[", "0", "]", "&", "0b110000", "==", "0b100000", ":", "event", "[", "'oem_byte3'", "]", "=", "evdata", "[", "2", "]" ]
Modify an event according to OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions.
[ "Modify", "an", "event", "according", "with", "OEM", "understanding", "." ]
python
train
45.461538
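The two masks test bits 7:6 and 5:4 of the first event-data byte (IPMI uses the pattern 0b10 to flag "OEM code in this byte"); a self-contained illustration with a made-up byte:

def oem_fields(evdata):
    fields = {}
    if evdata[0] & 0b11000000 == 0b10000000:  # bits 7:6 == 10 -> byte 2 is OEM
        fields['oem_byte2'] = evdata[1]
    if evdata[0] & 0b110000 == 0b100000:      # bits 5:4 == 10 -> byte 3 is OEM
        fields['oem_byte3'] = evdata[2]
    return fields

print(oem_fields([0b10100000, 0xAA, 0xBB]))  # {'oem_byte2': 170, 'oem_byte3': 187}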
Azure/azure-event-hubs-python
azure/eventprocessorhost/partition_pump.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/partition_pump.py#L35-L40
def set_pump_status(self, status): """ Updates pump status and logs update to console. """ self.pump_status = status _logger.info("%r partition %r", status, self.lease.partition_id)
[ "def", "set_pump_status", "(", "self", ",", "status", ")", ":", "self", ".", "pump_status", "=", "status", "_logger", ".", "info", "(", "\"%r partition %r\"", ",", "status", ",", "self", ".", "lease", ".", "partition_id", ")" ]
Updates pump status and logs update to console.
[ "Updates", "pump", "status", "and", "logs", "update", "to", "console", "." ]
python
train
36
Cog-Creators/Red-Lavalink
lavalink/lavalink.py
https://github.com/Cog-Creators/Red-Lavalink/blob/5b3fc6eb31ee5db8bd2b633a523cf69749957111/lavalink/lavalink.py#L86-L109
async def connect(channel: discord.VoiceChannel): """ Connects to a discord voice channel. This is the publicly exposed way to connect to a discord voice channel. The :py:func:`initialize` function must be called first! Parameters ---------- channel Returns ------- Player The created Player object. Raises ------ IndexError If there are no available lavalink nodes ready to connect to discord. """ node_ = node.get_node(channel.guild.id) p = await node_.player_manager.create_player(channel) return p
[ "async", "def", "connect", "(", "channel", ":", "discord", ".", "VoiceChannel", ")", ":", "node_", "=", "node", ".", "get_node", "(", "channel", ".", "guild", ".", "id", ")", "p", "=", "await", "node_", ".", "player_manager", ".", "create_player", "(", "channel", ")", "return", "p" ]
Connects to a discord voice channel. This is the publicly exposed way to connect to a discord voice channel. The :py:func:`initialize` function must be called first! Parameters ---------- channel Returns ------- Player The created Player object. Raises ------ IndexError If there are no available lavalink nodes ready to connect to discord.
[ "Connects", "to", "a", "discord", "voice", "channel", "." ]
python
train
23.583333
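A minimal usage sketch, assuming Red-Lavalink inside a discord.py bot; lavalink.initialize(...) must already have been awaited with node credentials before this coroutine runs:

import lavalink

async def summon(voice_channel):
    # voice_channel: a discord.VoiceChannel the bot may join
    player = await lavalink.connect(voice_channel)
    return player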
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/protocol/paho/client.py
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1141-L1162
def loop_read(self, max_packets=1): """Process read network events. Use in place of calling loop() if you wish to handle your client reads as part of your own application. Use socket() to obtain the client socket to call select() or equivalent on. Do not use if you are using the threaded interface loop_start().""" if self._sock is None and self._ssl is None: return MQTT_ERR_NO_CONN max_packets = len(self._out_messages) + len(self._in_messages) if max_packets < 1: max_packets = 1 for i in range(0, max_packets): rc = self._packet_read() if rc > 0: return self._loop_rc_handle(rc) elif rc == MQTT_ERR_AGAIN: return MQTT_ERR_SUCCESS return MQTT_ERR_SUCCESS
[ "def", "loop_read", "(", "self", ",", "max_packets", "=", "1", ")", ":", "if", "self", ".", "_sock", "is", "None", "and", "self", ".", "_ssl", "is", "None", ":", "return", "MQTT_ERR_NO_CONN", "max_packets", "=", "len", "(", "self", ".", "_out_messages", ")", "+", "len", "(", "self", ".", "_in_messages", ")", "if", "max_packets", "<", "1", ":", "max_packets", "=", "1", "for", "i", "in", "range", "(", "0", ",", "max_packets", ")", ":", "rc", "=", "self", ".", "_packet_read", "(", ")", "if", "rc", ">", "0", ":", "return", "self", ".", "_loop_rc_handle", "(", "rc", ")", "elif", "rc", "==", "MQTT_ERR_AGAIN", ":", "return", "MQTT_ERR_SUCCESS", "return", "MQTT_ERR_SUCCESS" ]
Process read network events. Use in place of calling loop() if you wish to handle your client reads as part of your own application. Use socket() to obtain the client socket to call select() or equivalent on. Do not use if you are using the threaded interface loop_start().
[ "Process", "read", "network", "events", ".", "Use", "in", "place", "of", "calling", "loop", "()", "if", "you", "wish", "to", "handle", "your", "client", "reads", "as", "part", "of", "your", "own", "application", "." ]
python
train
36.954545
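A select()-based sketch of driving the client by hand instead of loop_start(); the method names follow the upstream paho-mqtt API this copy derives from and are assumed, not verified against this exact fork:

import select

def service_once(client, timeout=1.0):
    sock = client.socket()
    if sock is None:
        return
    want_write = [sock] if client.want_write() else []
    readable, writable, _ = select.select([sock], want_write, [], timeout)
    if readable:
        client.loop_read()
    if writable:
        client.loop_write()
    client.loop_misc()  # keepalives and retries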
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9023-L9065
def occult(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et): """ Determines the occultation condition (not occulted, partially, etc.) of one target relative to another target as seen by an observer at a given time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html :param target1: Name or ID of first target. :type target1: str :param shape1: Type of shape model used for first target. :type shape1: str :param frame1: Body-fixed, body-centered frame for first body. :type frame1: str :param target2: Name or ID of second target. :type target2: str :param shape2: Type of shape model used for second target. :type shape2: str :param frame2: Body-fixed, body-centered frame for second body. :type frame2: str :param abcorr: Aberration correction flag. :type abcorr: str :param observer: Name or ID of the observer. :type observer: str :param et: Time of the observation (seconds past J2000). :type et: float :return: Occultation identification code. :rtype: int """ target1 = stypes.stringToCharP(target1) shape1 = stypes.stringToCharP(shape1) frame1 = stypes.stringToCharP(frame1) target2 = stypes.stringToCharP(target2) shape2 = stypes.stringToCharP(shape2) frame2 = stypes.stringToCharP(frame2) abcorr = stypes.stringToCharP(abcorr) observer = stypes.stringToCharP(observer) et = ctypes.c_double(et) occult_code = ctypes.c_int() libspice.occult_c(target1, shape1, frame1, target2, shape2, frame2, abcorr, observer, et, ctypes.byref(occult_code)) return occult_code.value
[ "def", "occult", "(", "target1", ",", "shape1", ",", "frame1", ",", "target2", ",", "shape2", ",", "frame2", ",", "abcorr", ",", "observer", ",", "et", ")", ":", "target1", "=", "stypes", ".", "stringToCharP", "(", "target1", ")", "shape1", "=", "stypes", ".", "stringToCharP", "(", "shape1", ")", "frame1", "=", "stypes", ".", "stringToCharP", "(", "frame1", ")", "target2", "=", "stypes", ".", "stringToCharP", "(", "target2", ")", "shape2", "=", "stypes", ".", "stringToCharP", "(", "shape2", ")", "frame2", "=", "stypes", ".", "stringToCharP", "(", "frame2", ")", "abcorr", "=", "stypes", ".", "stringToCharP", "(", "abcorr", ")", "observer", "=", "stypes", ".", "stringToCharP", "(", "observer", ")", "et", "=", "ctypes", ".", "c_double", "(", "et", ")", "occult_code", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "occult_c", "(", "target1", ",", "shape1", ",", "frame1", ",", "target2", ",", "shape2", ",", "frame2", ",", "abcorr", ",", "observer", ",", "et", ",", "ctypes", ".", "byref", "(", "occult_code", ")", ")", "return", "occult_code", ".", "value" ]
Determines the occultation condition (not occulted, partially, etc.) of one target relative to another target as seen by an observer at a given time. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/occult_c.html :param target1: Name or ID of first target. :type target1: str :param shape1: Type of shape model used for first target. :type shape1: str :param frame1: Body-fixed, body-centered frame for first body. :type frame1: str :param target2: Name or ID of second target. :type target2: str :param shape2: Type of shape model used for second target. :type shape2: str :param frame2: Body-fixed, body-centered frame for second body. :type frame2: str :param abcorr: Aberration correction flag. :type abcorr: str :param observer: Name or ID of the observer. :type observer: str :param et: Time of the observation (seconds past J2000). :type et: float :return: Occultation identification code. :rtype: int
[ "Determines", "the", "occultation", "condition", "(", "not", "occulted", "partially", "etc", ".", ")", "of", "one", "target", "relative", "to", "another", "target", "as", "seen", "by", "an", "observer", "at", "a", "given", "time", "." ]
python
train
38.604651
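A minimal usage sketch, assuming spiceypy is installed and the required SPICE kernels (leapseconds plus ephemeris/PCK data) are listed in a meta-kernel; the file name is hypothetical:

import spiceypy as spice

spice.furnsh("setup.tm")                       # hypothetical meta-kernel
et = spice.str2et("2011-02-15T12:00:00")
code = spice.occult("MOON", "ELLIPSOID", "IAU_MOON",
                    "SUN", "ELLIPSOID", "IAU_SUN",
                    "LT", "EARTH", et)
print(code)  # 0 = no occultation; other codes flag partial/annular/total cases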
python-diamond/Diamond
src/collectors/kafka_consumer_lag/kafka_consumer_lag.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/kafka_consumer_lag/kafka_consumer_lag.py#L29-L39
def get_default_config(self): """ Returns the default collector settings """ config = super(KafkaConsumerLagCollector, self).get_default_config() config.update({ 'path': 'kafka.ConsumerLag', 'bin': '/opt/kafka/bin/kafka-run-class.sh', 'zookeeper': 'localhost:2181' }) return config
[ "def", "get_default_config", "(", "self", ")", ":", "config", "=", "super", "(", "KafkaConsumerLagCollector", ",", "self", ")", ".", "get_default_config", "(", ")", "config", ".", "update", "(", "{", "'path'", ":", "'kafka.ConsumerLag'", ",", "'bin'", ":", "'/opt/kafka/bin/kafka-run-class.sh'", ",", "'zookeeper'", ":", "'localhost:2181'", "}", ")", "return", "config" ]
Returns the default collector settings
[ "Returns", "the", "default", "collector", "settings" ]
python
train
33
quantopian/zipline
zipline/pipeline/factors/basic.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L198-L240
def from_span(cls, inputs, window_length, span, **kwargs): """ Convenience constructor for passing `decay_rate` in terms of `span`. Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the behavior equivalent to passing `span` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (2.0 / (1 + 15.0))), # ) my_ewma = EWMA.from_span( inputs=[EquityPricing.close], window_length=30, span=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ if span <= 1: raise ValueError( "`span` must be a positive number. %s was passed." % span ) decay_rate = (1.0 - (2.0 / (1.0 + span))) assert 0.0 < decay_rate <= 1.0 return cls( inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs )
[ "def", "from_span", "(", "cls", ",", "inputs", ",", "window_length", ",", "span", ",", "*", "*", "kwargs", ")", ":", "if", "span", "<=", "1", ":", "raise", "ValueError", "(", "\"`span` must be a positive number. %s was passed.\"", "%", "span", ")", "decay_rate", "=", "(", "1.0", "-", "(", "2.0", "/", "(", "1.0", "+", "span", ")", ")", ")", "assert", "0.0", "<", "decay_rate", "<=", "1.0", "return", "cls", "(", "inputs", "=", "inputs", ",", "window_length", "=", "window_length", ",", "decay_rate", "=", "decay_rate", ",", "*", "*", "kwargs", ")" ]
Convenience constructor for passing `decay_rate` in terms of `span`. Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the behavior equivalent to passing `span` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (2.0 / (1 + 15.0))), # ) my_ewma = EWMA.from_span( inputs=[EquityPricing.close], window_length=30, span=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`.
[ "Convenience", "constructor", "for", "passing", "decay_rate", "in", "terms", "of", "span", "." ]
python
train
29.209302
gwpy/gwpy
gwpy/plot/bode.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/bode.py#L169-L226
def add_filter(self, filter_, frequencies=None, dB=True, analog=False, sample_rate=None, **kwargs): """Add a linear time-invariant filter to this BodePlot Parameters ---------- filter_ : `~scipy.signal.lti`, `tuple` the filter to plot, either as a `~scipy.signal.lti`, or a `tuple` with the following number and meaning of elements - 2: (numerator, denominator) - 3: (zeros, poles, gain) - 4: (A, B, C, D) frequencies : `numpy.ndarray`, optional list of frequencies (in Hertz) at which to plot dB : `bool`, optional if `True`, display magnitude in decibels, otherwise display amplitude, default: `True` **kwargs any other keyword arguments accepted by :meth:`~matplotlib.axes.Axes.plot` Returns ------- mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>` the lines drawn for the magnitude and phase of the filter. """ if not analog: if not sample_rate: raise ValueError("Must give sample_rate frequency to display " "digital (analog=False) filter") sample_rate = Quantity(sample_rate, 'Hz').value dt = 2 * pi / sample_rate if not isinstance(frequencies, (type(None), int)): frequencies = numpy.atleast_1d(frequencies).copy() frequencies *= dt # parse filter (without digital conversions) _, fcomp = parse_filter(filter_, analog=False) if analog: lti = signal.lti(*fcomp) else: lti = signal.dlti(*fcomp, dt=dt) # calculate frequency response w, mag, phase = lti.bode(w=frequencies) # convert from decibels if not dB: mag = 10 ** (mag / 10.) # draw mline = self.maxes.plot(w, mag, **kwargs)[0] pline = self.paxes.plot(w, phase, **kwargs)[0] return mline, pline
[ "def", "add_filter", "(", "self", ",", "filter_", ",", "frequencies", "=", "None", ",", "dB", "=", "True", ",", "analog", "=", "False", ",", "sample_rate", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "analog", ":", "if", "not", "sample_rate", ":", "raise", "ValueError", "(", "\"Must give sample_rate frequency to display \"", "\"digital (analog=False) filter\"", ")", "sample_rate", "=", "Quantity", "(", "sample_rate", ",", "'Hz'", ")", ".", "value", "dt", "=", "2", "*", "pi", "/", "sample_rate", "if", "not", "isinstance", "(", "frequencies", ",", "(", "type", "(", "None", ")", ",", "int", ")", ")", ":", "frequencies", "=", "numpy", ".", "atleast_1d", "(", "frequencies", ")", ".", "copy", "(", ")", "frequencies", "*=", "dt", "# parse filter (without digital conversions)", "_", ",", "fcomp", "=", "parse_filter", "(", "filter_", ",", "analog", "=", "False", ")", "if", "analog", ":", "lti", "=", "signal", ".", "lti", "(", "*", "fcomp", ")", "else", ":", "lti", "=", "signal", ".", "dlti", "(", "*", "fcomp", ",", "dt", "=", "dt", ")", "# calculate frequency response", "w", ",", "mag", ",", "phase", "=", "lti", ".", "bode", "(", "w", "=", "frequencies", ")", "# convert from decibels", "if", "not", "dB", ":", "mag", "=", "10", "**", "(", "mag", "/", "10.", ")", "# draw", "mline", "=", "self", ".", "maxes", ".", "plot", "(", "w", ",", "mag", ",", "*", "*", "kwargs", ")", "[", "0", "]", "pline", "=", "self", ".", "paxes", ".", "plot", "(", "w", ",", "phase", ",", "*", "*", "kwargs", ")", "[", "0", "]", "return", "mline", ",", "pline" ]
Add a linear time-invariant filter to this BodePlot Parameters ---------- filter_ : `~scipy.signal.lti`, `tuple` the filter to plot, either as a `~scipy.signal.lti`, or a `tuple` with the following number and meaning of elements - 2: (numerator, denominator) - 3: (zeros, poles, gain) - 4: (A, B, C, D) frequencies : `numpy.ndarray`, optional list of frequencies (in Hertz) at which to plot dB : `bool`, optional if `True`, display magnitude in decibels, otherwise display amplitude, default: `True` **kwargs any other keyword arguments accepted by :meth:`~matplotlib.axes.Axes.plot` Returns ------- mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>` the lines drawn for the magnitude and phase of the filter.
[ "Add", "a", "linear", "time", "-", "invariant", "filter", "to", "this", "BodePlot" ]
python
train
35.172414
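A minimal usage sketch, assuming gwpy and scipy are installed: a digital 4th-order Butterworth low-pass rendered on a BodePlot (sample_rate is mandatory when analog=False, as the code above enforces):

from scipy import signal
from gwpy.plot import BodePlot

# ZPK tuple (3 elements), one of the filter forms the docstring accepts
zpk = signal.butter(4, 100, 'lowpass', analog=False, fs=4096, output='zpk')
plot = BodePlot()
plot.add_filter(zpk, sample_rate=4096)
plot.show()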
quodlibet/mutagen
mutagen/mp4/__init__.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/mp4/__init__.py#L543-L556
def __update_offsets(self, fileobj, atoms, delta, offset): """Update offset tables in all 'stco' and 'co64' atoms.""" if delta == 0: return moov = atoms[b"moov"] for atom in moov.findall(b'stco', True): self.__update_offset_table(fileobj, ">%dI", atom, delta, offset) for atom in moov.findall(b'co64', True): self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset) try: for atom in atoms[b"moof"].findall(b'tfhd', True): self.__update_tfhd(fileobj, atom, delta, offset) except KeyError: pass
[ "def", "__update_offsets", "(", "self", ",", "fileobj", ",", "atoms", ",", "delta", ",", "offset", ")", ":", "if", "delta", "==", "0", ":", "return", "moov", "=", "atoms", "[", "b\"moov\"", "]", "for", "atom", "in", "moov", ".", "findall", "(", "b'stco'", ",", "True", ")", ":", "self", ".", "__update_offset_table", "(", "fileobj", ",", "\">%dI\"", ",", "atom", ",", "delta", ",", "offset", ")", "for", "atom", "in", "moov", ".", "findall", "(", "b'co64'", ",", "True", ")", ":", "self", ".", "__update_offset_table", "(", "fileobj", ",", "\">%dQ\"", ",", "atom", ",", "delta", ",", "offset", ")", "try", ":", "for", "atom", "in", "atoms", "[", "b\"moof\"", "]", ".", "findall", "(", "b'tfhd'", ",", "True", ")", ":", "self", ".", "__update_tfhd", "(", "fileobj", ",", "atom", ",", "delta", ",", "offset", ")", "except", "KeyError", ":", "pass" ]
Update offset tables in all 'stco' and 'co64' atoms.
[ "Update", "offset", "tables", "in", "all", "stco", "and", "co64", "atoms", "." ]
python
train
44.214286
intel-analytics/BigDL
pyspark/bigdl/keras/converter.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/converter.py#L351-L359
def from_hdf5_path(cls, hdf5_path): """ :param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system. :return: BigDL Model """ from keras.models import load_model hdf5_local_path = BCommon.get_local_file(hdf5_path) kmodel = load_model(hdf5_local_path) return kmodel, DefinitionLoader.from_kmodel(kmodel)
[ "def", "from_hdf5_path", "(", "cls", ",", "hdf5_path", ")", ":", "from", "keras", ".", "models", "import", "load_model", "hdf5_local_path", "=", "BCommon", ".", "get_local_file", "(", "hdf5_path", ")", "kmodel", "=", "load_model", "(", "hdf5_local_path", ")", "return", "kmodel", ",", "DefinitionLoader", ".", "from_kmodel", "(", "kmodel", ")" ]
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system. :return: BigDL Model
[ ":", "param", "hdf5_path", ":", "hdf5", "path", "which", "can", "be", "stored", "in", "a", "local", "file", "system", "HDFS", "S3", "or", "any", "Hadoop", "-", "supported", "file", "system", ".", ":", "return", ":", "BigDL", "Model" ]
python
test
46.222222
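A minimal usage sketch, assuming BigDL's Keras 1.2 converter is importable and an HDF5 model exists at the (hypothetical) path; the classmethod hands back both the loaded Keras model and the converted definition:

from bigdl.keras.converter import DefinitionLoader

kmodel, bigdl_def = DefinitionLoader.from_hdf5_path("hdfs:///models/lenet.h5")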
zetaops/pyoko
pyoko/db/queryset.py
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/db/queryset.py#L561-L567
def data(self): """ return (data_dict, key) tuple instead of model instances """ clone = copy.deepcopy(self) clone._cfg['rtype'] = ReturnType.Object return clone
[ "def", "data", "(", "self", ")", ":", "clone", "=", "copy", ".", "deepcopy", "(", "self", ")", "clone", ".", "_cfg", "[", "'rtype'", "]", "=", "ReturnType", ".", "Object", "return", "clone" ]
return (data_dict, key) tuple instead of model instances
[ "return", "(", "data_dict", "key", ")", "tuple", "instead", "of", "models", "instances" ]
python
train
29.142857
jssimporter/python-jss
jss/jssobjects.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobjects.py#L438-L456
def save(self): """POST the object to the JSS.""" try: response = requests.post(self._upload_url, auth=self.jss.session.auth, verify=self.jss.session.verify, files=self.resource) except JSSPostError as error: if error.status_code == 409: raise JSSPostError(error) else: raise JSSMethodNotAllowedError(self.__class__.__name__) if response.status_code == 201: if self.jss.verbose: print "POST: Success" print response.text.encode("utf-8") elif response.status_code >= 400: error_handler(JSSPostError, response)
[ "def", "save", "(", "self", ")", ":", "try", ":", "response", "=", "requests", ".", "post", "(", "self", ".", "_upload_url", ",", "auth", "=", "self", ".", "jss", ".", "session", ".", "auth", ",", "verify", "=", "self", ".", "jss", ".", "session", ".", "verify", ",", "files", "=", "self", ".", "resource", ")", "except", "JSSPostError", "as", "error", ":", "if", "error", ".", "status_code", "==", "409", ":", "raise", "JSSPostError", "(", "error", ")", "else", ":", "raise", "JSSMethodNotAllowedError", "(", "self", ".", "__class__", ".", "__name__", ")", "if", "response", ".", "status_code", "==", "201", ":", "if", "self", ".", "jss", ".", "verbose", ":", "print", "\"POST: Success\"", "print", "response", ".", "text", ".", "encode", "(", "\"utf-8\"", ")", "elif", "response", ".", "status_code", ">=", "400", ":", "error_handler", "(", "JSSPostError", ",", "response", ")" ]
POST the object to the JSS.
[ "POST", "the", "object", "to", "the", "JSS", "." ]
python
train
40.315789
jazzband/django-authority
authority/templatetags/permissions.py
https://github.com/jazzband/django-authority/blob/58e08483cdd91a6a69e8019dd2a2edf68531ae97/authority/templatetags/permissions.py#L376-L398
def get_permission_request(parser, token): """ Performs a permission request check with the given signature, user and objects and assigns the result to a context variable. Syntax:: {% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %} {% get_permission_request "poll_permission.change_poll" for request.user and poll as "asked_for_permissio" %} {% get_permission_request "poll_permission.change_poll" for request.user and poll,second_poll as "asked_for_permissio" %} {% if asked_for_permissio %} Dude, you already asked for permission! {% else %} Oh, please fill out this 20 page form and sign here. {% endif %} """ return PermissionForObjectNode.handle_token( parser, token, approved=False, name='"permission_request"')
[ "def", "get_permission_request", "(", "parser", ",", "token", ")", ":", "return", "PermissionForObjectNode", ".", "handle_token", "(", "parser", ",", "token", ",", "approved", "=", "False", ",", "name", "=", "'\"permission_request\"'", ")" ]
Performs a permission request check with the given signature, user and objects and assigns the result to a context variable. Syntax:: {% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %} {% get_permission_request "poll_permission.change_poll" for request.user and poll as "asked_for_permissio" %} {% get_permission_request "poll_permission.change_poll" for request.user and poll,second_poll as "asked_for_permissio" %} {% if asked_for_permissio %} Dude, you already asked for permission! {% else %} Oh, please fill out this 20 page form and sign here. {% endif %}
[ "Performs", "a", "permission", "request", "check", "with", "the", "given", "signature", "user", "and", "objects", "and", "assigns", "the", "result", "to", "a", "context", "variable", "." ]
python
train
37.652174
mikhaildubov/AST-text-analysis
east/applications.py
https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/applications.py#L11-L56
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None, language=consts.Language.ENGLISH): """ Constructs the keyphrases table, containing their matching scores in a set of texts. The resulting table is stored as a dictionary of dictionaries, where the entry table["keyphrase"]["text"] corresponds to the matching score (0 <= score <= 1) of keyphrase "keyphrase" in the text named "text". :param keyphrases: list of strings :param texts: dictionary of form {text_name: text} :param similarity_measure: similarity measure to use :param synonimizer: SynonymExtractor object to be used :param language: Language of the text collection / keyphrases :returns: dictionary of dictionaries, having keyphrases on its first level and texts on the second level. """ similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure() text_titles = texts.keys() text_collection = texts.values() similarity_measure.set_text_collection(text_collection, language) i = 0 keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase) for keyphrase in keyphrases} total_keyphrases = len(keyphrases) total_scores = len(text_collection) * total_keyphrases res = {} for keyphrase in keyphrases: if not keyphrase: continue res[keyphrase] = {} for j in xrange(len(text_collection)): i += 1 logging.progress("Calculating matching scores", i, total_scores) res[keyphrase][text_titles[j]] = similarity_measure.relevance( keyphrases_prepared[keyphrase], text=j, synonimizer=synonimizer) logging.clear() return res
[ "def", "keyphrases_table", "(", "keyphrases", ",", "texts", ",", "similarity_measure", "=", "None", ",", "synonimizer", "=", "None", ",", "language", "=", "consts", ".", "Language", ".", "ENGLISH", ")", ":", "similarity_measure", "=", "similarity_measure", "or", "relevance", ".", "ASTRelevanceMeasure", "(", ")", "text_titles", "=", "texts", ".", "keys", "(", ")", "text_collection", "=", "texts", ".", "values", "(", ")", "similarity_measure", ".", "set_text_collection", "(", "text_collection", ",", "language", ")", "i", "=", "0", "keyphrases_prepared", "=", "{", "keyphrase", ":", "utils", ".", "prepare_text", "(", "keyphrase", ")", "for", "keyphrase", "in", "keyphrases", "}", "total_keyphrases", "=", "len", "(", "keyphrases", ")", "total_scores", "=", "len", "(", "text_collection", ")", "*", "total_keyphrases", "res", "=", "{", "}", "for", "keyphrase", "in", "keyphrases", ":", "if", "not", "keyphrase", ":", "continue", "res", "[", "keyphrase", "]", "=", "{", "}", "for", "j", "in", "xrange", "(", "len", "(", "text_collection", ")", ")", ":", "i", "+=", "1", "logging", ".", "progress", "(", "\"Calculating matching scores\"", ",", "i", ",", "total_scores", ")", "res", "[", "keyphrase", "]", "[", "text_titles", "[", "j", "]", "]", "=", "similarity_measure", ".", "relevance", "(", "keyphrases_prepared", "[", "keyphrase", "]", ",", "text", "=", "j", ",", "synonimizer", "=", "synonimizer", ")", "logging", ".", "clear", "(", ")", "return", "res" ]
Constructs the keyphrases table, containing their matching scores in a set of texts. The resulting table is stored as a dictionary of dictionaries, where the entry table["keyphrase"]["text"] corresponds to the matching score (0 <= score <= 1) of keyphrase "keyphrase" in the text named "text". :param keyphrases: list of strings :param texts: dictionary of form {text_name: text} :param similarity_measure: similarity measure to use :param synonimizer: SynonymExtractor object to be used :param language: Language of the text collection / keyphrases :returns: dictionary of dictionaries, having keyphrases on its first level and texts on the second level.
[ "Constructs", "the", "keyphrases", "table", "containing", "their", "matching", "scores", "in", "a", "set", "of", "texts", "." ]
python
train
39.891304
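A minimal usage sketch, assuming the east package is installed (note the xrange above: the library targets Python 2); the text names and keyphrases are made up:

from east.applications import keyphrases_table

texts = {"doc1": "the quick brown fox jumps over the lazy dog",
         "doc2": "lazy dogs sleep all day"}
table = keyphrases_table(["brown fox", "lazy dog"], texts)
print(table["brown fox"]["doc1"])  # matching score, 0 <= score <= 1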
GetmeUK/MongoFrames
mongoframes/frames.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L234-L262
def update(self, *fields): """ Update this document. Optionally a specific list of fields to update can be specified. """ from mongoframes.queries import to_refs assert '_id' in self._document, "Can't update documents without `_id`" # Send update signal signal('update').send(self.__class__, frames=[self]) # Check for selective updates if len(fields) > 0: document = {} for field in fields: document[field] = self._path_to_value(field, self._document) else: document = self._document # Prepare the document to be updated document = to_refs(document) document.pop('_id', None) # Update the document self.get_collection().update_one({'_id': self._id}, {'$set': document}) # Send updated signal signal('updated').send(self.__class__, frames=[self])
[ "def", "update", "(", "self", ",", "*", "fields", ")", ":", "from", "mongoframes", ".", "queries", "import", "to_refs", "assert", "'_id'", "in", "self", ".", "_document", ",", "\"Can't update documents without `_id`\"", "# Send update signal", "signal", "(", "'update'", ")", ".", "send", "(", "self", ".", "__class__", ",", "frames", "=", "[", "self", "]", ")", "# Check for selective updates", "if", "len", "(", "fields", ")", ">", "0", ":", "document", "=", "{", "}", "for", "field", "in", "fields", ":", "document", "[", "field", "]", "=", "self", ".", "_path_to_value", "(", "field", ",", "self", ".", "_document", ")", "else", ":", "document", "=", "self", ".", "_document", "# Prepare the document to be updated", "document", "=", "to_refs", "(", "document", ")", "document", ".", "pop", "(", "'_id'", ",", "None", ")", "# Update the document", "self", ".", "get_collection", "(", ")", ".", "update_one", "(", "{", "'_id'", ":", "self", ".", "_id", "}", ",", "{", "'$set'", ":", "document", "}", ")", "# Send updated signal", "signal", "(", "'updated'", ")", ".", "send", "(", "self", ".", "__class__", ",", "frames", "=", "[", "self", "]", ")" ]
Update this document. Optionally a specific list of fields to update can be specified.
[ "Update", "this", "document", ".", "Optionally", "a", "specific", "list", "of", "fields", "to", "update", "can", "be", "specified", "." ]
python
train
31.724138
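A minimal usage sketch, assuming a configured MongoFrames Frame subclass (User is hypothetical); passing field names limits the $set to just those paths:

from mongoframes import Q

user = User.one(Q.username == 'fred')  # hypothetical Frame subclass
user.email = 'fred@example.com'
user.update('email')                   # writes only the email field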
bitlabstudio/django-influxdb-metrics
influxdb_metrics/utils.py
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/utils.py#L13-L24
def get_client(): """Returns an ``InfluxDBClient`` instance.""" return InfluxDBClient( settings.INFLUXDB_HOST, settings.INFLUXDB_PORT, settings.INFLUXDB_USER, settings.INFLUXDB_PASSWORD, settings.INFLUXDB_DATABASE, timeout=settings.INFLUXDB_TIMEOUT, ssl=getattr(settings, 'INFLUXDB_SSL', False), verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False), )
[ "def", "get_client", "(", ")", ":", "return", "InfluxDBClient", "(", "settings", ".", "INFLUXDB_HOST", ",", "settings", ".", "INFLUXDB_PORT", ",", "settings", ".", "INFLUXDB_USER", ",", "settings", ".", "INFLUXDB_PASSWORD", ",", "settings", ".", "INFLUXDB_DATABASE", ",", "timeout", "=", "settings", ".", "INFLUXDB_TIMEOUT", ",", "ssl", "=", "getattr", "(", "settings", ",", "'INFLUXDB_SSL'", ",", "False", ")", ",", "verify_ssl", "=", "getattr", "(", "settings", ",", "'INFLUXDB_VERIFY_SSL'", ",", "False", ")", ",", ")" ]
Returns an ``InfluxDBClient`` instance.
[ "Returns", "an", "InfluxDBClient", "instance", "." ]
python
train
35.166667
shaldengeki/python-mal
myanimelist/character.py
https://github.com/shaldengeki/python-mal/blob/2c3356411a74d88ba13f6b970388040d696f8392/myanimelist/character.py#L250-L279
def parse_clubs(self, clubs_page): """Parses the DOM and returns character clubs attributes. :type clubs_page: :class:`bs4.BeautifulSoup` :param clubs_page: MAL character clubs page's DOM :rtype: dict :return: character clubs attributes. """ character_info = self.parse_sidebar(clubs_page) second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1] try: clubs_header = second_col.find(u'div', text=u'Related Clubs') character_info[u'clubs'] = [] if clubs_header: curr_elt = clubs_header.nextSibling while curr_elt is not None: if curr_elt.name == u'div': link = curr_elt.find(u'a') club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id')) num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num')) character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members})) curr_elt = curr_elt.nextSibling except: if not self.session.suppress_parse_exceptions: raise return character_info
[ "def", "parse_clubs", "(", "self", ",", "clubs_page", ")", ":", "character_info", "=", "self", ".", "parse_sidebar", "(", "clubs_page", ")", "second_col", "=", "clubs_page", ".", "find", "(", "u'div'", ",", "{", "'id'", ":", "'content'", "}", ")", ".", "find", "(", "u'table'", ")", ".", "find", "(", "u'tr'", ")", ".", "find_all", "(", "u'td'", ",", "recursive", "=", "False", ")", "[", "1", "]", "try", ":", "clubs_header", "=", "second_col", ".", "find", "(", "u'div'", ",", "text", "=", "u'Related Clubs'", ")", "character_info", "[", "u'clubs'", "]", "=", "[", "]", "if", "clubs_header", ":", "curr_elt", "=", "clubs_header", ".", "nextSibling", "while", "curr_elt", "is", "not", "None", ":", "if", "curr_elt", ".", "name", "==", "u'div'", ":", "link", "=", "curr_elt", ".", "find", "(", "u'a'", ")", "club_id", "=", "int", "(", "re", ".", "match", "(", "r'/clubs\\.php\\?cid=(?P<id>[0-9]+)'", ",", "link", ".", "get", "(", "u'href'", ")", ")", ".", "group", "(", "u'id'", ")", ")", "num_members", "=", "int", "(", "re", ".", "match", "(", "r'(?P<num>[0-9]+) members'", ",", "curr_elt", ".", "find", "(", "u'small'", ")", ".", "text", ")", ".", "group", "(", "u'num'", ")", ")", "character_info", "[", "u'clubs'", "]", ".", "append", "(", "self", ".", "session", ".", "club", "(", "club_id", ")", ".", "set", "(", "{", "'name'", ":", "link", ".", "text", ",", "'num_members'", ":", "num_members", "}", ")", ")", "curr_elt", "=", "curr_elt", ".", "nextSibling", "except", ":", "if", "not", "self", ".", "session", ".", "suppress_parse_exceptions", ":", "raise", "return", "character_info" ]
Parses the DOM and returns character clubs attributes. :type clubs_page: :class:`bs4.BeautifulSoup` :param clubs_page: MAL character clubs page's DOM :rtype: dict :return: character clubs attributes.
[ "Parses", "the", "DOM", "and", "returns", "character", "clubs", "attributes", "." ]
python
train
39.9
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L313-L334
def find_l50(contig_lengths_dict, genome_length_dict): """ Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50 """ # Initialise the dictionary l50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): currentlength = 0 # Initialise a variable to count how many contigs have been added to the currentlength variable currentcontig = 0 for contig_length in contig_lengths: currentlength += contig_length # Increment :currentcontig each time a contig is added to the current length currentcontig += 1 # Same logic as with the N50, but the contig number is added instead of the length of the contig if currentlength >= genome_length_dict[file_name] * 0.5: l50_dict[file_name] = currentcontig break return l50_dict
[ "def", "find_l50", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "l50_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength", "=", "0", "# Initialise a variable to count how many contigs have been added to the currentlength variable", "currentcontig", "=", "0", "for", "contig_length", "in", "contig_lengths", ":", "currentlength", "+=", "contig_length", "# Increment :currentcontig each time a contig is added to the current length", "currentcontig", "+=", "1", "# Same logic as with the N50, but the contig number is added instead of the length of the contig", "if", "currentlength", ">=", "genome_length_dict", "[", "file_name", "]", "*", "0.5", ":", "l50_dict", "[", "file_name", "]", "=", "currentcontig", "break", "return", "l50_dict" ]
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50
[ "Calculate", "the", "L50", "for", "each", "strain", ".", "L50", "is", "defined", "as", "the", "number", "of", "contigs", "required", "to", "achieve", "the", "N50", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse", "-", "sorted", "list", "of", "all", "contig", "lengths", ":", "param", "genome_length_dict", ":", "dictionary", "of", "strain", "name", ":", "total", "genome", "length", ":", "return", ":", "l50_dict", ":", "dictionary", "of", "strain", "name", ":", "L50" ]
python
train
52.045455
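A self-contained check of the logic with made-up contig lengths (reusing find_l50 as defined above): the genome totals 100, so the 50 % threshold is crossed once the two largest contigs (40 + 30) are summed.

contig_lengths_dict = {'strainA': [40, 30, 20, 10]}  # already reverse-sorted
genome_length_dict = {'strainA': 100}
print(find_l50(contig_lengths_dict, genome_length_dict))  # {'strainA': 2}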
konstantinstadler/pymrio
pymrio/core/fileio.py
https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/fileio.py#L34-L181
def load_all(path, include_core=True, subfolders=None, path_in_arc=None): """ Loads a full IO system with all extensions in path Parameters ---------- path : pathlib.Path or string Path or path with para file name for the data to load. This must either point to the directory containing the uncompressed data or the location of a compressed zip file with the data. In the latter case and if there are several mrio's in the zip file the parameter 'path_in_arc' needs to be specified to further indicate the location of the data in the compressed file. include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. subfolders: list of pathlib.Path or string, optional By default (subfolders=None), all subfolders in path containing a json parameter file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json) are parsed. If only a subset should be used, pass a list of names of subfolders. These can either be strings specifying direct subfolders of path, or absolute/relative path if the extensions are stored at a different location. Both modes can be mixed. If the data is read from a zip archive the path must be given as described below in 'path_in_arc', relative to the root defined in the parameter 'path_in_arc'. Extensions in a different zip archive must be read separately by calling the function 'load' for this extension. path_in_arc: string, optional Path to the data in the zip file (where the fileparameters file is located). path_in_arc must be given without leading dot and slash; thus to point to the data in the root of the compressed file pass '', for data in e.g. the folder 'emissions' pass 'emissions/'. Only used if parameter 'path' points to a compressed zip file. Can be None (default) if there is only one mrio database in the zip archive (thus only one file_parameter file with the systemtype entry 'IOSystem'). """ def clean(varStr): """ get valid python name from folder """ return re.sub('\W|^(?=\d)', '_', str(varStr)) path = Path(path) if zipfile.is_zipfile(str(path)): with zipfile.ZipFile(file=str(path), mode='r') as zz: zipcontent = zz.namelist() if path_in_arc: path_in_arc = str(path_in_arc) if path_in_arc not in zipcontent: path_in_arc = os.path.join(path_in_arc, DEFAULT_FILE_NAMES['filepara']) if path_in_arc not in zipcontent: raise ReadError('File parameter file {} not found in {}. ' 'Tip: specify fileparameter filename ' 'through "path_in_arc" if different ' 'from default.'.format( DEFAULT_FILE_NAMES['filepara'], path)) else: with zipfile.ZipFile(file=str(path), mode='r') as zz: fpfiles = [ f for f in zz.namelist() if os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and json.loads(zz.read(f).decode('utf-8') )['systemtype'] == 'IOSystem'] if len(fpfiles) == 0: raise ReadError('File parameter file {} not found in {}. ' 'Tip: specify fileparameter filename ' 'through "path_in_arc" if different ' 'from default.'.format( DEFAULT_FILE_NAMES['filepara'], path)) elif len(fpfiles) > 1: raise ReadError('Mulitple mrio archives found in {}. ' 'Specify one by the ' 'parameter "path_in_arc"'.format(path)) else: path_in_arc = os.path.dirname(fpfiles[0]) logging.debug("Expect file parameter-file at {} in {}".format( path_in_arc, path)) io = load(path, include_core=include_core, path_in_arc=path_in_arc) if zipfile.is_zipfile(str(path)): root_in_zip = os.path.dirname(path_in_arc) if subfolders is None: subfolders = { os.path.relpath(os.path.dirname(p), root_in_zip) for p in zipcontent if p.startswith(root_in_zip) and os.path.dirname(p) != root_in_zip} for subfolder_name in subfolders: if subfolder_name not in zipcontent + list({ os.path.dirname(p) for p in zipcontent}): subfolder_full = os.path.join(root_in_zip, subfolder_name) else: subfolder_full = subfolder_name subfolder_name = os.path.basename(os.path.normpath(subfolder_name)) if subfolder_name not in zipcontent: subfolder_full_meta = os.path.join( subfolder_full, DEFAULT_FILE_NAMES['filepara']) else: subfolder_full_meta = subfolder_full if subfolder_full_meta in zipcontent: ext = load(path, include_core=include_core, path_in_arc=subfolder_full_meta) setattr(io, clean(subfolder_name), ext) io.meta._add_fileio("Added satellite account " "from {}".format(subfolder_full)) else: continue else: if subfolders is None: subfolders = [d for d in path.iterdir() if d.is_dir()] for subfolder_name in subfolders: if not os.path.exists(str(subfolder_name)): subfolder_full = path / subfolder_name else: subfolder_full = subfolder_name subfolder_name = os.path.basename(os.path.normpath(subfolder_name)) if not os.path.isfile(str(subfolder_full)): subfolder_full_meta = (subfolder_full / DEFAULT_FILE_NAMES['filepara']) else: subfolder_full_meta = subfolder_full if subfolder_full_meta.exists(): ext = load(subfolder_full, include_core=include_core) setattr(io, clean(subfolder_name), ext) io.meta._add_fileio("Added satellite account " "from {}".format(subfolder_full)) else: continue return io
[ "def", "load_all", "(", "path", ",", "include_core", "=", "True", ",", "subfolders", "=", "None", ",", "path_in_arc", "=", "None", ")", ":", "def", "clean", "(", "varStr", ")", ":", "\"\"\" get valid python name from folder\n \"\"\"", "return", "re", ".", "sub", "(", "'\\W|^(?=\\d)'", ",", "'_'", ",", "str", "(", "varStr", ")", ")", "path", "=", "Path", "(", "path", ")", "if", "zipfile", ".", "is_zipfile", "(", "str", "(", "path", ")", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "file", "=", "str", "(", "path", ")", ",", "mode", "=", "'r'", ")", "as", "zz", ":", "zipcontent", "=", "zz", ".", "namelist", "(", ")", "if", "path_in_arc", ":", "path_in_arc", "=", "str", "(", "path_in_arc", ")", "if", "path_in_arc", "not", "in", "zipcontent", ":", "path_in_arc", "=", "os", ".", "path", ".", "join", "(", "path_in_arc", ",", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", ")", "if", "path_in_arc", "not", "in", "zipcontent", ":", "raise", "ReadError", "(", "'File parameter file {} not found in {}. '", "'Tip: specify fileparameter filename '", "'through \"path_in_arc\" if different '", "'from default.'", ".", "format", "(", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", ",", "path", ")", ")", "else", ":", "with", "zipfile", ".", "ZipFile", "(", "file", "=", "str", "(", "path", ")", ",", "mode", "=", "'r'", ")", "as", "zz", ":", "fpfiles", "=", "[", "f", "for", "f", "in", "zz", ".", "namelist", "(", ")", "if", "os", ".", "path", ".", "basename", "(", "f", ")", "==", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", "and", "json", ".", "loads", "(", "zz", ".", "read", "(", "f", ")", ".", "decode", "(", "'utf-8'", ")", ")", "[", "'systemtype'", "]", "==", "'IOSystem'", "]", "if", "len", "(", "fpfiles", ")", "==", "0", ":", "raise", "ReadError", "(", "'File parameter file {} not found in {}. '", "'Tip: specify fileparameter filename '", "'through \"path_in_arc\" if different '", "'from default.'", ".", "format", "(", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", ",", "path", ")", ")", "elif", "len", "(", "fpfiles", ")", ">", "1", ":", "raise", "ReadError", "(", "'Mulitple mrio archives found in {}. 
'", "'Specify one by the '", "'parameter \"path_in_arc\"'", ".", "format", "(", "path", ")", ")", "else", ":", "path_in_arc", "=", "os", ".", "path", ".", "dirname", "(", "fpfiles", "[", "0", "]", ")", "logging", ".", "debug", "(", "\"Expect file parameter-file at {} in {}\"", ".", "format", "(", "path_in_arc", ",", "path", ")", ")", "io", "=", "load", "(", "path", ",", "include_core", "=", "include_core", ",", "path_in_arc", "=", "path_in_arc", ")", "if", "zipfile", ".", "is_zipfile", "(", "str", "(", "path", ")", ")", ":", "root_in_zip", "=", "os", ".", "path", ".", "dirname", "(", "path_in_arc", ")", "if", "subfolders", "is", "None", ":", "subfolders", "=", "{", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "dirname", "(", "p", ")", ",", "root_in_zip", ")", "for", "p", "in", "zipcontent", "if", "p", ".", "startswith", "(", "root_in_zip", ")", "and", "os", ".", "path", ".", "dirname", "(", "p", ")", "!=", "root_in_zip", "}", "for", "subfolder_name", "in", "subfolders", ":", "if", "subfolder_name", "not", "in", "zipcontent", "+", "list", "(", "{", "os", ".", "path", ".", "dirname", "(", "p", ")", "for", "p", "in", "zipcontent", "}", ")", ":", "subfolder_full", "=", "os", ".", "path", ".", "join", "(", "root_in_zip", ",", "subfolder_name", ")", "else", ":", "subfolder_full", "=", "subfolder_name", "subfolder_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "subfolder_name", ")", ")", "if", "subfolder_name", "not", "in", "zipcontent", ":", "subfolder_full_meta", "=", "os", ".", "path", ".", "join", "(", "subfolder_full", ",", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", ")", "else", ":", "subfolder_full_meta", "=", "subfolder_full", "if", "subfolder_full_meta", "in", "zipcontent", ":", "ext", "=", "load", "(", "path", ",", "include_core", "=", "include_core", ",", "path_in_arc", "=", "subfolder_full_meta", ")", "setattr", "(", "io", ",", "clean", "(", "subfolder_name", ")", ",", "ext", ")", "io", ".", "meta", ".", "_add_fileio", "(", "\"Added satellite account \"", "\"from {}\"", ".", "format", "(", "subfolder_full", ")", ")", "else", ":", "continue", "else", ":", "if", "subfolders", "is", "None", ":", "subfolders", "=", "[", "d", "for", "d", "in", "path", ".", "iterdir", "(", ")", "if", "d", ".", "is_dir", "(", ")", "]", "for", "subfolder_name", "in", "subfolders", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "str", "(", "subfolder_name", ")", ")", ":", "subfolder_full", "=", "path", "/", "subfolder_name", "else", ":", "subfolder_full", "=", "subfolder_name", "subfolder_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "subfolder_name", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "str", "(", "subfolder_full", ")", ")", ":", "subfolder_full_meta", "=", "(", "subfolder_full", "/", "DEFAULT_FILE_NAMES", "[", "'filepara'", "]", ")", "else", ":", "subfolder_full_meta", "=", "subfolder_full", "if", "subfolder_full_meta", ".", "exists", "(", ")", ":", "ext", "=", "load", "(", "subfolder_full", ",", "include_core", "=", "include_core", ")", "setattr", "(", "io", ",", "clean", "(", "subfolder_name", ")", ",", "ext", ")", "io", ".", "meta", ".", "_add_fileio", "(", "\"Added satellite account \"", "\"from {}\"", ".", "format", "(", "subfolder_full", ")", ")", "else", ":", "continue", "return", "io" ]
Loads a full IO system with all extensions in path Parameters ---------- path : pathlib.Path or string Path or path with para file name for the data to load. This must either point to the directory containing the uncompressed data or the location of a compressed zip file with the data. In the latter case and if there are several mrio's in the zip file the parameter 'path_in_arc' needs to be specified to further indicate the location of the data in the compressed file. include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. subfolders: list of pathlib.Path or string, optional By default (subfolders=None), all subfolders in path containing a json parameter file (as defined in DEFAULT_FILE_NAMES['filepara']: metadata.json) are parsed. If only a subset should be used, pass a list of names of subfolders. These can either be strings specifying direct subfolders of path, or absolute/relative path if the extensions are stored at a different location. Both modes can be mixed. If the data is read from a zip archive the path must be given as described below in 'path_in_arc', relative to the root defined in the parameter 'path_in_arc'. Extensions in a different zip archive must be read separately by calling the function 'load' for this extension. path_in_arc: string, optional Path to the data in the zip file (where the fileparameters file is located). path_in_arc must be given without leading dot and slash; thus to point to the data in the root of the compressed file pass '', for data in e.g. the folder 'emissions' pass 'emissions/'. Only used if parameter 'path' points to a compressed zip file. Can be None (default) if there is only one mrio database in the zip archive (thus only one file_parameter file with the systemtype entry 'IOSystem').
[ "Loads", "a", "full", "IO", "system", "with", "all", "extension", "in", "path" ]
python
train
45.452703
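A minimal usage sketch, assuming pymrio is installed and a system was previously stored with its save routines; both path arguments are hypothetical:

import pymrio

io = pymrio.load_all('mrio_archive.zip', path_in_arc='mrio/')
io.calc_all()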
tomatohater/django-unfriendly
unfriendly/utils.py
https://github.com/tomatohater/django-unfriendly/blob/38eca5fb45841db331fc66571fff37bef50dfa67/unfriendly/utils.py#L65-L93
def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True): """Decrypts ciphertext with secret ciphertext - encrypted content to decrypt secret - secret to decrypt ciphertext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - verify crc32 byte encoded checksum (default: True) returns plaintext """ secret = _lazysecret(secret) if lazy else secret encobj = AES.new(secret, AES.MODE_CFB, inital_vector) try: padded = ciphertext + ('=' * (len(ciphertext) % 4)) decoded = base64.urlsafe_b64decode(str(padded)) plaintext = encobj.decrypt(decoded) except (TypeError, binascii.Error): raise InvalidKeyError("invalid key") if checksum: try: crc, plaintext = (base64.urlsafe_b64decode( plaintext[-8:]), plaintext[:-8]) except (TypeError, binascii.Error): raise CheckSumError("checksum mismatch") if not crc == _pack_crc(plaintext): raise CheckSumError("checksum mismatch") return plaintext
[ "def", "decrypt", "(", "ciphertext", ",", "secret", ",", "inital_vector", ",", "checksum", "=", "True", ",", "lazy", "=", "True", ")", ":", "secret", "=", "_lazysecret", "(", "secret", ")", "if", "lazy", "else", "secret", "encobj", "=", "AES", ".", "new", "(", "secret", ",", "AES", ".", "MODE_CFB", ",", "inital_vector", ")", "try", ":", "padded", "=", "ciphertext", "+", "(", "'='", "*", "(", "len", "(", "ciphertext", ")", "%", "4", ")", ")", "decoded", "=", "base64", ".", "urlsafe_b64decode", "(", "str", "(", "padded", ")", ")", "plaintext", "=", "encobj", ".", "decrypt", "(", "decoded", ")", "except", "(", "TypeError", ",", "binascii", ".", "Error", ")", ":", "raise", "InvalidKeyError", "(", "\"invalid key\"", ")", "if", "checksum", ":", "try", ":", "crc", ",", "plaintext", "=", "(", "base64", ".", "urlsafe_b64decode", "(", "plaintext", "[", "-", "8", ":", "]", ")", ",", "plaintext", "[", ":", "-", "8", "]", ")", "except", "(", "TypeError", ",", "binascii", ".", "Error", ")", ":", "raise", "CheckSumError", "(", "\"checksum mismatch\"", ")", "if", "not", "crc", "==", "_pack_crc", "(", "plaintext", ")", ":", "raise", "CheckSumError", "(", "\"checksum mismatch\"", ")", "return", "plaintext" ]
Decrypts ciphertext with secret ciphertext - encrypted content to decrypt secret - secret to decrypt ciphertext inital_vector - initial vector lazy - pad secret if less than legal blocksize (default: True) checksum - verify crc32 byte encoded checksum (default: True) returns plaintext
[ "Decrypts", "ciphertext", "with", "secret", "ciphertext", "-", "encrypted", "content", "to", "decrypt", "secret", "-", "secret", "to", "decrypt", "ciphertext", "inital_vector", "-", "initial", "vector", "lazy", "-", "pad", "secret", "if", "less", "than", "legal", "blocksize", "(", "default", ":", "True", ")", "checksum", "-", "verify", "crc32", "byte", "encoded", "checksum", "(", "default", ":", "True", ")", "returns", "plaintext" ]
python
test
38.517241
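A minimal round-trip sketch, assuming the module's matching encrypt() helper takes the same signature and a 16-byte initialization vector (the module is Python 2 era, hence plain str secrets); _lazysecret pads short keys when lazy=True:

iv = '0123456789abcdef'                      # 16 bytes, the AES block size
token = encrypt('hello world', 's3cret', iv)
assert decrypt(token, 's3cret', iv) == 'hello world'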
ArduPilot/MAVProxy
MAVProxy/modules/lib/ANUGA/redfearn.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/ANUGA/redfearn.py#L45-L195
def redfearn(lat, lon, false_easting=None, false_northing=None, zone=None, central_meridian=None, scale_factor=None): """Compute UTM projection using Redfearn's formula lat, lon is latitude and longitude in decimal degrees If false easting and northing are specified they will override the standard If zone is specified reproject lat and long to specified zone instead of standard zone If meridian is specified, reproject lat and lon to that instead of zone. In this case zone will be set to -1 to indicate non-UTM projection Note that zone and meridian cannot both be specifed """ from math import pi, sqrt, sin, cos, tan #GDA Specifications a = 6378137.0 #Semi major axis inverse_flattening = 298.257222101 #1/f if scale_factor is None: K0 = 0.9996 #Central scale factor else: K0 = scale_factor #print('scale', K0) zone_width = 6 #Degrees longitude_of_central_meridian_zone0 = -183 longitude_of_western_edge_zone0 = -186 if false_easting is None: false_easting = 500000 if false_northing is None: if lat < 0: false_northing = 10000000 #Southern hemisphere else: false_northing = 0 #Northern hemisphere) #Derived constants f = 1.0/inverse_flattening b = a*(1-f) #Semi minor axis e2 = 2*f - f*f# = f*(2-f) = (a^2-b^2/a^2 #Eccentricity e = sqrt(e2) e2_ = e2/(1-e2) # = (a^2-b^2)/b^2 #Second eccentricity e_ = sqrt(e2_) e4 = e2*e2 e6 = e2*e4 #Foot point latitude n = (a-b)/(a+b) #Same as e2 - why ? n2 = n*n n3 = n*n2 n4 = n2*n2 G = a*(1-n)*(1-n2)*(1+9*n2/4+225*n4/64)*pi/180 phi = lat*pi/180 #Convert latitude to radians sinphi = sin(phi) sin2phi = sin(2*phi) sin4phi = sin(4*phi) sin6phi = sin(6*phi) cosphi = cos(phi) cosphi2 = cosphi*cosphi cosphi3 = cosphi*cosphi2 cosphi4 = cosphi2*cosphi2 cosphi5 = cosphi*cosphi4 cosphi6 = cosphi2*cosphi4 cosphi7 = cosphi*cosphi6 cosphi8 = cosphi4*cosphi4 t = tan(phi) t2 = t*t t4 = t2*t2 t6 = t2*t4 #Radius of Curvature rho = a*(1-e2)/(1-e2*sinphi*sinphi)**1.5 nu = a/(1-e2*sinphi*sinphi)**0.5 psi = nu/rho psi2 = psi*psi psi3 = psi*psi2 psi4 = psi2*psi2 #Meridian distance A0 = 1 - e2/4 - 3*e4/64 - 5*e6/256 A2 = 3.0/8*(e2+e4/4+15*e6/128) A4 = 15.0/256*(e4+3*e6/4) A6 = 35*e6/3072 term1 = a*A0*phi term2 = -a*A2*sin2phi term3 = a*A4*sin4phi term4 = -a*A6*sin6phi m = term1 + term2 + term3 + term4 #OK if zone is not None and central_meridian is not None: msg = 'You specified both zone and central_meridian. Provide only one of them' raise ValueError(msg) # Zone if zone is None: zone = int((lon - longitude_of_western_edge_zone0)/zone_width) # Central meridian if central_meridian is None: central_meridian = zone*zone_width+longitude_of_central_meridian_zone0 else: zone = -1 omega = (lon-central_meridian)*pi/180 #Relative longitude (radians) omega2 = omega*omega omega3 = omega*omega2 omega4 = omega2*omega2 omega5 = omega*omega4 omega6 = omega3*omega3 omega7 = omega*omega6 omega8 = omega4*omega4 #Northing term1 = nu*sinphi*cosphi*omega2/2 term2 = nu*sinphi*cosphi3*(4*psi2+psi-t2)*omega4/24 term3 = nu*sinphi*cosphi5*\ (8*psi4*(11-24*t2)-28*psi3*(1-6*t2)+\ psi2*(1-32*t2)-psi*2*t2+t4-t2)*omega6/720 term4 = nu*sinphi*cosphi7*(1385-3111*t2+543*t4-t6)*omega8/40320 northing = false_northing + K0*(m + term1 + term2 + term3 + term4) #Easting term1 = nu*omega*cosphi term2 = nu*cosphi3*(psi-t2)*omega3/6 term3 = nu*cosphi5*(4*psi3*(1-6*t2)+psi2*(1+8*t2)-2*psi*t2+t4)*omega5/120 term4 = nu*cosphi7*(61-479*t2+179*t4-t6)*omega7/5040 easting = false_easting + K0*(term1 + term2 + term3 + term4) return zone, easting, northing
[ "def", "redfearn", "(", "lat", ",", "lon", ",", "false_easting", "=", "None", ",", "false_northing", "=", "None", ",", "zone", "=", "None", ",", "central_meridian", "=", "None", ",", "scale_factor", "=", "None", ")", ":", "from", "math", "import", "pi", ",", "sqrt", ",", "sin", ",", "cos", ",", "tan", "#GDA Specifications", "a", "=", "6378137.0", "#Semi major axis", "inverse_flattening", "=", "298.257222101", "#1/f", "if", "scale_factor", "is", "None", ":", "K0", "=", "0.9996", "#Central scale factor", "else", ":", "K0", "=", "scale_factor", "#print('scale', K0)", "zone_width", "=", "6", "#Degrees", "longitude_of_central_meridian_zone0", "=", "-", "183", "longitude_of_western_edge_zone0", "=", "-", "186", "if", "false_easting", "is", "None", ":", "false_easting", "=", "500000", "if", "false_northing", "is", "None", ":", "if", "lat", "<", "0", ":", "false_northing", "=", "10000000", "#Southern hemisphere", "else", ":", "false_northing", "=", "0", "#Northern hemisphere)", "#Derived constants", "f", "=", "1.0", "/", "inverse_flattening", "b", "=", "a", "*", "(", "1", "-", "f", ")", "#Semi minor axis", "e2", "=", "2", "*", "f", "-", "f", "*", "f", "# = f*(2-f) = (a^2-b^2/a^2 #Eccentricity", "e", "=", "sqrt", "(", "e2", ")", "e2_", "=", "e2", "/", "(", "1", "-", "e2", ")", "# = (a^2-b^2)/b^2 #Second eccentricity", "e_", "=", "sqrt", "(", "e2_", ")", "e4", "=", "e2", "*", "e2", "e6", "=", "e2", "*", "e4", "#Foot point latitude", "n", "=", "(", "a", "-", "b", ")", "/", "(", "a", "+", "b", ")", "#Same as e2 - why ?", "n2", "=", "n", "*", "n", "n3", "=", "n", "*", "n2", "n4", "=", "n2", "*", "n2", "G", "=", "a", "*", "(", "1", "-", "n", ")", "*", "(", "1", "-", "n2", ")", "*", "(", "1", "+", "9", "*", "n2", "/", "4", "+", "225", "*", "n4", "/", "64", ")", "*", "pi", "/", "180", "phi", "=", "lat", "*", "pi", "/", "180", "#Convert latitude to radians", "sinphi", "=", "sin", "(", "phi", ")", "sin2phi", "=", "sin", "(", "2", "*", "phi", ")", "sin4phi", "=", "sin", "(", "4", "*", "phi", ")", "sin6phi", "=", "sin", "(", "6", "*", "phi", ")", "cosphi", "=", "cos", "(", "phi", ")", "cosphi2", "=", "cosphi", "*", "cosphi", "cosphi3", "=", "cosphi", "*", "cosphi2", "cosphi4", "=", "cosphi2", "*", "cosphi2", "cosphi5", "=", "cosphi", "*", "cosphi4", "cosphi6", "=", "cosphi2", "*", "cosphi4", "cosphi7", "=", "cosphi", "*", "cosphi6", "cosphi8", "=", "cosphi4", "*", "cosphi4", "t", "=", "tan", "(", "phi", ")", "t2", "=", "t", "*", "t", "t4", "=", "t2", "*", "t2", "t6", "=", "t2", "*", "t4", "#Radius of Curvature", "rho", "=", "a", "*", "(", "1", "-", "e2", ")", "/", "(", "1", "-", "e2", "*", "sinphi", "*", "sinphi", ")", "**", "1.5", "nu", "=", "a", "/", "(", "1", "-", "e2", "*", "sinphi", "*", "sinphi", ")", "**", "0.5", "psi", "=", "nu", "/", "rho", "psi2", "=", "psi", "*", "psi", "psi3", "=", "psi", "*", "psi2", "psi4", "=", "psi2", "*", "psi2", "#Meridian distance", "A0", "=", "1", "-", "e2", "/", "4", "-", "3", "*", "e4", "/", "64", "-", "5", "*", "e6", "/", "256", "A2", "=", "3.0", "/", "8", "*", "(", "e2", "+", "e4", "/", "4", "+", "15", "*", "e6", "/", "128", ")", "A4", "=", "15.0", "/", "256", "*", "(", "e4", "+", "3", "*", "e6", "/", "4", ")", "A6", "=", "35", "*", "e6", "/", "3072", "term1", "=", "a", "*", "A0", "*", "phi", "term2", "=", "-", "a", "*", "A2", "*", "sin2phi", "term3", "=", "a", "*", "A4", "*", "sin4phi", "term4", "=", "-", "a", "*", "A6", "*", "sin6phi", "m", "=", "term1", "+", "term2", "+", "term3", "+", "term4", "#OK", "if", "zone", "is", "not", "None", "and", "central_meridian", 
"is", "not", "None", ":", "msg", "=", "'You specified both zone and central_meridian. Provide only one of them'", "raise", "ValueError", "(", "msg", ")", "# Zone", "if", "zone", "is", "None", ":", "zone", "=", "int", "(", "(", "lon", "-", "longitude_of_western_edge_zone0", ")", "/", "zone_width", ")", "# Central meridian", "if", "central_meridian", "is", "None", ":", "central_meridian", "=", "zone", "*", "zone_width", "+", "longitude_of_central_meridian_zone0", "else", ":", "zone", "=", "-", "1", "omega", "=", "(", "lon", "-", "central_meridian", ")", "*", "pi", "/", "180", "#Relative longitude (radians)", "omega2", "=", "omega", "*", "omega", "omega3", "=", "omega", "*", "omega2", "omega4", "=", "omega2", "*", "omega2", "omega5", "=", "omega", "*", "omega4", "omega6", "=", "omega3", "*", "omega3", "omega7", "=", "omega", "*", "omega6", "omega8", "=", "omega4", "*", "omega4", "#Northing", "term1", "=", "nu", "*", "sinphi", "*", "cosphi", "*", "omega2", "/", "2", "term2", "=", "nu", "*", "sinphi", "*", "cosphi3", "*", "(", "4", "*", "psi2", "+", "psi", "-", "t2", ")", "*", "omega4", "/", "24", "term3", "=", "nu", "*", "sinphi", "*", "cosphi5", "*", "(", "8", "*", "psi4", "*", "(", "11", "-", "24", "*", "t2", ")", "-", "28", "*", "psi3", "*", "(", "1", "-", "6", "*", "t2", ")", "+", "psi2", "*", "(", "1", "-", "32", "*", "t2", ")", "-", "psi", "*", "2", "*", "t2", "+", "t4", "-", "t2", ")", "*", "omega6", "/", "720", "term4", "=", "nu", "*", "sinphi", "*", "cosphi7", "*", "(", "1385", "-", "3111", "*", "t2", "+", "543", "*", "t4", "-", "t6", ")", "*", "omega8", "/", "40320", "northing", "=", "false_northing", "+", "K0", "*", "(", "m", "+", "term1", "+", "term2", "+", "term3", "+", "term4", ")", "#Easting", "term1", "=", "nu", "*", "omega", "*", "cosphi", "term2", "=", "nu", "*", "cosphi3", "*", "(", "psi", "-", "t2", ")", "*", "omega3", "/", "6", "term3", "=", "nu", "*", "cosphi5", "*", "(", "4", "*", "psi3", "*", "(", "1", "-", "6", "*", "t2", ")", "+", "psi2", "*", "(", "1", "+", "8", "*", "t2", ")", "-", "2", "*", "psi", "*", "t2", "+", "t4", ")", "*", "omega5", "/", "120", "term4", "=", "nu", "*", "cosphi7", "*", "(", "61", "-", "479", "*", "t2", "+", "179", "*", "t4", "-", "t6", ")", "*", "omega7", "/", "5040", "easting", "=", "false_easting", "+", "K0", "*", "(", "term1", "+", "term2", "+", "term3", "+", "term4", ")", "return", "zone", ",", "easting", ",", "northing" ]
Compute UTM projection using Redfearn's formula

lat, lon is latitude and longitude in decimal degrees

If false easting and northing are specified they will override
the standard

If zone is specified reproject lat and long to specified zone
instead of standard zone

If meridian is specified, reproject lat and lon to that instead of zone.
In this case zone will be set to -1 to indicate non-UTM projection

Note that zone and meridian cannot both be specified
[ "Compute", "UTM", "projection", "using", "Redfearn", "s", "formula" ]
python
train
26.370861
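A short usage sketch for the record above. The test point is an illustrative assumption (it is not in the source); the zone value follows from the zone formula in the code, int((151.21 + 186) / 6) == 56, while the exact easting/northing are left unasserted:

# Hypothetical call: a point near Sydney, Australia (longitude 151.21 lies in UTM zone 56).
zone, easting, northing = redfearn(-33.87, 151.21)
print(zone)               # 56
print(easting, northing)  # metres, using the default false easting/northing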
shichao-an/115wangpan
u115/api.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L63-L68
def post(self, url, data, params=None):
    """
    Initiate a POST request
    """
    r = self.session.post(url, data=data, params=params)
    return self._response_parser(r, expect_json=False)
[ "def", "post", "(", "self", ",", "url", ",", "data", ",", "params", "=", "None", ")", ":", "r", "=", "self", ".", "session", ".", "post", "(", "url", ",", "data", "=", "data", ",", "params", "=", "params", ")", "return", "self", ".", "_response_parser", "(", "r", ",", "expect_json", "=", "False", ")" ]
Initiate a POST request
[ "Initiate", "a", "POST", "request" ]
python
train
35
vpelletier/pprofile
pprofile.py
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L520-L629
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
    """
    Dump statistics in callgrind format.
    Contains:
    - per-line hit count, time and time-per-hit
    - call associations (call tree)
      Note: hit count is not inclusive, in that it is not the sum of all
      hits inside that call.
    Time unit: microsecond (1e-6 second).
    out (file-ish opened for writing)
        Destination of callgrind profiling data.
    filename (str, collection of str)
        If provided, dump stats for given source file(s) only.
        By default, list for all known files.
    commandline (anything with __str__)
        If provided, will be output as the command line used to generate
        this profiling data.
    relative_path (bool)
        When True, absolute elements are stripped from path. Useful when
        maintaining several copies of source trees with their own
        profiling result, so kcachegrind does not look in system-wide
        files which may not match with profiled code.
    """
    print(u'# callgrind format', file=out)
    print(u'version: 1', file=out)
    print(u'creator: pprofile', file=out)
    print(u'event: usphit :microseconds/hit', file=out)
    print(u'events: hits microseconds usphit', file=out)
    if commandline is not None:
        print(u'cmd:', commandline, file=out)
    file_dict = self._mergeFileTiming()
    if relative_path:
        convertPath = _relpath
    else:
        convertPath = lambda x: x
    if os.path.sep != "/":
        # qCacheGrind (windows build) needs at least one UNIX separator
        # in path to find the file. Adapt here even if this is probably
        # more of a qCacheGrind issue...
        convertPath = lambda x, cascade=convertPath: cascade(
            '/'.join(x.split(os.path.sep))
        )
    code_to_name_dict = {}
    homonym_counter = {}
    def getCodeName(filename, code):
        # Tracks code objects globally, because callee information needs
        # to be consistent across files.
        # Inside a file, grants unique names to each code object.
        try:
            return code_to_name_dict[code]
        except KeyError:
            name = code.co_name + ':%i' % code.co_firstlineno
            key = (filename, name)
            homonym_count = homonym_counter.get(key, 0)
            if homonym_count:
                name += '_%i' % homonym_count
            homonym_counter[key] = homonym_count + 1
            code_to_name_dict[code] = name
            return name
    for current_file in self._getFileNameList(filename, may_sort=False):
        file_timing = file_dict[current_file]
        print(u'fl=%s' % convertPath(current_file), file=out)
        # When a local callable is created and immediately executed, this
        # loop would start a new "fn=" section but would not end it before
        # emitting "cfn=" lines, making the callee appear as not being
        # called by interrupted "fn=" section.
        # So dispatch all functions in a first pass, and build
        # uninterrupted sections in a second pass.
        # Note: cost line is a list just to be mutable. A single item is
        # expected.
        func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
        for lineno, code, hits, duration in file_timing.iterHits():
            func_dict[getCodeName(current_file, code)][lineno][0].append(
                (hits, int(duration * 1000000)),
            )
        for (
            lineno,
            caller,
            call_hits,
            call_duration,
            callee_file,
            callee,
        ) in file_timing.iterCalls():
            call_ticks = int(call_duration * 1000000)
            func_call_list = func_dict[
                getCodeName(current_file, caller)
            ][lineno][1]
            append = func_call_list.append
            append(u'cfl=' + convertPath(callee_file))
            append(u'cfn=' + getCodeName(callee_file, callee))
            append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
            append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
        for func_name, line_dict in func_dict.iteritems():
            print(u'fn=%s' % func_name, file=out)
            for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
                if func_hit_list:
                    # Multiple function objects may "reside" on the same
                    # line of the same file (same global dict).
                    # Sum these up and produce a single cachegrind event.
                    hits = sum(x for x, _ in func_hit_list)
                    ticks = sum(x for _, x in func_hit_list)
                    print(
                        u'%i %i %i %i' % (
                            lineno,
                            hits,
                            ticks,
                            ticks // hits,
                        ),
                        file=out,
                    )
                for line in func_call_list:
                    print(line, file=out)
[ "def", "callgrind", "(", "self", ",", "out", ",", "filename", "=", "None", ",", "commandline", "=", "None", ",", "relative_path", "=", "False", ")", ":", "print", "(", "u'# callgrind format'", ",", "file", "=", "out", ")", "print", "(", "u'version: 1'", ",", "file", "=", "out", ")", "print", "(", "u'creator: pprofile'", ",", "file", "=", "out", ")", "print", "(", "u'event: usphit :microseconds/hit'", ",", "file", "=", "out", ")", "print", "(", "u'events: hits microseconds usphit'", ",", "file", "=", "out", ")", "if", "commandline", "is", "not", "None", ":", "print", "(", "u'cmd:'", ",", "commandline", ",", "file", "=", "out", ")", "file_dict", "=", "self", ".", "_mergeFileTiming", "(", ")", "if", "relative_path", ":", "convertPath", "=", "_relpath", "else", ":", "convertPath", "=", "lambda", "x", ":", "x", "if", "os", ".", "path", ".", "sep", "!=", "\"/\"", ":", "# qCacheGrind (windows build) needs at least one UNIX separator", "# in path to find the file. Adapt here even if this is probably", "# more of a qCacheGrind issue...", "convertPath", "=", "lambda", "x", ",", "cascade", "=", "convertPath", ":", "cascade", "(", "'/'", ".", "join", "(", "x", ".", "split", "(", "os", ".", "path", ".", "sep", ")", ")", ")", "code_to_name_dict", "=", "{", "}", "homonym_counter", "=", "{", "}", "def", "getCodeName", "(", "filename", ",", "code", ")", ":", "# Tracks code objects globally, because callee information needs", "# to be consistent accross files.", "# Inside a file, grants unique names to each code object.", "try", ":", "return", "code_to_name_dict", "[", "code", "]", "except", "KeyError", ":", "name", "=", "code", ".", "co_name", "+", "':%i'", "%", "code", ".", "co_firstlineno", "key", "=", "(", "filename", ",", "name", ")", "homonym_count", "=", "homonym_counter", ".", "get", "(", "key", ",", "0", ")", "if", "homonym_count", ":", "name", "+=", "'_%i'", "%", "homonym_count", "homonym_counter", "[", "key", "]", "=", "homonym_count", "+", "1", "code_to_name_dict", "[", "code", "]", "=", "name", "return", "name", "for", "current_file", "in", "self", ".", "_getFileNameList", "(", "filename", ",", "may_sort", "=", "False", ")", ":", "file_timing", "=", "file_dict", "[", "current_file", "]", "print", "(", "u'fl=%s'", "%", "convertPath", "(", "current_file", ")", ",", "file", "=", "out", ")", "# When a local callable is created an immediately executed, this", "# loop would start a new \"fn=\" section but would not end it before", "# emitting \"cfn=\" lines, making the callee appear as not being", "# called by interrupted \"fn=\" section.", "# So dispatch all functions in a first pass, and build", "# uninterrupted sections in a second pass.", "# Note: cost line is a list just to be mutable. 
A single item is", "# expected.", "func_dict", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "lambda", ":", "(", "[", "]", ",", "[", "]", ")", ")", ")", "for", "lineno", ",", "code", ",", "hits", ",", "duration", "in", "file_timing", ".", "iterHits", "(", ")", ":", "func_dict", "[", "getCodeName", "(", "current_file", ",", "code", ")", "]", "[", "lineno", "]", "[", "0", "]", ".", "append", "(", "(", "hits", ",", "int", "(", "duration", "*", "1000000", ")", ")", ",", ")", "for", "(", "lineno", ",", "caller", ",", "call_hits", ",", "call_duration", ",", "callee_file", ",", "callee", ",", ")", "in", "file_timing", ".", "iterCalls", "(", ")", ":", "call_ticks", "=", "int", "(", "call_duration", "*", "1000000", ")", "func_call_list", "=", "func_dict", "[", "getCodeName", "(", "current_file", ",", "caller", ")", "]", "[", "lineno", "]", "[", "1", "]", "append", "=", "func_call_list", ".", "append", "append", "(", "u'cfl='", "+", "convertPath", "(", "callee_file", ")", ")", "append", "(", "u'cfn='", "+", "getCodeName", "(", "callee_file", ",", "callee", ")", ")", "append", "(", "u'calls=%i %i'", "%", "(", "call_hits", ",", "callee", ".", "co_firstlineno", ")", ")", "append", "(", "u'%i %i %i %i'", "%", "(", "lineno", ",", "call_hits", ",", "call_ticks", ",", "call_ticks", "//", "call_hits", ")", ")", "for", "func_name", ",", "line_dict", "in", "func_dict", ".", "iteritems", "(", ")", ":", "print", "(", "u'fn=%s'", "%", "func_name", ",", "file", "=", "out", ")", "for", "lineno", ",", "(", "func_hit_list", ",", "func_call_list", ")", "in", "sorted", "(", "line_dict", ".", "iteritems", "(", ")", ")", ":", "if", "func_hit_list", ":", "# Multiple function objects may \"reside\" on the same", "# line of the same file (same global dict).", "# Sum these up and produce a single cachegrind event.", "hits", "=", "sum", "(", "x", "for", "x", ",", "_", "in", "func_hit_list", ")", "ticks", "=", "sum", "(", "x", "for", "_", ",", "x", "in", "func_hit_list", ")", "print", "(", "u'%i %i %i %i'", "%", "(", "lineno", ",", "hits", ",", "ticks", ",", "ticks", "//", "hits", ",", ")", ",", "file", "=", "out", ",", ")", "for", "line", "in", "func_call_list", ":", "print", "(", "line", ",", "file", "=", "out", ")" ]
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
  Note: hit count is not inclusive, in that it is not the sum of all hits
  inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
    Destination of callgrind profiling data.
filename (str, collection of str)
    If provided, dump stats for given source file(s) only.
    By default, list for all known files.
commandline (anything with __str__)
    If provided, will be output as the command line used to generate this
    profiling data.
relative_path (bool)
    When True, absolute elements are stripped from path. Useful when
    maintaining several copies of source trees with their own profiling
    result, so kcachegrind does not look in system-wide files which may
    not match with profiled code.
[ "Dump", "statistics", "in", "callgrind", "format", ".", "Contains", ":", "-", "per", "-", "line", "hit", "count", "time", "and", "time", "-", "per", "-", "hit", "-", "call", "associations", "(", "call", "tree", ")", "Note", ":", "hit", "count", "is", "not", "inclusive", "in", "that", "it", "is", "not", "the", "sum", "of", "all", "hits", "inside", "that", "call", ".", "Time", "unit", ":", "microsecond", "(", "1e", "-", "6", "second", ")", ".", "out", "(", "file", "-", "ish", "opened", "for", "writing", ")", "Destination", "of", "callgrind", "profiling", "data", ".", "filename", "(", "str", "collection", "of", "str", ")", "If", "provided", "dump", "stats", "for", "given", "source", "file", "(", "s", ")", "only", ".", "By", "default", "list", "for", "all", "known", "files", ".", "commandline", "(", "anything", "with", "__str__", ")", "If", "provided", "will", "be", "output", "as", "the", "command", "line", "used", "to", "generate", "this", "profiling", "data", ".", "relative_path", "(", "bool", ")", "When", "True", "absolute", "elements", "are", "stripped", "from", "path", ".", "Useful", "when", "maintaining", "several", "copies", "of", "source", "trees", "with", "their", "own", "profiling", "result", "so", "kcachegrind", "does", "not", "look", "in", "system", "-", "wide", "files", "which", "may", "not", "match", "with", "profiled", "code", "." ]
python
train
48.954545
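A usage sketch based on pprofile's documented entry points; the profiled expression and output filename are illustrative:

import pprofile

prof = pprofile.Profile()
with prof():                              # deterministic profiling of the block
    sum(i * i for i in range(10000))
with open('cachegrind.out.demo', 'w') as out:
    prof.callgrind(out)                   # file is then loadable in kcachegrind/qcachegrind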
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L4611-L4626
def _prm_get_longest_stringsize(string_list):
    """ Returns the longest string size for a string entry across data."""
    maxlength = 1

    for stringar in string_list:
        if isinstance(stringar, np.ndarray):
            if stringar.ndim > 0:
                for string in stringar.ravel():
                    maxlength = max(len(string), maxlength)
            else:
                maxlength = max(len(stringar.tolist()), maxlength)
        else:
            maxlength = max(len(stringar), maxlength)

    # Make the string Col longer than needed in order to allow later on slightly larger strings
    return int(maxlength * 1.5)
[ "def", "_prm_get_longest_stringsize", "(", "string_list", ")", ":", "maxlength", "=", "1", "for", "stringar", "in", "string_list", ":", "if", "isinstance", "(", "stringar", ",", "np", ".", "ndarray", ")", ":", "if", "stringar", ".", "ndim", ">", "0", ":", "for", "string", "in", "stringar", ".", "ravel", "(", ")", ":", "maxlength", "=", "max", "(", "len", "(", "string", ")", ",", "maxlength", ")", "else", ":", "maxlength", "=", "max", "(", "len", "(", "stringar", ".", "tolist", "(", ")", ")", ",", "maxlength", ")", "else", ":", "maxlength", "=", "max", "(", "len", "(", "stringar", ")", ",", "maxlength", ")", "# Make the string Col longer than needed in order to allow later on slightly larger strings", "return", "int", "(", "maxlength", "*", "1.5", ")" ]
Returns the longest string size for a string entry across data.
[ "Returns", "the", "longest", "string", "size", "for", "a", "string", "entry", "across", "data", "." ]
python
test
42.375
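Two illustrative calls (assuming numpy is imported as np, as the record implies; input values are made up):

import numpy as np

print(_prm_get_longest_stringsize(['ab', 'abcd']))              # 6, i.e. int(4 * 1.5)
print(_prm_get_longest_stringsize([np.array(['x', 'hello'])]))  # 7, i.e. int(5 * 1.5)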
python-openxml/python-docx
docx/text/parfmt.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/text/parfmt.py#L114-L128
def line_spacing(self):
    """
    |float| or |Length| value specifying the space between baselines in
    successive lines of the paragraph. A value of |None| indicates line
    spacing is inherited from the style hierarchy. A float value, e.g.
    ``2.0`` or ``1.75``, indicates spacing is applied in multiples of
    line heights. A |Length| value such as ``Pt(12)`` indicates spacing
    is a fixed height. The |Pt| value class is a convenient way to apply
    line spacing in units of points. Assigning |None| resets line spacing
    to inherit from the style hierarchy.
    """
    pPr = self._element.pPr
    if pPr is None:
        return None
    return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule)
[ "def", "line_spacing", "(", "self", ")", ":", "pPr", "=", "self", ".", "_element", ".", "pPr", "if", "pPr", "is", "None", ":", "return", "None", "return", "self", ".", "_line_spacing", "(", "pPr", ".", "spacing_line", ",", "pPr", ".", "spacing_lineRule", ")" ]
|float| or |Length| value specifying the space between baselines in successive lines of the paragraph. A value of |None| indicates line spacing is inherited from the style hierarchy. A float value, e.g. ``2.0`` or ``1.75``, indicates spacing is applied in multiples of line heights. A |Length| value such as ``Pt(12)`` indicates spacing is a fixed height. The |Pt| value class is a convenient way to apply line spacing in units of points. Assigning |None| resets line spacing to inherit from the style hierarchy.
[ "|float|", "or", "|Length|", "value", "specifying", "the", "space", "between", "baselines", "in", "successive", "lines", "of", "the", "paragraph", ".", "A", "value", "of", "|None|", "indicates", "line", "spacing", "is", "inherited", "from", "the", "style", "hierarchy", ".", "A", "float", "value", "e", ".", "g", ".", "2", ".", "0", "or", "1", ".", "75", "indicates", "spacing", "is", "applied", "in", "multiples", "of", "line", "heights", ".", "A", "|Length|", "value", "such", "as", "Pt", "(", "12", ")", "indicates", "spacing", "is", "a", "fixed", "height", ".", "The", "|Pt|", "value", "class", "is", "a", "convenient", "way", "to", "apply", "line", "spacing", "in", "units", "of", "points", ".", "Assigning", "|None|", "resets", "line", "spacing", "to", "inherit", "from", "the", "style", "hierarchy", "." ]
python
train
50.933333
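A usage sketch with python-docx's public API; the paragraph text and spacing values are illustrative:

from docx import Document
from docx.shared import Pt

doc = Document()
para = doc.add_paragraph('Some text')
para.paragraph_format.line_spacing = 1.75    # float: multiple of line height
para.paragraph_format.line_spacing = Pt(12)  # Length: fixed height in points
print(para.paragraph_format.line_spacing)    # reads back through the property above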
Clivern/PyLogging
pylogging/pylogging.py
https://github.com/Clivern/PyLogging/blob/46a1442ec63796302ec7fe3d49bd06a0f7a2fe70/pylogging/pylogging.py#L125-L130
def info(self, msg):
    """ Log Info Messages """
    self._execActions('info', msg)
    msg = self._execFilters('info', msg)
    self._processMsg('info', msg)
    self._sendMsg('info', msg)
[ "def", "info", "(", "self", ",", "msg", ")", ":", "self", ".", "_execActions", "(", "'info'", ",", "msg", ")", "msg", "=", "self", ".", "_execFilters", "(", "'info'", ",", "msg", ")", "self", ".", "_processMsg", "(", "'info'", ",", "msg", ")", "self", ".", "_sendMsg", "(", "'info'", ",", "msg", ")" ]
Log Info Messages
[ "Log", "Info", "Messages" ]
python
train
34.333333
tensorflow/tensor2tensor
tensor2tensor/models/image_transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/image_transformer.py#L438-L451
def imagetransformerpp_base_8l_8h_big_cond_dr03_dan():
  """big 1d model for conditional image generation. 2.99 on cifar10."""
  hparams = imagetransformerpp_sep_channels_8l_8h()
  hparams.hidden_size = 512
  hparams.num_heads = 8
  hparams.filter_size = 2048
  hparams.batch_size = 4
  hparams.max_length = 3075
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.layer_preprocess_sequence = "none"
  hparams.layer_postprocess_sequence = "dan"
  hparams.summarize_grads = True
  hparams.learning_rate = 0.01
  return hparams
[ "def", "imagetransformerpp_base_8l_8h_big_cond_dr03_dan", "(", ")", ":", "hparams", "=", "imagetransformerpp_sep_channels_8l_8h", "(", ")", "hparams", ".", "hidden_size", "=", "512", "hparams", ".", "num_heads", "=", "8", "hparams", ".", "filter_size", "=", "2048", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "max_length", "=", "3075", "hparams", ".", "layer_prepostprocess_dropout", "=", "0.3", "hparams", ".", "layer_preprocess_sequence", "=", "\"none\"", "hparams", ".", "layer_postprocess_sequence", "=", "\"dan\"", "hparams", ".", "summarize_grads", "=", "True", "hparams", ".", "learning_rate", "=", "0.01", "return", "hparams" ]
big 1d model for conditional image generation. 2.99 on cifar10.
[ "big", "1d", "model", "for", "conditional", "image", "generation", ".", "2", ".", "99", "on", "cifar10", "." ]
python
train
36.714286
bcbio/bcbio-nextgen
bcbio/cwl/create.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/create.py#L736-L743
def _add_secondary_if_exists(secondary, out, get_retriever):
    """Add secondary files only if present locally or remotely.
    """
    secondary = [_file_local_or_remote(y, get_retriever) for y in secondary]
    secondary = [z for z in secondary if z]
    if secondary:
        out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary]
    return out
[ "def", "_add_secondary_if_exists", "(", "secondary", ",", "out", ",", "get_retriever", ")", ":", "secondary", "=", "[", "_file_local_or_remote", "(", "y", ",", "get_retriever", ")", "for", "y", "in", "secondary", "]", "secondary", "=", "[", "z", "for", "z", "in", "secondary", "if", "z", "]", "if", "secondary", ":", "out", "[", "\"secondaryFiles\"", "]", "=", "[", "{", "\"class\"", ":", "\"File\"", ",", "\"path\"", ":", "f", "}", "for", "f", "in", "secondary", "]", "return", "out" ]
Add secondary files only if present locally or remotely.
[ "Add", "secondary", "files", "only", "if", "present", "locally", "or", "remotely", "." ]
python
train
45.125
twisted/mantissa
xmantissa/website.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/website.py#L194-L223
def _produceIt(self, segments, thunk):
    """
    Underlying implementation of L{PrefixURLMixin.produceResource} and
    L{PrefixURLMixin.sessionlessProduceResource}.

    @param segments: the URL segments to dispatch.

    @param thunk: a 0-argument callable which returns an L{IResource}
        provider, or None.

    @return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
    """
    if not self.prefixURL:
        needle = ()
    else:
        needle = tuple(self.prefixURL.split('/'))
    S = len(needle)
    if segments[:S] == needle:
        if segments == JUST_SLASH:
            # I *HATE* THE WEB
            subsegments = segments
        else:
            subsegments = segments[S:]
        res = thunk()
        # Even though the URL matched up, sometimes we might still
        # decide to not handle this request (eg, some prerequisite
        # for our function is not met by the store). Allow None
        # to be returned by createResource to indicate this case.
        if res is not None:
            return res, subsegments
[ "def", "_produceIt", "(", "self", ",", "segments", ",", "thunk", ")", ":", "if", "not", "self", ".", "prefixURL", ":", "needle", "=", "(", ")", "else", ":", "needle", "=", "tuple", "(", "self", ".", "prefixURL", ".", "split", "(", "'/'", ")", ")", "S", "=", "len", "(", "needle", ")", "if", "segments", "[", ":", "S", "]", "==", "needle", ":", "if", "segments", "==", "JUST_SLASH", ":", "# I *HATE* THE WEB", "subsegments", "=", "segments", "else", ":", "subsegments", "=", "segments", "[", "S", ":", "]", "res", "=", "thunk", "(", ")", "# Even though the URL matched up, sometimes we might still", "# decide to not handle this request (eg, some prerequisite", "# for our function is not met by the store). Allow None", "# to be returned by createResource to indicate this case.", "if", "res", "is", "not", "None", ":", "return", "res", ",", "subsegments" ]
Underlying implementation of L{PrefixURLMixin.produceResource} and
L{PrefixURLMixin.sessionlessProduceResource}.

@param segments: the URL segments to dispatch.

@param thunk: a 0-argument callable which returns an L{IResource}
    provider, or None.

@return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
[ "Underlying", "implmeentation", "of", "L", "{", "PrefixURLMixin", ".", "produceResource", "}", "and", "L", "{", "PrefixURLMixin", ".", "sessionlessProduceResource", "}", "." ]
python
train
37.733333
WhyNotHugo/django-afip
django_afip/models.py
https://github.com/WhyNotHugo/django-afip/blob/5fb73213f1fe86ca52b501ffd0737911ef26ddb3/django_afip/models.py#L343-L359
def generate_csr(self, basename='djangoafip'):
    """
    Creates a CSR for this TaxPayer's key

    Creates a file-like object that contains the CSR which can be used to
    request a new certificate from AFIP.
    """
    csr = BytesIO()
    crypto.create_csr(
        self.key.file,
        self.name,
        '{}{}'.format(basename, int(datetime.now().timestamp())),
        'CUIT {}'.format(self.cuit),
        csr,
    )
    csr.seek(0)
    return csr
[ "def", "generate_csr", "(", "self", ",", "basename", "=", "'djangoafip'", ")", ":", "csr", "=", "BytesIO", "(", ")", "crypto", ".", "create_csr", "(", "self", ".", "key", ".", "file", ",", "self", ".", "name", ",", "'{}{}'", ".", "format", "(", "basename", ",", "int", "(", "datetime", ".", "now", "(", ")", ".", "timestamp", "(", ")", ")", ")", ",", "'CUIT {}'", ".", "format", "(", "self", ".", "cuit", ")", ",", "csr", ",", ")", "csr", ".", "seek", "(", "0", ")", "return", "csr" ]
Creates a CSR for this TaxPayer's key

Creates a file-like object that contains the CSR which can be used to
request a new certificate from AFIP.
[ "Creates", "a", "CSR", "for", "this", "TaxPayer", "s", "key" ]
python
train
29.529412
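A hypothetical call site, assuming `taxpayer` is a django-afip TaxPayer instance with a key already attached (the output filename is illustrative):

csr = taxpayer.generate_csr()
with open('taxpayer.csr', 'wb') as f:
    f.write(csr.read())   # this CSR can then be submitted to AFIP for a certificate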
benjamin-hodgson/asynqp
src/asynqp/channel.py
https://github.com/benjamin-hodgson/asynqp/blob/ea8630d1803d10d4fd64b1a0e50f3097710b34d1/src/asynqp/channel.py#L126-L153
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
    """
    Specify quality of service by requesting that messages be pre-fetched
    from the server. Pre-fetching means that the server will deliver messages
    to the client while the client is still processing unacknowledged messages.

    This method is a :ref:`coroutine <coroutine>`.

    :param int prefetch_size: Specifies a prefetch window in bytes.
        Messages smaller than this will be sent from the server in advance.
        This value may be set to 0, which means "no specific limit".
    :param int prefetch_count: Specifies a prefetch window in terms of whole messages.
    :param bool apply_globally: If true, apply these QoS settings on a global level.
        The meaning of this is implementation-dependent. From the
        `RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:

            RabbitMQ has reinterpreted this field. The original specification said:
            "By default the QoS settings apply to the current channel only. If this
            field is set, they are applied to the entire connection." Instead,
            RabbitMQ takes global=false to mean that the QoS settings should apply
            per-consumer (for new consumers on the channel; existing ones being
            unaffected) and global=true to mean that the QoS settings should apply
            per-channel.
    """
    self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
    yield from self.synchroniser.wait(spec.BasicQosOK)
    self.reader.ready()
[ "def", "set_qos", "(", "self", ",", "prefetch_size", "=", "0", ",", "prefetch_count", "=", "0", ",", "apply_globally", "=", "False", ")", ":", "self", ".", "sender", ".", "send_BasicQos", "(", "prefetch_size", ",", "prefetch_count", ",", "apply_globally", ")", "yield", "from", "self", ".", "synchroniser", ".", "wait", "(", "spec", ".", "BasicQosOK", ")", "self", ".", "reader", ".", "ready", "(", ")" ]
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.

This method is a :ref:`coroutine <coroutine>`.

:param int prefetch_size: Specifies a prefetch window in bytes.
    Messages smaller than this will be sent from the server in advance.
    This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
    The meaning of this is implementation-dependent. From the
    `RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:

        RabbitMQ has reinterpreted this field. The original specification said:
        "By default the QoS settings apply to the current channel only. If this
        field is set, they are applied to the entire connection." Instead,
        RabbitMQ takes global=false to mean that the QoS settings should apply
        per-consumer (for new consumers on the channel; existing ones being
        unaffected) and global=true to mean that the QoS settings should apply
        per-channel.
[ "Specify", "quality", "of", "service", "by", "requesting", "that", "messages", "be", "pre", "-", "fetched", "from", "the", "server", ".", "Pre", "-", "fetching", "means", "that", "the", "server", "will", "deliver", "messages", "to", "the", "client", "while", "the", "client", "is", "still", "processing", "unacknowledged", "messages", "." ]
python
train
59.214286
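A minimal sketch of calling this coroutine through asynqp's connect/open_channel flow (broker address and prefetch value are illustrative):

import asyncio
import asynqp

@asyncio.coroutine
def setup():
    connection = yield from asynqp.connect('localhost', 5672)
    channel = yield from connection.open_channel()
    # ask the broker to pre-fetch at most 10 unacknowledged messages
    yield from channel.set_qos(prefetch_count=10)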
wonambi-python/wonambi
wonambi/attr/chan.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/attr/chan.py#L427-L450
def find_channel_groups(chan):
    """Channels are often organized in groups (different grids / strips or
    channels in different brain locations), so we use a simple heuristic to
    get these channel groups.

    Parameters
    ----------
    chan : instance of Channels
        channels to group

    Returns
    -------
    groups : dict
        channel groups: key is the common string, and the item is a list of
        labels
    """
    labels = chan.return_label()
    group_names = {match('([A-Za-z ]+)\d+', label).group(1) for label in labels}

    groups = {}
    for group_name in group_names:
        groups[group_name] = [label for label in labels if label.startswith(group_name)]

    return groups
[ "def", "find_channel_groups", "(", "chan", ")", ":", "labels", "=", "chan", ".", "return_label", "(", ")", "group_names", "=", "{", "match", "(", "'([A-Za-z ]+)\\d+'", ",", "label", ")", ".", "group", "(", "1", ")", "for", "label", "in", "labels", "}", "groups", "=", "{", "}", "for", "group_name", "in", "group_names", ":", "groups", "[", "group_name", "]", "=", "[", "label", "for", "label", "in", "labels", "if", "label", ".", "startswith", "(", "group_name", ")", "]", "return", "groups" ]
Channels are often organized in groups (different grids / strips or
channels in different brain locations), so we use a simple heuristic to
get these channel groups.

Parameters
----------
chan : instance of Channels
    channels to group

Returns
-------
groups : dict
    channel groups: key is the common string, and the item is a list of
    labels
[ "Channels", "are", "often", "organized", "in", "groups", "(", "different", "grids", "/", "strips", "or", "channels", "in", "different", "brain", "locations", ")", "so", "we", "use", "a", "simple", "heuristic", "to", "get", "these", "channel", "groups", "." ]
python
train
28.875
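A self-contained sketch of the heuristic with a minimal stand-in for wonambi's Channels class; the labels are made up, and `match` is assumed to come from the `re` module, as the code implies:

from re import match

class FakeChan:
    # minimal stand-in exposing only the method the heuristic needs
    def return_label(self):
        return ['GRID1', 'GRID2', 'STRIP1', 'STRIP2']

print(find_channel_groups(FakeChan()))
# {'GRID': ['GRID1', 'GRID2'], 'STRIP': ['STRIP1', 'STRIP2']}  (key order may vary)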
Duke-GCB/DukeDSClient
ddsc/core/localstore.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/localstore.py#L77-L87
def _update_remote_children(remote_parent, children):
    """
    Update remote_ids based on parent matching up the names of children.
    :param remote_parent: RemoteProject/RemoteFolder who has children
    :param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
    """
    name_to_child = _name_to_child_map(children)
    for remote_child in remote_parent.children:
        local_child = name_to_child.get(remote_child.name)
        if local_child:
            local_child.update_remote_ids(remote_child)
[ "def", "_update_remote_children", "(", "remote_parent", ",", "children", ")", ":", "name_to_child", "=", "_name_to_child_map", "(", "children", ")", "for", "remote_child", "in", "remote_parent", ".", "children", ":", "local_child", "=", "name_to_child", ".", "get", "(", "remote_child", ".", "name", ")", "if", "local_child", ":", "local_child", ".", "update_remote_ids", "(", "remote_child", ")" ]
Update remote_ids based on parent matching up the names of children.
:param remote_parent: RemoteProject/RemoteFolder who has children
:param children: [LocalFolder,LocalFile] children to set remote_ids based on remote children
[ "Update", "remote_ids", "based", "on", "on", "parent", "matching", "up", "the", "names", "of", "children", ".", ":", "param", "remote_parent", ":", "RemoteProject", "/", "RemoteFolder", "who", "has", "children", ":", "param", "children", ":", "[", "LocalFolder", "LocalFile", "]", "children", "to", "set", "remote_ids", "based", "on", "remote", "children" ]
python
train
48.909091
pazz/alot
alot/account.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/account.py#L82-L93
def from_string(cls, address, case_sensitive=False):
    """Alternate constructor for building from a string.

    :param str address: An email address in <user>@<domain> form
    :param bool case_sensitive: passed directly to the constructor argument
        of the same name.
    :returns: An account from the given arguments
    :rtype: :class:`Account`
    """
    assert isinstance(address, str), 'address must be str'
    username, domainname = address.split('@')
    return cls(username, domainname, case_sensitive=case_sensitive)
[ "def", "from_string", "(", "cls", ",", "address", ",", "case_sensitive", "=", "False", ")", ":", "assert", "isinstance", "(", "address", ",", "str", ")", ",", "'address must be str'", "username", ",", "domainname", "=", "address", ".", "split", "(", "'@'", ")", "return", "cls", "(", "username", ",", "domainname", ",", "case_sensitive", "=", "case_sensitive", ")" ]
Alternate constructor for building from a string.

:param str address: An email address in <user>@<domain> form
:param bool case_sensitive: passed directly to the constructor argument
    of the same name.
:returns: An account from the given arguments
:rtype: :class:`Account`
[ "Alternate", "constructor", "for", "building", "from", "a", "string", "." ]
python
train
47.166667
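A minimal sketch of the constructor in use; the address is illustrative, and in the source this is a classmethod on alot's Account:

acct = Account.from_string('alice@example.com')
# per the record, equivalent to Account('alice', 'example.com', case_sensitive=False)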
galactics/beyond
beyond/frames/iau2010.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/frames/iau2010.py#L135-L185
def _xysxy2(date):
    """Here we deviate from what has been done everywhere else. Instead of
    taking the formulas available in the Vallado, we take those described
    in the files tab5.2{a,b,d}.txt. The result should be equivalent, but
    they are the last iteration of the IAU2000A as of June 2016

    Args:
        date (Date)
    Return:
        3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
    """
    planets = _planets(date)
    x_tab, y_tab, s_tab = _tab('X'), _tab('Y'), _tab('s')

    ttt = date.change_scale('TT').julian_century

    # Units: micro-arcsecond
    X = -16616.99 + 2004191742.88 * ttt - 427219.05 * ttt ** 2 - 198620.54 * ttt ** 3\
        - 46.05 * ttt ** 4 + 5.98 * ttt ** 5
    Y = -6950.78 - 25381.99 * ttt - 22407250.99 * ttt ** 2 + 1842.28 * ttt ** 3\
        + 1113.06 * ttt ** 4 + 0.99 * ttt ** 5

    s_xy2 = 94.0 + 3808.65 * ttt - 122.68 * ttt ** 2 - 72574.11 * ttt ** 3\
        + 27.98 * ttt ** 4 + 15.62 * ttt ** 5

    for j in range(5):
        _x, _y, _s = 0, 0, 0
        for i in range(len(x_tab[j])):
            Axs, Axc, *p_coefs = x_tab[j][i]
            ax_p = np.dot(p_coefs, planets)
            _x += Axs * np.sin(ax_p) + Axc * np.cos(ax_p)
        for i in range(len(y_tab[j])):
            Ays, Ayc, *p_coefs = y_tab[j][i]
            ay_p = np.dot(p_coefs, planets)
            _y += Ays * np.sin(ay_p) + Ayc * np.cos(ay_p)
        for i in range(len(s_tab[j])):
            Ass, Asc, *p_coefs = s_tab[j][i]
            as_p = np.dot(p_coefs, planets)
            _s += Ass * np.sin(as_p) + Asc * np.cos(as_p)

        X += _x * ttt ** j
        Y += _y * ttt ** j
        s_xy2 += _s * ttt ** j

    # Conversion to arcsecond
    return X * 1e-6, Y * 1e-6, s_xy2 * 1e-6
[ "def", "_xysxy2", "(", "date", ")", ":", "planets", "=", "_planets", "(", "date", ")", "x_tab", ",", "y_tab", ",", "s_tab", "=", "_tab", "(", "'X'", ")", ",", "_tab", "(", "'Y'", ")", ",", "_tab", "(", "'s'", ")", "ttt", "=", "date", ".", "change_scale", "(", "'TT'", ")", ".", "julian_century", "# Units: micro-arcsecond", "X", "=", "-", "16616.99", "+", "2004191742.88", "*", "ttt", "-", "427219.05", "*", "ttt", "**", "2", "-", "198620.54", "*", "ttt", "**", "3", "-", "46.05", "*", "ttt", "**", "4", "+", "5.98", "*", "ttt", "**", "5", "Y", "=", "-", "6950.78", "-", "25381.99", "*", "ttt", "-", "22407250.99", "*", "ttt", "**", "2", "+", "1842.28", "*", "ttt", "**", "3", "+", "1113.06", "*", "ttt", "**", "4", "+", "0.99", "*", "ttt", "**", "5", "s_xy2", "=", "94.0", "+", "3808.65", "*", "ttt", "-", "122.68", "*", "ttt", "**", "2", "-", "72574.11", "*", "ttt", "**", "3", "+", "27.98", "*", "ttt", "**", "4", "+", "15.62", "*", "ttt", "**", "5", "for", "j", "in", "range", "(", "5", ")", ":", "_x", ",", "_y", ",", "_s", "=", "0", ",", "0", ",", "0", "for", "i", "in", "range", "(", "len", "(", "x_tab", "[", "j", "]", ")", ")", ":", "Axs", ",", "Axc", ",", "", "*", "p_coefs", "=", "x_tab", "[", "j", "]", "[", "i", "]", "ax_p", "=", "np", ".", "dot", "(", "p_coefs", ",", "planets", ")", "_x", "+=", "Axs", "*", "np", ".", "sin", "(", "ax_p", ")", "+", "Axc", "*", "np", ".", "cos", "(", "ax_p", ")", "for", "i", "in", "range", "(", "len", "(", "y_tab", "[", "j", "]", ")", ")", ":", "Ays", ",", "Ayc", ",", "", "*", "p_coefs", "=", "y_tab", "[", "j", "]", "[", "i", "]", "ay_p", "=", "np", ".", "dot", "(", "p_coefs", ",", "planets", ")", "_y", "+=", "Ays", "*", "np", ".", "sin", "(", "ay_p", ")", "+", "Ayc", "*", "np", ".", "cos", "(", "ay_p", ")", "for", "i", "in", "range", "(", "len", "(", "s_tab", "[", "j", "]", ")", ")", ":", "Ass", ",", "Asc", ",", "", "*", "p_coefs", "=", "s_tab", "[", "j", "]", "[", "i", "]", "as_p", "=", "np", ".", "dot", "(", "p_coefs", ",", "planets", ")", "_s", "+=", "Ass", "*", "np", ".", "sin", "(", "as_p", ")", "+", "Asc", "*", "np", ".", "cos", "(", "as_p", ")", "X", "+=", "_x", "*", "ttt", "**", "j", "Y", "+=", "_y", "*", "ttt", "**", "j", "s_xy2", "+=", "_s", "*", "ttt", "**", "j", "# Conversion to arcsecond", "return", "X", "*", "1e-6", ",", "Y", "*", "1e-6", ",", "s_xy2", "*", "1e-6" ]
Here we deviate from what has been done everywhere else. Instead of
taking the formulas available in the Vallado, we take those described
in the files tab5.2{a,b,d}.txt. The result should be equivalent, but
they are the last iteration of the IAU2000A as of June 2016

Args:
    date (Date)
Return:
    3-tuple of float: Values of X, Y, s + XY/2 in arcsecond
[ "Here", "we", "deviate", "from", "what", "has", "been", "done", "everywhere", "else", ".", "Instead", "of", "taking", "the", "formulas", "available", "in", "the", "Vallado", "we", "take", "those", "described", "in", "the", "files", "tab5", ".", "2", "{", "a", "b", "d", "}", ".", "txt", "." ]
python
train
33.196078
keon/algorithms
algorithms/dfs/all_factors.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/dfs/all_factors.py#L87-L111
def get_factors_iterative2(n):
    """[summary]
    analog as above

    Arguments:
        n {[int]} -- [description]

    Returns:
        [list of lists] -- [all factors of n]
    """
    ans, stack, x = [], [], 2
    while True:
        if x > n // x:
            if not stack:
                return ans
            ans.append(stack + [n])
            x = stack.pop()
            n *= x
            x += 1
        elif n % x == 0:
            stack.append(x)
            n //= x
        else:
            x += 1
[ "def", "get_factors_iterative2", "(", "n", ")", ":", "ans", ",", "stack", ",", "x", "=", "[", "]", ",", "[", "]", ",", "2", "while", "True", ":", "if", "x", ">", "n", "//", "x", ":", "if", "not", "stack", ":", "return", "ans", "ans", ".", "append", "(", "stack", "+", "[", "n", "]", ")", "x", "=", "stack", ".", "pop", "(", ")", "n", "*=", "x", "x", "+=", "1", "elif", "n", "%", "x", "==", "0", ":", "stack", ".", "append", "(", "x", ")", "n", "//=", "x", "else", ":", "x", "+=", "1" ]
[summary]
analog as above

Arguments:
    n {[int]} -- [description]

Returns:
    [list of lists] -- [all factors of n]
[ "[", "summary", "]", "analog", "as", "above" ]
python
train
19.88
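Tracing the stack-based iteration above on a small input shows the multiplicative partitions it produces; the input value is illustrative:

print(get_factors_iterative2(12))
# [[2, 2, 3], [2, 6], [3, 4]]
# Each inner list multiplies back to 12; the order reflects how the
# explicit stack backtracks through divisors starting from 2.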
gem/oq-engine
openquake/hmtk/seismicity/smoothing/utils.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/smoothing/utils.py#L75-L93
def incremental_a_value(bval, min_mag, mag_inc):
    '''
    Incremental a-value from cumulative - using the version of the
    Hermann (1979) formula described in Wesson et al. (2003)

    :param float bval:
        Gutenberg & Richter (1944) b-value
    :param np.ndarray min_mag:
        Minimum magnitude of completeness table
    :param float mag_inc:
        Magnitude increment of the completeness table
    '''
    a_cum = 10. ** (bval * min_mag)
    a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) -
                             (10. ** (-bval * mag_inc)))
    return a_inc
[ "def", "incremental_a_value", "(", "bval", ",", "min_mag", ",", "mag_inc", ")", ":", "a_cum", "=", "10.", "**", "(", "bval", "*", "min_mag", ")", "a_inc", "=", "a_cum", "+", "np", ".", "log10", "(", "(", "10.", "**", "(", "bval", "*", "mag_inc", ")", ")", "-", "(", "10.", "**", "(", "-", "bval", "*", "mag_inc", ")", ")", ")", "return", "a_inc" ]
Incremental a-value from cumulative - using the version of the
Hermann (1979) formula described in Wesson et al. (2003)

:param float bval:
    Gutenberg & Richter (1944) b-value
:param np.ndarray min_mag:
    Minimum magnitude of completeness table
:param float mag_inc:
    Magnitude increment of the completeness table
[ "Incremental", "a", "-", "value", "from", "cumulative", "-", "using", "the", "version", "of", "the", "Hermann", "(", "1979", ")", "formula", "described", "in", "Wesson", "et", "al", ".", "(", "2003", ")" ]
python
train
30
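A worked numeric check of the expression in the code; the b-value, magnitude, and increment are illustrative inputs:

import numpy as np

# With b = 1.0, Mmin = 4.0, dM = 0.1:
#   a_cum = 10 ** (1.0 * 4.0) = 10000.0
#   a_inc = 10000.0 + log10(10**0.1 - 10**-0.1) ~= 10000.0 - 0.333 ~= 9999.667
print(incremental_a_value(1.0, np.array(4.0), 0.1))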
StanfordVL/robosuite
robosuite/wrappers/ik_wrapper.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/wrappers/ik_wrapper.py#L116-L126
def _make_input(self, action, old_quat):
    """
    Helper function that returns a dictionary with keys dpos, rotation from a raw input
    array. The first three elements are taken to be displacement in position, and a
    quaternion indicating the change in rotation with respect to @old_quat.
    """
    return {
        "dpos": action[:3],
        # IK controller takes an absolute orientation in robot base frame
        "rotation": T.quat2mat(T.quat_multiply(old_quat, action[3:7])),
    }
[ "def", "_make_input", "(", "self", ",", "action", ",", "old_quat", ")", ":", "return", "{", "\"dpos\"", ":", "action", "[", ":", "3", "]", ",", "# IK controller takes an absolute orientation in robot base frame", "\"rotation\"", ":", "T", ".", "quat2mat", "(", "T", ".", "quat_multiply", "(", "old_quat", ",", "action", "[", "3", ":", "7", "]", ")", ")", ",", "}" ]
Helper function that returns a dictionary with keys dpos, rotation from a raw input array. The first three elements are taken to be displacement in position, and a quaternion indicating the change in rotation with respect to @old_quat.
[ "Helper", "function", "that", "returns", "a", "dictionary", "with", "keys", "dpos", "rotation", "from", "a", "raw", "input", "array", ".", "The", "first", "three", "elements", "are", "taken", "to", "be", "displacement", "in", "position", "and", "a", "quaternion", "indicating", "the", "change", "in", "rotation", "with", "respect", "to" ]
python
train
47.909091
RockFeng0/rtsf-web
webuidriver/actions.py
https://github.com/RockFeng0/rtsf-web/blob/ceabcf62ddf1c969a97b5c7a4a4c547198b6ea71/webuidriver/actions.py#L221-L232
def _elements(cls):
    ''' find the elements with controls '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." % cls.__control["by"])

    driver = Web.driver
    try:
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(
            lambda driver: getattr(driver, "find_elements")(cls.__control["by"], cls.__control["value"])
        )
    except:
        raise Exception("Timeout at %d seconds.Element(%s) not found." % (cls.__control["timeout"], cls.__control["by"]))

    return elements
[ "def", "_elements", "(", "cls", ")", ":", "if", "not", "cls", ".", "__is_selector", "(", ")", ":", "raise", "Exception", "(", "\"Invalid selector[%s].\"", "%", "cls", ".", "__control", "[", "\"by\"", "]", ")", "driver", "=", "Web", ".", "driver", "try", ":", "elements", "=", "WebDriverWait", "(", "driver", ",", "cls", ".", "__control", "[", "\"timeout\"", "]", ")", ".", "until", "(", "lambda", "driver", ":", "getattr", "(", "driver", ",", "\"find_elements\"", ")", "(", "cls", ".", "__control", "[", "\"by\"", "]", ",", "cls", ".", "__control", "[", "\"value\"", "]", ")", ")", "except", ":", "raise", "Exception", "(", "\"Timeout at %d seconds.Element(%s) not found.\"", "%", "(", "cls", ".", "__control", "[", "\"timeout\"", "]", ",", "cls", ".", "__control", "[", "\"by\"", "]", ")", ")", "return", "elements" ]
find the elements with controls
[ "find", "the", "elements", "with", "controls" ]
python
train
50
binux/pyspider
pyspider/libs/utils.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/libs/utils.py#L38-L51
def hide_me(tb, g=globals()):
    """Hide stack traceback of given stack"""
    base_tb = tb
    try:
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = base_tb
    if not tb:
        tb = base_tb
    return tb
[ "def", "hide_me", "(", "tb", ",", "g", "=", "globals", "(", ")", ")", ":", "base_tb", "=", "tb", "try", ":", "while", "tb", "and", "tb", ".", "tb_frame", ".", "f_globals", "is", "not", "g", ":", "tb", "=", "tb", ".", "tb_next", "while", "tb", "and", "tb", ".", "tb_frame", ".", "f_globals", "is", "g", ":", "tb", "=", "tb", ".", "tb_next", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "tb", "=", "base_tb", "if", "not", "tb", ":", "tb", "=", "base_tb", "return", "tb" ]
Hide stack traceback of given stack
[ "Hide", "stack", "traceback", "of", "given", "stack" ]
python
train
26.642857
matiasb/python-unrar
unrar/rarfile.py
https://github.com/matiasb/python-unrar/blob/b1ac46cbcf42f3d3c5c69ab971fe97369a4da617/unrar/rarfile.py#L247-L252
def namelist(self):
    """Return a list of file names in the archive."""
    names = []
    for member in self.filelist:
        names.append(member.filename)
    return names
[ "def", "namelist", "(", "self", ")", ":", "names", "=", "[", "]", "for", "member", "in", "self", ".", "filelist", ":", "names", ".", "append", "(", "member", ".", "filename", ")", "return", "names" ]
Return a list of file names in the archive.
[ "Return", "a", "list", "of", "file", "names", "in", "the", "archive", "." ]
python
valid
31.833333
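A sketch of the zipfile-style call site this record mirrors; the archive name and listed paths are illustrative:

from unrar import rarfile

rar = rarfile.RarFile('example.rar')
print(rar.namelist())   # e.g. ['docs/readme.txt', 'data.bin']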
suds-community/suds
suds/sax/element.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/sax/element.py#L299-L310
def namespace(self):
    """
    Get the element's namespace.
    @return: The element's namespace by resolving the prefix, the
        explicit namespace or the inherited namespace.
    @rtype: (I{prefix}, I{name})
    """
    if self.prefix is None:
        return self.defaultNamespace()
    return self.resolvePrefix(self.prefix)
[ "def", "namespace", "(", "self", ")", ":", "if", "self", ".", "prefix", "is", "None", ":", "return", "self", ".", "defaultNamespace", "(", ")", "return", "self", ".", "resolvePrefix", "(", "self", ".", "prefix", ")" ]
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
    namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
[ "Get", "the", "element", "s", "namespace", "." ]
python
train
30
vertexproject/synapse
synapse/lib/migrate.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/migrate.py#L53-L75
async def delayNdefProps(self):
    '''
    Hold this during a series of renames to delay ndef
    secondary property processing until the end....
    '''
    async with self.getTempSlab() as slab:

        seqn = s_slabseqn.SlabSeqn(slab, 'ndef')

        self.ndefdelay = seqn

        yield

        self.ndefdelay = None

        logger.info(f'Processing {seqn.index()} delayed values.')

        # process them all now...
        for i, (oldv, newv) in seqn.iter(0):
            await self.editNdefProps(oldv, newv)

            if i and i % _progress == 0:
                logger.info(f'Processed {i} delayed values.')
[ "async", "def", "delayNdefProps", "(", "self", ")", ":", "async", "with", "self", ".", "getTempSlab", "(", ")", "as", "slab", ":", "seqn", "=", "s_slabseqn", ".", "SlabSeqn", "(", "slab", ",", "'ndef'", ")", "self", ".", "ndefdelay", "=", "seqn", "yield", "self", ".", "ndefdelay", "=", "None", "logger", ".", "info", "(", "f'Processing {seqn.index()} delayed values.'", ")", "# process them all now...", "for", "i", ",", "(", "oldv", ",", "newv", ")", "in", "seqn", ".", "iter", "(", "0", ")", ":", "await", "self", ".", "editNdefProps", "(", "oldv", ",", "newv", ")", "if", "i", "and", "i", "%", "_progress", "==", "0", ":", "logger", ".", "info", "(", "f'Processed {i} delayed values.'", ")" ]
Hold this during a series of renames to delay ndef secondary property processing until the end....
[ "Hold", "this", "during", "a", "series", "of", "renames", "to", "delay", "ndef", "secondary", "property", "processing", "until", "the", "end", "...." ]
python
train
28.782609
zxylvlp/PingPHP
pingphp/grammar.py
https://github.com/zxylvlp/PingPHP/blob/2e9a5f1ef4b5b13310e3f8ff350fa91032357bc5/pingphp/grammar.py#L1225-L1229
def p_FuncDef(p):
    '''
    FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
    '''
    p[0] = FuncDef(p[2], p[3], p[5], p[8], p[9], p[10])
[ "def", "p_FuncDef", "(", "p", ")", ":", "p", "[", "0", "]", "=", "FuncDef", "(", "p", "[", "2", "]", ",", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "p", "[", "8", "]", ",", "p", "[", "9", "]", ",", "p", "[", "10", "]", ")" ]
FuncDef : DEF RefModifier INDENTIFIER LPARENT ParamList RPARENT COLON ReturnTypeModifier Terminator Block
[ "FuncDef", ":", "DEF", "RefModifier", "INDENTIFIER", "LPARENT", "ParamList", "RPARENT", "COLON", "ReturnTypeModifier", "Terminator", "Block" ]
python
train
39
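For context, this follows the standard PLY convention: the docstring of a p_* function is the grammar production, and p[0] receives the value built from the matched symbols (p[1], p[2], ... in production order). A generic rule of the same shape, not taken from PingPHP:

def p_expression_plus(p):
    '''expression : expression PLUS term'''
    # p[1] and p[3] are the values of the two operands; p[2] is the PLUS token
    p[0] = p[1] + p[3]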
ThreatConnect-Inc/tcex
tcex/tcex_cache.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_cache.py#L62-L92
def get(self, rid, data_callback=None, raise_on_error=True):
    """Get cached data from the data store.

    Args:
        rid (str): The record identifier.
        data_callback (callable): A method that will return the data.
        raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.

    Returns:
        object : Python request response.
    """
    cached_data = None
    ds_data = self.ds.get(rid, raise_on_error=False)

    if ds_data is not None:
        expired = True
        if ds_data.get('found') is True:
            if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
                cached_data = ds_data.get('_source', {}).get('cache-data')
                expired = False
                self.tcex.log.debug('Using cached data for ({}).'.format(rid))
            else:
                self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))

        if expired or ds_data.get('found') is False:
            # when cache is expired or does not exist use callback to get data if possible
            if callable(data_callback):
                cached_data = data_callback(rid)
                self.tcex.log.debug('Using callback data for ({}).'.format(rid))
                if cached_data:
                    self.update(rid, cached_data, raise_on_error)  # update the cache data

    return cached_data
[ "def", "get", "(", "self", ",", "rid", ",", "data_callback", "=", "None", ",", "raise_on_error", "=", "True", ")", ":", "cached_data", "=", "None", "ds_data", "=", "self", ".", "ds", ".", "get", "(", "rid", ",", "raise_on_error", "=", "False", ")", "if", "ds_data", "is", "not", "None", ":", "expired", "=", "True", "if", "ds_data", ".", "get", "(", "'found'", ")", "is", "True", ":", "if", "self", ".", "ttl", "<", "int", "(", "ds_data", ".", "get", "(", "'_source'", ",", "{", "}", ")", ".", "get", "(", "'cache-date'", ",", "0", ")", ")", ":", "cached_data", "=", "ds_data", ".", "get", "(", "'_source'", ",", "{", "}", ")", ".", "get", "(", "'cache-data'", ")", "expired", "=", "False", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'Using cached data for ({}).'", ".", "format", "(", "rid", ")", ")", "else", ":", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'Cached data is expired for ({}).'", ".", "format", "(", "rid", ")", ")", "if", "expired", "or", "ds_data", ".", "get", "(", "'found'", ")", "is", "False", ":", "# when cache is expired or does not exist use callback to get data if possible", "if", "callable", "(", "data_callback", ")", ":", "cached_data", "=", "data_callback", "(", "rid", ")", "self", ".", "tcex", ".", "log", ".", "debug", "(", "'Using callback data for ({}).'", ".", "format", "(", "rid", ")", ")", "if", "cached_data", ":", "self", ".", "update", "(", "rid", ",", "cached_data", ",", "raise_on_error", ")", "# update the cache data", "return", "cached_data" ]
Get cached data from the data store.

Args:
    rid (str): The record identifier.
    data_callback (callable): A method that will return the data.
    raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.

Returns:
    object : Python request response.
[ "Get", "cached", "data", "from", "the", "data", "store", "." ]
python
train
47
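A minimal usage sketch for this cache method; `cache` stands for an already-constructed tcex Cache instance, and `fetch_indicator` and the record id are hypothetical, not part of tcex:

def fetch_indicator(rid):
    # hypothetical expensive lookup, run only on a cache miss or expiry
    return {'id': rid, 'value': 'example'}

data = cache.get('indicator-123', data_callback=fetch_indicator)
# fresh cached data is returned directly; otherwise the callback result
# is stored via update() and then returned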
saltstack/salt
salt/utils/aws.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/aws.py#L543-L579
def get_region_from_metadata(): ''' Try to get region from instance identity document and cache it .. versionadded:: 2015.5.6 ''' global __Location__ if __Location__ == 'do-not-get-from-metadata': log.debug('Previously failed to get AWS region from metadata. Not trying again.') return None # Cached region if __Location__ != '': return __Location__ try: # Connections to instance meta-data must fail fast and never be proxied result = requests.get( "http://169.254.169.254/latest/dynamic/instance-identity/document", proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT, ) except requests.exceptions.RequestException: log.warning('Failed to get AWS region from instance metadata.', exc_info=True) # Do not try again __Location__ = 'do-not-get-from-metadata' return None try: region = result.json()['region'] __Location__ = region return __Location__ except (ValueError, KeyError): log.warning('Failed to decode JSON from instance metadata.') return None return None
[ "def", "get_region_from_metadata", "(", ")", ":", "global", "__Location__", "if", "__Location__", "==", "'do-not-get-from-metadata'", ":", "log", ".", "debug", "(", "'Previously failed to get AWS region from metadata. Not trying again.'", ")", "return", "None", "# Cached region", "if", "__Location__", "!=", "''", ":", "return", "__Location__", "try", ":", "# Connections to instance meta-data must fail fast and never be proxied", "result", "=", "requests", ".", "get", "(", "\"http://169.254.169.254/latest/dynamic/instance-identity/document\"", ",", "proxies", "=", "{", "'http'", ":", "''", "}", ",", "timeout", "=", "AWS_METADATA_TIMEOUT", ",", ")", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "log", ".", "warning", "(", "'Failed to get AWS region from instance metadata.'", ",", "exc_info", "=", "True", ")", "# Do not try again", "__Location__", "=", "'do-not-get-from-metadata'", "return", "None", "try", ":", "region", "=", "result", ".", "json", "(", ")", "[", "'region'", "]", "__Location__", "=", "region", "return", "__Location__", "except", "(", "ValueError", ",", "KeyError", ")", ":", "log", ".", "warning", "(", "'Failed to decode JSON from instance metadata.'", ")", "return", "None", "return", "None" ]
Try to get region from instance identity document and cache it .. versionadded:: 2015.5.6
[ "Try", "to", "get", "region", "from", "instance", "identity", "document", "and", "cache", "it" ]
python
train
30.540541
saltstack/salt
salt/modules/inspectlib/kiwiproc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/kiwiproc.py#L247-L257
def _create_doc(self): ''' Create document. :return: ''' root = etree.Element('image') root.set('schemaversion', '6.3') root.set('name', self.name) return root
[ "def", "_create_doc", "(", "self", ")", ":", "root", "=", "etree", ".", "Element", "(", "'image'", ")", "root", ".", "set", "(", "'schemaversion'", ",", "'6.3'", ")", "root", ".", "set", "(", "'name'", ",", "self", ".", "name", ")", "return", "root" ]
Create document. :return:
[ "Create", "document", "." ]
python
train
19.545455
mkouhei/bootstrap-py
bootstrap_py/classifiers.py
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L43-L46
def licenses(self): """OSI Approved license.""" return {self._acronym_lic(l): l for l in self.resp_text.split('\n') if l.startswith(self.prefix_lic)}
[ "def", "licenses", "(", "self", ")", ":", "return", "{", "self", ".", "_acronym_lic", "(", "l", ")", ":", "l", "for", "l", "in", "self", ".", "resp_text", ".", "split", "(", "'\\n'", ")", "if", "l", ".", "startswith", "(", "self", ".", "prefix_lic", ")", "}" ]
OSI Approved license.
[ "OSI", "Approved", "license", "." ]
python
train
44.5
tensorflow/tensorboard
tensorboard/compat/tensorflow_stub/tensor_shape.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/compat/tensorflow_stub/tensor_shape.py#L908-L920
def as_proto(self): """Returns this shape as a `TensorShapeProto`.""" if self._dims is None: return tensor_shape_pb2.TensorShapeProto(unknown_rank=True) else: return tensor_shape_pb2.TensorShapeProto( dim=[ tensor_shape_pb2.TensorShapeProto.Dim( size=-1 if d.value is None else d.value ) for d in self._dims ] )
[ "def", "as_proto", "(", "self", ")", ":", "if", "self", ".", "_dims", "is", "None", ":", "return", "tensor_shape_pb2", ".", "TensorShapeProto", "(", "unknown_rank", "=", "True", ")", "else", ":", "return", "tensor_shape_pb2", ".", "TensorShapeProto", "(", "dim", "=", "[", "tensor_shape_pb2", ".", "TensorShapeProto", ".", "Dim", "(", "size", "=", "-", "1", "if", "d", ".", "value", "is", "None", "else", "d", ".", "value", ")", "for", "d", "in", "self", ".", "_dims", "]", ")" ]
Returns this shape as a `TensorShapeProto`.
[ "Returns", "this", "shape", "as", "a", "TensorShapeProto", "." ]
python
train
36.538462
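A short sketch of the encoding this method performs, assuming the stub's TensorShape accepts the usual list-of-dims constructor:

from tensorboard.compat.tensorflow_stub import tensor_shape

shape = tensor_shape.TensorShape([2, None, 5])
proto = shape.as_proto()
# an unknown dimension (None) becomes size -1 in the proto;
# a fully unknown shape would instead set unknown_rank=True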
klen/flask-pw
flask_pw/__init__.py
https://github.com/klen/flask-pw/blob/04d7846f0c89f2b2331b238b1c8223368c2a40a7/flask_pw/__init__.py#L147-L158
def cmd_rollback(self, name): """Rollback migrations.""" from peewee_migrate.router import Router, LOGGER LOGGER.setLevel('INFO') LOGGER.propagate = 0 router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE']) router.rollback(name)
[ "def", "cmd_rollback", "(", "self", ",", "name", ")", ":", "from", "peewee_migrate", ".", "router", "import", "Router", ",", "LOGGER", "LOGGER", ".", "setLevel", "(", "'INFO'", ")", "LOGGER", ".", "propagate", "=", "0", "router", "=", "Router", "(", "self", ".", "database", ",", "migrate_dir", "=", "self", ".", "app", ".", "config", "[", "'PEEWEE_MIGRATE_DIR'", "]", ",", "migrate_table", "=", "self", ".", "app", ".", "config", "[", "'PEEWEE_MIGRATE_TABLE'", "]", ")", "router", ".", "rollback", "(", "name", ")" ]
Rollback migrations.
[ "Rollback", "migrations", "." ]
python
train
33.083333
lambdamusic/Ontospy
ontospy/core/sparqlHelper.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/sparqlHelper.py#L331-L349
def getPropAllSupers(self, aURI):
        """
        note: requires SPARQL 1.1
        2015-06-04: currently not used, inferred from above
        """
        try:
            qres = self.rdflib_graph.query("""SELECT DISTINCT ?x
                    WHERE {
                        { <%s> rdfs:subPropertyOf+ ?x }
                        FILTER (!isBlank(?x))
                    }
                    """ % (aURI))
        except Exception:
            printDebug(
                "... warning: the 'getPropAllSupers' query failed (maybe missing SPARQL 1.1 support?)"
            )
            qres = []
        return list(qres)
[ "def", "getPropAllSupers", "(", "self", ",", "aURI", ")", ":", "try", ":", "qres", "=", "self", ".", "rdflib_graph", ".", "query", "(", "\"\"\"SELECT DISTINCT ?x\n WHERE {\n { <%s> rdfs:subPropertyOf+ ?x }\n FILTER (!isBlank(?x))\n }\n \"\"\"", "%", "(", "aURI", ")", ")", "except", "Exception", ":", "printDebug", "(", "\"... warning: the 'getPropAllSupers' query failed (maybe missing SPARQL 1.1 support?)\"", ")", "qres", "=", "[", "]", "return", "list", "(", "qres", ")" ]
note: requires SPARQL 1.1 2015-06-04: currently not used, inferred from above
[ "note", ":", "requires", "SPARQL", "1", ".", "1", "2015", "-", "06", "-", "04", ":", "currently", "not", "used", "inferred", "from", "above" ]
python
train
33
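The `rdfs:subPropertyOf+` property path walks the transitive closure of superproperties in a single query. A standalone rdflib sketch of the same idea (the example graph is made up):

import rdflib

g = rdflib.Graph()
g.parse(data="""
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
<http://ex.org/a> rdfs:subPropertyOf <http://ex.org/b> .
<http://ex.org/b> rdfs:subPropertyOf <http://ex.org/c> .
""", format="turtle")

q = """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?x WHERE { <http://ex.org/a> rdfs:subPropertyOf+ ?x
                           FILTER (!isBlank(?x)) }"""
for row in g.query(q):
    print(row[0])   # prints both ex.org/b and ex.org/c via the transitive closure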
Kozea/pygal
pygal/graph/map.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/map.py#L53-L60
def _value_format(self, value): """ Format value for map value display. """ return '%s: %s' % ( self.area_names.get(self.adapt_code(value[0]), '?'), self._y_format(value[1]) )
[ "def", "_value_format", "(", "self", ",", "value", ")", ":", "return", "'%s: %s'", "%", "(", "self", ".", "area_names", ".", "get", "(", "self", ".", "adapt_code", "(", "value", "[", "0", "]", ")", ",", "'?'", ")", ",", "self", ".", "_y_format", "(", "value", "[", "1", "]", ")", ")" ]
Format value for map value display.
[ "Format", "value", "for", "map", "value", "display", "." ]
python
train
29
gabstopper/smc-python
smc/core/interfaces.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/core/interfaces.py#L1750-L1765
def delete(self): """ Delete this Vlan interface from the parent interface. This will also remove stale routes if the interface has networks associated with it. :return: None """ if self in self._parent.vlan_interface: self._parent.data['vlanInterfaces'] = [ v for v in self._parent.vlan_interface if v != self] self.update() for route in self._parent._engine.routing: if route.to_delete: route.delete()
[ "def", "delete", "(", "self", ")", ":", "if", "self", "in", "self", ".", "_parent", ".", "vlan_interface", ":", "self", ".", "_parent", ".", "data", "[", "'vlanInterfaces'", "]", "=", "[", "v", "for", "v", "in", "self", ".", "_parent", ".", "vlan_interface", "if", "v", "!=", "self", "]", "self", ".", "update", "(", ")", "for", "route", "in", "self", ".", "_parent", ".", "_engine", ".", "routing", ":", "if", "route", ".", "to_delete", ":", "route", ".", "delete", "(", ")" ]
Delete this Vlan interface from the parent interface. This will also remove stale routes if the interface has networks associated with it. :return: None
[ "Delete", "this", "Vlan", "interface", "from", "the", "parent", "interface", ".", "This", "will", "also", "remove", "stale", "routes", "if", "the", "interface", "has", "networks", "associated", "with", "it", ".", ":", "return", ":", "None" ]
python
train
34.8125
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L618-L643
def connect(self, deleteOldVersions=False, recreate=False): """ Locate the current version of the jobs DB or create a new one, and optionally delete old versions laying around. If desired, this method can be called at any time to re-create the tables from scratch, delete old versions of the database, etc. Parameters: ---------------------------------------------------------------- deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database from scratch even if it already exists. """ # Initialize tables, if needed with ConnectionFactory.get() as conn: # Initialize tables self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions, recreate=recreate) # Save our connection id conn.cursor.execute('SELECT CONNECTION_ID()') self._connectionID = conn.cursor.fetchall()[0][0] self._logger.info("clientJobsConnectionID=%r", self._connectionID) return
[ "def", "connect", "(", "self", ",", "deleteOldVersions", "=", "False", ",", "recreate", "=", "False", ")", ":", "# Initialize tables, if needed", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "# Initialize tables", "self", ".", "_initTables", "(", "cursor", "=", "conn", ".", "cursor", ",", "deleteOldVersions", "=", "deleteOldVersions", ",", "recreate", "=", "recreate", ")", "# Save our connection id", "conn", ".", "cursor", ".", "execute", "(", "'SELECT CONNECTION_ID()'", ")", "self", ".", "_connectionID", "=", "conn", ".", "cursor", ".", "fetchall", "(", ")", "[", "0", "]", "[", "0", "]", "self", ".", "_logger", ".", "info", "(", "\"clientJobsConnectionID=%r\"", ",", "self", ".", "_connectionID", ")", "return" ]
Locate the current version of the jobs DB or create a new one, and optionally delete old versions laying around. If desired, this method can be called at any time to re-create the tables from scratch, delete old versions of the database, etc. Parameters: ---------------------------------------------------------------- deleteOldVersions: if true, delete any old versions of the DB left on the server recreate: if true, recreate the database from scratch even if it already exists.
[ "Locate", "the", "current", "version", "of", "the", "jobs", "DB", "or", "create", "a", "new", "one", "and", "optionally", "delete", "old", "versions", "laying", "around", ".", "If", "desired", "this", "method", "can", "be", "called", "at", "any", "time", "to", "re", "-", "create", "the", "tables", "from", "scratch", "delete", "old", "versions", "of", "the", "database", "etc", "." ]
python
valid
41.423077
stevearc/dql
dql/engine.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/engine.py#L77-L94
def iter_insert_items(tree): """ Iterate over the items to insert from an INSERT statement """ if tree.list_values: keys = tree.attrs for values in tree.list_values: if len(keys) != len(values): raise SyntaxError( "Values '%s' do not match attributes " "'%s'" % (values, keys) ) yield dict(zip(keys, map(resolve, values))) elif tree.map_values: for item in tree.map_values: data = {} for (key, val) in item: data[key] = resolve(val) yield data else: raise SyntaxError("No insert data found")
[ "def", "iter_insert_items", "(", "tree", ")", ":", "if", "tree", ".", "list_values", ":", "keys", "=", "tree", ".", "attrs", "for", "values", "in", "tree", ".", "list_values", ":", "if", "len", "(", "keys", ")", "!=", "len", "(", "values", ")", ":", "raise", "SyntaxError", "(", "\"Values '%s' do not match attributes \"", "\"'%s'\"", "%", "(", "values", ",", "keys", ")", ")", "yield", "dict", "(", "zip", "(", "keys", ",", "map", "(", "resolve", ",", "values", ")", ")", ")", "elif", "tree", ".", "map_values", ":", "for", "item", "in", "tree", ".", "map_values", ":", "data", "=", "{", "}", "for", "(", "key", ",", "val", ")", "in", "item", ":", "data", "[", "key", "]", "=", "resolve", "(", "val", ")", "yield", "data", "else", ":", "raise", "SyntaxError", "(", "\"No insert data found\"", ")" ]
Iterate over the items to insert from an INSERT statement
[ "Iterate", "over", "the", "items", "to", "insert", "from", "an", "INSERT", "statement" ]
python
train
36.111111
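The core of the list_values branch is the zip of column names with each row of values; stripped of the parse tree, the transformation is just:

keys = ['id', 'name']
rows = [[1, 'a'], [2, 'b']]
items = [dict(zip(keys, row)) for row in rows]
# items == [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
# (in the real function each value first passes through dql's resolve())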
pyca/pynacl
src/nacl/hash.py
https://github.com/pyca/pynacl/blob/0df0c2c7693fa5d316846111ce510702756f5feb/src/nacl/hash.py#L60-L70
def sha256(message, encoder=nacl.encoding.HexEncoder): """ Hashes ``message`` with SHA256. :param message: The message to hash. :type message: bytes :param encoder: A class that is able to encode the hashed message. :returns: The hashed message. :rtype: bytes """ return encoder.encode(nacl.bindings.crypto_hash_sha256(message))
[ "def", "sha256", "(", "message", ",", "encoder", "=", "nacl", ".", "encoding", ".", "HexEncoder", ")", ":", "return", "encoder", ".", "encode", "(", "nacl", ".", "bindings", ".", "crypto_hash_sha256", "(", "message", ")", ")" ]
Hashes ``message`` with SHA256. :param message: The message to hash. :type message: bytes :param encoder: A class that is able to encode the hashed message. :returns: The hashed message. :rtype: bytes
[ "Hashes", "message", "with", "SHA256", "." ]
python
train
32.272727
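Usage is straightforward; the default HexEncoder yields a hex digest, and RawEncoder gives the 32 raw bytes:

import nacl.hash
import nacl.encoding

hex_digest = nacl.hash.sha256(b'hello')
raw_digest = nacl.hash.sha256(b'hello', encoder=nacl.encoding.RawEncoder)
assert len(raw_digest) == 32   # SHA-256 always produces 32 bytes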
saltstack/salt
salt/beacons/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/__init__.py#L186-L192
def _remove_list_item(self, beacon_config, label): ''' Remove an item from a beacon config list ''' index = self._get_index(beacon_config, label) del beacon_config[index]
[ "def", "_remove_list_item", "(", "self", ",", "beacon_config", ",", "label", ")", ":", "index", "=", "self", ".", "_get_index", "(", "beacon_config", ",", "label", ")", "del", "beacon_config", "[", "index", "]" ]
Remove an item from a beacon config list
[ "Remove", "an", "item", "from", "a", "beacon", "config", "list" ]
python
train
29.285714
mabuchilab/QNET
src/qnet/algebra/pattern_matching/__init__.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/pattern_matching/__init__.py#L232-L246
def extended_arg_patterns(self): """Iterator over patterns for positional arguments to be matched This yields the elements of :attr:`args`, extended by their `mode` value """ for arg in self._arg_iterator(self.args): if isinstance(arg, Pattern): if arg.mode > self.single: while True: yield arg else: yield arg else: yield arg
[ "def", "extended_arg_patterns", "(", "self", ")", ":", "for", "arg", "in", "self", ".", "_arg_iterator", "(", "self", ".", "args", ")", ":", "if", "isinstance", "(", "arg", ",", "Pattern", ")", ":", "if", "arg", ".", "mode", ">", "self", ".", "single", ":", "while", "True", ":", "yield", "arg", "else", ":", "yield", "arg", "else", ":", "yield", "arg" ]
Iterator over patterns for positional arguments to be matched This yields the elements of :attr:`args`, extended by their `mode` value
[ "Iterator", "over", "patterns", "for", "positional", "arguments", "to", "be", "matched" ]
python
train
32.6
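The `while True: yield arg` branch is what lets a one-or-more pattern consume any number of positional arguments. A simplified standalone sketch of the same generator shape (QNET's Pattern/mode machinery is replaced by a predicate):

import itertools

def extended(args, is_repeatable):
    for arg in args:
        if is_repeatable(arg):
            while True:          # a repeatable pattern never runs out
                yield arg
        else:
            yield arg

gen = extended(['a', 'b+'], lambda a: a.endswith('+'))
print(list(itertools.islice(gen, 5)))   # ['a', 'b+', 'b+', 'b+', 'b+']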
ytjia/utils-py
utils_py/time_seg_util.py
https://github.com/ytjia/utils-py/blob/68039b367e2e38fdecf234ecc625406b9e203ec0/utils_py/time_seg_util.py#L53-L64
def get_still_seg_belonged(dt_str, seg_duration, fmt='%Y-%m-%d %H:%M:%S'):
    """
    Get the fixed (non-sliding) time segment that the given moment belongs to
    :param dt_str: datetime string, eg: 2016-10-31 12:22:11
    :param seg_duration: segment length, unit: minute
    :param fmt: datetime string format
    :return:
    """
    dt = time_util.str_to_datetime(dt_str, fmt)
    minutes_of_day = time_util.get_minutes_of_day(dt)
    return time_util.minutes_to_time_str(
        minutes_of_day - minutes_of_day % seg_duration)
[ "def", "get_still_seg_belonged", "(", "dt_str", ",", "seg_duration", ",", "fmt", "=", "'%Y-%m-%d %H:%M:%S'", ")", ":", "dt", "=", "time_util", ".", "str_to_datetime", "(", "dt_str", ",", "fmt", ")", "minutes_of_day", "=", "time_util", ".", "get_minutes_of_day", "(", "dt", ")", "return", "time_util", ".", "minutes_to_time_str", "(", "minutes_of_day", "-", "minutes_of_day", "%", "seg_duration", ")" ]
Get the fixed (non-sliding) time segment that the given moment belongs to :param dt_str: datetime string, eg: 2016-10-31 12:22:11 :param seg_duration: segment length, unit: minute :param fmt: datetime string format :return:
[ "Get", "the", "fixed", "(", "non", "-", "sliding", ")", "time", "segment", "that", "the", "given", "moment", "belongs", "to", ":", "param", "dt_str", ":", "datetime", "string", "eg", ":", "2016", "-", "10", "-", "31", "12", ":", "22", ":", "11", ":", "param", "seg_duration", ":", "segment", "length", "unit", ":", "minute", ":", "param", "fmt", ":", "datetime", "string", "format", ":", "return", ":" ]
python
train
38.25
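A worked example of the bucketing arithmetic, with the time_util helpers inlined as plain integer math (their exact signatures are assumptions here):

minutes_of_day = 12 * 60 + 22                            # '12:22' -> 742 minutes
seg_duration = 15
start = minutes_of_day - minutes_of_day % seg_duration   # 742 - 7 = 735
print('%02d:%02d' % divmod(start, 60))                   # '12:15', start of the fixed segment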
ioos/compliance-checker
compliance_checker/cf/cf.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cf/cf.py#L1969-L2044
def check_calendar(self, ds): ''' Check the calendar attribute for variables defining time and ensure it is a valid calendar prescribed by CF. CF §4.4.1 In order to calculate a new date and time given a base date, base time and a time increment one must know what calendar to use. The values currently defined for calendar are: - gregorian or standard - proleptic_gregorian - noleap or 365_day - all_leap or 366_day - 360_day - julian - none The calendar attribute may be set to none in climate experiments that simulate a fixed time of year. The time of year is indicated by the date in the reference time of the units attribute. If none of the calendars defined above applies, a non-standard calendar can be defined. The lengths of each month are explicitly defined with the month_lengths attribute of the time axis. If leap years are included, then two other attributes of the time axis should also be defined: leap_year, leap_month The calendar attribute is not required when a non-standard calendar is being used. It is sufficient to define the calendar using the month_lengths attribute, along with leap_year, and leap_month as appropriate. However, the calendar attribute is allowed to take non-standard values and in that case defining the non-standard calendar using the appropriate attributes is required. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' valid_calendars = [ 'gregorian', 'standard', 'proleptic_gregorian', 'noleap', '365_day', 'all_leap', '366_day', '360_day', 'julian', 'none' ] ret_val = [] # if has a calendar, check that it is within the valid values # otherwise no calendar is valid for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None): reasoning = None valid_calendar = time_var.calendar in valid_calendars if not valid_calendar: reasoning = ["§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar" % (time_var.name, time_var.calendar)] # passes if the calendar is valid, otherwise notify of invalid # calendar result = Result(BaseCheck.LOW, valid_calendar, self.section_titles['4.4'], reasoning) ret_val.append(result) return ret_val
[ "def", "check_calendar", "(", "self", ",", "ds", ")", ":", "valid_calendars", "=", "[", "'gregorian'", ",", "'standard'", ",", "'proleptic_gregorian'", ",", "'noleap'", ",", "'365_day'", ",", "'all_leap'", ",", "'366_day'", ",", "'360_day'", ",", "'julian'", ",", "'none'", "]", "ret_val", "=", "[", "]", "# if has a calendar, check that it is within the valid values", "# otherwise no calendar is valid", "for", "time_var", "in", "ds", ".", "get_variables_by_attributes", "(", "calendar", "=", "lambda", "c", ":", "c", "is", "not", "None", ")", ":", "reasoning", "=", "None", "valid_calendar", "=", "time_var", ".", "calendar", "in", "valid_calendars", "if", "not", "valid_calendar", ":", "reasoning", "=", "[", "\"§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar\" ", " ", "t", "ime_var.", "n", "ame,", " ", "ime_var.", "c", "alendar)", "]", "", "# passes if the calendar is valid, otherwise notify of invalid", "# calendar", "result", "=", "Result", "(", "BaseCheck", ".", "LOW", ",", "valid_calendar", ",", "self", ".", "section_titles", "[", "'4.4'", "]", ",", "reasoning", ")", "ret_val", ".", "append", "(", "result", ")", "return", "ret_val" ]
Check the calendar attribute for variables defining time and ensure it is a valid calendar prescribed by CF. CF §4.4.1 In order to calculate a new date and time given a base date, base time and a time increment one must know what calendar to use. The values currently defined for calendar are: - gregorian or standard - proleptic_gregorian - noleap or 365_day - all_leap or 366_day - 360_day - julian - none The calendar attribute may be set to none in climate experiments that simulate a fixed time of year. The time of year is indicated by the date in the reference time of the units attribute. If none of the calendars defined above applies, a non-standard calendar can be defined. The lengths of each month are explicitly defined with the month_lengths attribute of the time axis. If leap years are included, then two other attributes of the time axis should also be defined: leap_year, leap_month The calendar attribute is not required when a non-standard calendar is being used. It is sufficient to define the calendar using the month_lengths attribute, along with leap_year, and leap_month as appropriate. However, the calendar attribute is allowed to take non-standard values and in that case defining the non-standard calendar using the appropriate attributes is required. :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
[ "Check", "the", "calendar", "attribute", "for", "variables", "defining", "time", "and", "ensure", "it", "is", "a", "valid", "calendar", "prescribed", "by", "CF", "." ]
python
train
35.855263
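The variable selection relies on netCDF4's `get_variables_by_attributes`, which accepts a callable filter. A small sketch of the same check outside the checker framework (the file name is hypothetical):

import netCDF4

VALID = {'gregorian', 'standard', 'proleptic_gregorian', 'noleap', '365_day',
         'all_leap', '366_day', '360_day', 'julian', 'none'}

ds = netCDF4.Dataset('example.nc')
for var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
    print(var.name, 'ok' if var.calendar in VALID else 'invalid calendar')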
jeffh/describe
describe/matchers/core.py
https://github.com/jeffh/describe/blob/6a33ffecc3340b57e60bc8a7095521882ff9a156/describe/matchers/core.py#L28-L34
def asserts(self, *args, **kwargs): """Wraps match method and places under an assertion. Override this for higher-level control, such as returning a custom object for additional validation (e.g. expect().to.change()) """ result = self.match(*args, **kwargs) self.expect(result) return result
[ "def", "asserts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "match", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "expect", "(", "result", ")", "return", "result" ]
Wraps match method and places under an assertion. Override this for higher-level control, such as returning a custom object for additional validation (e.g. expect().to.change())
[ "Wraps", "match", "method", "and", "places", "under", "an", "assertion", ".", "Override", "this", "for", "higher", "-", "level", "control", "such", "as", "returning", "a", "custom", "object", "for", "additional", "validation", "(", "e", ".", "g", ".", "expect", "()", ".", "to", ".", "change", "()", ")" ]
python
train
48.428571
envi-idl/envipyengine
envipyengine/taskengine/engine.py
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/engine.py#L37-L45
def tasks(self): """ Returns a list of all tasks known to the engine. :return: A list of task names. """ task_input = {'taskName': 'QueryTaskCatalog'} output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd) return output['outputParameters']['TASKS']
[ "def", "tasks", "(", "self", ")", ":", "task_input", "=", "{", "'taskName'", ":", "'QueryTaskCatalog'", "}", "output", "=", "taskengine", ".", "execute", "(", "task_input", ",", "self", ".", "_engine_name", ",", "cwd", "=", "self", ".", "_cwd", ")", "return", "output", "[", "'outputParameters'", "]", "[", "'TASKS'", "]" ]
Returns a list of all tasks known to the engine. :return: A list of task names.
[ "Returns", "a", "list", "of", "all", "tasks", "known", "to", "the", "engine", "." ]
python
train
35.111111
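Typical usage, assuming a local ENVI installation configured for the Py Task Engine; the task names in the comment are illustrative:

from envipyengine import Engine

envi = Engine('ENVI')
print(envi.tasks())   # e.g. ['SpectralIndex', 'ISODATAClassification', ...]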
danilobellini/audiolazy
examples/save_and_memoize_synth.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/examples/save_and_memoize_synth.py#L147-L166
def unpitched_low(dur, idx): """ Non-harmonic bass/lower frequency sound as a list (due to memoization). Parameters ---------- dur: Duration, in samples. idx: Zero or one (integer), for a small difference to the sound played. Returns ------- A list with the synthesized note. """ env = sinusoid(lag2freq(dur * 2)).limit(dur) ** 2 freq = 40 + 20 * sinusoid(1000 * Hz, phase=uniform(-pi, pi)) # Hz result = (low_table(freq * Hz) + low_table(freq * 1.1 * Hz)) * env * .5 return list(result)
[ "def", "unpitched_low", "(", "dur", ",", "idx", ")", ":", "env", "=", "sinusoid", "(", "lag2freq", "(", "dur", "*", "2", ")", ")", ".", "limit", "(", "dur", ")", "**", "2", "freq", "=", "40", "+", "20", "*", "sinusoid", "(", "1000", "*", "Hz", ",", "phase", "=", "uniform", "(", "-", "pi", ",", "pi", ")", ")", "# Hz", "result", "=", "(", "low_table", "(", "freq", "*", "Hz", ")", "+", "low_table", "(", "freq", "*", "1.1", "*", "Hz", ")", ")", "*", "env", "*", ".5", "return", "list", "(", "result", ")" ]
Non-harmonic bass/lower frequency sound as a list (due to memoization). Parameters ---------- dur: Duration, in samples. idx: Zero or one (integer), for a small difference to the sound played. Returns ------- A list with the synthesized note.
[ "Non", "-", "harmonic", "bass", "/", "lower", "frequency", "sound", "as", "a", "list", "(", "due", "to", "memoization", ")", "." ]
python
train
25.35
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L402-L411
def p_case_list(p): '''case_list : empty | case_list CASE expr case_separator inner_statement_list | case_list DEFAULT case_separator inner_statement_list''' if len(p) == 6: p[0] = p[1] + [ast.Case(p[3], p[5], lineno=p.lineno(2))] elif len(p) == 5: p[0] = p[1] + [ast.Default(p[4], lineno=p.lineno(2))] else: p[0] = []
[ "def", "p_case_list", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "6", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "ast", ".", "Case", "(", "p", "[", "3", "]", ",", "p", "[", "5", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", "]", "elif", "len", "(", "p", ")", "==", "5", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "ast", ".", "Default", "(", "p", "[", "4", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", "]", "else", ":", "p", "[", "0", "]", "=", "[", "]" ]
case_list : empty | case_list CASE expr case_separator inner_statement_list | case_list DEFAULT case_separator inner_statement_list
[ "case_list", ":", "empty", "|", "case_list", "CASE", "expr", "case_separator", "inner_statement_list", "|", "case_list", "DEFAULT", "case_separator", "inner_statement_list" ]
python
train
38.3
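For readers new to PLY: `p` indexes the grammar symbols of the matched alternative, so the slot arithmetic in the action lines up like this:

# case_list : case_list CASE expr case_separator inner_statement_list
#               p[1]    p[2] p[3]      p[4]              p[5]        -> len(p) == 6
# case_list : case_list DEFAULT case_separator inner_statement_list
#               p[1]     p[2]        p[3]             p[4]           -> len(p) == 5
# p[0] receives the value for the rule itself: the list accumulated in p[1]
# plus one new ast.Case or ast.Default node.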
xzased/lvm2py
lvm2py/vg.py
https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/vg.py#L446-L485
def create_lv(self, name, length, units): """ Creates a logical volume and returns the LogicalVolume instance associated with the lv_t handle:: from lvm2py import * lvm = LVM() vg = lvm.get_vg("myvg", "w") lv = vg.create_lv("mylv", 40, "MiB") *Args:* * name (str): The desired logical volume name. * length (int): The desired size. * units (str): The size units. *Raises:* * HandleError, CommitError, ValueError .. note:: The VolumeGroup instance must be in write mode, otherwise CommitError is raised. """ if units != "%": size = size_units[units] * length else: if not (0 < length <= 100) or type(length) is float: raise ValueError("Length not supported.") size = (self.size("B") / 100) * length self.open() lvh = lvm_vg_create_lv_linear(self.handle, name, c_ulonglong(size)) if not bool(lvh): self.close() raise CommitError("Failed to create LV.") lv = LogicalVolume(self, lvh=lvh) self.close() return lv
[ "def", "create_lv", "(", "self", ",", "name", ",", "length", ",", "units", ")", ":", "if", "units", "!=", "\"%\"", ":", "size", "=", "size_units", "[", "units", "]", "*", "length", "else", ":", "if", "not", "(", "0", "<", "length", "<=", "100", ")", "or", "type", "(", "length", ")", "is", "float", ":", "raise", "ValueError", "(", "\"Length not supported.\"", ")", "size", "=", "(", "self", ".", "size", "(", "\"B\"", ")", "/", "100", ")", "*", "length", "self", ".", "open", "(", ")", "lvh", "=", "lvm_vg_create_lv_linear", "(", "self", ".", "handle", ",", "name", ",", "c_ulonglong", "(", "size", ")", ")", "if", "not", "bool", "(", "lvh", ")", ":", "self", ".", "close", "(", ")", "raise", "CommitError", "(", "\"Failed to create LV.\"", ")", "lv", "=", "LogicalVolume", "(", "self", ",", "lvh", "=", "lvh", ")", "self", ".", "close", "(", ")", "return", "lv" ]
Creates a logical volume and returns the LogicalVolume instance associated with the lv_t handle:: from lvm2py import * lvm = LVM() vg = lvm.get_vg("myvg", "w") lv = vg.create_lv("mylv", 40, "MiB") *Args:* * name (str): The desired logical volume name. * length (int): The desired size. * units (str): The size units. *Raises:* * HandleError, CommitError, ValueError .. note:: The VolumeGroup instance must be in write mode, otherwise CommitError is raised.
[ "Creates", "a", "logical", "volume", "and", "returns", "the", "LogicalVolume", "instance", "associated", "with", "the", "lv_t", "handle", "::" ]
python
train
30.85
quantopian/zipline
zipline/__main__.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L383-L391
def clean(bundle, before, after, keep_last): """Clean up data downloaded with the ingest command. """ bundles_module.clean( bundle, before, after, keep_last, )
[ "def", "clean", "(", "bundle", ",", "before", ",", "after", ",", "keep_last", ")", ":", "bundles_module", ".", "clean", "(", "bundle", ",", "before", ",", "after", ",", "keep_last", ",", ")" ]
Clean up data downloaded with the ingest command.
[ "Clean", "up", "data", "downloaded", "with", "the", "ingest", "command", "." ]
python
train
22.111111
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exportxml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exportxml.py#L242-L245
def parse_child_elements(self, element): '''parses all children of an etree element''' for child in element.iterchildren(): self.parsers[child.tag](child)
[ "def", "parse_child_elements", "(", "self", ",", "element", ")", ":", "for", "child", "in", "element", ".", "iterchildren", "(", ")", ":", "self", ".", "parsers", "[", "child", ".", "tag", "]", "(", "child", ")" ]
parses all children of an etree element
[ "parses", "all", "children", "of", "an", "etree", "element" ]
python
train
44.75
trailofbits/protofuzz
protofuzz/gen.py
https://github.com/trailofbits/protofuzz/blob/589492d34de9a0da6cc5554094e2588b893b2fd8/protofuzz/gen.py#L130-L152
def make_dependent(self, source, target, action): ''' Create a dependency between path 'source' and path 'target' via the callable 'action'. >>> permuter._generators [IterValueGenerator(one), IterValueGenerator(two)] >>> permuter.make_dependent('one', 'two', lambda x: x + 1) Going forward, 'two' will only contain values that are (one+1) ''' if not self._generators: return src_permuter, src = self._resolve_child(source) dest = self._resolve_child(target)[1] # pylint: disable=protected-access container = src_permuter._generators idx = container.index(src) container[idx] = DependentValueGenerator(src.name(), dest, action) self._update_independent_generators()
[ "def", "make_dependent", "(", "self", ",", "source", ",", "target", ",", "action", ")", ":", "if", "not", "self", ".", "_generators", ":", "return", "src_permuter", ",", "src", "=", "self", ".", "_resolve_child", "(", "source", ")", "dest", "=", "self", ".", "_resolve_child", "(", "target", ")", "[", "1", "]", "# pylint: disable=protected-access", "container", "=", "src_permuter", ".", "_generators", "idx", "=", "container", ".", "index", "(", "src", ")", "container", "[", "idx", "]", "=", "DependentValueGenerator", "(", "src", ".", "name", "(", ")", ",", "dest", ",", "action", ")", "self", ".", "_update_independent_generators", "(", ")" ]
Create a dependency between path 'source' and path 'target' via the callable 'action'. >>> permuter._generators [IterValueGenerator(one), IterValueGenerator(two)] >>> permuter.make_dependent('one', 'two', lambda x: x + 1) Going forward, 'two' will only contain values that are (one+1)
[ "Create", "a", "dependency", "between", "path", "source", "and", "path", "target", "via", "the", "callable", "action", "." ]
python
train
34.217391
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L1068-L1088
def _update_dPrxy(self): """Update `dPrxy`.""" super(ExpCM_fitprefs, self)._update_dPrxy() if 'zeta' in self.freeparams: tildeFrxyQxy = self.tildeFrxy * self.Qxy j = 0 zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float') for r in range(self.nsites): for i in range(N_AA - 1): zetari = self.zeta[j] zetaxterm.fill(0) zetayterm.fill(0) zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0) zetayterm[r][self._aa_for_y > i] = 1.0 / zetari zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0) self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm) _fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices) j += 1
[ "def", "_update_dPrxy", "(", "self", ")", ":", "super", "(", "ExpCM_fitprefs", ",", "self", ")", ".", "_update_dPrxy", "(", ")", "if", "'zeta'", "in", "self", ".", "freeparams", ":", "tildeFrxyQxy", "=", "self", ".", "tildeFrxy", "*", "self", ".", "Qxy", "j", "=", "0", "zetaxterm", "=", "scipy", ".", "ndarray", "(", "(", "self", ".", "nsites", ",", "N_CODON", ",", "N_CODON", ")", ",", "dtype", "=", "'float'", ")", "zetayterm", "=", "scipy", ".", "ndarray", "(", "(", "self", ".", "nsites", ",", "N_CODON", ",", "N_CODON", ")", ",", "dtype", "=", "'float'", ")", "for", "r", "in", "range", "(", "self", ".", "nsites", ")", ":", "for", "i", "in", "range", "(", "N_AA", "-", "1", ")", ":", "zetari", "=", "self", ".", "zeta", "[", "j", "]", "zetaxterm", ".", "fill", "(", "0", ")", "zetayterm", ".", "fill", "(", "0", ")", "zetaxterm", "[", "r", "]", "[", "self", ".", "_aa_for_x", ">", "i", "]", "=", "-", "1.0", "/", "zetari", "zetaxterm", "[", "r", "]", "[", "self", ".", "_aa_for_x", "==", "i", "]", "=", "-", "1.0", "/", "(", "zetari", "-", "1.0", ")", "zetayterm", "[", "r", "]", "[", "self", ".", "_aa_for_y", ">", "i", "]", "=", "1.0", "/", "zetari", "zetayterm", "[", "r", "]", "[", "self", ".", "_aa_for_y", "==", "i", "]", "=", "1.0", "/", "(", "zetari", "-", "1.0", ")", "self", ".", "dPrxy", "[", "'zeta'", "]", "[", "j", "]", "=", "tildeFrxyQxy", "*", "(", "zetayterm", "+", "zetaxterm", ")", "_fill_diagonals", "(", "self", ".", "dPrxy", "[", "'zeta'", "]", "[", "j", "]", ",", "self", ".", "_diag_indices", ")", "j", "+=", "1" ]
Update `dPrxy`.
[ "Update", "dPrxy", "." ]
python
train
50
bitcraze/crazyflie-lib-python
examples/basicparam.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/examples/basicparam.py#L99-L125
def _param_callback(self, name, value): """Generic callback registered for all the groups""" print('{0}: {1}'.format(name, value)) # Remove each parameter from the list and close the link when # all are fetched self._param_check_list.remove(name) if len(self._param_check_list) == 0: print('Have fetched all parameter values.') # First remove all the group callbacks for g in self._param_groups: self._cf.param.remove_update_callback(group=g, cb=self._param_callback) # Create a new random value [0.00,1.00] for pid_attitude.pitch_kd # and set it pkd = random.random() print('') print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd)) self._cf.param.add_update_callback(group='pid_attitude', name='pitch_kd', cb=self._a_pitch_kd_callback) # When setting a value the parameter is automatically read back # and the registered callbacks will get the updated value self._cf.param.set_value('pid_attitude.pitch_kd', '{:.2f}'.format(pkd))
[ "def", "_param_callback", "(", "self", ",", "name", ",", "value", ")", ":", "print", "(", "'{0}: {1}'", ".", "format", "(", "name", ",", "value", ")", ")", "# Remove each parameter from the list and close the link when", "# all are fetched", "self", ".", "_param_check_list", ".", "remove", "(", "name", ")", "if", "len", "(", "self", ".", "_param_check_list", ")", "==", "0", ":", "print", "(", "'Have fetched all parameter values.'", ")", "# First remove all the group callbacks", "for", "g", "in", "self", ".", "_param_groups", ":", "self", ".", "_cf", ".", "param", ".", "remove_update_callback", "(", "group", "=", "g", ",", "cb", "=", "self", ".", "_param_callback", ")", "# Create a new random value [0.00,1.00] for pid_attitude.pitch_kd", "# and set it", "pkd", "=", "random", ".", "random", "(", ")", "print", "(", "''", ")", "print", "(", "'Write: pid_attitude.pitch_kd={:.2f}'", ".", "format", "(", "pkd", ")", ")", "self", ".", "_cf", ".", "param", ".", "add_update_callback", "(", "group", "=", "'pid_attitude'", ",", "name", "=", "'pitch_kd'", ",", "cb", "=", "self", ".", "_a_pitch_kd_callback", ")", "# When setting a value the parameter is automatically read back", "# and the registered callbacks will get the updated value", "self", ".", "_cf", ".", "param", ".", "set_value", "(", "'pid_attitude.pitch_kd'", ",", "'{:.2f}'", ".", "format", "(", "pkd", ")", ")" ]
Generic callback registered for all the groups
[ "Generic", "callback", "registered", "for", "all", "the", "groups" ]
python
train
48.259259
cloud9ers/gurumate
environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/distribute-0.6.31-py2.7.egg/pkg_resources.py#L1736-L1743
def StringIO(*args, **kw): """Thunk to load the real StringIO on demand""" global StringIO try: from cStringIO import StringIO except ImportError: from StringIO import StringIO return StringIO(*args,**kw)
[ "def", "StringIO", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "global", "StringIO", "try", ":", "from", "cStringIO", "import", "StringIO", "except", "ImportError", ":", "from", "StringIO", "import", "StringIO", "return", "StringIO", "(", "*", "args", ",", "*", "*", "kw", ")" ]
Thunk to load the real StringIO on demand
[ "Thunk", "to", "load", "the", "real", "StringIO", "on", "demand" ]
python
test
29.125
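The same self-replacing thunk pattern in a generic form: the first call performs the import and rebinds the module-global name, so every later call goes straight to the real implementation with no import overhead.

def json_loads(*args, **kw):
    """Thunk that replaces itself with json.loads on first use."""
    global json_loads
    import json
    json_loads = json.loads
    return json_loads(*args, **kw)

assert json_loads('{"a": 1}') == {'a': 1}   # this call triggers the import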
facundobatista/logassert
logassert/logassert.py
https://github.com/facundobatista/logassert/blob/79dc3d22a402fa0fb91cf3b046c63f039aa71890/logassert/logassert.py#L84-L97
def _check_neg(self, level, *tokens):
        """Check that the different tokens were NOT logged in a single
        record, asserting by level."""
        for record in self.records:
            if level is not None and record.levelno != level:
                continue
            if all(token in record.message for token in tokens):
                break
        else:
            return

        # didn't exit, all tokens found in the same record
        msg = "Tokens {} found in the following record: {} {!r}".format(
            tokens, record.levelname, record.message)
        self.test_instance.fail(msg)
[ "def", "_check_neg", "(", "self", ",", "level", ",", "*", "tokens", ")", ":", "for", "record", "in", "self", ".", "records", ":", "if", "level", "is", "not", "None", "and", "record", ".", "levelno", "!=", "level", ":", "continue", "if", "all", "(", "token", "in", "record", ".", "message", "for", "token", "in", "tokens", ")", ":", "break", "else", ":", "return", "# didn't exit, all tokens found in the same record", "msg", "=", "\"Tokens {} found in the following record: {} {!r}\"", ".", "format", "(", "tokens", ",", "record", ".", "levelname", ",", "record", ".", "message", ")", "self", ".", "test_instance", ".", "fail", "(", "msg", ")" ]
Check that the different tokens were NOT logged in a single record, asserting by level.
[ "Check", "that", "the", "different", "tokens", "were", "NOT", "logged", "in", "a", "single", "record", "asserting", "by", "level", "." ]
python
train
41.928571
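The method leans on Python's for/else: the else suite runs only when the loop completes without hitting break. A minimal illustration:

records = ['info: started', 'error: boom']
for rec in records:
    if all(tok in rec for tok in ('error', 'boom')):
        break                      # all tokens found in one record
else:
    print('no record matched')     # skipped here, because the loop broke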
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_vswitch_rpc/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_vswitch_rpc/__init__.py#L109-L132
def _set_get_vnetwork_hosts(self, v, load=False): """ Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_vnetwork_hosts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_vnetwork_hosts() directly. YANG Description: Shows discovered hosts """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """get_vnetwork_hosts must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name="get-vnetwork-hosts", rest_name="get-vnetwork-hosts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""", }) self.__get_vnetwork_hosts = t if hasattr(self, '_set'): self._set()
[ "def", "_set_get_vnetwork_hosts", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "get_vnetwork_hosts", ".", "get_vnetwork_hosts", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"get-vnetwork-hosts\"", ",", "rest_name", "=", "\"get-vnetwork-hosts\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'host-name'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vswitch'", ",", "defining_module", "=", "'brocade-vswitch'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"get_vnetwork_hosts must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=get_vnetwork_hosts.get_vnetwork_hosts, is_leaf=True, yang_name=\"get-vnetwork-hosts\", rest_name=\"get-vnetwork-hosts\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'host-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__get_vnetwork_hosts", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for get_vnetwork_hosts, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_hosts (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_vnetwork_hosts is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_vnetwork_hosts() directly. YANG Description: Shows discovered hosts
[ "Setter", "method", "for", "get_vnetwork_hosts", "mapped", "from", "YANG", "variable", "/", "brocade_vswitch_rpc", "/", "get_vnetwork_hosts", "(", "rpc", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_get_vnetwork_hosts", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_get_vnetwork_hosts", "()", "directly", "." ]
python
train
71.416667
mbedmicro/pyOCD
pyocd/probe/cmsis_dap_probe.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/cmsis_dap_probe.py#L219-L225
def assert_reset(self, asserted): """Assert or de-assert target reset line""" try: self._invalidate_cached_registers() self._link.assert_reset(asserted) except DAPAccess.Error as exc: six.raise_from(self._convert_exception(exc), exc)
[ "def", "assert_reset", "(", "self", ",", "asserted", ")", ":", "try", ":", "self", ".", "_invalidate_cached_registers", "(", ")", "self", ".", "_link", ".", "assert_reset", "(", "asserted", ")", "except", "DAPAccess", ".", "Error", "as", "exc", ":", "six", ".", "raise_from", "(", "self", ".", "_convert_exception", "(", "exc", ")", ",", "exc", ")" ]
Assert or de-assert target reset line
[ "Assert", "or", "de", "-", "assert", "target", "reset", "line" ]
python
train
41
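The except clause shows a common driver-layer pattern: translate low-level transport errors into the probe's own exception type while preserving the cause via six.raise_from. A standalone sketch (ProbeError and do_io are made up):

import six

class ProbeError(Exception):
    pass

def do_io():
    raise IOError('link dropped')

try:
    try:
        do_io()
    except IOError as exc:
        six.raise_from(ProbeError('probe failed'), exc)
except ProbeError as err:
    print(err)   # on Python 3 the IOError survives as err.__cause__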