Dataset schema (each record below lists these fields, in order):
  repo          string, 7 to 55 chars
  path          string, 4 to 223 chars
  url           string, 87 to 315 chars
  code          string, 75 to 104k chars
  language      string, 1 distinct value
  partition     string, 3 distinct values
  avg_line_len  float64, 7.91 to 980
nccgroup/Scout2
AWSScout2/configs/regions.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/configs/regions.py#L91-L136
def fetch_all(self, credentials, regions=[], partition_name='aws', targets=None):
    """
    Fetch all the configuration supported by Scout2 for a given service

    :param credentials:                 F
    :param regions:                     Name of regions to fetch data from
    :param partition_name:              AWS partition to connect to
    :param targets:                     Type of resources to be fetched; defaults to all.
    """
    # Initialize targets
    # Tweak params
    realtargets = ()
    if not targets:
        targets = self.targets
    for i, target in enumerate(targets['first_region']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['first_region'] = realtargets
    realtargets = ()
    for i, target in enumerate(targets['other_regions']):
        params = self.tweak_params(target[3], credentials)
        realtargets = realtargets + ((target[0], target[1], target[2], params, target[4]),)
    targets['other_regions'] = realtargets
    printInfo('Fetching %s config...' % format_service_name(self.service))
    self.fetchstatuslogger = FetchStatusLogger(targets['first_region'], True)
    api_service = 'ec2' if self.service.lower() == 'vpc' else self.service.lower()
    # Init regions
    regions = build_region_list(api_service, regions, partition_name)
    # TODO: move this code within this class
    self.fetchstatuslogger.counts['regions']['discovered'] = len(regions)
    # Threading to fetch & parse resources (queue consumer)
    q = self._init_threading(self._fetch_target, {}, self.thread_config['parse'])
    # Threading to list resources (queue feeder)
    qr = self._init_threading(self._fetch_region,
                              {'api_service': api_service, 'credentials': credentials,
                               'q': q, 'targets': ()},
                              self.thread_config['list'])
    # Go
    for i, region in enumerate(regions):
        qr.put((region, targets['first_region'] if i == 0 else targets['other_regions']))
    # Join
    qr.join()
    q.join()
    # Show completion and force newline
    self.fetchstatuslogger.show(True)
[ "def", "fetch_all", "(", "self", ",", "credentials", ",", "regions", "=", "[", "]", ",", "partition_name", "=", "'aws'", ",", "targets", "=", "None", ")", ":", "# Initialize targets", "# Tweak params", "realtargets", "=", "(", ")", "if", "not", "targets", ":", "targets", "=", "self", ".", "targets", "for", "i", ",", "target", "in", "enumerate", "(", "targets", "[", "'first_region'", "]", ")", ":", "params", "=", "self", ".", "tweak_params", "(", "target", "[", "3", "]", ",", "credentials", ")", "realtargets", "=", "realtargets", "+", "(", "(", "target", "[", "0", "]", ",", "target", "[", "1", "]", ",", "target", "[", "2", "]", ",", "params", ",", "target", "[", "4", "]", ")", ",", ")", "targets", "[", "'first_region'", "]", "=", "realtargets", "realtargets", "=", "(", ")", "for", "i", ",", "target", "in", "enumerate", "(", "targets", "[", "'other_regions'", "]", ")", ":", "params", "=", "self", ".", "tweak_params", "(", "target", "[", "3", "]", ",", "credentials", ")", "realtargets", "=", "realtargets", "+", "(", "(", "target", "[", "0", "]", ",", "target", "[", "1", "]", ",", "target", "[", "2", "]", ",", "params", ",", "target", "[", "4", "]", ")", ",", ")", "targets", "[", "'other_regions'", "]", "=", "realtargets", "printInfo", "(", "'Fetching %s config...'", "%", "format_service_name", "(", "self", ".", "service", ")", ")", "self", ".", "fetchstatuslogger", "=", "FetchStatusLogger", "(", "targets", "[", "'first_region'", "]", ",", "True", ")", "api_service", "=", "'ec2'", "if", "self", ".", "service", ".", "lower", "(", ")", "==", "'vpc'", "else", "self", ".", "service", ".", "lower", "(", ")", "# Init regions", "regions", "=", "build_region_list", "(", "api_service", ",", "regions", ",", "partition_name", ")", "# TODO: move this code within this class", "self", ".", "fetchstatuslogger", ".", "counts", "[", "'regions'", "]", "[", "'discovered'", "]", "=", "len", "(", "regions", ")", "# Threading to fetch & parse resources (queue consumer)", "q", "=", "self", ".", "_init_threading", "(", "self", ".", "_fetch_target", ",", "{", "}", ",", "self", ".", "thread_config", "[", "'parse'", "]", ")", "# Threading to list resources (queue feeder)", "qr", "=", "self", ".", "_init_threading", "(", "self", ".", "_fetch_region", ",", "{", "'api_service'", ":", "api_service", ",", "'credentials'", ":", "credentials", ",", "'q'", ":", "q", ",", "'targets'", ":", "(", ")", "}", ",", "self", ".", "thread_config", "[", "'list'", "]", ")", "# Go", "for", "i", ",", "region", "in", "enumerate", "(", "regions", ")", ":", "qr", ".", "put", "(", "(", "region", ",", "targets", "[", "'first_region'", "]", "if", "i", "==", "0", "else", "targets", "[", "'other_regions'", "]", ")", ")", "# Join", "qr", ".", "join", "(", ")", "q", ".", "join", "(", ")", "# Show completion and force newline", "self", ".", "fetchstatuslogger", ".", "show", "(", "True", ")" ]
Fetch all the configuration supported by Scout2 for a given service :param credentials: F :param service: Name of the service :param regions: Name of regions to fetch data from :param partition_name: AWS partition to connect to :param targets: Type of resources to be fetched; defaults to all.
[ "Fetch", "all", "the", "configuration", "supported", "by", "Scout2", "for", "a", "given", "service" ]
language: python | partition: train | avg_line_len: 51.326087
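A hypothetical invocation sketch for fetch_all above; the config object, credentials, and region list are all assumptions for illustration, not Scout2 specifics:

# `ec2_config` stands for a Scout2 service-config object defining fetch_all;
# `credentials` is an AWS credentials object obtained through Scout2's own helpers.
ec2_config.fetch_all(credentials,
                     regions=['us-east-1', 'eu-west-1'],  # limit the scan
                     partition_name='aws')                # default partition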
Qiskit/qiskit-terra
qiskit/quantum_info/operators/channel/chi.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/quantum_info/operators/channel/chi.py#L256-L270
def multiply(self, other):
    """Return the QuantumChannel other * self.

    Args:
        other (complex): a complex number.

    Returns:
        Chi: the scalar multiplication other * self as a Chi object.

    Raises:
        QiskitError: if other is not a valid scalar.
    """
    if not isinstance(other, Number):
        raise QiskitError("other is not a number")
    return Chi(other * self._data, self._input_dims, self._output_dims)
[ "def", "multiply", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Number", ")", ":", "raise", "QiskitError", "(", "\"other is not a number\"", ")", "return", "Chi", "(", "other", "*", "self", ".", "_data", ",", "self", ".", "_input_dims", ",", "self", ".", "_output_dims", ")" ]
Return the QuantumChannel self + other. Args: other (complex): a complex number. Returns: Chi: the scalar multiplication other * self as a Chi object. Raises: QiskitError: if other is not a valid scalar.
[ "Return", "the", "QuantumChannel", "self", "+", "other", "." ]
language: python | partition: test | avg_line_len: 31.666667
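A brief usage sketch for multiply above, assuming a Qiskit Terra version contemporary with this snippet; the identity channel is just an easy Chi object to construct:

import numpy as np
from qiskit.quantum_info import Chi, Operator

chan = Chi(Operator(np.eye(2)))  # identity channel on one qubit, as a Chi object
scaled = chan.multiply(0.5)      # 0.5 * chan; a non-number raises QiskitError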
mikicz/arca
arca/backend/vagrant.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/vagrant.py#L226-L292
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path):
    """ Starts up a VM, builds a Docker image and gets it to the VM, runs the script
    over SSH and returns the result. Stops the VM if ``keep_vm_running`` is not set.
    """
    from fabric import api
    from fabric.exceptions import CommandTimeout

    # start up or get running VM
    vm_location = self.get_vm_location()
    self.ensure_vm_running(vm_location)
    logger.info("Running with VM located at %s", vm_location)

    # pushes the image to the registry so it can be pulled in the VM
    self.check_docker_access()  # init client
    self.get_image_for_repo(repo, branch, git_repo, repo_path)

    requirements_option, requirements_hash = self.get_requirements_information(repo_path)

    # getting things needed for execution over SSH
    image_tag = self.get_image_tag(requirements_option, requirements_hash, self.get_dependencies())
    image_name = self.use_registry_name

    task_filename, task_json = self.serialized_task(task)
    (vm_location / task_filename).write_text(task_json)

    container_name = self.get_container_name(repo, branch, git_repo)

    # setting up Fabric
    api.env.hosts = [self.vagrant.user_hostname_port()]
    api.env.key_filename = self.vagrant.keyfile()
    api.env.disable_known_hosts = True  # useful for when the vagrant box ip changes.
    api.env.abort_exception = BuildError  # raises SystemExit otherwise
    api.env.shell = "/bin/sh -l -c"
    if self.quiet:
        api.output.everything = False
    else:
        api.output.everything = True

    # executes the task
    try:
        res = api.execute(self.fabric_task,
                          container_name=container_name,
                          definition_filename=task_filename,
                          image_name=image_name,
                          image_tag=image_tag,
                          repository=str(repo_path.relative_to(Path(self._arca.base_dir).resolve() / 'repos')),
                          timeout=task.timeout)

        return Result(res[self.vagrant.user_hostname_port()].stdout)
    except CommandTimeout:
        raise BuildTimeoutError(f"The task timed out after {task.timeout} seconds.")
    except BuildError:  # can be raised by :meth:`Result.__init__`
        raise
    except Exception as e:
        logger.exception(e)
        raise BuildError("The build failed", extra_info={"exception": e})
    finally:
        # stops or destroys the VM if it should not be kept running
        if not self.keep_vm_running:
            if self.destroy:
                self.vagrant.destroy()
                shutil.rmtree(self.vagrant.root, ignore_errors=True)
                self.vagrant = None
            else:
                self.vagrant.halt()
[ "def", "run", "(", "self", ",", "repo", ":", "str", ",", "branch", ":", "str", ",", "task", ":", "Task", ",", "git_repo", ":", "Repo", ",", "repo_path", ":", "Path", ")", ":", "from", "fabric", "import", "api", "from", "fabric", ".", "exceptions", "import", "CommandTimeout", "# start up or get running VM", "vm_location", "=", "self", ".", "get_vm_location", "(", ")", "self", ".", "ensure_vm_running", "(", "vm_location", ")", "logger", ".", "info", "(", "\"Running with VM located at %s\"", ",", "vm_location", ")", "# pushes the image to the registry so it can be pulled in the VM", "self", ".", "check_docker_access", "(", ")", "# init client", "self", ".", "get_image_for_repo", "(", "repo", ",", "branch", ",", "git_repo", ",", "repo_path", ")", "requirements_option", ",", "requirements_hash", "=", "self", ".", "get_requirements_information", "(", "repo_path", ")", "# getting things needed for execution over SSH", "image_tag", "=", "self", ".", "get_image_tag", "(", "requirements_option", ",", "requirements_hash", ",", "self", ".", "get_dependencies", "(", ")", ")", "image_name", "=", "self", ".", "use_registry_name", "task_filename", ",", "task_json", "=", "self", ".", "serialized_task", "(", "task", ")", "(", "vm_location", "/", "task_filename", ")", ".", "write_text", "(", "task_json", ")", "container_name", "=", "self", ".", "get_container_name", "(", "repo", ",", "branch", ",", "git_repo", ")", "# setting up Fabric", "api", ".", "env", ".", "hosts", "=", "[", "self", ".", "vagrant", ".", "user_hostname_port", "(", ")", "]", "api", ".", "env", ".", "key_filename", "=", "self", ".", "vagrant", ".", "keyfile", "(", ")", "api", ".", "env", ".", "disable_known_hosts", "=", "True", "# useful for when the vagrant box ip changes.", "api", ".", "env", ".", "abort_exception", "=", "BuildError", "# raises SystemExit otherwise", "api", ".", "env", ".", "shell", "=", "\"/bin/sh -l -c\"", "if", "self", ".", "quiet", ":", "api", ".", "output", ".", "everything", "=", "False", "else", ":", "api", ".", "output", ".", "everything", "=", "True", "# executes the task", "try", ":", "res", "=", "api", ".", "execute", "(", "self", ".", "fabric_task", ",", "container_name", "=", "container_name", ",", "definition_filename", "=", "task_filename", ",", "image_name", "=", "image_name", ",", "image_tag", "=", "image_tag", ",", "repository", "=", "str", "(", "repo_path", ".", "relative_to", "(", "Path", "(", "self", ".", "_arca", ".", "base_dir", ")", ".", "resolve", "(", ")", "/", "'repos'", ")", ")", ",", "timeout", "=", "task", ".", "timeout", ")", "return", "Result", "(", "res", "[", "self", ".", "vagrant", ".", "user_hostname_port", "(", ")", "]", ".", "stdout", ")", "except", "CommandTimeout", ":", "raise", "BuildTimeoutError", "(", "f\"The task timeouted after {task.timeout} seconds.\"", ")", "except", "BuildError", ":", "# can be raised by :meth:`Result.__init__`", "raise", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "e", ")", "raise", "BuildError", "(", "\"The build failed\"", ",", "extra_info", "=", "{", "\"exception\"", ":", "e", "}", ")", "finally", ":", "# stops or destroys the VM if it should not be kept running", "if", "not", "self", ".", "keep_vm_running", ":", "if", "self", ".", "destroy", ":", "self", ".", "vagrant", ".", "destroy", "(", ")", "shutil", ".", "rmtree", "(", "self", ".", "vagrant", ".", "root", ",", "ignore_errors", "=", "True", ")", "self", ".", "vagrant", "=", "None", "else", ":", "self", ".", "vagrant", ".", "halt", "(", ")" ]
Starts up a VM, builds an docker image and gets it to the VM, runs the script over SSH, returns result. Stops the VM if ``keep_vm_running`` is not set.
[ "Starts", "up", "a", "VM", "builds", "an", "docker", "image", "and", "gets", "it", "to", "the", "VM", "runs", "the", "script", "over", "SSH", "returns", "result", ".", "Stops", "the", "VM", "if", "keep_vm_running", "is", "not", "set", "." ]
language: python | partition: train | avg_line_len: 44.208955
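A hedged end-to-end sketch of the backend above driven through arca's public API; the repository URL is a placeholder:

from arca import Arca, Task, VagrantBackend

arca = Arca(backend=VagrantBackend())
task = Task('time.asctime')  # entry point executed inside the repo's environment
result = arca.run('https://example.com/user/repo.git', 'master', task)
print(result.output)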
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/files.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/files.py#L288-L309
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but subdirectories are checked for a __init__.py to be sure we only
    find the importable files.
    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if i > 0 and '__init__.py' not in filenames:
            # If a directory doesn't have __init__.py, then it isn't
            # importable and neither are its files
            del dirnames[:]
            continue
        for filename in filenames:
            # We're only interested in files that look like reasonable Python
            # files: Must end with .py or .pyw, and must not have certain funny
            # characters that probably mean they are editor junk.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)
[ "def", "find_python_files", "(", "dirname", ")", ":", "for", "i", ",", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "enumerate", "(", "os", ".", "walk", "(", "dirname", ")", ")", ":", "if", "i", ">", "0", "and", "'__init__.py'", "not", "in", "filenames", ":", "# If a directory doesn't have __init__.py, then it isn't", "# importable and neither are its files", "del", "dirnames", "[", ":", "]", "continue", "for", "filename", "in", "filenames", ":", "# We're only interested in files that look like reasonable Python", "# files: Must end with .py or .pyw, and must not have certain funny", "# characters that probably mean they are editor junk.", "if", "re", ".", "match", "(", "r\"^[^.#~!$@%^&*()+=,]+\\.pyw?$\"", ",", "filename", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")" ]
Yield all of the importable Python files in `dirname`, recursively. To be importable, the files have to be in a directory with a __init__.py, except for `dirname` itself, which isn't required to have one. The assumption is that `dirname` was specified directly, so the user knows best, but subdirectories are checked for a __init__.py to be sure we only find the importable files.
[ "Yield", "all", "of", "the", "importable", "Python", "files", "in", "dirname", "recursively", "." ]
language: python | partition: test | avg_line_len: 50.227273
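A one-liner showing how find_python_files above might be used; 'myproject' is a placeholder directory name:

for path in find_python_files('myproject'):  # walks subpackages that carry __init__.py
    print(path)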
ratcave/ratcave
ratcave/shader.py
https://github.com/ratcave/ratcave/blob/e3862cdaba100ac2c6c78c08c4b09638e0c88fd4/ratcave/shader.py#L197-L218
def bind(self):
    """Activate this Shader, making it the currently-bound program.

    Any Mesh.draw() calls after bind() will have their data processed by this
    Shader. To unbind, call Shader.unbind().

    Example::

        shader.bind()
        mesh.draw()
        shader.unbind()

    .. note:: Shader.bind() and Shader.unbind() can also be called implicitly
              by using the 'with' statement.

    Example of with statement with Shader::

        with shader:
            mesh.draw()
    """
    if not self.is_linked:
        if not self.is_compiled:
            self.compile()
        self.link()
    # Name the class explicitly; super(self.__class__, self) recurses forever in subclasses.
    super(Shader, self).bind()
[ "def", "bind", "(", "self", ")", ":", "if", "not", "self", ".", "is_linked", ":", "if", "not", "self", ".", "is_compiled", ":", "self", ".", "compile", "(", ")", "self", ".", "link", "(", ")", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "bind", "(", ")" ]
Activate this Shader, making it the currently-bound program. Any Mesh.draw() calls after bind() will have their data processed by this Shader. To unbind, call Shader.unbind(). Example:: shader.bind() mesh.draw() shader.unbind() .. note:: Shader.bind() and Shader.unbind() can be also be called implicitly by using the 'with' statement. Example of with statement with Shader:: with shader: mesh.draw()
[ "Activate", "this", "Shader", "making", "it", "the", "currently", "-", "bound", "program", "." ]
language: python | partition: train | avg_line_len: 31.045455
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/client.py#L342-L354
def collections(self):
    """List top-level collections of the client's database.

    Returns:
        Sequence[~.firestore_v1beta1.collection.CollectionReference]:
            iterator of the top-level collections of the client's database.
    """
    iterator = self._firestore_api.list_collection_ids(
        self._database_string, metadata=self._rpc_metadata
    )
    iterator.client = self
    iterator.item_to_value = _item_to_collection_ref
    return iterator
[ "def", "collections", "(", "self", ")", ":", "iterator", "=", "self", ".", "_firestore_api", ".", "list_collection_ids", "(", "self", ".", "_database_string", ",", "metadata", "=", "self", ".", "_rpc_metadata", ")", "iterator", ".", "client", "=", "self", "iterator", ".", "item_to_value", "=", "_item_to_collection_ref", "return", "iterator" ]
List top-level collections of the client's database. Returns: Sequence[~.firestore_v1beta1.collection.CollectionReference]: iterator of subcollections of the current document.
[ "List", "top", "-", "level", "collections", "of", "the", "client", "s", "database", "." ]
language: python | partition: train | avg_line_len: 37.769231
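A short usage sketch for collections() above, assuming ambient project and credentials are available to the client:

from google.cloud import firestore_v1beta1

client = firestore_v1beta1.Client()  # assumes default project/credentials
for collection in client.collections():
    print(collection.id)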
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2397-L2432
def get_bounding_box(self):
    """
    Returns the bounding box for this cell.

    Returns
    -------
    out : Numpy array[2,2] or ``None``
        Bounding box of this cell [[x_min, y_min], [x_max, y_max]], or
        ``None`` if the cell is empty.
    """
    if len(self.elements) == 0:
        return None
    if not (self._bb_valid and all(ref._bb_valid for ref in self.get_dependencies(True))):
        bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))
        all_polygons = []
        for element in self.elements:
            if isinstance(element, PolygonSet):
                all_polygons.extend(element.polygons)
            elif isinstance(element, CellReference) or isinstance(element, CellArray):
                element_bb = element.get_bounding_box()
                if element_bb is not None:
                    bb[0, 0] = min(bb[0, 0], element_bb[0, 0])
                    bb[0, 1] = min(bb[0, 1], element_bb[0, 1])
                    bb[1, 0] = max(bb[1, 0], element_bb[1, 0])
                    bb[1, 1] = max(bb[1, 1], element_bb[1, 1])
        if len(all_polygons) > 0:
            all_points = numpy.concatenate(all_polygons).transpose()
            bb[0, 0] = min(bb[0, 0], all_points[0].min())
            bb[0, 1] = min(bb[0, 1], all_points[1].min())
            bb[1, 0] = max(bb[1, 0], all_points[0].max())
            bb[1, 1] = max(bb[1, 1], all_points[1].max())
        self._bb_valid = True
        _bounding_boxes[self] = bb
    return _bounding_boxes[self]
[ "def", "get_bounding_box", "(", "self", ")", ":", "if", "len", "(", "self", ".", "elements", ")", "==", "0", ":", "return", "None", "if", "not", "(", "self", ".", "_bb_valid", "and", "all", "(", "ref", ".", "_bb_valid", "for", "ref", "in", "self", ".", "get_dependencies", "(", "True", ")", ")", ")", ":", "bb", "=", "numpy", ".", "array", "(", "(", "(", "1e300", ",", "1e300", ")", ",", "(", "-", "1e300", ",", "-", "1e300", ")", ")", ")", "all_polygons", "=", "[", "]", "for", "element", "in", "self", ".", "elements", ":", "if", "isinstance", "(", "element", ",", "PolygonSet", ")", ":", "all_polygons", ".", "extend", "(", "element", ".", "polygons", ")", "elif", "isinstance", "(", "element", ",", "CellReference", ")", "or", "isinstance", "(", "element", ",", "CellArray", ")", ":", "element_bb", "=", "element", ".", "get_bounding_box", "(", ")", "if", "element_bb", "is", "not", "None", ":", "bb", "[", "0", ",", "0", "]", "=", "min", "(", "bb", "[", "0", ",", "0", "]", ",", "element_bb", "[", "0", ",", "0", "]", ")", "bb", "[", "0", ",", "1", "]", "=", "min", "(", "bb", "[", "0", ",", "1", "]", ",", "element_bb", "[", "0", ",", "1", "]", ")", "bb", "[", "1", ",", "0", "]", "=", "max", "(", "bb", "[", "1", ",", "0", "]", ",", "element_bb", "[", "1", ",", "0", "]", ")", "bb", "[", "1", ",", "1", "]", "=", "max", "(", "bb", "[", "1", ",", "1", "]", ",", "element_bb", "[", "1", ",", "1", "]", ")", "if", "len", "(", "all_polygons", ")", ">", "0", ":", "all_points", "=", "numpy", ".", "concatenate", "(", "all_polygons", ")", ".", "transpose", "(", ")", "bb", "[", "0", ",", "0", "]", "=", "min", "(", "bb", "[", "0", ",", "0", "]", ",", "all_points", "[", "0", "]", ".", "min", "(", ")", ")", "bb", "[", "0", ",", "1", "]", "=", "min", "(", "bb", "[", "0", ",", "1", "]", ",", "all_points", "[", "1", "]", ".", "min", "(", ")", ")", "bb", "[", "1", ",", "0", "]", "=", "max", "(", "bb", "[", "1", ",", "0", "]", ",", "all_points", "[", "0", "]", ".", "max", "(", ")", ")", "bb", "[", "1", ",", "1", "]", "=", "max", "(", "bb", "[", "1", ",", "1", "]", ",", "all_points", "[", "1", "]", ".", "max", "(", ")", ")", "self", ".", "_bb_valid", "=", "True", "_bounding_boxes", "[", "self", "]", "=", "bb", "return", "_bounding_boxes", "[", "self", "]" ]
Returns the bounding box for this cell. Returns ------- out : Numpy array[2,2] or ``None`` Bounding box of this cell [[x_min, y_min], [x_max, y_max]], or ``None`` if the cell is empty.
[ "Returns", "the", "bounding", "box", "for", "this", "cell", "." ]
language: python | partition: train | avg_line_len: 45.444444
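A minimal gdspy sketch exercising get_bounding_box above, assuming a gdspy version contemporary with this snippet:

import gdspy

cell = gdspy.Cell('DEMO', exclude_from_current=True)
cell.add(gdspy.Rectangle((0, 0), (2, 1)))
print(cell.get_bounding_box())  # [[0. 0.] [2. 1.]]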
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L863-L880
def y_axis_rotation(theta):
    """Generates a 3x3 rotation matrix for a rotation of angle theta about the y axis.

    Parameters
    ----------
    theta : float
        amount to rotate, in radians

    Returns
    -------
    :obj:`numpy.ndarray` of float
        The 3x3 rotation matrix.
    """
    R = np.array([[np.cos(theta), 0, np.sin(theta)],
                  [0, 1, 0],
                  [-np.sin(theta), 0, np.cos(theta)]])
    return R
[ "def", "y_axis_rotation", "(", "theta", ")", ":", "R", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "0", ",", "np", ".", "sin", "(", "theta", ")", "]", ",", "[", "0", ",", "1", ",", "0", "]", ",", "[", "-", "np", ".", "sin", "(", "theta", ")", ",", "0", ",", "np", ".", "cos", "(", "theta", ")", "]", "]", ")", "return", "R" ]
Generates a 3x3 rotation matrix for a rotation of angle theta about the y axis. Parameters ---------- theta : float amount to rotate, in radians Returns ------- :obj:`numpy.ndarray` of float A random 3x3 rotation matrix.
[ "Generates", "a", "3x3", "rotation", "matrix", "for", "a", "rotation", "of", "angle", "theta", "about", "the", "y", "axis", "." ]
language: python | partition: train | avg_line_len: 27.888889
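A small sanity check for y_axis_rotation above: a quarter turn about the y axis rotates +z onto +x.

import numpy as np

R = y_axis_rotation(np.pi / 2)
print(np.round(R @ np.array([0.0, 0.0, 1.0]), 6))  # [1. 0. 0.]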
objectrocket/python-client
objectrocket/acls.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/acls.py#L78-L90
def get(self, instance, acl):
    """Get an ACL by ID belonging to the instance specified by name.

    :param str instance: The name of the instance from which to fetch the ACL.
    :param str acl: The ID of the ACL to fetch.
    :returns: An :py:class:`Acl` object, or None if ACL does not exist.
    :rtype: :py:class:`Acl`
    """
    base_url = self._url.format(instance=instance)
    url = '{base}{aclid}/'.format(base=base_url, aclid=acl)
    response = requests.get(url, **self._default_request_kwargs)
    data = self._get_response_data(response)
    return self._concrete_acl(data)
[ "def", "get", "(", "self", ",", "instance", ",", "acl", ")", ":", "base_url", "=", "self", ".", "_url", ".", "format", "(", "instance", "=", "instance", ")", "url", "=", "'{base}{aclid}/'", ".", "format", "(", "base", "=", "base_url", ",", "aclid", "=", "acl", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "*", "*", "self", ".", "_default_request_kwargs", ")", "data", "=", "self", ".", "_get_response_data", "(", "response", ")", "return", "self", ".", "_concrete_acl", "(", "data", ")" ]
Get an ACL by ID belonging to the instance specified by name. :param str instance: The name of the instance from which to fetch the ACL. :param str acl: The ID of the ACL to fetch. :returns: An :py:class:`Acl` object, or None if ACL does not exist. :rtype: :py:class:`Acl`
[ "Get", "an", "ACL", "by", "ID", "belonging", "to", "the", "instance", "specified", "by", "name", "." ]
language: python | partition: train | avg_line_len: 47.923077
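A hedged sketch for get above; the client constructor, the acls accessor, and both identifiers are assumptions about this client library, not documented facts:

import objectrocket

client = objectrocket.Client('<api-token>')           # assumed token-based auth
acl = client.acls.get('my-instance', '5541eba2a1')    # placeholder instance name and ACL ID
print(acl)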
dusktreader/flask-praetorian
flask_praetorian/base.py
https://github.com/dusktreader/flask-praetorian/blob/d530cf3ffeffd61bfff1b8c79e8b45e9bfa0db0c/flask_praetorian/base.py#L303-L318
def encode_eternal_jwt_token(self, user, **custom_claims):
    """
    This utility function encodes a jwt token that never expires

    .. note:: This should be used sparingly since the token could become
              a security concern if it is ever lost. If you use this
              method, you should be sure that your application also
              implements a blacklist so that a given token can be blocked
              should it be lost or become a security concern
    """
    return self.encode_jwt_token(
        user,
        override_access_lifespan=VITAM_AETERNUM,
        override_refresh_lifespan=VITAM_AETERNUM,
        **custom_claims
    )
[ "def", "encode_eternal_jwt_token", "(", "self", ",", "user", ",", "*", "*", "custom_claims", ")", ":", "return", "self", ".", "encode_jwt_token", "(", "user", ",", "override_access_lifespan", "=", "VITAM_AETERNUM", ",", "override_refresh_lifespan", "=", "VITAM_AETERNUM", ",", "*", "*", "custom_claims", ")" ]
This utility function encodes a jwt token that never expires .. note:: This should be used sparingly since the token could become a security concern if it is ever lost. If you use this method, you should be sure that your application also implements a blacklist so that a given token can be blocked should it be lost or become a security concern
[ "This", "utility", "function", "encodes", "a", "jwt", "token", "that", "never", "expires" ]
language: python | partition: train | avg_line_len: 43.9375
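A small sketch for encode_eternal_jwt_token above; `guard` and `service_user` are hypothetical, standing for an initialized Praetorian instance and a user record:

# `guard` is a flask_praetorian.Praetorian initialized with the app and user class;
# `service_user` is a hypothetical user instance from the datastore.
token = guard.encode_eternal_jwt_token(service_user, role='service')
# Pair this with a token blacklist, as the docstring advises.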
googleads/googleads-python-lib
examples/adwords/v201809/migration/migrate_to_extension_settings.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/migration/migrate_to_extension_settings.py#L324-L350
def GetFeedItemIdsForCampaign(campaign_feed):
    """Gets the Feed Item Ids used by a campaign through a given Campaign Feed.

    Args:
        campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.

    Returns:
        A list of Feed Item IDs.
    """
    feed_item_ids = set()
    try:
        lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
    except KeyError:
        lhs_operand = None

    if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==
            'RequestContextOperand'):
        request_context_operand = lhs_operand[0]
        if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and
                campaign_feed['matchingFunction']['operator'] == 'IN'):
            for argument in campaign_feed['matchingFunction']['rhsOperand']:
                if argument['xsi_type'] == 'ConstantOperand':
                    feed_item_ids.add(argument['longValue'])
    return feed_item_ids
[ "def", "GetFeedItemIdsForCampaign", "(", "campaign_feed", ")", ":", "feed_item_ids", "=", "set", "(", ")", "try", ":", "lhs_operand", "=", "campaign_feed", "[", "'matchingFunction'", "]", "[", "'lhsOperand'", "]", "except", "KeyError", ":", "lhs_operand", "=", "None", "if", "(", "lhs_operand", "and", "lhs_operand", "[", "0", "]", "[", "'FunctionArgumentOperand.Type'", "]", "==", "'RequestContextOperand'", ")", ":", "request_context_operand", "=", "lhs_operand", "[", "0", "]", "if", "(", "request_context_operand", "[", "'contextType'", "]", "==", "'FEED_ITEM_ID'", "and", "campaign_feed", "[", "'matchingFunction'", "]", "[", "'operator'", "]", "==", "'IN'", ")", ":", "for", "argument", "in", "campaign_feed", "[", "'matchingFunction'", "]", "[", "'rhsOperand'", "]", ":", "if", "argument", "[", "'xsi_type'", "]", "==", "'ConstantOperand'", ":", "feed_item_ids", ".", "add", "(", "argument", "[", "'longValue'", "]", ")", "return", "feed_item_ids" ]
Gets the Feed Item Ids used by a campaign through a given Campaign Feed. Args: campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from. Returns: A list of Feed Item IDs.
[ "Gets", "the", "Feed", "Item", "Ids", "used", "by", "a", "campaign", "through", "a", "given", "Campaign", "Feed", "." ]
language: python | partition: train | avg_line_len: 31.555556
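A self-contained sketch for GetFeedItemIdsForCampaign above, feeding it a minimal dict shaped like the Campaign Feed structure the code inspects (the IDs are made up):

campaign_feed = {
    'matchingFunction': {
        'lhsOperand': [{
            'FunctionArgumentOperand.Type': 'RequestContextOperand',
            'contextType': 'FEED_ITEM_ID',
        }],
        'operator': 'IN',
        'rhsOperand': [
            {'xsi_type': 'ConstantOperand', 'longValue': 123456},
            {'xsi_type': 'ConstantOperand', 'longValue': 789012},
        ],
    },
}
print(GetFeedItemIdsForCampaign(campaign_feed))  # {123456, 789012}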
PaulHancock/Aegean
AegeanTools/MIMAS.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L77-L92
def galactic2fk5(l, b):
    """
    Convert galactic l/b to fk5 ra/dec

    Parameters
    ----------
    l, b : float
        Galactic coordinates in radians.

    Returns
    -------
    ra, dec : float
        FK5 equatorial coordinates in radians.
    """
    a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic')
    return a.fk5.ra.radian, a.fk5.dec.radian
[ "def", "galactic2fk5", "(", "l", ",", "b", ")", ":", "a", "=", "SkyCoord", "(", "l", ",", "b", ",", "unit", "=", "(", "u", ".", "radian", ",", "u", ".", "radian", ")", ",", "frame", "=", "'galactic'", ")", "return", "a", ".", "fk5", ".", "ra", ".", "radian", ",", "a", ".", "fk5", ".", "dec", ".", "radian" ]
Convert galactic l/b to fk5 ra/dec Parameters ---------- l, b : float Galactic coordinates in radians. Returns ------- ra, dec : float FK5 ecliptic coordinates in radians.
[ "Convert", "galactic", "l", "/", "b", "to", "fk5", "ra", "/", "dec" ]
language: python | partition: train | avg_line_len: 22.1875
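A quick check of galactic2fk5 above (requires astropy's SkyCoord and units, as in the source module); the Galactic centre should land near RA 266.4 deg, Dec -28.9 deg in FK5:

import numpy as np

ra, dec = galactic2fk5(0.0, 0.0)        # Galactic centre; radians in and out
print(np.degrees(ra), np.degrees(dec))  # approx. 266.40 -28.94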
inveniosoftware/invenio-communities
invenio_communities/models.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L114-L152
def create(cls, community, record, user=None, expires_at=None, notify=True):
    """Create a record inclusion request to a community.

    :param community: Community object.
    :param record: Record API object.
    :param user: User making the request (optional).
    :param expires_at: Time after which the request expires and shouldn't
        be resolved anymore.
    :param notify: Flag forwarded with the ``inclusion_request_created`` signal.
    """
    if expires_at and expires_at < datetime.utcnow():
        raise InclusionRequestExpiryTimeError(community=community, record=record)

    if community.has_record(record):
        raise InclusionRequestObsoleteError(community=community, record=record)

    try:
        # Create inclusion request
        with db.session.begin_nested():
            obj = cls(
                id_community=community.id,
                id_record=record.id,
                user=user,
                expires_at=expires_at
            )
            db.session.add(obj)
    except (IntegrityError, FlushError):
        raise InclusionRequestExistsError(community=community, record=record)

    # Send signal
    inclusion_request_created.send(
        current_app._get_current_object(),
        request=obj,
        notify=notify
    )
    return obj
[ "def", "create", "(", "cls", ",", "community", ",", "record", ",", "user", "=", "None", ",", "expires_at", "=", "None", ",", "notify", "=", "True", ")", ":", "if", "expires_at", "and", "expires_at", "<", "datetime", ".", "utcnow", "(", ")", ":", "raise", "InclusionRequestExpiryTimeError", "(", "community", "=", "community", ",", "record", "=", "record", ")", "if", "community", ".", "has_record", "(", "record", ")", ":", "raise", "InclusionRequestObsoleteError", "(", "community", "=", "community", ",", "record", "=", "record", ")", "try", ":", "# Create inclusion request", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "obj", "=", "cls", "(", "id_community", "=", "community", ".", "id", ",", "id_record", "=", "record", ".", "id", ",", "user", "=", "user", ",", "expires_at", "=", "expires_at", ")", "db", ".", "session", ".", "add", "(", "obj", ")", "except", "(", "IntegrityError", ",", "FlushError", ")", ":", "raise", "InclusionRequestExistsError", "(", "community", "=", "community", ",", "record", "=", "record", ")", "# Send signal", "inclusion_request_created", ".", "send", "(", "current_app", ".", "_get_current_object", "(", ")", ",", "request", "=", "obj", ",", "notify", "=", "notify", ")", "return", "obj" ]
Create a record inclusion request to a community. :param community: Community object. :param record: Record API object. :param expires_at: Time after which the request expires and shouldn't be resolved anymore.
[ "Create", "a", "record", "inclusion", "request", "to", "a", "community", "." ]
language: python | partition: train | avg_line_len: 33.384615
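A hedged sketch for create above; `community` and `record` are hypothetical objects obtained through Invenio's own APIs:

from datetime import datetime, timedelta

# `community` (Community) and `record` (Record API object) fetched elsewhere.
request = InclusionRequest.create(
    community=community,
    record=record,
    expires_at=datetime.utcnow() + timedelta(days=7),  # auto-expire in a week
)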
Dispersive-Hydrodynamics-Lab/PACE
PACE/PACE.py
https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L310-L326
def plot_file(self, name: str = None, time: int = None) -> None:
    """
    Plot specific time for provided datafile. If no time provided, will plot middle.

    :param name: savefile name
    :param time: time/data column
    """
    if not time:
        time = int(len(self.times) / 2)
    if not name:
        name = './img/' + self.filename + '.png'

    yhat, residuals, residual_mean, noise = self._get_fit(time)

    plt.figure()
    plt.scatter(self.domain, self.averagedata[:, time], alpha=0.2)
    plt.plot(yhat)
    plt.savefig(name)
[ "def", "plot_file", "(", "self", ",", "name", ":", "str", "=", "None", ",", "time", ":", "int", "=", "None", ")", "->", "None", ":", "if", "not", "time", ":", "time", "=", "int", "(", "len", "(", "self", ".", "times", ")", "/", "2", ")", "if", "not", "name", ":", "name", "=", "'./img/'", "+", "self", ".", "filename", "+", "'.png'", "yhat", ",", "residuals", ",", "residual_mean", ",", "noise", "=", "self", ".", "_get_fit", "(", "time", ")", "plt", ".", "figure", "(", ")", "plt", ".", "scatter", "(", "self", ".", "domain", ",", "self", ".", "averagedata", "[", ":", ",", "time", "]", ",", "alpha", "=", "0.2", ")", "plt", ".", "plot", "(", "yhat", ")", "plt", ".", "savefig", "(", "name", ")" ]
Plot specific time for provided datafile. If no time provided, will plot middle. :param: savefile name :param: time/data column
[ "Plot", "specific", "time", "for", "provided", "datafile", ".", "If", "no", "time", "provided", "will", "plot", "middle", "." ]
language: python | partition: train | avg_line_len: 33.941176
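Two hypothetical calls to plot_file above, on an instance (here `datafile`) of the class that defines it:

datafile.plot_file()                         # middle time column, default ./img/ name
datafile.plot_file(name='out.png', time=10)  # explicit savefile and column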
Erotemic/utool
utool/util_dict.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L1317-L1350
def merge_dicts(*args):
    r"""
    add / concatenate / union / join / merge / combine dictionaries

    Copies the first dictionary given and then repeatedly calls update using
    the rest of the dicts given in args. Duplicate keys will receive the last
    value specified in the list of dictionaries.

    Returns:
        dict: mergedict_

    CommandLine:
        python -m utool.util_dict --test-merge_dicts

    References:
        http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> x = {'a': 1, 'b': 2}
        >>> y = {'b': 3, 'c': 4}
        >>> mergedict_ = merge_dicts(x, y)
        >>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
        >>> print(result)
        {'a': 1, 'b': 3, 'c': 4}
    """
    iter_ = iter(args)
    mergedict_ = six.next(iter_).copy()
    for dict_ in iter_:
        mergedict_.update(dict_)
    return mergedict_
[ "def", "merge_dicts", "(", "*", "args", ")", ":", "iter_", "=", "iter", "(", "args", ")", "mergedict_", "=", "six", ".", "next", "(", "iter_", ")", ".", "copy", "(", ")", "for", "dict_", "in", "iter_", ":", "mergedict_", ".", "update", "(", "dict_", ")", "return", "mergedict_" ]
r""" add / concatenate / union / join / merge / combine dictionaries Copies the first dictionary given and then repeatedly calls update using the rest of the dicts given in args. Duplicate keys will receive the last value specified the list of dictionaries. Returns: dict: mergedict_ CommandLine: python -m utool.util_dict --test-merge_dicts References: http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> x = {'a': 1, 'b': 2} >>> y = {'b': 3, 'c': 4} >>> mergedict_ = merge_dicts(x, y) >>> result = ut.repr4(mergedict_, sorted_=True, newlines=False) >>> print(result) {'a': 1, 'b': 3, 'c': 4}
[ "r", "add", "/", "concatenate", "/", "union", "/", "join", "/", "merge", "/", "combine", "dictionaries" ]
language: python | partition: train | avg_line_len: 30.088235
fabaff/python-mystrom
pymystrom/bulb.py
https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L139-L149
def set_color_hsv(self, hue, saturation, value):
    """Turn the bulb on with the given values as HSV."""
    try:
        data = "action=on&color={};{};{}".format(hue, saturation, value)
        request = requests.post(
            '{}/{}/{}'.format(self.resource, URI, self._mac),
            data=data, timeout=self.timeout)
        if request.status_code == 200:
            self.data['on'] = True
    except requests.exceptions.ConnectionError:
        raise exceptions.MyStromConnectionError()
[ "def", "set_color_hsv", "(", "self", ",", "hue", ",", "saturation", ",", "value", ")", ":", "try", ":", "data", "=", "\"action=on&color={};{};{}\"", ".", "format", "(", "hue", ",", "saturation", ",", "value", ")", "request", "=", "requests", ".", "post", "(", "'{}/{}/{}'", ".", "format", "(", "self", ".", "resource", ",", "URI", ",", "self", ".", "_mac", ")", ",", "data", "=", "data", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "request", ".", "status_code", "==", "200", ":", "self", ".", "data", "[", "'on'", "]", "=", "True", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise", "exceptions", ".", "MyStromConnectionError", "(", ")" ]
Turn the bulb on with the given values as HSV.
[ "Turn", "the", "bulb", "on", "with", "the", "given", "values", "as", "HSV", "." ]
language: python | partition: train | avg_line_len: 48.090909
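A hedged sketch for set_color_hsv above; the constructor arguments (host and MAC) are assumptions about this era of pymystrom, and both values are placeholders:

from pymystrom.bulb import MyStromBulb

bulb = MyStromBulb('192.168.0.40', '5CCF7FA0B084')  # placeholder host and MAC
bulb.set_color_hsv(120, 100, 100)                   # saturated green, full brightness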
apache/incubator-mxnet
python/mxnet/image/detection.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/detection.py#L235-L250
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
    """Check if constraints are satisfied"""
    if (xmax - xmin) * (ymax - ymin) < 2:
        return False  # only 1 pixel
    x1 = float(xmin) / width
    y1 = float(ymin) / height
    x2 = float(xmax) / width
    y2 = float(ymax) / height
    object_areas = self._calculate_areas(label[:, 1:])
    valid_objects = np.where(object_areas * width * height > 2)[0]
    if valid_objects.size < 1:
        return False
    intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
    coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
    coverages = coverages[np.where(coverages > 0)[0]]
    return coverages.size > 0 and np.amin(coverages) > self.min_object_covered
[ "def", "_check_satisfy_constraints", "(", "self", ",", "label", ",", "xmin", ",", "ymin", ",", "xmax", ",", "ymax", ",", "width", ",", "height", ")", ":", "if", "(", "xmax", "-", "xmin", ")", "*", "(", "ymax", "-", "ymin", ")", "<", "2", ":", "return", "False", "# only 1 pixel", "x1", "=", "float", "(", "xmin", ")", "/", "width", "y1", "=", "float", "(", "ymin", ")", "/", "height", "x2", "=", "float", "(", "xmax", ")", "/", "width", "y2", "=", "float", "(", "ymax", ")", "/", "height", "object_areas", "=", "self", ".", "_calculate_areas", "(", "label", "[", ":", ",", "1", ":", "]", ")", "valid_objects", "=", "np", ".", "where", "(", "object_areas", "*", "width", "*", "height", ">", "2", ")", "[", "0", "]", "if", "valid_objects", ".", "size", "<", "1", ":", "return", "False", "intersects", "=", "self", ".", "_intersect", "(", "label", "[", "valid_objects", ",", "1", ":", "]", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ")", "coverages", "=", "self", ".", "_calculate_areas", "(", "intersects", ")", "/", "object_areas", "[", "valid_objects", "]", "coverages", "=", "coverages", "[", "np", ".", "where", "(", "coverages", ">", "0", ")", "[", "0", "]", "]", "return", "coverages", ".", "size", ">", "0", "and", "np", ".", "amin", "(", "coverages", ")", ">", "self", ".", "min_object_covered" ]
Check if constrains are satisfied
[ "Check", "if", "constrains", "are", "satisfied" ]
language: python | partition: train | avg_line_len: 51.9375
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_add_longslit_model.py
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_add_longslit_model.py#L44-L266
def rectwv_coeff_add_longslit_model(rectwv_coeff, geometry, debugplot=0):
    """Compute longslit_model coefficients for RectWaveCoeff object.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for a
        particular CSU configuration corresponding to a longslit
        observation.
    geometry : TBD
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    rectwv_coeff : RectWaveCoeff instance
        Updated object with longslit_model coefficients computed.
    """
    logger = logging.getLogger(__name__)

    # check grism and filter
    grism_name = rectwv_coeff.tags['grism']
    logger.info('Grism: ' + grism_name)
    filter_name = rectwv_coeff.tags['filter']
    logger.info('Filter: ' + filter_name)

    # list of slitlets to be computed
    list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
    for idel in rectwv_coeff.missing_slitlets:
        list_valid_islitlets.remove(idel)
    if abs(debugplot) >= 10:
        print('>>> valid slitlet numbers:\n', list_valid_islitlets)

    # ---

    # check that the CSU configuration corresponds to longslit
    csu_bar_slit_center_list = []
    for islitlet in list_valid_islitlets:
        csu_bar_slit_center_list.append(
            rectwv_coeff.contents[islitlet - 1]['csu_bar_slit_center']
        )
    if abs(debugplot) >= 10:
        logger.debug('Checking csu_bar_slit_center values:')
        summary(np.array(csu_bar_slit_center_list), debug=True)
        pause_debugplot(debugplot)

    # ---

    # polynomial coefficients corresponding to the wavelength calibration

    # step 0: determine poldeg_refined, checking that it is the same for
    # all the slitlets
    poldeg_refined_list = []
    for islitlet in list_valid_islitlets:
        poldeg_refined_list.append(
            len(rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']) - 1
        )
    # remove duplicates
    poldeg_refined_list = list(set(poldeg_refined_list))
    if len(poldeg_refined_list) != 1:
        raise ValueError('Unexpected different poldeg_refined found: ' +
                         str(poldeg_refined_list))
    poldeg_refined = poldeg_refined_list[0]

    # step 1: compute variation of each coefficient as a function of
    # y0_reference_middle of each slitlet
    list_poly = []
    for i in range(poldeg_refined + 1):
        xp = []
        yp = []
        for islitlet in list_valid_islitlets:
            tmp_dict = rectwv_coeff.contents[islitlet - 1]
            wpoly_coeff = tmp_dict['wpoly_coeff']
            if wpoly_coeff is not None:
                xp.append(tmp_dict['y0_reference_middle'])
                yp.append(wpoly_coeff[i])
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp),
            deg=2,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='coeff[' + str(i) + ']',
            title="Fit to refined wavelength calibration coefficients",
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly.append(poly)

    # step 2: use the variation of each polynomial coefficient with
    # y0_reference_middle to infer the expected wavelength calibration
    # polynomial for each rectified slitlet
    for islitlet in list_valid_islitlets:
        tmp_dict = rectwv_coeff.contents[islitlet - 1]
        y0_reference_middle = tmp_dict['y0_reference_middle']
        list_new_coeff = []
        for i in range(poldeg_refined + 1):
            new_coeff = list_poly[i](y0_reference_middle)
            list_new_coeff.append(new_coeff)
        tmp_dict['wpoly_coeff_longslit_model'] = list_new_coeff

    # ---

    # rectification transformation coefficients aij and bij

    # step 0: determine order_fmap, checking that it is the same for
    # all the slitlets
    order_fmap_list = []
    for islitlet in list_valid_islitlets:
        order_fmap_list.append(
            rectwv_coeff.contents[islitlet - 1]['ttd_order']
        )
    # remove duplicates
    order_fmap_list = list(set(order_fmap_list))
    if len(order_fmap_list) != 1:
        raise ValueError('Unexpected different order_fmap found')
    order_fmap = order_fmap_list[0]

    # step 1: compute variation of each coefficient as a function of
    # y0_reference_middle of each slitlet
    list_poly_ttd_aij = []
    list_poly_ttd_bij = []
    list_poly_tti_aij = []
    list_poly_tti_bij = []
    ncoef_ttd = ncoef_fmap(order_fmap)
    for i in range(ncoef_ttd):
        xp = []
        yp_ttd_aij = []
        yp_ttd_bij = []
        yp_tti_aij = []
        yp_tti_bij = []
        for islitlet in list_valid_islitlets:
            tmp_dict = rectwv_coeff.contents[islitlet - 1]
            ttd_aij = tmp_dict['ttd_aij']
            ttd_bij = tmp_dict['ttd_bij']
            tti_aij = tmp_dict['tti_aij']
            tti_bij = tmp_dict['tti_bij']
            if ttd_aij is not None:
                xp.append(tmp_dict['y0_reference_middle'])
                yp_ttd_aij.append(ttd_aij[i])
                yp_ttd_bij.append(ttd_bij[i])
                yp_tti_aij.append(tti_aij[i])
                yp_tti_bij.append(tti_bij[i])
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_ttd_aij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='ttd_aij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_ttd_aij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_ttd_bij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='ttd_bij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_ttd_bij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_tti_aij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='tti_aij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_tti_aij.append(poly)
        poly, yres, reject = polfit_residuals_with_sigma_rejection(
            x=np.array(xp),
            y=np.array(yp_tti_bij),
            deg=5,
            times_sigma_reject=5,
            xlabel='y0_rectified',
            ylabel='tti_bij[' + str(i) + ']',
            geometry=geometry,
            debugplot=debugplot
        )
        list_poly_tti_bij.append(poly)

    # step 2: use the variation of each coefficient with y0_reference_middle
    # to infer the expected rectification transformation for each slitlet
    for islitlet in list_valid_islitlets:
        tmp_dict = rectwv_coeff.contents[islitlet - 1]
        y0_reference_middle = tmp_dict['y0_reference_middle']
        tmp_dict['ttd_order_longslit_model'] = order_fmap
        ttd_aij_longslit_model = []
        ttd_bij_longslit_model = []
        tti_aij_longslit_model = []
        tti_bij_longslit_model = []
        for i in range(ncoef_ttd):
            new_coeff = list_poly_ttd_aij[i](y0_reference_middle)
            ttd_aij_longslit_model.append(new_coeff)
            new_coeff = list_poly_ttd_bij[i](y0_reference_middle)
            ttd_bij_longslit_model.append(new_coeff)
            new_coeff = list_poly_tti_aij[i](y0_reference_middle)
            tti_aij_longslit_model.append(new_coeff)
            new_coeff = list_poly_tti_bij[i](y0_reference_middle)
            tti_bij_longslit_model.append(new_coeff)
        tmp_dict['ttd_aij_longslit_model'] = ttd_aij_longslit_model
        tmp_dict['ttd_bij_longslit_model'] = ttd_bij_longslit_model
        tmp_dict['tti_aij_longslit_model'] = tti_aij_longslit_model
        tmp_dict['tti_bij_longslit_model'] = tti_bij_longslit_model

    # ---

    # update uuid and meta_info in output JSON structure
    rectwv_coeff.uuid = str(uuid4())
    rectwv_coeff.meta_info['creation_date'] = datetime.now().isoformat()

    # return updated object
    return rectwv_coeff
[ "def", "rectwv_coeff_add_longslit_model", "(", "rectwv_coeff", ",", "geometry", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# check grism and filter", "grism_name", "=", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "logger", ".", "info", "(", "'Grism: '", "+", "grism_name", ")", "filter_name", "=", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "logger", ".", "info", "(", "'Filter: '", "+", "filter_name", ")", "# list of slitlets to be computed", "list_valid_islitlets", "=", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "for", "idel", "in", "rectwv_coeff", ".", "missing_slitlets", ":", "list_valid_islitlets", ".", "remove", "(", "idel", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> valid slitlet numbers:\\n'", ",", "list_valid_islitlets", ")", "# ---", "# check that the CSU configuration corresponds to longslit", "csu_bar_slit_center_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "csu_bar_slit_center_list", ".", "append", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'csu_bar_slit_center'", "]", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "logger", ".", "debug", "(", "'Checking csu_bar_slit_center values:'", ")", "summary", "(", "np", ".", "array", "(", "csu_bar_slit_center_list", ")", ",", "debug", "=", "True", ")", "pause_debugplot", "(", "debugplot", ")", "# ---", "# polynomial coefficients corresponding to the wavelength calibration", "# step 0: determine poldeg_refined, checking that it is the same for", "# all the slitlets", "poldeg_refined_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "poldeg_refined_list", ".", "append", "(", "len", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'wpoly_coeff'", "]", ")", "-", "1", ")", "# remove duplicates", "poldeg_refined_list", "=", "list", "(", "set", "(", "poldeg_refined_list", ")", ")", "if", "len", "(", "poldeg_refined_list", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Unexpected different poldeg_refined found: '", "+", "str", "(", "poldeg_refined_list", ")", ")", "poldeg_refined", "=", "poldeg_refined_list", "[", "0", "]", "# step 1: compute variation of each coefficient as a function of", "# y0_reference_middle of each slitlet", "list_poly", "=", "[", "]", "for", "i", "in", "range", "(", "poldeg_refined", "+", "1", ")", ":", "xp", "=", "[", "]", "yp", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "wpoly_coeff", "=", "tmp_dict", "[", "'wpoly_coeff'", "]", "if", "wpoly_coeff", "is", "not", "None", ":", "xp", ".", "append", "(", "tmp_dict", "[", "'y0_reference_middle'", "]", ")", "yp", ".", "append", "(", "wpoly_coeff", "[", "i", "]", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp", ")", ",", "deg", "=", "2", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'coeff['", "+", "str", "(", "i", ")", "+", "']'", ",", "title", "=", "\"Fit to refined wavelength calibration coefficients\"", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly", ".", "append", "(", "poly", ")", "# step 2: use the variation of each polynomial coefficient with", "# y0_reference_middle to infer 
the expected wavelength calibration", "# polynomial for each rectifified slitlet", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "y0_reference_middle", "=", "tmp_dict", "[", "'y0_reference_middle'", "]", "list_new_coeff", "=", "[", "]", "for", "i", "in", "range", "(", "poldeg_refined", "+", "1", ")", ":", "new_coeff", "=", "list_poly", "[", "i", "]", "(", "y0_reference_middle", ")", "list_new_coeff", ".", "append", "(", "new_coeff", ")", "tmp_dict", "[", "'wpoly_coeff_longslit_model'", "]", "=", "list_new_coeff", "# ---", "# rectification transformation coefficients aij and bij", "# step 0: determine order_fmap, checking that it is the same for", "# all the slitlets", "order_fmap_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "order_fmap_list", ".", "append", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'ttd_order'", "]", ")", "# remove duplicates", "order_fmap_list", "=", "list", "(", "set", "(", "order_fmap_list", ")", ")", "if", "len", "(", "order_fmap_list", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Unexpected different order_fmap found'", ")", "order_fmap", "=", "order_fmap_list", "[", "0", "]", "# step 1: compute variation of each coefficient as a function of", "# y0_reference_middle of each slitlet", "list_poly_ttd_aij", "=", "[", "]", "list_poly_ttd_bij", "=", "[", "]", "list_poly_tti_aij", "=", "[", "]", "list_poly_tti_bij", "=", "[", "]", "ncoef_ttd", "=", "ncoef_fmap", "(", "order_fmap", ")", "for", "i", "in", "range", "(", "ncoef_ttd", ")", ":", "xp", "=", "[", "]", "yp_ttd_aij", "=", "[", "]", "yp_ttd_bij", "=", "[", "]", "yp_tti_aij", "=", "[", "]", "yp_tti_bij", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "ttd_aij", "=", "tmp_dict", "[", "'ttd_aij'", "]", "ttd_bij", "=", "tmp_dict", "[", "'ttd_bij'", "]", "tti_aij", "=", "tmp_dict", "[", "'tti_aij'", "]", "tti_bij", "=", "tmp_dict", "[", "'tti_bij'", "]", "if", "ttd_aij", "is", "not", "None", ":", "xp", ".", "append", "(", "tmp_dict", "[", "'y0_reference_middle'", "]", ")", "yp_ttd_aij", ".", "append", "(", "ttd_aij", "[", "i", "]", ")", "yp_ttd_bij", ".", "append", "(", "ttd_bij", "[", "i", "]", ")", "yp_tti_aij", ".", "append", "(", "tti_aij", "[", "i", "]", ")", "yp_tti_bij", ".", "append", "(", "tti_bij", "[", "i", "]", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_ttd_aij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'ttd_aij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_ttd_aij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_ttd_bij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'ttd_bij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_ttd_bij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", 
"polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_tti_aij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'tti_aij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_tti_aij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_tti_bij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'tti_bij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_tti_bij", ".", "append", "(", "poly", ")", "# step 2: use the variation of each coefficient with y0_reference_middle", "# to infer the expected rectification transformation for each slitlet", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "y0_reference_middle", "=", "tmp_dict", "[", "'y0_reference_middle'", "]", "tmp_dict", "[", "'ttd_order_longslit_model'", "]", "=", "order_fmap", "ttd_aij_longslit_model", "=", "[", "]", "ttd_bij_longslit_model", "=", "[", "]", "tti_aij_longslit_model", "=", "[", "]", "tti_bij_longslit_model", "=", "[", "]", "for", "i", "in", "range", "(", "ncoef_ttd", ")", ":", "new_coeff", "=", "list_poly_ttd_aij", "[", "i", "]", "(", "y0_reference_middle", ")", "ttd_aij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_ttd_bij", "[", "i", "]", "(", "y0_reference_middle", ")", "ttd_bij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_tti_aij", "[", "i", "]", "(", "y0_reference_middle", ")", "tti_aij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_tti_bij", "[", "i", "]", "(", "y0_reference_middle", ")", "tti_bij_longslit_model", ".", "append", "(", "new_coeff", ")", "tmp_dict", "[", "'ttd_aij_longslit_model'", "]", "=", "ttd_aij_longslit_model", "tmp_dict", "[", "'ttd_bij_longslit_model'", "]", "=", "ttd_bij_longslit_model", "tmp_dict", "[", "'tti_aij_longslit_model'", "]", "=", "tti_aij_longslit_model", "tmp_dict", "[", "'tti_bij_longslit_model'", "]", "=", "tti_bij_longslit_model", "# ---", "# update uuid and meta_info in output JSON structure", "rectwv_coeff", ".", "uuid", "=", "str", "(", "uuid4", "(", ")", ")", "rectwv_coeff", ".", "meta_info", "[", "'creation_date'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "# return updated object", "return", "rectwv_coeff" ]
Compute longslit_model coefficients for RectWaveCoeff object. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for a particular CSU configuration corresponding to a longslit observation. geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Updated object with longslit_model coefficients computed.
[ "Compute", "longslit_model", "coefficients", "for", "RectWaveCoeff", "object", "." ]
python
train
36.520179
zhanglab/psamm
psamm/datasource/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/sbml.py#L891-L914
def _make_safe_id(self, id):
    """Returns a modified id that has been made safe for SBML.

    Replaces or deletes characters that aren't allowed.
    """

    substitutions = {
        '-': '_DASH_',
        '/': '_FSLASH_',
        '\\': '_BSLASH_',
        '(': '_LPAREN_',
        ')': '_RPAREN_',
        '[': '_LSQBKT_',
        ']': '_RSQBKT_',
        ',': '_COMMA_',
        '.': '_PERIOD_',
        "'": '_APOS_'
    }

    id = re.sub(r'\(([a-z])\)$', '_\\1', id)
    for symbol, escape in iteritems(substitutions):
        id = id.replace(symbol, escape)
    id = re.sub(r'[^a-zA-Z0-9_]', '', id)

    return id
[ "def", "_make_safe_id", "(", "self", ",", "id", ")", ":", "substitutions", "=", "{", "'-'", ":", "'_DASH_'", ",", "'/'", ":", "'_FSLASH_'", ",", "'\\\\'", ":", "'_BSLASH_'", ",", "'('", ":", "'_LPAREN_'", ",", "')'", ":", "'_RPAREN_'", ",", "'['", ":", "'_LSQBKT_'", ",", "']'", ":", "'_RSQBKT_'", ",", "','", ":", "'_COMMA_'", ",", "'.'", ":", "'_PERIOD_'", ",", "\"'\"", ":", "'_APOS_'", "}", "id", "=", "re", ".", "sub", "(", "r'\\(([a-z])\\)$'", ",", "'_\\\\1'", ",", "id", ")", "for", "symbol", ",", "escape", "in", "iteritems", "(", "substitutions", ")", ":", "id", "=", "id", ".", "replace", "(", "symbol", ",", "escape", ")", "id", "=", "re", ".", "sub", "(", "r'[^a-zA-Z0-9_]'", ",", "''", ",", "id", ")", "return", "id" ]
Returns a modified id that has been made safe for SBML.

Replaces or deletes characters that aren't allowed.
[ "Returns", "a", "modified", "id", "that", "has", "been", "made", "safe", "for", "SBML", "." ]
python
train
28.291667
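A usage sketch of the escaping logic above (not part of the dataset record): the helper below inlines the same substitution table as a free function, drops the method's self parameter, and replaces six's iteritems with a plain .items() loop so it runs on Python 3 as-is.

import re

def make_safe_id(id_):
    # same escape table as _make_safe_id above
    substitutions = {
        '-': '_DASH_', '/': '_FSLASH_', '\\': '_BSLASH_',
        '(': '_LPAREN_', ')': '_RPAREN_', '[': '_LSQBKT_',
        ']': '_RSQBKT_', ',': '_COMMA_', '.': '_PERIOD_', "'": '_APOS_'
    }
    # a trailing compartment suffix like "(c)" becomes "_c" before escaping
    id_ = re.sub(r'\(([a-z])\)$', '_\\1', id_)
    for symbol, escape in substitutions.items():
        id_ = id_.replace(symbol, escape)
    # drop anything that is still not a valid SBML id character
    return re.sub(r'[^a-zA-Z0-9_]', '', id_)

print(make_safe_id('ac(c)'))     # ac_c
print(make_safe_id('glc-D[e]'))  # glc_DASH_D_LSQBKT_e_RSQBKT_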
iotile/coretools
iotilesensorgraph/iotile/sg/parser/scopes/gated_clock_scope.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilesensorgraph/iotile/sg/parser/scopes/gated_clock_scope.py#L48-L86
def clock(self, interval, basis): """Return a NodeInput tuple for triggering an event every interval. We request each distinct type of clock at most once and combine it with our latch stream each time it is requested. Args: interval (int): The interval (in seconds) at which this input should trigger. """ cache_name = self._classify_clock(interval, basis) cache_data = self.clock_cache.get(cache_name) if cache_data is None: parent_stream, trigger = self.parent.clock(interval, basis) if trigger.use_count is False: raise SensorGraphSemanticError("Unsupported clock trigger in GatedClockScope", trigger=trigger) elif interval % trigger.reference != 0: raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", trigger=trigger, interval=interval) ratio = interval // trigger.reference stream = self.allocator.allocate_stream(DataStream.CounterType) latch_stream = self.allocator.attach_stream(self.latch_stream) self.sensor_graph.add_node(u'({} always && {} {}) => {} using copy_latest_a'.format(parent_stream, latch_stream, self.latch_trigger, stream)) self.clock_cache[cache_name] = (stream, ratio) else: stream, ratio = cache_data if interval % ratio != 0: raise SensorGraphSemanticError("Unsupported trigger ratio in GatedClockScope", ratio=ratio, interval=interval) count = interval // ratio clock_stream = self.allocator.attach_stream(stream) return clock_stream, InputTrigger(u'count', '>=', count)
[ "def", "clock", "(", "self", ",", "interval", ",", "basis", ")", ":", "cache_name", "=", "self", ".", "_classify_clock", "(", "interval", ",", "basis", ")", "cache_data", "=", "self", ".", "clock_cache", ".", "get", "(", "cache_name", ")", "if", "cache_data", "is", "None", ":", "parent_stream", ",", "trigger", "=", "self", ".", "parent", ".", "clock", "(", "interval", ",", "basis", ")", "if", "trigger", ".", "use_count", "is", "False", ":", "raise", "SensorGraphSemanticError", "(", "\"Unsupported clock trigger in GatedClockScope\"", ",", "trigger", "=", "trigger", ")", "elif", "interval", "%", "trigger", ".", "reference", "!=", "0", ":", "raise", "SensorGraphSemanticError", "(", "\"Unsupported trigger ratio in GatedClockScope\"", ",", "trigger", "=", "trigger", ",", "interval", "=", "interval", ")", "ratio", "=", "interval", "//", "trigger", ".", "reference", "stream", "=", "self", ".", "allocator", ".", "allocate_stream", "(", "DataStream", ".", "CounterType", ")", "latch_stream", "=", "self", ".", "allocator", ".", "attach_stream", "(", "self", ".", "latch_stream", ")", "self", ".", "sensor_graph", ".", "add_node", "(", "u'({} always && {} {}) => {} using copy_latest_a'", ".", "format", "(", "parent_stream", ",", "latch_stream", ",", "self", ".", "latch_trigger", ",", "stream", ")", ")", "self", ".", "clock_cache", "[", "cache_name", "]", "=", "(", "stream", ",", "ratio", ")", "else", ":", "stream", ",", "ratio", "=", "cache_data", "if", "interval", "%", "ratio", "!=", "0", ":", "raise", "SensorGraphSemanticError", "(", "\"Unsupported trigger ratio in GatedClockScope\"", ",", "ratio", "=", "ratio", ",", "interval", "=", "interval", ")", "count", "=", "interval", "//", "ratio", "clock_stream", "=", "self", ".", "allocator", ".", "attach_stream", "(", "stream", ")", "return", "clock_stream", ",", "InputTrigger", "(", "u'count'", ",", "'>='", ",", "count", ")" ]
Return a NodeInput tuple for triggering an event every interval. We request each distinct type of clock at most once and combine it with our latch stream each time it is requested. Args: interval (int): The interval (in seconds) at which this input should trigger.
[ "Return", "a", "NodeInput", "tuple", "for", "triggering", "an", "event", "every", "interval", "." ]
python
train
43.153846
NoneGG/aredis
aredis/scripting.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/scripting.py#L15-L31
async def execute(self, keys=[], args=[], client=None):
    "Execute the script, passing any required ``args``"
    if client is None:
        client = self.registered_client
    args = tuple(keys) + tuple(args)
    # make sure the Redis server knows about the script
    if isinstance(client, BasePipeline):
        # make sure this script is good to go on pipeline
        client.scripts.add(self)
    try:
        return await client.evalsha(self.sha, len(keys), *args)
    except NoScriptError:
        # Maybe the client is pointed to a different server than the client
        # that created this instance?
        # Overwrite the sha just in case there was a discrepancy.
        self.sha = await client.script_load(self.script)
        return await client.evalsha(self.sha, len(keys), *args)
[ "async", "def", "execute", "(", "self", ",", "keys", "=", "[", "]", ",", "args", "=", "[", "]", ",", "client", "=", "None", ")", ":", "if", "client", "is", "None", ":", "client", "=", "self", ".", "registered_client", "args", "=", "tuple", "(", "keys", ")", "+", "tuple", "(", "args", ")", "# make sure the Redis server knows about the script", "if", "isinstance", "(", "client", ",", "BasePipeline", ")", ":", "# make sure this script is good to go on pipeline", "client", ".", "scripts", ".", "add", "(", "self", ")", "try", ":", "return", "await", "client", ".", "evalsha", "(", "self", ".", "sha", ",", "len", "(", "keys", ")", ",", "*", "args", ")", "except", "NoScriptError", ":", "# Maybe the client is pointed to a differnet server than the client", "# that created this instance?", "# Overwrite the sha just in case there was a discrepancy.", "self", ".", "sha", "=", "await", "client", ".", "script_load", "(", "self", ".", "script", ")", "return", "await", "client", ".", "evalsha", "(", "self", ".", "sha", ",", "len", "(", "keys", ")", ",", "*", "args", ")" ]
Execute the script, passing any required ``args``
[ "Execute", "the", "script", "passing", "any", "required", "args" ]
python
train
49.823529
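A usage sketch (not part of the dataset record), assuming aredis mirrors redis-py's register_script helper, which returns a Script object like the one above; the host, key name, and Lua body are illustrative.

import asyncio
from aredis import StrictRedis

async def main():
    client = StrictRedis(host='127.0.0.1', port=6379)
    # register_script returns a Script; execute() falls back to
    # SCRIPT LOAD when the server does not yet know the script's SHA
    incr_by = client.register_script(
        "return redis.call('INCRBY', KEYS[1], ARGV[1])")
    print(await incr_by.execute(keys=['counter'], args=[5]))

asyncio.get_event_loop().run_until_complete(main())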
SheffieldML/GPy
GPy/kern/src/stationary.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/stationary.py#L118-L123
def dK_dr_via_X(self, X, X2): """ compute the derivative of K wrt X going through X """ #a convenience function, so we can cache dK_dr return self.dK_dr(self._scaled_dist(X, X2))
[ "def", "dK_dr_via_X", "(", "self", ",", "X", ",", "X2", ")", ":", "#a convenience function, so we can cache dK_dr", "return", "self", ".", "dK_dr", "(", "self", ".", "_scaled_dist", "(", "X", ",", "X2", ")", ")" ]
compute the derivative of K wrt X going through X
[ "compute", "the", "derivative", "of", "K", "wrt", "X", "going", "through", "X" ]
python
train
35.5
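A usage sketch (not part of the dataset record): any GPy stationary kernel, such as RBF, inherits this cached derivative helper; the hyperparameter values are illustrative.

import numpy as np
import GPy

k = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=0.5)
X = np.linspace(0, 1, 5)[:, None]
# derivative of K wrt the scaled distance r, evaluated pairwise on X
dK = k.dK_dr_via_X(X, X)
print(dK.shape)  # (5, 5)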
jthacker/terseparse
terseparse/utils.py
https://github.com/jthacker/terseparse/blob/236a31faf819f3ae9019a545613b8e7a6808f7b2/terseparse/utils.py#L20-L32
def rep(obj, *attrs, **kwargs): """Create a repr of a property based class quickly Args: obj -- instance of class *attrs -- list of attrs to add to the representation **kwargs -- Extra arguments to add that are not captured as attributes Returns: A string representing the class """ s = obj.__class__.__name__ args = chain(((attr, getattr(obj, attr)) for attr in attrs), kwargs.items()) s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args) return s
[ "def", "rep", "(", "obj", ",", "*", "attrs", ",", "*", "*", "kwargs", ")", ":", "s", "=", "obj", ".", "__class__", ".", "__name__", "args", "=", "chain", "(", "(", "(", "attr", ",", "getattr", "(", "obj", ",", "attr", ")", ")", "for", "attr", "in", "attrs", ")", ",", "kwargs", ".", "items", "(", ")", ")", "s", "+=", "'(%s)'", "%", "','", ".", "join", "(", "'{}={!r}'", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "args", ")", "return", "s" ]
Create a repr of a property based class quickly Args: obj -- instance of class *attrs -- list of attrs to add to the representation **kwargs -- Extra arguments to add that are not captured as attributes Returns: A string representing the class
[ "Create", "a", "repr", "of", "a", "property", "based", "class", "quickly", "Args", ":", "obj", "--", "instance", "of", "class", "*", "attrs", "--", "list", "of", "attrs", "to", "add", "to", "the", "representation", "**", "kwargs", "--", "Extra", "arguments", "to", "add", "that", "are", "not", "captured", "as", "attributes" ]
python
train
39.307692
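A usage sketch (not part of the dataset record): wiring rep into a small class's __repr__; the Point class and the unit keyword are illustrative.

from terseparse.utils import rep

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # named attrs are read with getattr; extra kwargs are appended as-is
        return rep(self, 'x', 'y', unit='px')

print(Point(3, 4))  # Point(x=3,y=4,unit='px')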
PMBio/limix-backup
limix/deprecated/utils/plot.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/deprecated/utils/plot.py#L180-L225
def plot_normal(x=None, mean_x=None,std_x=None,color='red',linewidth=2,alpha=1,bins=20,xlim=False,plot_mean=True,plot_std=False,plot_2std=True,figure=None,annotate=True,histogram=True):
    """
    plot a fit of a normal distribution to the data in x.
    """
    import pylab
    if figure is None:
        figure=pylab.figure()
    if mean_x is None:
        #fit maximum likelihood Normal distribution mean to samples X
        mean_x = x.mean() #sample mean
    if std_x is None:
        #fit maximum likelihood Normal distribution standard deviation to samples X
        std_x = x.std() #sample standard deviation
    xvals=np.arange(mean_x-5*std_x,mean_x+5*std_x,.001)
    yvals=st.norm.pdf(xvals,mean_x,std_x)
    #plot normal distribution:
    ax = pylab.plot(xvals,yvals,color=color,linewidth=linewidth,alpha=alpha)
    if x is not None and histogram:
        #plot histogram of x-values
        pylab.hist(x,bins,normed=True)
    if plot_mean:
        #evaluate distribution at the mean:
        max_cdf=st.norm.pdf(mean_x,mean_x,std_x)
        pylab.plot([mean_x,mean_x],[0,max_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
        if annotate:
            pylab.annotate('$\mu$', xy=(mean_x+0.6*std_x, 1.0*max_cdf), horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if plot_std:#plot mean +- 1*standard deviation (68% interval)
        std_cdf=st.norm.pdf(mean_x+std_x,mean_x,std_x)
        pylab.plot([mean_x+std_x,mean_x+std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
        pylab.plot([mean_x-std_x,mean_x-std_x],[0,std_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
        if annotate:
            pylab.annotate('$\mu+\sigma$', xy=(mean_x+1.6*std_x, 1.5*std_cdf), horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if plot_2std:#plot mean +- 2*standard deviations (95% interval)
        std2_cdf=st.norm.pdf(mean_x+2*std_x,mean_x,std_x)
        pylab.plot([mean_x+2*std_x,mean_x+2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
        pylab.plot([mean_x-2*std_x,mean_x-2*std_x],[0,std2_cdf],color=color,linewidth=linewidth,alpha=alpha,linestyle="--")
        if annotate:
            pylab.annotate('$\mu+2\sigma$', xy=(mean_x+2.6*std_x, 1.5*std2_cdf), horizontalalignment='center', verticalalignment='center',fontsize=15,color=color)
    if xlim: #cut off unused space on the x-axis
        pylab.xlim([mean_x-4*std_x,mean_x+4*std_x])
    return figure
[ "def", "plot_normal", "(", "x", "=", "None", ",", "mean_x", "=", "None", ",", "std_x", "=", "None", ",", "color", "=", "'red'", ",", "linewidth", "=", "2", ",", "alpha", "=", "1", ",", "bins", "=", "20", ",", "xlim", "=", "False", ",", "plot_mean", "=", "True", ",", "plot_std", "=", "False", ",", "plot_2std", "=", "True", ",", "figure", "=", "None", ",", "annotate", "=", "True", ",", "histogram", "=", "True", ")", ":", "import", "pylab", "if", "figure", "is", "None", ":", "figure", "=", "pylab", ".", "figure", "(", ")", "if", "mean_x", "is", "None", ":", "#fit maximum likelihood Normal distribution mean to samples X", "mean_x", "=", "x", ".", "mean", "(", ")", "#sample mean", "if", "std_x", "is", "None", ":", "#fit maximum likelihood Normal distribution standard deviation to samples X", "std_x", "=", "x", ".", "std", "(", ")", "#sample standard deviation", "xvals", "=", "np", ".", "arange", "(", "mean_x", "-", "5", "*", "std_x", ",", "mean_x", "+", "5", "*", "std_x", ",", ".001", ")", "yvals", "=", "st", ".", "norm", ".", "pdf", "(", "xvals", ",", "mean_x", ",", "std_x", ")", "#plot normal distribution:", "ax", "=", "pylab", ".", "plot", "(", "xvals", ",", "yvals", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ")", "if", "x", "is", "not", "None", "and", "histogram", ":", "#plot histogram of x-values", "pylab", ".", "hist", "(", "x", ",", "bins", ",", "normed", "=", "True", ")", "if", "plot_mean", ":", "#evaluate distribution at the mean:", "max_cdf", "=", "st", ".", "norm", ".", "pdf", "(", "mean_x", ",", "mean_x", ",", "std_x", ")", "pylab", ".", "plot", "(", "[", "mean_x", ",", "mean_x", "]", ",", "[", "0", ",", "max_cdf", "]", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ",", "linestyle", "=", "\"--\"", ")", "if", "annotate", ":", "pylab", ".", "annotate", "(", "'$\\mu$'", ",", "xy", "=", "(", "mean_x", "+", "0.6", "*", "std_x", ",", "1.0", "*", "max_cdf", ")", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "15", ",", "color", "=", "color", ")", "if", "plot_std", ":", "#plot mean +- 1*standard deviation (64% interval)", "std_cdf", "=", "st", ".", "norm", ".", "pdf", "(", "mean_x", "+", "std_x", ",", "mean_x", ",", "std_x", ")", "pylab", ".", "plot", "(", "[", "mean_x", "+", "std_x", ",", "mean_x", "+", "std_x", "]", ",", "[", "0", ",", "std_cdf", "]", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ",", "linestyle", "=", "\"--\"", ")", "pylab", ".", "plot", "(", "[", "mean_x", "-", "std_x", ",", "mean_x", "-", "std_x", "]", ",", "[", "0", ",", "std_cdf", "]", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ",", "linestyle", "=", "\"--\"", ")", "if", "annotate", ":", "pylab", ".", "annotate", "(", "'$\\mu+\\sigma$'", ",", "xy", "=", "(", "mean_x", "+", "1.6", "*", "std_x", ",", "1.5", "*", "std_cdf", ")", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "15", ",", "color", "=", "color", ")", "if", "plot_2std", ":", "#plot mean +- 2*standard deviations (95% interval)", "std2_cdf", "=", "st", ".", "norm", ".", "pdf", "(", "mean_x", "+", "2", "*", "std_x", ",", "mean_x", ",", "std_x", ")", "pylab", ".", "plot", "(", "[", "mean_x", "+", "2", "*", "std_x", ",", "mean_x", "+", "2", "*", "std_x", "]", ",", "[", "0", ",", "std2_cdf", "]", ",", "color", "=", "color", ",", "linewidth", "=", 
"linewidth", ",", "alpha", "=", "alpha", ",", "linestyle", "=", "\"--\"", ")", "pylab", ".", "plot", "(", "[", "mean_x", "-", "2", "*", "std_x", ",", "mean_x", "-", "2", "*", "std_x", "]", ",", "[", "0", ",", "std2_cdf", "]", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "alpha", "=", "alpha", ",", "linestyle", "=", "\"--\"", ")", "if", "annotate", ":", "pylab", ".", "annotate", "(", "'$\\mu+2\\sigma$'", ",", "xy", "=", "(", "mean_x", "+", "2.6", "*", "std_x", ",", "1.5", "*", "std2_cdf", ")", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "15", ",", "color", "=", "color", ")", "if", "xlim", ":", "#cut of unused space on y-axis", "pylab", ".", "xlim", "(", "[", "mean_x", "-", "4", "*", "std_x", ",", "mean_x", "+", "4", "*", "std_x", "]", ")", "return", "figure" ]
plot a fit of a normal distribution to the data in x.
[ "plot", "a", "fit", "of", "a", "normal", "distribution", "to", "the", "data", "in", "x", "." ]
python
train
55.391304
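A usage sketch (not part of the dataset record): the import path is inferred from the record's path field, and the function's module already provides np (numpy) and st (scipy.stats) at module scope.

import numpy as np
from limix.deprecated.utils.plot import plot_normal

samples = np.random.normal(loc=2.0, scale=0.5, size=1000)
fig = plot_normal(samples, color='blue', bins=30, xlim=True)
fig.savefig('normal_fit.png')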
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L1618-L1629
def p_restore(p): """ statement : RESTORE | RESTORE ID | RESTORE NUMBER """ if len(p) == 2: id_ = '__DATA__{0}'.format(len(gl.DATAS)) else: id_ = p[2] lbl = check_and_make_label(id_, p.lineno(1)) p[0] = make_sentence('RESTORE', lbl)
[ "def", "p_restore", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "id_", "=", "'__DATA__{0}'", ".", "format", "(", "len", "(", "gl", ".", "DATAS", ")", ")", "else", ":", "id_", "=", "p", "[", "2", "]", "lbl", "=", "check_and_make_label", "(", "id_", ",", "p", ".", "lineno", "(", "1", ")", ")", "p", "[", "0", "]", "=", "make_sentence", "(", "'RESTORE'", ",", "lbl", ")" ]
statement : RESTORE | RESTORE ID | RESTORE NUMBER
[ "statement", ":", "RESTORE", "|", "RESTORE", "ID", "|", "RESTORE", "NUMBER" ]
python
train
24.833333
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L320-L346
def _q_to_dcm(self, q): """ Create DCM from q :param q: array q which represents a quaternion [w, x, y, z] :returns: 3x3 dcm array """ assert(len(q) == 4) assert(np.allclose(QuaternionBase.norm_array(q), 1)) dcm = np.zeros([3, 3]) a = q[0] b = q[1] c = q[2] d = q[3] a_sq = a * a b_sq = b * b c_sq = c * c d_sq = d * d dcm[0][0] = a_sq + b_sq - c_sq - d_sq dcm[0][1] = 2 * (b * c - a * d) dcm[0][2] = 2 * (a * c + b * d) dcm[1][0] = 2 * (b * c + a * d) dcm[1][1] = a_sq - b_sq + c_sq - d_sq dcm[1][2] = 2 * (c * d - a * b) dcm[2][0] = 2 * (b * d - a * c) dcm[2][1] = 2 * (a * b + c * d) dcm[2][2] = a_sq - b_sq - c_sq + d_sq return dcm
[ "def", "_q_to_dcm", "(", "self", ",", "q", ")", ":", "assert", "(", "len", "(", "q", ")", "==", "4", ")", "assert", "(", "np", ".", "allclose", "(", "QuaternionBase", ".", "norm_array", "(", "q", ")", ",", "1", ")", ")", "dcm", "=", "np", ".", "zeros", "(", "[", "3", ",", "3", "]", ")", "a", "=", "q", "[", "0", "]", "b", "=", "q", "[", "1", "]", "c", "=", "q", "[", "2", "]", "d", "=", "q", "[", "3", "]", "a_sq", "=", "a", "*", "a", "b_sq", "=", "b", "*", "b", "c_sq", "=", "c", "*", "c", "d_sq", "=", "d", "*", "d", "dcm", "[", "0", "]", "[", "0", "]", "=", "a_sq", "+", "b_sq", "-", "c_sq", "-", "d_sq", "dcm", "[", "0", "]", "[", "1", "]", "=", "2", "*", "(", "b", "*", "c", "-", "a", "*", "d", ")", "dcm", "[", "0", "]", "[", "2", "]", "=", "2", "*", "(", "a", "*", "c", "+", "b", "*", "d", ")", "dcm", "[", "1", "]", "[", "0", "]", "=", "2", "*", "(", "b", "*", "c", "+", "a", "*", "d", ")", "dcm", "[", "1", "]", "[", "1", "]", "=", "a_sq", "-", "b_sq", "+", "c_sq", "-", "d_sq", "dcm", "[", "1", "]", "[", "2", "]", "=", "2", "*", "(", "c", "*", "d", "-", "a", "*", "b", ")", "dcm", "[", "2", "]", "[", "0", "]", "=", "2", "*", "(", "b", "*", "d", "-", "a", "*", "c", ")", "dcm", "[", "2", "]", "[", "1", "]", "=", "2", "*", "(", "a", "*", "b", "+", "c", "*", "d", ")", "dcm", "[", "2", "]", "[", "2", "]", "=", "a_sq", "-", "b_sq", "-", "c_sq", "+", "d_sq", "return", "dcm" ]
Create DCM from q :param q: array q which represents a quaternion [w, x, y, z] :returns: 3x3 dcm array
[ "Create", "DCM", "from", "q", ":", "param", "q", ":", "array", "q", "which", "represents", "a", "quaternion", "[", "w", "x", "y", "z", "]", ":", "returns", ":", "3x3", "dcm", "array" ]
python
train
30.259259
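A verification sketch (not part of the dataset record): the same element-wise algebra written as a free function, checked against the identity quaternion and the orthonormality of a 90-degree rotation about z.

import numpy as np

def q_to_dcm(q):
    # same formula as QuaternionBase._q_to_dcm above, q = [w, x, y, z]
    a, b, c, d = q
    return np.array([
        [a*a + b*b - c*c - d*d, 2*(b*c - a*d),         2*(a*c + b*d)],
        [2*(b*c + a*d),         a*a - b*b + c*c - d*d, 2*(c*d - a*b)],
        [2*(b*d - a*c),         2*(a*b + c*d),         a*a - b*b - c*c + d*d],
    ])

print(np.allclose(q_to_dcm([1.0, 0.0, 0.0, 0.0]), np.eye(3)))  # True
q90 = [np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)]  # 90 deg about z
dcm = q_to_dcm(q90)
print(np.allclose(dcm @ dcm.T, np.eye(3)))  # True: rotations are orthonormal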
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/targets.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L508-L515
def create_main_target (self, name): """ Returns a 'MainTarget' class instance corresponding to the 'name'. """ assert isinstance(name, basestring) if not self.built_main_targets_: self.build_main_targets () return self.main_targets_.get (name, None)
[ "def", "create_main_target", "(", "self", ",", "name", ")", ":", "assert", "isinstance", "(", "name", ",", "basestring", ")", "if", "not", "self", ".", "built_main_targets_", ":", "self", ".", "build_main_targets", "(", ")", "return", "self", ".", "main_targets_", ".", "get", "(", "name", ",", "None", ")" ]
Returns a 'MainTarget' class instance corresponding to the 'name'.
[ "Returns", "a", "MainTarget", "class", "instance", "corresponding", "to", "the", "name", "." ]
python
train
37
TorkamaniLab/metapipe
metapipe/models/command_template.py
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/command_template.py#L155-L163
def _search_for_files(parts): """ Given a list of parts, return all of the nested file parts. """ file_parts = [] for part in parts: if isinstance(part, list): file_parts.extend(_search_for_files(part)) elif isinstance(part, FileToken): file_parts.append(part) return file_parts
[ "def", "_search_for_files", "(", "parts", ")", ":", "file_parts", "=", "[", "]", "for", "part", "in", "parts", ":", "if", "isinstance", "(", "part", ",", "list", ")", ":", "file_parts", ".", "extend", "(", "_search_for_files", "(", "part", ")", ")", "elif", "isinstance", "(", "part", ",", "FileToken", ")", ":", "file_parts", ".", "append", "(", "part", ")", "return", "file_parts" ]
Given a list of parts, return all of the nested file parts.
[ "Given", "a", "list", "of", "parts", "return", "all", "of", "the", "nested", "file", "parts", "." ]
python
train
36.222222
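A usage sketch (not part of the dataset record): the recursion flattens arbitrarily nested lists and keeps only FileToken instances; the stand-in FileToken class below is illustrative and the function body is copied so the snippet is self-contained.

class FileToken:
    # minimal stand-in for metapipe's FileToken
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'FileToken(%r)' % self.name

def search_for_files(parts):
    # same recursion as _search_for_files above
    file_parts = []
    for part in parts:
        if isinstance(part, list):
            file_parts.extend(search_for_files(part))
        elif isinstance(part, FileToken):
            file_parts.append(part)
    return file_parts

parts = ['cat', [FileToken('a.txt'), ['>', FileToken('b.txt')]], '2>&1']
print(search_for_files(parts))  # [FileToken('a.txt'), FileToken('b.txt')]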
marcomusy/vtkplotter
vtkplotter/vtkio.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/vtkio.py#L508-L530
def loadImageData(filename, spacing=()):
    """Read and return a ``vtkImageData`` object from file."""
    if not os.path.isfile(filename):
        colors.printc("~noentry File not found:", filename, c=1)
        return None

    if ".tif" in filename.lower():
        reader = vtk.vtkTIFFReader()
    elif ".slc" in filename.lower():
        reader = vtk.vtkSLCReader()
        if not reader.CanReadFile(filename):
            colors.printc("~prohibited Sorry bad slc file " + filename, c=1)
            exit(1)
    elif ".vti" in filename.lower():
        reader = vtk.vtkXMLImageDataReader()
    elif ".mhd" in filename.lower():
        reader = vtk.vtkMetaImageReader()
    else:
        # guard against unsupported extensions; without this branch
        # 'reader' would be undefined below and raise a NameError
        colors.printc("~noentry Unsupported image file format:", filename, c=1)
        return None
    reader.SetFileName(filename)
    reader.Update()
    image = reader.GetOutput()
    if len(spacing) == 3:
        image.SetSpacing(spacing[0], spacing[1], spacing[2])
    return image
[ "def", "loadImageData", "(", "filename", ",", "spacing", "=", "(", ")", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "colors", ".", "printc", "(", "\"~noentry File not found:\"", ",", "filename", ",", "c", "=", "1", ")", "return", "None", "if", "\".tif\"", "in", "filename", ".", "lower", "(", ")", ":", "reader", "=", "vtk", ".", "vtkTIFFReader", "(", ")", "elif", "\".slc\"", "in", "filename", ".", "lower", "(", ")", ":", "reader", "=", "vtk", ".", "vtkSLCReader", "(", ")", "if", "not", "reader", ".", "CanReadFile", "(", "filename", ")", ":", "colors", ".", "printc", "(", "\"~prohibited Sorry bad slc file \"", "+", "filename", ",", "c", "=", "1", ")", "exit", "(", "1", ")", "elif", "\".vti\"", "in", "filename", ".", "lower", "(", ")", ":", "reader", "=", "vtk", ".", "vtkXMLImageDataReader", "(", ")", "elif", "\".mhd\"", "in", "filename", ".", "lower", "(", ")", ":", "reader", "=", "vtk", ".", "vtkMetaImageReader", "(", ")", "reader", ".", "SetFileName", "(", "filename", ")", "reader", ".", "Update", "(", ")", "image", "=", "reader", ".", "GetOutput", "(", ")", "if", "len", "(", "spacing", ")", "==", "3", ":", "image", ".", "SetSpacing", "(", "spacing", "[", "0", "]", ",", "spacing", "[", "1", "]", ",", "spacing", "[", "2", "]", ")", "return", "image" ]
Read and return a ``vtkImageData`` object from file.
[ "Read", "and", "return", "a", "vtkImageData", "object", "from", "file", "." ]
python
train
36.521739
edublancas/sklearn-evaluation
docs/sphinxext/ipython_sphinxext/ipython_directive.py
https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/docs/sphinxext/ipython_sphinxext/ipython_directive.py#L640-L708
def process_block(self, block): """ process block from the block_parser and return a list of processed lines """ ret = [] output = None input_lines = None lineno = self.IP.execution_count input_prompt = self.promptin % lineno output_prompt = self.promptout % lineno image_file = None image_directive = None found_input = False for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: found_input = True (out_data, input_lines, output, is_doctest, decorator, image_file, image_directive) = \ self.process_input(data, input_prompt, lineno) elif token == OUTPUT: if not found_input: TAB = ' ' * 4 linenumber = 0 source = 'Unavailable' content = 'Unavailable' if self.directive: linenumber = self.directive.state.document.current_line source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. content = '\n'.join([TAB + line for line in content]) e = ('\n\nInvalid block: Block contains an output prompt ' 'without an input prompt.\n\n' 'Document source: {0}\n\n' 'Content begins at line {1}: \n\n{2}\n\n' 'Problematic block within content: \n\n{TAB}{3}\n\n') e = e.format(source, linenumber, content, block, TAB=TAB) # Write, rather than include in exception, since Sphinx # will truncate tracebacks. sys.stdout.write(e) raise RuntimeError('An invalid block was detected.') out_data = \ self.process_output(data, output_prompt, input_lines, output, is_doctest, decorator, image_file) if out_data: # Then there was user submitted output in verbatim mode. # We need to remove the last element of `ret` that was # added in `process_input`, as it is '' and would introduce # an undesirable newline. assert(ret[-1] == '') del ret[-1] if out_data: ret.extend(out_data) # save the image files if image_file is not None: self.save_image(image_file) return ret, image_directive
[ "def", "process_block", "(", "self", ",", "block", ")", ":", "ret", "=", "[", "]", "output", "=", "None", "input_lines", "=", "None", "lineno", "=", "self", ".", "IP", ".", "execution_count", "input_prompt", "=", "self", ".", "promptin", "%", "lineno", "output_prompt", "=", "self", ".", "promptout", "%", "lineno", "image_file", "=", "None", "image_directive", "=", "None", "found_input", "=", "False", "for", "token", ",", "data", "in", "block", ":", "if", "token", "==", "COMMENT", ":", "out_data", "=", "self", ".", "process_comment", "(", "data", ")", "elif", "token", "==", "INPUT", ":", "found_input", "=", "True", "(", "out_data", ",", "input_lines", ",", "output", ",", "is_doctest", ",", "decorator", ",", "image_file", ",", "image_directive", ")", "=", "self", ".", "process_input", "(", "data", ",", "input_prompt", ",", "lineno", ")", "elif", "token", "==", "OUTPUT", ":", "if", "not", "found_input", ":", "TAB", "=", "' '", "*", "4", "linenumber", "=", "0", "source", "=", "'Unavailable'", "content", "=", "'Unavailable'", "if", "self", ".", "directive", ":", "linenumber", "=", "self", ".", "directive", ".", "state", ".", "document", ".", "current_line", "source", "=", "self", ".", "directive", ".", "state", ".", "document", ".", "current_source", "content", "=", "self", ".", "directive", ".", "content", "# Add tabs and join into a single string.", "content", "=", "'\\n'", ".", "join", "(", "[", "TAB", "+", "line", "for", "line", "in", "content", "]", ")", "e", "=", "(", "'\\n\\nInvalid block: Block contains an output prompt '", "'without an input prompt.\\n\\n'", "'Document source: {0}\\n\\n'", "'Content begins at line {1}: \\n\\n{2}\\n\\n'", "'Problematic block within content: \\n\\n{TAB}{3}\\n\\n'", ")", "e", "=", "e", ".", "format", "(", "source", ",", "linenumber", ",", "content", ",", "block", ",", "TAB", "=", "TAB", ")", "# Write, rather than include in exception, since Sphinx", "# will truncate tracebacks.", "sys", ".", "stdout", ".", "write", "(", "e", ")", "raise", "RuntimeError", "(", "'An invalid block was detected.'", ")", "out_data", "=", "self", ".", "process_output", "(", "data", ",", "output_prompt", ",", "input_lines", ",", "output", ",", "is_doctest", ",", "decorator", ",", "image_file", ")", "if", "out_data", ":", "# Then there was user submitted output in verbatim mode.", "# We need to remove the last element of `ret` that was", "# added in `process_input`, as it is '' and would introduce", "# an undesirable newline.", "assert", "(", "ret", "[", "-", "1", "]", "==", "''", ")", "del", "ret", "[", "-", "1", "]", "if", "out_data", ":", "ret", ".", "extend", "(", "out_data", ")", "# save the image files", "if", "image_file", "is", "not", "None", ":", "self", ".", "save_image", "(", "image_file", ")", "return", "ret", ",", "image_directive" ]
process block from the block_parser and return a list of processed lines
[ "process", "block", "from", "the", "block_parser", "and", "return", "a", "list", "of", "processed", "lines" ]
python
train
41
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1727-L1735
def walk_preorder(self): """Depth-first preorder walk over the cursor and its descendants. Yields cursors. """ yield self for child in self.get_children(): for descendant in child.walk_preorder(): yield descendant
[ "def", "walk_preorder", "(", "self", ")", ":", "yield", "self", "for", "child", "in", "self", ".", "get_children", "(", ")", ":", "for", "descendant", "in", "child", ".", "walk_preorder", "(", ")", ":", "yield", "descendant" ]
Depth-first preorder walk over the cursor and its descendants. Yields cursors.
[ "Depth", "-", "first", "preorder", "walk", "over", "the", "cursor", "and", "its", "descendants", "." ]
python
train
30.444444
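A usage sketch (not part of the dataset record): walking every cursor of a translation unit; requires libclang to be installed and discoverable, and the in-memory source snippet is illustrative.

import clang.cindex as cindex

index = cindex.Index.create()
tu = index.parse('demo.c', unsaved_files=[
    ('demo.c', 'int add(int a, int b) { return a + b; }')])
# parent cursors are always yielded before their descendants
for cursor in tu.cursor.walk_preorder():
    print(cursor.kind, cursor.spelling)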
OpenAssets/openassets
openassets/protocol.py
https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L54-L78
def get_output(self, transaction_hash, output_index): """ Gets an output and information about its asset ID and asset quantity. :param bytes transaction_hash: The hash of the transaction containing the output. :param int output_index: The index of the output. :return: An object containing the output as well as its asset ID and asset quantity. :rtype: Future[TransactionOutput] """ cached_output = yield from self._cache.get(transaction_hash, output_index) if cached_output is not None: return cached_output transaction = yield from self._transaction_provider(transaction_hash) if transaction is None: raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash))) colored_outputs = yield from self.color_transaction(transaction) for index, output in enumerate(colored_outputs): yield from self._cache.put(transaction_hash, index, output) return colored_outputs[output_index]
[ "def", "get_output", "(", "self", ",", "transaction_hash", ",", "output_index", ")", ":", "cached_output", "=", "yield", "from", "self", ".", "_cache", ".", "get", "(", "transaction_hash", ",", "output_index", ")", "if", "cached_output", "is", "not", "None", ":", "return", "cached_output", "transaction", "=", "yield", "from", "self", ".", "_transaction_provider", "(", "transaction_hash", ")", "if", "transaction", "is", "None", ":", "raise", "ValueError", "(", "'Transaction {0} could not be retrieved'", ".", "format", "(", "bitcoin", ".", "core", ".", "b2lx", "(", "transaction_hash", ")", ")", ")", "colored_outputs", "=", "yield", "from", "self", ".", "color_transaction", "(", "transaction", ")", "for", "index", ",", "output", "in", "enumerate", "(", "colored_outputs", ")", ":", "yield", "from", "self", ".", "_cache", ".", "put", "(", "transaction_hash", ",", "index", ",", "output", ")", "return", "colored_outputs", "[", "output_index", "]" ]
Gets an output and information about its asset ID and asset quantity. :param bytes transaction_hash: The hash of the transaction containing the output. :param int output_index: The index of the output. :return: An object containing the output as well as its asset ID and asset quantity. :rtype: Future[TransactionOutput]
[ "Gets", "an", "output", "and", "information", "about", "its", "asset", "ID", "and", "asset", "quantity", "." ]
python
train
41.88
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L556-L574
def __print_command_help(self, session, namespace, cmd_name): """ Prints the documentation of the given command :param session: Session handler :param namespace: Name space of the command :param cmd_name: Name of the command """ # Extract documentation args, doc = self.__extract_help(self._commands[namespace][cmd_name]) # Print the command name, and its arguments if args: session.write_line("- {0} {1}", cmd_name, args) else: session.write_line("- {0}", cmd_name) # Print the documentation line session.write_line("\t\t{0}", doc)
[ "def", "__print_command_help", "(", "self", ",", "session", ",", "namespace", ",", "cmd_name", ")", ":", "# Extract documentation", "args", ",", "doc", "=", "self", ".", "__extract_help", "(", "self", ".", "_commands", "[", "namespace", "]", "[", "cmd_name", "]", ")", "# Print the command name, and its arguments", "if", "args", ":", "session", ".", "write_line", "(", "\"- {0} {1}\"", ",", "cmd_name", ",", "args", ")", "else", ":", "session", ".", "write_line", "(", "\"- {0}\"", ",", "cmd_name", ")", "# Print the documentation line", "session", ".", "write_line", "(", "\"\\t\\t{0}\"", ",", "doc", ")" ]
Prints the documentation of the given command :param session: Session handler :param namespace: Name space of the command :param cmd_name: Name of the command
[ "Prints", "the", "documentation", "of", "the", "given", "command" ]
python
train
33.947368
openai/baselines
baselines/deepq/replay_buffer.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/deepq/replay_buffer.py#L100-L105
def add(self, *args, **kwargs): """See ReplayBuffer.store_effect""" idx = self._next_idx super().add(*args, **kwargs) self._it_sum[idx] = self._max_priority ** self._alpha self._it_min[idx] = self._max_priority ** self._alpha
[ "def", "add", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "idx", "=", "self", ".", "_next_idx", "super", "(", ")", ".", "add", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_it_sum", "[", "idx", "]", "=", "self", ".", "_max_priority", "**", "self", ".", "_alpha", "self", ".", "_it_min", "[", "idx", "]", "=", "self", ".", "_max_priority", "**", "self", ".", "_alpha" ]
See ReplayBuffer.store_effect
[ "See", "ReplayBuffer", ".", "store_effect" ]
python
valid
43.333333
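A usage sketch (not part of the dataset record), assuming the baselines package layout of that era: new transitions enter at the current maximum priority, so each is sampled at least once before being down-weighted; the toy scalar transitions are illustrative.

import numpy as np
from baselines.deepq.replay_buffer import PrioritizedReplayBuffer

buf = PrioritizedReplayBuffer(size=1000, alpha=0.6)
for t in range(100):
    # (obs_t, action, reward, obs_tp1, done)
    buf.add(np.float32(t), 0, 1.0, np.float32(t + 1), False)
obs, act, rew, obs1, done, weights, idxes = buf.sample(32, beta=0.4)
print(weights.shape, idxes[:5])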
Gandi/gandi.cli
gandi/cli/core/params.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/params.py#L222-L225
def _get_choices(self, gandi): """ Internal method to get choices list """ packages = super(CertificatePackageType, self)._get_choices(gandi) return list(set([pack.split('_')[1] for pack in packages]))
[ "def", "_get_choices", "(", "self", ",", "gandi", ")", ":", "packages", "=", "super", "(", "CertificatePackageType", ",", "self", ")", ".", "_get_choices", "(", "gandi", ")", "return", "list", "(", "set", "(", "[", "pack", ".", "split", "(", "'_'", ")", "[", "1", "]", "for", "pack", "in", "packages", "]", ")", ")" ]
Internal method to get choices list
[ "Internal", "method", "to", "get", "choices", "list" ]
python
train
55.5
mardix/Mocha
mocha/utils.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/utils.py#L67-L82
def is_username_valid(username):
    """
    Check if a valid username.
    valid:
        oracle
        bill-gates
        steve.jobs
        micro_soft
    not valid
        Bill Gates - no space allowed
        [email protected] - @ is not a valid character
    :param username: string
    :return:
    """
    pattern = re.compile(r"^[a-zA-Z0-9_.-]+$")
    return bool(pattern.match(username))
[ "def", "is_username_valid", "(", "username", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r\"^[a-zA-Z0-9_.-]+$\"", ")", "return", "bool", "(", "pattern", ".", "match", "(", "username", ")", ")" ]
Check if a valid username.
valid:
    oracle
    bill-gates
    steve.jobs
    micro_soft
not valid
    Bill Gates - no space allowed
    [email protected] - @ is not a valid character
:param username: string
:return:
[ "Check", "if", "a", "valid", "username", ".", "valid", ":", "oracle", "bill", "-", "gates", "steve", ".", "jobs", "micro_soft", "not", "valid", "Bill", "Gates", "-", "no", "space", "allowed", "me" ]
python
train
23.4375
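A usage sketch (not part of the dataset record), exercising the validator on the docstring's examples; the import path is inferred from the record's path field.

from mocha.utils import is_username_valid

for name in ('oracle', 'bill-gates', 'steve.jobs', 'micro_soft',
             'Bill Gates', 'me@here'):
    print(name, '->', is_username_valid(name))
# the first four are valid; the last two are rejected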
sendgrid/sendgrid-python
sendgrid/helpers/mail/html_content.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/html_content.py#L46-L59
def get(self): """ Get a JSON-ready representation of this HtmlContent. :returns: This HtmlContent, ready for use in a request body. :rtype: dict """ content = {} if self.mime_type is not None: content["type"] = self.mime_type if self.content is not None: content["value"] = self.content return content
[ "def", "get", "(", "self", ")", ":", "content", "=", "{", "}", "if", "self", ".", "mime_type", "is", "not", "None", ":", "content", "[", "\"type\"", "]", "=", "self", ".", "mime_type", "if", "self", ".", "content", "is", "not", "None", ":", "content", "[", "\"value\"", "]", "=", "self", ".", "content", "return", "content" ]
Get a JSON-ready representation of this HtmlContent. :returns: This HtmlContent, ready for use in a request body. :rtype: dict
[ "Get", "a", "JSON", "-", "ready", "representation", "of", "this", "HtmlContent", "." ]
python
train
27.642857
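A usage sketch (not part of the dataset record): building the JSON-ready dict for a request body; on HtmlContent, mime_type resolves to 'text/html'.

from sendgrid.helpers.mail import HtmlContent

content = HtmlContent('<p>Hello, <strong>world</strong>!</p>')
print(content.get())
# {'type': 'text/html', 'value': '<p>Hello, <strong>world</strong>!</p>'}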
quantmind/pulsar
pulsar/utils/context.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/context.py#L74-L83
def stack_get(self, key):
    """Get a value from the task context stack
    """
    task = Task.current_task()
    try:
        context = task._context_stack
    except AttributeError:
        return
    if key in context:
        return context[key][-1]
[ "def", "stack_get", "(", "self", ",", "key", ")", ":", "task", "=", "Task", ".", "current_task", "(", ")", "try", ":", "context", "=", "task", ".", "_context_stack", "except", "AttributeError", ":", "return", "if", "key", "in", "context", ":", "return", "context", "[", "key", "]", "[", "-", "1", "]" ]
Get a value from the task context stack
[ "Set", "a", "value", "in", "a", "task", "context", "stack" ]
python
train
27.8
Karaage-Cluster/karaage
karaage/common/simple.py
https://github.com/Karaage-Cluster/karaage/blob/2f4c8b4e2d728b3fcbb151160c49000f1c04f5c9/karaage/common/simple.py#L35-L52
def direct_to_template( request, template, extra_context=None, mimetype=None, **kwargs): """ Render a given template with any extra URL parameters in the context as ``{{ params }}``. """ if extra_context is None: extra_context = {} dictionary = {'params': kwargs} for key, value in extra_context.items(): if callable(value): dictionary[key] = value() else: dictionary[key] = value t = loader.get_template(template) return HttpResponse( t.render(context=dictionary, request=request), content_type=mimetype)
[ "def", "direct_to_template", "(", "request", ",", "template", ",", "extra_context", "=", "None", ",", "mimetype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "extra_context", "is", "None", ":", "extra_context", "=", "{", "}", "dictionary", "=", "{", "'params'", ":", "kwargs", "}", "for", "key", ",", "value", "in", "extra_context", ".", "items", "(", ")", ":", "if", "callable", "(", "value", ")", ":", "dictionary", "[", "key", "]", "=", "value", "(", ")", "else", ":", "dictionary", "[", "key", "]", "=", "value", "t", "=", "loader", ".", "get_template", "(", "template", ")", "return", "HttpResponse", "(", "t", ".", "render", "(", "context", "=", "dictionary", ",", "request", "=", "request", ")", ",", "content_type", "=", "mimetype", ")" ]
Render a given template with any extra URL parameters in the context as ``{{ params }}``.
[ "Render", "a", "given", "template", "with", "any", "extra", "URL", "parameters", "in", "the", "context", "as", "{{", "params", "}}", "." ]
python
train
33.111111
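A usage sketch (not part of the dataset record): wiring the view into a Django 2+ style URLconf; the template name and extra context are illustrative.

# urls.py
from django.urls import path
from karaage.common.simple import direct_to_template

urlpatterns = [
    path('about/', direct_to_template,
         {'template': 'about.html',
          'extra_context': {'title': 'About'}}),
]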
AlecAivazis/graphql-over-kafka
nautilus/network/events/actionHandlers/queryHandler.py
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/network/events/actionHandlers/queryHandler.py#L5-L29
async def query_handler(service, action_type, payload, props, **kwds): """ This action handler interprets the payload as a query to be executed by the api gateway service. """ # check that the action type indicates a query if action_type == query_action_type(): print('encountered query event {!r} '.format(payload)) # perform the query result = await parse_string(payload, service.object_resolver, service.connection_resolver, service.mutation_resolver, obey_auth=False ) # the props for the reply message reply_props = {'correlation_id': props['correlation_id']} if 'correlation_id' in props else {} # publish the success event await service.event_broker.send( payload=result, action_type=change_action_status(action_type, success_status()), **reply_props )
[ "async", "def", "query_handler", "(", "service", ",", "action_type", ",", "payload", ",", "props", ",", "*", "*", "kwds", ")", ":", "# check that the action type indicates a query", "if", "action_type", "==", "query_action_type", "(", ")", ":", "print", "(", "'encountered query event {!r} '", ".", "format", "(", "payload", ")", ")", "# perform the query", "result", "=", "await", "parse_string", "(", "payload", ",", "service", ".", "object_resolver", ",", "service", ".", "connection_resolver", ",", "service", ".", "mutation_resolver", ",", "obey_auth", "=", "False", ")", "# the props for the reply message", "reply_props", "=", "{", "'correlation_id'", ":", "props", "[", "'correlation_id'", "]", "}", "if", "'correlation_id'", "in", "props", "else", "{", "}", "# publish the success event", "await", "service", ".", "event_broker", ".", "send", "(", "payload", "=", "result", ",", "action_type", "=", "change_action_status", "(", "action_type", ",", "success_status", "(", ")", ")", ",", "*", "*", "reply_props", ")" ]
This action handler interprets the payload as a query to be executed by the api gateway service.
[ "This", "action", "handler", "interprets", "the", "payload", "as", "a", "query", "to", "be", "executed", "by", "the", "api", "gateway", "service", "." ]
python
train
37
brean/python-pathfinding
pathfinding/core/grid.py
https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/core/grid.py#L61-L68
def inside(self, x, y):
    """
    Check if the field position is inside the map.
    :param x: x pos
    :param y: y pos
    :return:
    """
    return 0 <= x < self.width and 0 <= y < self.height
[ "def", "inside", "(", "self", ",", "x", ",", "y", ")", ":", "return", "0", "<=", "x", "<", "self", ".", "width", "and", "0", "<=", "y", "<", "self", ".", "height" ]
Check if the field position is inside the map. :param x: x pos :param y: y pos :return:
[ "check", "if", "field", "position", "is", "inside", "map", ":", "param", "x", ":", "x", "pos", ":", "param", "y", ":", "y", "pos", ":", "return", ":" ]
python
train
26.5
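A usage sketch (not part of the dataset record): bounds checks on a 3x2 grid built from a walkability matrix.

from pathfinding.core.grid import Grid

grid = Grid(matrix=[[1, 1, 1],
                    [1, 0, 1]])  # width 3, height 2
print(grid.inside(2, 1))  # True
print(grid.inside(3, 0))  # False: x must be < width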
llazzaro/django-scheduler
schedule/utils.py
https://github.com/llazzaro/django-scheduler/blob/0530b74a5fc0b1125645002deaa4da2337ed0f17/schedule/utils.py#L87-L91
def get_additional_occurrences(self, start, end): """ Return persisted occurrences which are now in the period """ return [occ for _, occ in list(self.lookup.items()) if (occ.start < end and occ.end >= start and not occ.cancelled)]
[ "def", "get_additional_occurrences", "(", "self", ",", "start", ",", "end", ")", ":", "return", "[", "occ", "for", "_", ",", "occ", "in", "list", "(", "self", ".", "lookup", ".", "items", "(", ")", ")", "if", "(", "occ", ".", "start", "<", "end", "and", "occ", ".", "end", ">=", "start", "and", "not", "occ", ".", "cancelled", ")", "]" ]
Return persisted occurrences which are now in the period
[ "Return", "persisted", "occurrences", "which", "are", "now", "in", "the", "period" ]
python
train
51.8
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py#L924-L942
def convert_l2normalization(node, **kwargs): """Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) mode = attrs.get("mode", "instance") if mode != "channel": raise AttributeError("L2Normalization: ONNX currently supports channel mode only") l2norm_node = onnx.helper.make_node( "LpNormalization", input_nodes, [name], axis=1, # channel only name=name ) return [l2norm_node]
[ "def", "convert_l2normalization", "(", "node", ",", "*", "*", "kwargs", ")", ":", "name", ",", "input_nodes", ",", "attrs", "=", "get_inputs", "(", "node", ",", "kwargs", ")", "mode", "=", "attrs", ".", "get", "(", "\"mode\"", ",", "\"instance\"", ")", "if", "mode", "!=", "\"channel\"", ":", "raise", "AttributeError", "(", "\"L2Normalization: ONNX currently supports channel mode only\"", ")", "l2norm_node", "=", "onnx", ".", "helper", ".", "make_node", "(", "\"LpNormalization\"", ",", "input_nodes", ",", "[", "name", "]", ",", "axis", "=", "1", ",", "# channel only", "name", "=", "name", ")", "return", "[", "l2norm_node", "]" ]
Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator and return the created node.
[ "Map", "MXNet", "s", "L2Normalization", "operator", "attributes", "to", "onnx", "s", "LpNormalization", "operator", "and", "return", "the", "created", "node", "." ]
python
train
29.473684
miquelo/resort
packages/resort/engine/__init__.py
https://github.com/miquelo/resort/blob/097a25d3257c91a75c194fd44c2797ab356f85dd/packages/resort/engine/__init__.py#L69-L99
def load(self, prof_name):
    """
    Load the profile with the given name.

    :param str prof_name:
       Profile name.
    :rtype:
       ProfileStub
    :return:
       A stub to the loaded profile.
    """

    prof_dir = self.__profile_dir(prof_name)
    prof_ini_path = self.__profile_ini_path(prof_dir)
    if not os.path.exists(prof_ini_path):
        msg = "Profile '{}' does not exist"
        raise Exception(msg.format(prof_name))

    # Load profile
    prof_ini_file = open(prof_ini_path, "r")
    prof_ini = configparser.ConfigParser()
    prof_ini.read_file(prof_ini_file)
    prof_ini_file.close()

    # Prepare profile
    prof_type = prof_ini["profile"]["type"]
    prof_stub = self.__profile_stub(prof_name, prof_type, prof_dir)
    prof_stub.prepare(prof_ini["properties"])
    return prof_stub
[ "def", "load", "(", "self", ",", "prof_name", ")", ":", "prof_dir", "=", "self", ".", "__profile_dir", "(", "prof_name", ")", "prof_ini_path", "=", "self", ".", "__profile_ini_path", "(", "prof_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "prof_ini_path", ")", ":", "msg", "=", "\"Profile '{}' does not exist\"", "raise", "Exception", "(", "msg", ".", "format", "(", "prof_name", ")", ")", "# Load profile", "prof_ini_file", "=", "open", "(", "prof_ini_path", ",", "\"r\"", ")", "prof_ini", "=", "configparser", ".", "ConfigParser", "(", ")", "prof_ini", ".", "read_file", "(", "prof_ini_file", ")", "prof_ini_file", ".", "close", "(", ")", "# Prepare profile", "prof_type", "=", "prof_ini", "[", "\"profile\"", "]", "[", "\"type\"", "]", "prof_stub", "=", "self", ".", "__profile_stub", "(", "prof_name", ",", "prof_type", ",", "prof_dir", ")", "prof_stub", ".", "prepare", "(", "prof_ini", "[", "\"properties\"", "]", ")", "return", "prof_stub" ]
Load the profile with the given name.

:param str prof_name:
   Profile name.
:rtype:
   ProfileStub
:return:
   A stub to the loaded profile.
[ "Load", "the", "profile", "with", "the", "given", "name", ".", ":", "param", "str", "prof_name", ":", "Profile", "name", ".", ":", "rtype", ":", "ProfileStub", ":", "return", ":", "An", "stub", "to", "loaded", "profile", "." ]
python
train
24.096774
ellmetha/django-machina
machina/apps/forum_tracking/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_tracking/views.py#L41-L44
def get(self, request, pk=None): """ Handles GET requests. """ self.top_level_forum = get_object_or_404(Forum, pk=pk) if pk else None return super().get(request, pk)
[ "def", "get", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "self", ".", "top_level_forum", "=", "get_object_or_404", "(", "Forum", ",", "pk", "=", "pk", ")", "if", "pk", "else", "None", "return", "super", "(", ")", ".", "get", "(", "request", ",", "pk", ")" ]
Handles GET requests.
[ "Handles", "GET", "requests", "." ]
python
train
46.5
singularityhub/singularity-python
singularity/utils.py
https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/utils.py#L70-L85
def run_command(cmd, sudo=False):
    '''run_command uses subprocess to send a command to the terminal.

    :param cmd: the command to send, should be a list for subprocess
    :param sudo: if True, the command will be prefixed with sudo
    '''
    if sudo is True:
        cmd = ['sudo'] + cmd

    output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
    t = output.communicate()[0],output.returncode
    output = {'message':t[0],
              'return_code':t[1]}

    return output
[ "def", "run_command", "(", "cmd", ",", "sudo", "=", "False", ")", ":", "if", "sudo", "is", "True", ":", "cmd", "=", "[", "'sudo'", "]", "+", "cmd", "output", "=", "Popen", "(", "cmd", ",", "stderr", "=", "STDOUT", ",", "stdout", "=", "PIPE", ")", "t", "=", "output", ".", "communicate", "(", ")", "[", "0", "]", ",", "output", ".", "returncode", "output", "=", "{", "'message'", ":", "t", "[", "0", "]", ",", "'return_code'", ":", "t", "[", "1", "]", "}", "return", "output" ]
run_command uses subprocess to send a command to the terminal.

:param cmd: the command to send, should be a list for subprocess
:param sudo: if True, the command will be prefixed with sudo
[ "run_command", "uses", "subprocess", "to", "send", "a", "command", "to", "the", "terminal", ".", ":", "param", "cmd", ":", "the", "command", "to", "send", "should", "be", "a", "list", "for", "subprocess", ":", "param", "error_message", ":", "the", "error", "message", "to", "give", "to", "user", "if", "fails", "if", "none", "specified", "will", "alert", "that", "command", "failed", ".", ":", "param", "sudopw", ":", "if", "specified", "(", "not", "None", ")", "command", "will", "be", "run", "asking", "for", "sudo" ]
python
train
37.6875
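A usage sketch (not part of the dataset record): the 'message' value comes back as bytes from Popen, so it is decoded before printing; the import path is inferred from the record's path field.

from singularity.utils import run_command

result = run_command(['ls', '/tmp'])
if result['return_code'] == 0:
    print(result['message'].decode('utf-8'))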
spyder-ide/spyder-notebook
spyder_notebook/widgets/dom.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/widgets/dom.py#L65-L71
def set_class_value(self, selector, classname): """Set the class of element matched by the given selector.""" return self.evaluate(""" (function () {{ var element = document.querySelector({0}); element.className = {1}; }})();""".format(repr(selector), repr(classname)))
[ "def", "set_class_value", "(", "self", ",", "selector", ",", "classname", ")", ":", "return", "self", ".", "evaluate", "(", "\"\"\"\n (function () {{\n var element = document.querySelector({0});\n element.className = {1};\n }})();\"\"\"", ".", "format", "(", "repr", "(", "selector", ")", ",", "repr", "(", "classname", ")", ")", ")" ]
Set the class of element matched by the given selector.
[ "Set", "the", "class", "of", "element", "matched", "by", "the", "given", "selector", "." ]
python
train
46.714286
za-creature/gulpless
gulpless/handlers.py
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/handlers.py#L233-L240
def deleted(self, src, path): """Update the reference tree when a handled file is deleted.""" if self.parents[path] is not None: for parent in self.parents[path]: self.children[parent].remove(path) if not self.children[parent]: del self.children[parent] del self.parents[path]
[ "def", "deleted", "(", "self", ",", "src", ",", "path", ")", ":", "if", "self", ".", "parents", "[", "path", "]", "is", "not", "None", ":", "for", "parent", "in", "self", ".", "parents", "[", "path", "]", ":", "self", ".", "children", "[", "parent", "]", ".", "remove", "(", "path", ")", "if", "not", "self", ".", "children", "[", "parent", "]", ":", "del", "self", ".", "children", "[", "parent", "]", "del", "self", ".", "parents", "[", "path", "]" ]
Update the reference tree when a handled file is deleted.
[ "Update", "the", "reference", "tree", "when", "a", "handled", "file", "is", "deleted", "." ]
python
train
44.625
ansibleplaybookbundle/ansible-playbook-bundle
src/apb/cli.py
https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L686-L745
def main(): """ main """ # BZ 1581651 - Override the ArgumentParser to disable argument abbreviations. parser = OverrideArgumentParser( description=u'APB tooling for ' u'assisting in building and packaging APBs.' ) parser.add_argument( '--debug', action='store_true', dest='debug', help=u'Enable debug output', default=False ) # TODO: Modify project to accept relative paths parser.add_argument( '--project', '-p', action='store', dest='base_path', help=u'Specify a path to your project. Defaults to CWD.', default=os.getcwd() ) parser.add_argument( '--token', action='store', dest='auth_token', help=u'Specify OpenShift auth token to be used', default=None ) subparsers = parser.add_subparsers(title='subcommand', dest='subcommand') subparsers.required = True for subcommand in AVAILABLE_COMMANDS: subparser = subparsers.add_parser( subcommand, help=AVAILABLE_COMMANDS[subcommand] ) globals()['subcmd_%s_parser' % subcommand](subparser) args = parser.parse_args() if args.subcommand == 'help': parser.print_help() sys.exit(0) if args.subcommand == 'version': version = pkg_resources.require("apb")[0].version print("Version: apb-%s" % version) sys.exit(0) try: getattr(apb.engine, u'cmdrun_{}'.format(args.subcommand))(**vars(args)) except Exception as e: print("Exception occurred! %s" % e) sys.exit(1)
[ "def", "main", "(", ")", ":", "# BZ 1581651 - Override the ArgumentParser to disable argument abbreviations.", "parser", "=", "OverrideArgumentParser", "(", "description", "=", "u'APB tooling for '", "u'assisting in building and packaging APBs.'", ")", "parser", ".", "add_argument", "(", "'--debug'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'debug'", ",", "help", "=", "u'Enable debug output'", ",", "default", "=", "False", ")", "# TODO: Modify project to accept relative paths", "parser", ".", "add_argument", "(", "'--project'", ",", "'-p'", ",", "action", "=", "'store'", ",", "dest", "=", "'base_path'", ",", "help", "=", "u'Specify a path to your project. Defaults to CWD.'", ",", "default", "=", "os", ".", "getcwd", "(", ")", ")", "parser", ".", "add_argument", "(", "'--token'", ",", "action", "=", "'store'", ",", "dest", "=", "'auth_token'", ",", "help", "=", "u'Specify OpenShift auth token to be used'", ",", "default", "=", "None", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "title", "=", "'subcommand'", ",", "dest", "=", "'subcommand'", ")", "subparsers", ".", "required", "=", "True", "for", "subcommand", "in", "AVAILABLE_COMMANDS", ":", "subparser", "=", "subparsers", ".", "add_parser", "(", "subcommand", ",", "help", "=", "AVAILABLE_COMMANDS", "[", "subcommand", "]", ")", "globals", "(", ")", "[", "'subcmd_%s_parser'", "%", "subcommand", "]", "(", "subparser", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "subcommand", "==", "'help'", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "0", ")", "if", "args", ".", "subcommand", "==", "'version'", ":", "version", "=", "pkg_resources", ".", "require", "(", "\"apb\"", ")", "[", "0", "]", ".", "version", "print", "(", "\"Version: apb-%s\"", "%", "version", ")", "sys", ".", "exit", "(", "0", ")", "try", ":", "getattr", "(", "apb", ".", "engine", ",", "u'cmdrun_{}'", ".", "format", "(", "args", ".", "subcommand", ")", ")", "(", "*", "*", "vars", "(", "args", ")", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"Exception occurred! %s\"", "%", "e", ")", "sys", ".", "exit", "(", "1", ")" ]
main
[ "main" ]
python
train
26.5
Opentrons/opentrons
api/src/opentrons/hardware_control/modules/magdeck.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/hardware_control/modules/magdeck.py#L99-L107
def engage(self, height):
    """ Move the magnet to a specific height, in mm from home position """
    if height > MAX_ENGAGE_HEIGHT or height < 0:
        raise ValueError('Invalid engage height. Should be 0 to {}'.format(
            MAX_ENGAGE_HEIGHT))
    self._driver.move(height)
    self._engaged = True
[ "def", "engage", "(", "self", ",", "height", ")", ":", "if", "height", ">", "MAX_ENGAGE_HEIGHT", "or", "height", "<", "0", ":", "raise", "ValueError", "(", "'Invalid engage height. Should be 0 to {}'", ".", "format", "(", "MAX_ENGAGE_HEIGHT", ")", ")", "self", ".", "_driver", ".", "move", "(", "height", ")", "self", ".", "_engaged", "=", "True" ]
Move the magnet to a specific height, in mm from home position
[ "Move", "the", "magnet", "to", "a", "specific", "height", "in", "mm", "from", "home", "position" ]
python
train
38.222222
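A quick usage sketch for engage; `deck` is a hypothetical connected MagDeck instance, and heights outside 0..MAX_ENGAGE_HEIGHT raise the ValueError shown above:

deck.engage(10.5)   # magnets move to 10.5 mm above home; deck._engaged becomes True
deck.engage(-1.0)   # raises ValueError: Invalid engage height. Should be 0 to ...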
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L1165-L1180
def do_parse(self, arg, fullparse=False):
    """Parse the test results from the specified directory and load them under
    the name of 'module.executable' that they were created with.
    E.g. parse classes.polya/
    """
    from os import path
    fullpath = path.abspath(path.expanduser(arg))
    if path.isdir(fullpath):
        if fullpath[-1] == "/":
            end = -2
        else:
            end = -1
        case = fullpath.split("/")[end]
        self.tests[case] = Analysis(fullpath, fullparse)
        self.do_set(case)
    else:
        msg.err("The folder {} does not exist.".format(fullpath))
[ "def", "do_parse", "(", "self", ",", "arg", ",", "fullparse", "=", "False", ")", ":", "from", "os", "import", "path", "fullpath", "=", "path", ".", "abspath", "(", "path", ".", "expanduser", "(", "arg", ")", ")", "if", "path", ".", "isdir", "(", "fullpath", ")", ":", "if", "fullpath", "[", "-", "1", "]", "==", "\"/\"", ":", "end", "=", "-", "2", "else", ":", "end", "=", "-", "1", "case", "=", "fullpath", ".", "split", "(", "\"/\"", ")", "[", "end", "]", "self", ".", "tests", "[", "case", "]", "=", "Analysis", "(", "fullpath", ",", "fullparse", ")", "self", ".", "do_set", "(", "case", ")", "else", ":", "msg", ".", "err", "(", "\"The folder {} does not exist.\"", ".", "format", "(", "fullpath", ")", ")" ]
Parse the test results from the specified directory and load them under
the name of 'module.executable' that they were created with.
E.g. parse classes.polya/
[ "Parse", "the", "test", "results", "from", "the", "specified", "directory", "and", "load", "them", "under", "the", "name", "of", "module", ".", "executable", "that", "they", "were", "created", "with", ".", "E", ".", "g", ".", "parse", "classes", ".", "polya", "/" ]
python
train
40.9375
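The case-name extraction in do_parse can be checked standalone; the index shift is what keeps a trailing slash from producing an empty name:

for p in ("/tests/classes.polya", "/tests/classes.polya/"):
    end = -2 if p[-1] == "/" else -1
    print(p.split("/")[end])   # prints "classes.polya" both times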
PhilipTrauner/Meh
meh/__init__.py
https://github.com/PhilipTrauner/Meh/blob/c9bb695c1cfbc645c3ccceb27e40dadba7678815/meh/__init__.py#L316-L326
def dumps(self):
    """
    Returns contents of config file as string

    OUT: out (type: str, hint: config content)
    """
    out = ""
    for option in self.options:
        value = make_value(option.default_value)
        out += "%s = %s%s\n" % (option.name, value,
                                (" # %s" % option.comment) if option.comment else "")
    return out.rstrip("\n")
[ "def", "dumps", "(", "self", ")", ":", "out", "=", "\"\"", "for", "option", "in", "self", ".", "options", ":", "value", "=", "make_value", "(", "option", ".", "default_value", ")", "out", "+=", "\"%s = %s%s\\n\"", "%", "(", "option", ".", "name", ",", "value", ",", "(", "\" # %s\"", "%", "option", ".", "comment", ")", "if", "option", ".", "comment", "else", "\"\"", ")", "return", "out", ".", "rstrip", "(", "\"\\n\"", ")" ]
Returns contents of config file as string

OUT: out (type: str, hint: config content)
[ "Returns", "contents", "of", "config", "file", "as", "string", "OUT", ":", "out", "(", "type", ":", "str", "hint", ":", "config", "content", ")" ]
python
train
29.454545
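A standalone restatement of the formatting rule in dumps (illustrative data; built-in repr stands in for Meh's make_value helper):

options = [("port", 8080, "TCP port to listen on"), ("debug", False, None)]
out = ""
for name, default_value, comment in options:
    out += "%s = %s%s\n" % (name, repr(default_value),
                            (" # %s" % comment) if comment else "")
print(out.rstrip("\n"))
# port = 8080 # TCP port to listen on
# debug = False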
kpdyer/libfte
fte/encoder.py
https://github.com/kpdyer/libfte/blob/74ed6ad197b6e72d1b9709c4dbc04041e05eb9b7/fte/encoder.py#L135-L166
def decode(self, covertext):
    """Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
    """
    if not isinstance(covertext, str):
        raise InvalidInputException('Input must be of type string.')

    insufficient = (len(covertext) < self._fixed_slice)
    if insufficient:
        raise DecodeFailureError(
            "Covertext is shorter than self._fixed_slice, can't decode.")

    maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
    rank_payload = self._dfa.rank(covertext[:self._fixed_slice])
    X = fte.bit_ops.long_to_bytes(rank_payload)
    X = string.rjust(X, maximumBytesToRank, '\x00')

    msg_len_header = self._encrypter.decryptOneBlock(
        X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
    msg_len_header = msg_len_header[8:16]
    msg_len = fte.bit_ops.bytes_to_long(
        msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])

    retval = X[16:16 + msg_len]
    retval += covertext[self._fixed_slice:]

    ctxt_len = self._encrypter.getCiphertextLen(retval)
    remaining_buffer = retval[ctxt_len:]
    retval = retval[:ctxt_len]
    retval = self._encrypter.decrypt(retval)

    return retval, remaining_buffer
[ "def", "decode", "(", "self", ",", "covertext", ")", ":", "if", "not", "isinstance", "(", "covertext", ",", "str", ")", ":", "raise", "InvalidInputException", "(", "'Input must be of type string.'", ")", "insufficient", "=", "(", "len", "(", "covertext", ")", "<", "self", ".", "_fixed_slice", ")", "if", "insufficient", ":", "raise", "DecodeFailureError", "(", "\"Covertext is shorter than self._fixed_slice, can't decode.\"", ")", "maximumBytesToRank", "=", "int", "(", "math", ".", "floor", "(", "self", ".", "getCapacity", "(", ")", "/", "8.0", ")", ")", "rank_payload", "=", "self", ".", "_dfa", ".", "rank", "(", "covertext", "[", ":", "self", ".", "_fixed_slice", "]", ")", "X", "=", "fte", ".", "bit_ops", ".", "long_to_bytes", "(", "rank_payload", ")", "X", "=", "string", ".", "rjust", "(", "X", ",", "maximumBytesToRank", ",", "'\\x00'", ")", "msg_len_header", "=", "self", ".", "_encrypter", ".", "decryptOneBlock", "(", "X", "[", ":", "DfaEncoderObject", ".", "_COVERTEXT_HEADER_LEN_CIPHERTTEXT", "]", ")", "msg_len_header", "=", "msg_len_header", "[", "8", ":", "16", "]", "msg_len", "=", "fte", ".", "bit_ops", ".", "bytes_to_long", "(", "msg_len_header", "[", ":", "DfaEncoderObject", ".", "_COVERTEXT_HEADER_LEN_PLAINTEXT", "]", ")", "retval", "=", "X", "[", "16", ":", "16", "+", "msg_len", "]", "retval", "+=", "covertext", "[", "self", ".", "_fixed_slice", ":", "]", "ctxt_len", "=", "self", ".", "_encrypter", ".", "getCiphertextLen", "(", "retval", ")", "remaining_buffer", "=", "retval", "[", "ctxt_len", ":", "]", "retval", "=", "retval", "[", ":", "ctxt_len", "]", "retval", "=", "self", ".", "_encrypter", ".", "decrypt", "(", "retval", ")", "return", "retval", ",", "remaining_buffer" ]
Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
[ "Given", "an", "input", "string", "unrank", "(", "X", "[", ":", "n", "]", ")", "||", "X", "[", "n", ":", "]", "returns", "X", "." ]
python
train
39.53125
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_hyperlink_labels.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_hyperlink_labels.py#L297-L370
def byte_href_anchors_state_machine(self):
    '''
    byte-based state machine extractor of anchor tags, so we can
    compute byte offsets for anchor texts and associate them with
    their href.

    Generates tuple(href_string, first_byte, byte_length, anchor_text)
    '''
    tag_depth = 0
    a_tag_depth = 0
    vals = []
    href = None
    idx_bytes = enumerate(self.clean_html)
    while 1:
        end_idx, val, next_b = read_to(idx_bytes, '<')
        tag_depth += 1
        if href:
            ## must be inside an anchor tag, so accumulate the
            ## whole anchor
            assert a_tag_depth > 0, (href, self.clean_html)
            vals.append(val)

        ## figure out if start of an "A" anchor tag or close
        ## of a previous tag
        idx, next_b1 = idx_bytes.next()
        if next_b1.lower() == 'a':
            ## could be start of "A" tag
            idx, next_b2 = idx_bytes.next()
            if next_b2 == ' ':
                a_tag_depth += 1
                href = None
                for idx, attr_name, attr_val in iter_attrs(idx_bytes):
                    if attr_name.lower() == 'href':
                        href = attr_val
                if idx is None:
                    ## doc ended mid tag, so invalid HTML--> just bail
                    return
                first = idx + 1

                ## if we got an href, then we want to keep the
                ## first byte idx of the anchor:
                if href:
                    ## Someone could nest an A tag inside another
                    ## A tag, which is invalid (even in HTML5), so
                    ## vals could be nonempty. We only generate
                    ## the leaf-level A tags in these rare cases
                    ## of nested A tags, so reset it:
                    vals = []

        elif next_b1 == '/':
            idx, next_b1 = idx_bytes.next()
            if next_b1 == 'a':
                ## could be end of "A" tag
                idx, next_b2 = idx_bytes.next()
                if next_b2 == '>':
                    a_tag_depth -= 1
                    if href:
                        ## join is much faster than using += above
                        anchor = b''.join(vals)
                        length = len(anchor)

                        ## yield the data
                        yield href, first, len(anchor), anchor

                        ## reset, no yield again in a nested A tag
                        href = None
        else:
            ## the next_b was not part of </a> or a nested <a tag,
            ## so keep it in the output
            vals.append(next_b)
[ "def", "byte_href_anchors_state_machine", "(", "self", ")", ":", "tag_depth", "=", "0", "a_tag_depth", "=", "0", "vals", "=", "[", "]", "href", "=", "None", "idx_bytes", "=", "enumerate", "(", "self", ".", "clean_html", ")", "while", "1", ":", "end_idx", ",", "val", ",", "next_b", "=", "read_to", "(", "idx_bytes", ",", "'<'", ")", "tag_depth", "+=", "1", "if", "href", ":", "## must be inside an anchor tag, so accumulate the", "## whole anchor", "assert", "a_tag_depth", ">", "0", ",", "(", "href", ",", "self", ".", "clean_html", ")", "vals", ".", "append", "(", "val", ")", "## figure out if start of an \"A\" anchor tag or close", "## of a previous tag", "idx", ",", "next_b1", "=", "idx_bytes", ".", "next", "(", ")", "if", "next_b1", ".", "lower", "(", ")", "==", "'a'", ":", "## could be start of \"A\" tag", "idx", ",", "next_b2", "=", "idx_bytes", ".", "next", "(", ")", "if", "next_b2", "==", "' '", ":", "a_tag_depth", "+=", "1", "href", "=", "None", "for", "idx", ",", "attr_name", ",", "attr_val", "in", "iter_attrs", "(", "idx_bytes", ")", ":", "if", "attr_name", ".", "lower", "(", ")", "==", "'href'", ":", "href", "=", "attr_val", "if", "idx", "is", "None", ":", "## doc ended mid tag, so invalid HTML--> just bail", "return", "first", "=", "idx", "+", "1", "## if we got an href, then we want to keep the", "## first byte idx of the anchor:", "if", "href", ":", "## Someone could nest an A tag inside another", "## A tag, which is invalid (even in HTML5), so", "## vals could be nonempty. We only generate", "## the leaf-level A tags in these rare cases", "## of nested A tags, so reset it:", "vals", "=", "[", "]", "elif", "next_b1", "==", "'/'", ":", "idx", ",", "next_b1", "=", "idx_bytes", ".", "next", "(", ")", "if", "next_b1", "==", "'a'", ":", "## could be end of \"A\" tag", "idx", ",", "next_b2", "=", "idx_bytes", ".", "next", "(", ")", "if", "next_b2", "==", "'>'", ":", "a_tag_depth", "-=", "1", "if", "href", ":", "## join is much faster than using += above", "anchor", "=", "b''", ".", "join", "(", "vals", ")", "length", "=", "len", "(", "anchor", ")", "## yield the data", "yield", "href", ",", "first", ",", "len", "(", "anchor", ")", ",", "anchor", "## reset, no yield again in a nested A tag", "href", "=", "None", "else", ":", "## the next_b was not part of </a> or a nested <a tag,", "## so keep it in the output", "vals", ".", "append", "(", "next_b", ")" ]
byte-based state machine extractor of anchor tags, so we can
compute byte offsets for anchor texts and associate them with
their href.

Generates tuple(href_string, first_byte, byte_length, anchor_text)
[ "byte", "-", "based", "state", "machine", "extractor", "of", "anchor", "tags", "so", "we", "can", "compute", "byte", "offsets", "for", "anchor", "texts", "and", "associate", "them", "with", "their", "href", ".", "Generates", "tuple", "(", "href_string", "first_byte", "byte_length", "anchor_text", ")" ]
python
test
38.621622
larsks/thecache
thecache/cache.py
https://github.com/larsks/thecache/blob/e535f91031a7f92f19b5ff6fe2a1a03c7680e9e0/thecache/cache.py#L146-L151
def store_lines(self, key, content):
    '''like store_iter, but appends a newline to each
    chunk of content'''
    return self.store_iter(
        key, (data + '\n'.encode('utf-8') for data in content))
[ "def", "store_lines", "(", "self", ",", "key", ",", "content", ")", ":", "return", "self", ".", "store_iter", "(", "key", ",", "(", "data", "+", "'\\n'", ".", "encode", "(", "'utf-8'", ")", "for", "data", "in", "content", ")", ")" ]
like store_iter, but appends a newline to each chunk of content
[ "like", "store_iter", "but", "appends", "a", "newline", "to", "each", "chunk", "of", "content" ]
python
train
38.166667
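The newline handling in isolation; the chunks must be bytes, since '\n'.encode('utf-8') is a bytes object:

content = [b"alpha", b"beta"]
print([data + '\n'.encode('utf-8') for data in content])
# [b'alpha\n', b'beta\n']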
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L1802-L1827
def _example_number_anywhere_for_type(num_type):
    """Gets a valid number for the specified number type (it may belong to any country).

    Arguments:
    num_type -- The type of number that is needed.

    Returns a valid number for the specified type. Returns None when the
    metadata does not contain such information. This should only happen when
    no numbers of this type are allocated anywhere in the world anymore.
    """
    for region_code in SUPPORTED_REGIONS:
        example_numobj = example_number_for_type(region_code, num_type)
        if example_numobj is not None:
            return example_numobj
    # If there wasn't an example number for a region, try the non-geographical entities.
    for country_calling_code in COUNTRY_CODES_FOR_NON_GEO_REGIONS:
        metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
        desc = _number_desc_by_type(metadata, num_type)
        if desc is not None and desc.example_number is not None:
            try:
                return parse(_PLUS_SIGN + unicod(country_calling_code) +
                             desc.example_number, UNKNOWN_REGION)
            except NumberParseException:  # pragma no cover
                pass
    # There are no example numbers of this type for any country in the library.
    return None
[ "def", "_example_number_anywhere_for_type", "(", "num_type", ")", ":", "for", "region_code", "in", "SUPPORTED_REGIONS", ":", "example_numobj", "=", "example_number_for_type", "(", "region_code", ",", "num_type", ")", "if", "example_numobj", "is", "not", "None", ":", "return", "example_numobj", "# If there wasn't an example number for a region, try the non-geographical entities.", "for", "country_calling_code", "in", "COUNTRY_CODES_FOR_NON_GEO_REGIONS", ":", "metadata", "=", "PhoneMetadata", ".", "metadata_for_nongeo_region", "(", "country_calling_code", ",", "None", ")", "desc", "=", "_number_desc_by_type", "(", "metadata", ",", "num_type", ")", "if", "desc", "is", "not", "None", "and", "desc", ".", "example_number", "is", "not", "None", ":", "try", ":", "return", "parse", "(", "_PLUS_SIGN", "+", "unicod", "(", "country_calling_code", ")", "+", "desc", ".", "example_number", ",", "UNKNOWN_REGION", ")", "except", "NumberParseException", ":", "# pragma no cover", "pass", "# There are no example numbers of this type for any country in the library.", "return", "None" ]
Gets a valid number for the specified number type (it may belong to any country).

Arguments:
num_type -- The type of number that is needed.

Returns a valid number for the specified type. Returns None when the
metadata does not contain such information. This should only happen when
no numbers of this type are allocated anywhere in the world anymore.
[ "Gets", "a", "valid", "number", "for", "the", "specified", "number", "type", "(", "it", "may", "belong", "to", "any", "country", ")", "." ]
python
train
48.769231
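A hedged usage sketch; num_type values come from the package's PhoneNumberType constants, and the leading underscore marks this as a module-internal helper:

numobj = _example_number_anywhere_for_type(PhoneNumberType.MOBILE)
# -> a PhoneNumber object for some region, or None if no such
#    numbers are allocated anywhere in the world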
happyleavesaoc/gstreamer-player
gsp/__init__.py
https://github.com/happyleavesaoc/gstreamer-player/blob/750edd95d4be4d2f8eee3aa3cb86d4781758f5fb/gsp/__init__.py#L165-L169
def state(self, state):
    """Set state."""
    self._state = state
    self._manager[ATTR_STATE] = state
    _LOGGER.info('state changed to %s', state)
[ "def", "state", "(", "self", ",", "state", ")", ":", "self", ".", "_state", "=", "state", "self", ".", "_manager", "[", "ATTR_STATE", "]", "=", "state", "_LOGGER", ".", "info", "(", "'state changed to %s'", ",", "state", ")" ]
Set state.
[ "Set", "state", "." ]
python
train
33
timothydmorton/isochrones
isochrones/starmodel_old.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L767-L841
def triangle(self, params=None, query=None, extent=0.999, **kwargs):
    """
    Makes a nifty corner plot.

    Uses :func:`triangle.corner`.

    :param params: (optional)
        Names of columns (from :attr:`StarModel.samples`) to plot.
        If ``None``, then it will plot samples of the parameters used
        in the MCMC fit-- that is, mass, age, [Fe/H], and optionally
        distance and A_V.

    :param query: (optional)
        Optional query on samples.

    :param extent: (optional)
        Will be appropriately passed to :func:`triangle.corner`.

    :param **kwargs:
        Additional keyword arguments passed to :func:`triangle.corner`.

    :return:
        Figure object containing corner plot.
    """
    if triangle is None:
        raise ImportError('please run "pip install triangle_plot".')

    if params is None:
        if self.fit_for_distance:
            params = ['mass', 'age', 'feh', 'distance', 'AV']
        else:
            params = ['mass', 'age', 'feh']

    df = self.samples
    if query is not None:
        df = df.query(query)

    # convert extent to ranges, but making sure
    # that truths are in range.
    extents = []
    remove = []
    for i, par in enumerate(params):
        m = re.search('delta_(\w+)$', par)
        if m:
            if type(self) == BinaryStarModel:
                b = m.group(1)
                values = (df['{}_mag_B'.format(b)] - df['{}_mag_A'.format(b)])
                df[par] = values
            else:
                remove.append(i)
                continue
        else:
            values = df[par]
        qs = np.array([0.5 - 0.5*extent, 0.5 + 0.5*extent])
        minval, maxval = values.quantile(qs)
        if 'truths' in kwargs:
            datarange = maxval - minval
            if kwargs['truths'][i] < minval:
                minval = kwargs['truths'][i] - 0.05*datarange
            if kwargs['truths'][i] > maxval:
                maxval = kwargs['truths'][i] + 0.05*datarange
        extents.append((minval, maxval))

    [params.pop(i) for i in remove]

    fig = triangle.corner(df[params], labels=params, extents=extents, **kwargs)
    fig.suptitle(self.name, fontsize=22)
    return fig
[ "def", "triangle", "(", "self", ",", "params", "=", "None", ",", "query", "=", "None", ",", "extent", "=", "0.999", ",", "*", "*", "kwargs", ")", ":", "if", "triangle", "is", "None", ":", "raise", "ImportError", "(", "'please run \"pip install triangle_plot\".'", ")", "if", "params", "is", "None", ":", "if", "self", ".", "fit_for_distance", ":", "params", "=", "[", "'mass'", ",", "'age'", ",", "'feh'", ",", "'distance'", ",", "'AV'", "]", "else", ":", "params", "=", "[", "'mass'", ",", "'age'", ",", "'feh'", "]", "df", "=", "self", ".", "samples", "if", "query", "is", "not", "None", ":", "df", "=", "df", ".", "query", "(", "query", ")", "#convert extent to ranges, but making sure", "# that truths are in range.", "extents", "=", "[", "]", "remove", "=", "[", "]", "for", "i", ",", "par", "in", "enumerate", "(", "params", ")", ":", "m", "=", "re", ".", "search", "(", "'delta_(\\w+)$'", ",", "par", ")", "if", "m", ":", "if", "type", "(", "self", ")", "==", "BinaryStarModel", ":", "b", "=", "m", ".", "group", "(", "1", ")", "values", "=", "(", "df", "[", "'{}_mag_B'", ".", "format", "(", "b", ")", "]", "-", "df", "[", "'{}_mag_A'", ".", "format", "(", "b", ")", "]", ")", "df", "[", "par", "]", "=", "values", "else", ":", "remove", ".", "append", "(", "i", ")", "continue", "else", ":", "values", "=", "df", "[", "par", "]", "qs", "=", "np", ".", "array", "(", "[", "0.5", "-", "0.5", "*", "extent", ",", "0.5", "+", "0.5", "*", "extent", "]", ")", "minval", ",", "maxval", "=", "values", ".", "quantile", "(", "qs", ")", "if", "'truths'", "in", "kwargs", ":", "datarange", "=", "maxval", "-", "minval", "if", "kwargs", "[", "'truths'", "]", "[", "i", "]", "<", "minval", ":", "minval", "=", "kwargs", "[", "'truths'", "]", "[", "i", "]", "-", "0.05", "*", "datarange", "if", "kwargs", "[", "'truths'", "]", "[", "i", "]", ">", "maxval", ":", "maxval", "=", "kwargs", "[", "'truths'", "]", "[", "i", "]", "+", "0.05", "*", "datarange", "extents", ".", "append", "(", "(", "minval", ",", "maxval", ")", ")", "[", "params", ".", "pop", "(", "i", ")", "for", "i", "in", "remove", "]", "fig", "=", "triangle", ".", "corner", "(", "df", "[", "params", "]", ",", "labels", "=", "params", ",", "extents", "=", "extents", ",", "*", "*", "kwargs", ")", "fig", ".", "suptitle", "(", "self", ".", "name", ",", "fontsize", "=", "22", ")", "return", "fig" ]
Makes a nifty corner plot.

Uses :func:`triangle.corner`.

:param params: (optional)
    Names of columns (from :attr:`StarModel.samples`) to plot.
    If ``None``, then it will plot samples of the parameters used
    in the MCMC fit-- that is, mass, age, [Fe/H], and optionally
    distance and A_V.

:param query: (optional)
    Optional query on samples.

:param extent: (optional)
    Will be appropriately passed to :func:`triangle.corner`.

:param **kwargs:
    Additional keyword arguments passed to :func:`triangle.corner`.

:return:
    Figure object containing corner plot.
[ "Makes", "a", "nifty", "corner", "plot", "." ]
python
train
33.08
christophertbrown/bioscripts
ctbBio/genome_variation.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L215-L252
def parse_fasta_annotations(fastas, annot_tables, trans_table):
    """
    parse gene call information from Prodigal fasta output
    """
    if annot_tables is not False:
        annots = {}
        for table in annot_tables:
            for cds in open(table):
                ID, start, end, strand = cds.strip().split()
                annots[ID] = [start, end, int(strand)]
    for fasta in fastas:
        for seq in parse_fasta(fasta):
            if ('# ;gc_cont' not in seq[0] and '# ID=' not in seq[0]) and annot_tables is False:
                print('# specify fasta from Prodigal or annotations table (-t)', file=sys.stderr)
                exit()
            if 'ID=' in seq[0]:
                ID = seq[0].rsplit('ID=', 1)[1].split(';', 1)[0]
                contig = seq[0].split()[0].split('>')[1].rsplit('_%s' % (ID), 1)[0]
            else:
                contig = seq[0].split()[0].split('>')[1].rsplit('_', 1)[0]
            locus = seq[0].split()[0].split('>')[1]
            # annotation info from Prodigal
            if ('# ;gc_cont' in seq[0] or '# ID=' in seq[0]):
                info = seq[0].split(' # ')
                start, end, strand = int(info[1]), int(info[2]), info[3]
                if strand == '1':
                    strand = 1
                else:
                    strand = -1
                product = [''.join(info[4].split()[1:])]
            # annotation info from table
            else:
                start, end, strand = annots[locus]
                product = seq[0].split(' ', 1)[1]
            info = {'transl_table': [trans_table],
                    'translation': [seq[1]],
                    'product': product}
            yield contig, [locus, [start, end, strand], info]
[ "def", "parse_fasta_annotations", "(", "fastas", ",", "annot_tables", ",", "trans_table", ")", ":", "if", "annot_tables", "is", "not", "False", ":", "annots", "=", "{", "}", "for", "table", "in", "annot_tables", ":", "for", "cds", "in", "open", "(", "table", ")", ":", "ID", ",", "start", ",", "end", ",", "strand", "=", "cds", ".", "strip", "(", ")", ".", "split", "(", ")", "annots", "[", "ID", "]", "=", "[", "start", ",", "end", ",", "int", "(", "strand", ")", "]", "for", "fasta", "in", "fastas", ":", "for", "seq", "in", "parse_fasta", "(", "fasta", ")", ":", "if", "(", "'# ;gc_cont'", "not", "in", "seq", "[", "0", "]", "and", "'# ID='", "not", "in", "seq", "[", "0", "]", ")", "and", "annot_tables", "is", "False", ":", "print", "(", "'# specify fasta from Prodigal or annotations table (-t)'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "if", "'ID='", "in", "seq", "[", "0", "]", ":", "ID", "=", "seq", "[", "0", "]", ".", "rsplit", "(", "'ID='", ",", "1", ")", "[", "1", "]", ".", "split", "(", "';'", ",", "1", ")", "[", "0", "]", "contig", "=", "seq", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "rsplit", "(", "'_%s'", "%", "(", "ID", ")", ",", "1", ")", "[", "0", "]", "else", ":", "contig", "=", "seq", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", ".", "rsplit", "(", "'_'", ",", "1", ")", "[", "0", "]", "locus", "=", "seq", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ".", "split", "(", "'>'", ")", "[", "1", "]", "# annotation info from Prodigal", "if", "(", "'# ;gc_cont'", "in", "seq", "[", "0", "]", "or", "'# ID='", "in", "seq", "[", "0", "]", ")", ":", "info", "=", "seq", "[", "0", "]", ".", "split", "(", "' # '", ")", "start", ",", "end", ",", "strand", "=", "int", "(", "info", "[", "1", "]", ")", ",", "int", "(", "info", "[", "2", "]", ")", ",", "info", "[", "3", "]", "if", "strand", "==", "'1'", ":", "strand", "=", "1", "else", ":", "strand", "=", "-", "1", "product", "=", "[", "''", ".", "join", "(", "info", "[", "4", "]", ".", "split", "(", ")", "[", "1", ":", "]", ")", "]", "# annotation info from table", "else", ":", "start", ",", "end", ",", "strand", "=", "annots", "[", "locus", "]", "product", "=", "seq", "[", "0", "]", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", "info", "=", "{", "'transl_table'", ":", "[", "trans_table", "]", ",", "'translation'", ":", "[", "seq", "[", "1", "]", "]", ",", "'product'", ":", "product", "}", "yield", "contig", ",", "[", "locus", ",", "[", "start", ",", "end", ",", "strand", "]", ",", "info", "]" ]
parse gene call information from Prodigal fasta output
[ "parse", "gene", "call", "information", "from", "Prodigal", "fasta", "output" ]
python
train
44.921053
frejanordsiek/hdf5storage
hdf5storage/Marshallers.py
https://github.com/frejanordsiek/hdf5storage/blob/539275141dd3a4efbbbfd9bdb978f3ed59e3f05d/hdf5storage/Marshallers.py#L264-L301
def get_type_string(self, data, type_string):
    """ Gets type string.

    Finds the type string for 'data' contained in
    ``python_type_strings`` using its ``type``. Non-``None``
    'type_string' overrides whatever type string is looked up. The
    override makes it easier for subclasses to convert something that
    the parent marshaller can write to disk but still put the right
    type string in place.

    Parameters
    ----------
    data : type to be marshalled
        The Python object that is being written to disk.
    type_string : str or None
        If it is a ``str``, it overrides any looked up type
        string. ``None`` means don't override.

    Returns
    -------
    str
        The type string associated with 'data'. Will be
        'type_string' if it is not ``None``.

    Notes
    -----
    Subclasses probably do not need to override this method.

    """
    if type_string is not None:
        return type_string
    else:
        tp = type(data)
        try:
            return self.type_to_typestring[tp]
        except KeyError:
            return self.type_to_typestring[tp.__module__ + '.' + tp.__name__]
[ "def", "get_type_string", "(", "self", ",", "data", ",", "type_string", ")", ":", "if", "type_string", "is", "not", "None", ":", "return", "type_string", "else", ":", "tp", "=", "type", "(", "data", ")", "try", ":", "return", "self", ".", "type_to_typestring", "[", "tp", "]", "except", "KeyError", ":", "return", "self", ".", "type_to_typestring", "[", "tp", ".", "__module__", "+", "'.'", "+", "tp", ".", "__name__", "]" ]
Gets type string.

Finds the type string for 'data' contained in
``python_type_strings`` using its ``type``. Non-``None``
'type_string' overrides whatever type string is looked up. The
override makes it easier for subclasses to convert something that
the parent marshaller can write to disk but still put the right
type string in place.

Parameters
----------
data : type to be marshalled
    The Python object that is being written to disk.
type_string : str or None
    If it is a ``str``, it overrides any looked up type
    string. ``None`` means don't override.

Returns
-------
str
    The type string associated with 'data'. Will be
    'type_string' if it is not ``None``.

Notes
-----
Subclasses probably do not need to override this method.
[ "Gets", "type", "string", "." ]
python
train
33.894737
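The precedence can be restated standalone: an explicit type_string wins, otherwise the type itself keys the mapping, with a fallback to the dotted module.name string (toy mapping below, not hdf5storage's real one):

import decimal

type_to_typestring = {int: 'int64', 'decimal.Decimal': 'decimal'}

def lookup(data, type_string=None):
    if type_string is not None:
        return type_string
    tp = type(data)
    try:
        return type_to_typestring[tp]
    except KeyError:
        return type_to_typestring[tp.__module__ + '.' + tp.__name__]

print(lookup(5))                    # 'int64'   (keyed by the type object)
print(lookup(decimal.Decimal(1)))   # 'decimal' (keyed by the dotted name)
print(lookup(5, 'forced'))          # 'forced'  (explicit override wins)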
SetBased/py-etlt
etlt/cleaner/MoneyCleaner.py
https://github.com/SetBased/py-etlt/blob/1c5b8ea60293c14f54d7845a9fe5c595021f66f2/etlt/cleaner/MoneyCleaner.py#L18-L43
def clean(amount):
    """
    Converts a number to a number with decimal point.

    :param str amount: The input number.

    :rtype: str
    """
    # Return empty input immediately.
    if not amount:
        return amount

    if re.search(r'[\. ][0-9]{3},[0-9]{1,2}$', amount):
        # Assume amount is in 1.123,12 or 1 123,12 format (Dutch).
        return amount.replace('.', '').replace(' ', '').replace(',', '.')

    if re.search(r'[, ][0-9]{3}\.[0-9]{1,2}$', amount):
        # Assume amount is in 1,123.12 format (English).
        return amount.replace(',', '').replace(' ', '')

    if re.search(r'[0-9](,[0-9]{1,2}$)', amount):
        # Assume amount is in 123,12 or in 123,1 format (Dutch).
        return amount.replace(',', '.')

    # Format of amount is not recognized. Return amount.
    return amount
[ "def", "clean", "(", "amount", ")", ":", "# Return empty input immediately.", "if", "not", "amount", ":", "return", "amount", "if", "re", ".", "search", "(", "r'[\\. ][0-9]{3},[0-9]{1,2}$'", ",", "amount", ")", ":", "# Assume amount is in 1.123,12 or 1 123,12 format (Dutch).", "return", "amount", ".", "replace", "(", "'.'", ",", "''", ")", ".", "replace", "(", "' '", ",", "''", ")", ".", "replace", "(", "','", ",", "'.'", ")", "if", "re", ".", "search", "(", "r'[, ][0-9]{3}\\.[0-9]{1,2}$'", ",", "amount", ")", ":", "# Assume amount is in 1,123.12 format (Engels).", "return", "amount", ".", "replace", "(", "','", ",", "''", ")", ".", "replace", "(", "' '", ",", "''", ")", "if", "re", ".", "search", "(", "r'[0-9](,[0-9]{1,2}$)'", ",", "amount", ")", ":", "# Assume amount is in 123,12 or in 123,1 format (Dutch).", "return", "amount", ".", "replace", "(", "','", ",", "'.'", ")", "# Format of amount is not recognized. Return amount.", "return", "amount" ]
Converts a number to a number with decimal point.

:param str amount: The input number.

:rtype: str
[ "Converts", "a", "number", "to", "a", "number", "with", "decimal", "point", "." ]
python
train
33.692308
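The three recognized layouts, traced through the regexes above:

print(clean('1.123,12'))   # '1123.12' - Dutch thousands dot, decimal comma
print(clean('1,123.12'))   # '1123.12' - English thousands comma
print(clean('123,1'))      # '123.1'   - bare Dutch decimal comma
print(clean('12-34'))      # '12-34'   - unrecognized, returned untouched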
onicagroup/runway
runway/context.py
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/context.py#L21-L26
def save_existing_iam_env_vars(self):
    """Backup IAM environment variables for later restoration."""
    for i in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
              'AWS_SESSION_TOKEN']:
        if i in self.env_vars:
            self.env_vars['OLD_' + i] = self.env_vars[i]
[ "def", "save_existing_iam_env_vars", "(", "self", ")", ":", "for", "i", "in", "[", "'AWS_ACCESS_KEY_ID'", ",", "'AWS_SECRET_ACCESS_KEY'", ",", "'AWS_SESSION_TOKEN'", "]", ":", "if", "i", "in", "self", ".", "env_vars", ":", "self", ".", "env_vars", "[", "'OLD_'", "+", "i", "]", "=", "self", ".", "env_vars", "[", "i", "]" ]
Backup IAM environment variables for later restoration.
[ "Backup", "IAM", "environment", "variables", "for", "later", "restoration", "." ]
python
train
50.333333
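Effect on the context's env_vars mapping (ctx is an illustrative instance; only the three IAM keys are copied):

ctx.env_vars = {'AWS_ACCESS_KEY_ID': 'AKIA...', 'PATH': '/usr/bin'}
ctx.save_existing_iam_env_vars()
# ctx.env_vars now also contains {'OLD_AWS_ACCESS_KEY_ID': 'AKIA...'};
# 'PATH' is left alone because it is not in the IAM list.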
google/grr
grr/client/grr_response_client/comms.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L1279-L1300
def InitiateEnrolment(self):
    """Initiate the enrollment process.

    We do not send more than one enrollment request every 10 minutes. Note
    that we still communicate to the server in fast poll mode, but these
    requests are not carrying any payload.
    """
    logging.debug("sending enrollment request")

    now = time.time()
    if now > self.last_enrollment_time + 10 * 60:
        if not self.last_enrollment_time:
            # This is the first enrollment request - we should enter fastpoll mode.
            self.timer.FastPoll()
        self.last_enrollment_time = now
        # Send registration request:
        self.client_worker.SendReply(
            rdf_crypto.Certificate(
                type=rdf_crypto.Certificate.Type.CSR,
                pem=self.communicator.GetCSRAsPem()),
            session_id=rdfvalue.SessionID(
                queue=queues.ENROLLMENT, flow_name="Enrol"))
[ "def", "InitiateEnrolment", "(", "self", ")", ":", "logging", ".", "debug", "(", "\"sending enrollment request\"", ")", "now", "=", "time", ".", "time", "(", ")", "if", "now", ">", "self", ".", "last_enrollment_time", "+", "10", "*", "60", ":", "if", "not", "self", ".", "last_enrollment_time", ":", "# This is the first enrollment request - we should enter fastpoll mode.", "self", ".", "timer", ".", "FastPoll", "(", ")", "self", ".", "last_enrollment_time", "=", "now", "# Send registration request:", "self", ".", "client_worker", ".", "SendReply", "(", "rdf_crypto", ".", "Certificate", "(", "type", "=", "rdf_crypto", ".", "Certificate", ".", "Type", ".", "CSR", ",", "pem", "=", "self", ".", "communicator", ".", "GetCSRAsPem", "(", ")", ")", ",", "session_id", "=", "rdfvalue", ".", "SessionID", "(", "queue", "=", "queues", ".", "ENROLLMENT", ",", "flow_name", "=", "\"Enrol\"", ")", ")" ]
Initiate the enrollment process.

We do not send more than one enrollment request every 10 minutes. Note
that we still communicate to the server in fast poll mode, but these
requests are not carrying any payload.
[ "Initiate", "the", "enrollment", "process", "." ]
python
train
39.318182
hongtaocai/googlefinance
googlefinance/__init__.py
https://github.com/hongtaocai/googlefinance/blob/9f703d8d4e00d645320d49186eee4520341ec273/googlefinance/__init__.py#L84-L105
def getQuotes(symbols):
    '''
    get real-time quotes (index, last trade price, last trade time, etc)
    for stocks, using google api:
    http://finance.google.com/finance/info?client=ig&q=symbols

    Unlike python package 'yahoo-finance' (15 min delay), there is no delay
    for NYSE and NASDAQ stocks in the 'googlefinance' package.

    example:
    quotes = getQuotes('AAPL')
    return:
    [{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
      u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
      u'StockSymbol': u'AAPL', u'ID': u'22144'}]

    quotes = getQuotes(['AAPL', 'GOOG'])
    return:
    [{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
      u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
      u'StockSymbol': u'AAPL', u'ID': u'22144'},
     {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34',
      u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34',
      u'Yield': u'', u'LastTradeTime': u'4:04PM EST',
      u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'',
      u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]

    :param symbols: a single symbol or a list of stock symbols
    :return: real-time quotes list
    '''
    if type(symbols) == type('str'):
        symbols = [symbols]
    content = json.loads(request(symbols))
    return replaceKeys(content)
[ "def", "getQuotes", "(", "symbols", ")", ":", "if", "type", "(", "symbols", ")", "==", "type", "(", "'str'", ")", ":", "symbols", "=", "[", "symbols", "]", "content", "=", "json", ".", "loads", "(", "request", "(", "symbols", ")", ")", "return", "replaceKeys", "(", "content", ")" ]
get real-time quotes (index, last trade price, last trade time, etc)
for stocks, using google api:
http://finance.google.com/finance/info?client=ig&q=symbols

Unlike python package 'yahoo-finance' (15 min delay), there is no delay
for NYSE and NASDAQ stocks in the 'googlefinance' package.

example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
  u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
  u'StockSymbol': u'AAPL', u'ID': u'22144'}]

quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09',
  u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47',
  u'StockSymbol': u'AAPL', u'ID': u'22144'},
 {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34',
  u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34',
  u'Yield': u'', u'LastTradeTime': u'4:04PM EST',
  u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'',
  u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]

:param symbols: a single symbol or a list of stock symbols
:return: real-time quotes list
[ "get", "real", "-", "time", "quotes", "(", "index", "last", "trade", "price", "last", "trade", "time", "etc", ")", "for", "stocks", "using", "google", "api", ":", "http", ":", "//", "finance", ".", "google", ".", "com", "/", "finance", "/", "info?client", "=", "ig&q", "=", "symbols" ]
python
train
71.727273
maxpumperla/elephas
elephas/worker.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/worker.py#L26-L49
def train(self, data_iterator):
    """Train a keras model on a worker
    """
    optimizer = get_optimizer(self.master_optimizer)
    self.model = model_from_yaml(self.yaml, self.custom_objects)
    self.model.compile(optimizer=optimizer,
                       loss=self.master_loss, metrics=self.master_metrics)
    self.model.set_weights(self.parameters.value)

    feature_iterator, label_iterator = tee(data_iterator, 2)
    x_train = np.asarray([x for x, y in feature_iterator])
    y_train = np.asarray([y for x, y in label_iterator])

    self.model.compile(optimizer=self.master_optimizer,
                       loss=self.master_loss,
                       metrics=self.master_metrics)

    weights_before_training = self.model.get_weights()
    if x_train.shape[0] > self.train_config.get('batch_size'):
        self.model.fit(x_train, y_train, **self.train_config)
    weights_after_training = self.model.get_weights()
    deltas = subtract_params(
        weights_before_training, weights_after_training)
    yield deltas
[ "def", "train", "(", "self", ",", "data_iterator", ")", ":", "optimizer", "=", "get_optimizer", "(", "self", ".", "master_optimizer", ")", "self", ".", "model", "=", "model_from_yaml", "(", "self", ".", "yaml", ",", "self", ".", "custom_objects", ")", "self", ".", "model", ".", "compile", "(", "optimizer", "=", "optimizer", ",", "loss", "=", "self", ".", "master_loss", ",", "metrics", "=", "self", ".", "master_metrics", ")", "self", ".", "model", ".", "set_weights", "(", "self", ".", "parameters", ".", "value", ")", "feature_iterator", ",", "label_iterator", "=", "tee", "(", "data_iterator", ",", "2", ")", "x_train", "=", "np", ".", "asarray", "(", "[", "x", "for", "x", ",", "y", "in", "feature_iterator", "]", ")", "y_train", "=", "np", ".", "asarray", "(", "[", "y", "for", "x", ",", "y", "in", "label_iterator", "]", ")", "self", ".", "model", ".", "compile", "(", "optimizer", "=", "self", ".", "master_optimizer", ",", "loss", "=", "self", ".", "master_loss", ",", "metrics", "=", "self", ".", "master_metrics", ")", "weights_before_training", "=", "self", ".", "model", ".", "get_weights", "(", ")", "if", "x_train", ".", "shape", "[", "0", "]", ">", "self", ".", "train_config", ".", "get", "(", "'batch_size'", ")", ":", "self", ".", "model", ".", "fit", "(", "x_train", ",", "y_train", ",", "*", "*", "self", ".", "train_config", ")", "weights_after_training", "=", "self", ".", "model", ".", "get_weights", "(", ")", "deltas", "=", "subtract_params", "(", "weights_before_training", ",", "weights_after_training", ")", "yield", "deltas" ]
Train a keras model on a worker
[ "Train", "a", "keras", "model", "on", "a", "worker" ]
python
train
45.583333
gem/oq-engine
openquake/hazardlib/gsim/nga_east.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/nga_east.py#L627-L637
def get_stddevs(self, mag, imt, stddev_types, num_sites):
    """
    Returns the total standard deviation
    """
    stddevs = []
    for stddev_type in stddev_types:
        assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if stddev_type == const.StdDev.TOTAL:
            sigma = self._get_total_sigma(imt, mag)
            stddevs.append(sigma + np.zeros(num_sites))
    return stddevs
[ "def", "get_stddevs", "(", "self", ",", "mag", ",", "imt", ",", "stddev_types", ",", "num_sites", ")", ":", "stddevs", "=", "[", "]", "for", "stddev_type", "in", "stddev_types", ":", "assert", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "if", "stddev_type", "==", "const", ".", "StdDev", ".", "TOTAL", ":", "sigma", "=", "self", ".", "_get_total_sigma", "(", "imt", ",", "mag", ")", "stddevs", ".", "append", "(", "sigma", "+", "np", ".", "zeros", "(", "num_sites", ")", ")", "return", "stddevs" ]
Returns the total standard deviation
[ "Returns", "the", "total", "standard", "deviation" ]
python
train
40.272727
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_arp.py#L64-L78
def hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hide_arp_holder = ET.SubElement(config, "hide-arp-holder",
                                    xmlns="urn:brocade.com:mgmt:brocade-arp")
    arp_entry = ET.SubElement(hide_arp_holder, "arp-entry")
    arp_ip_address_key = ET.SubElement(arp_entry, "arp-ip-address")
    arp_ip_address_key.text = kwargs.pop('arp_ip_address')
    interfacetype = ET.SubElement(arp_entry, "interfacetype")
    Port_channel = ET.SubElement(interfacetype, "Port-channel")
    Port_channel = ET.SubElement(Port_channel, "Port-channel")
    Port_channel.text = kwargs.pop('Port_channel')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "hide_arp_holder_arp_entry_interfacetype_Port_channel_Port_channel", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_arp_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-arp-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-arp\"", ")", "arp_entry", "=", "ET", ".", "SubElement", "(", "hide_arp_holder", ",", "\"arp-entry\"", ")", "arp_ip_address_key", "=", "ET", ".", "SubElement", "(", "arp_entry", ",", "\"arp-ip-address\"", ")", "arp_ip_address_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'arp_ip_address'", ")", "interfacetype", "=", "ET", ".", "SubElement", "(", "arp_entry", ",", "\"interfacetype\"", ")", "Port_channel", "=", "ET", ".", "SubElement", "(", "interfacetype", ",", "\"Port-channel\"", ")", "Port_channel", "=", "ET", ".", "SubElement", "(", "Port_channel", ",", "\"Port-channel\"", ")", "Port_channel", ".", "text", "=", "kwargs", ".", "pop", "(", "'Port_channel'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
53.866667
buildbot/buildbot
master/buildbot/reporters/gerrit.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/reporters/gerrit.py#L52-L64
def _handleLegacyResult(result):
    """
    make sure the result is backward compatible
    """
    if not isinstance(result, dict):
        warnings.warn('The Gerrit status callback uses the old way to '
                      'communicate results. The outcome might be not what is '
                      'expected.')
        message, verified, reviewed = result
        result = makeReviewResult(message,
                                  (GERRIT_LABEL_VERIFIED, verified),
                                  (GERRIT_LABEL_REVIEWED, reviewed))
    return result
[ "def", "_handleLegacyResult", "(", "result", ")", ":", "if", "not", "isinstance", "(", "result", ",", "dict", ")", ":", "warnings", ".", "warn", "(", "'The Gerrit status callback uses the old way to '", "'communicate results. The outcome might be not what is '", "'expected.'", ")", "message", ",", "verified", ",", "reviewed", "=", "result", "result", "=", "makeReviewResult", "(", "message", ",", "(", "GERRIT_LABEL_VERIFIED", ",", "verified", ")", ",", "(", "GERRIT_LABEL_REVIEWED", ",", "reviewed", ")", ")", "return", "result" ]
make sure the result is backward compatible
[ "make", "sure", "the", "result", "is", "backward", "compatible" ]
python
train
42.461538
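A sketch of the two paths; makeReviewResult and the two GERRIT_LABEL_* constants come from the same module:

legacy = ('Build failed.', -1, 0)        # old (message, verified, reviewed) tuple
upgraded = _handleLegacyResult(legacy)   # warns, then returns the dict form
passthrough = _handleLegacyResult({'message': 'ok'})   # dicts pass through as-is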
Fizzadar/pyinfra
pyinfra/modules/git.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/git.py#L23-L41
def config(
    state, host,
    key, value,
    repo=None,
):
    '''
    Manage git config for a repository or globally.

    + key: the key of the config to ensure
    + value: the value this key should have
    + repo: specify the git repo path to edit local config (defaults to global)
    '''
    existing_config = host.fact.git_config(repo)

    if key not in existing_config or existing_config[key] != value:
        if repo is None:
            yield 'git config --global {0} "{1}"'.format(key, value)
        else:
            yield 'cd {0} && git config --local {1} "{2}"'.format(repo, key, value)
[ "def", "config", "(", "state", ",", "host", ",", "key", ",", "value", ",", "repo", "=", "None", ",", ")", ":", "existing_config", "=", "host", ".", "fact", ".", "git_config", "(", "repo", ")", "if", "key", "not", "in", "existing_config", "or", "existing_config", "[", "key", "]", "!=", "value", ":", "if", "repo", "is", "None", ":", "yield", "'git config --global {0} \"{1}\"'", ".", "format", "(", "key", ",", "value", ")", "else", ":", "yield", "'cd {0} && git config --local {1} \"{2}\"'", ".", "format", "(", "repo", ",", "key", ",", "value", ")" ]
Manage git config for a repository or globally.

+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
[ "Manage", "git", "config", "for", "a", "repository", "or", "globally", "." ]
python
train
30.894737
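Because the operation is a generator, it yields shell commands only when a change is needed (state and host come from pyinfra's runtime; the values here are illustrative):

cmds = list(config(state, host, 'user.email', 'dev@example.com'))
# ['git config --global user.email "dev@example.com"'] if unset or different,
# [] if the existing global value already matches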
h2oai/h2o-3
scripts/summarizeIntermittens.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/summarizeIntermittens.py#L144-L179
def extractPrintSaveIntermittens():
    """
    This function will print out the intermittents onto the screen for casual
    viewing. It will also print out where the giant summary dictionary is going
    to be stored.

    :return: None
    """
    # extract intermittents from collected failed tests
    global g_summary_dict_intermittents

    localtz = time.tzname[0]

    for ind in range(len(g_summary_dict_all["TestName"])):
        if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure:
            addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind)

    # save dict in file
    if len(g_summary_dict_intermittents["TestName"]) > 0:
        json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w'))

        with open(g_summary_csv_filename, 'w') as summaryFile:
            for ind in range(len(g_summary_dict_intermittents["TestName"])):
                testName = g_summary_dict_intermittents["TestName"][ind]
                numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"]
                firstFailedTS = parser.parse(
                    time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"])) + ' ' + localtz)
                firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z")
                recentFail = parser.parse(
                    time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"])) + ' ' + localtz)
                recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z")
                eachTest = "{0}, {1}, {2}, {3}\n".format(
                    testName, recentFailStr, numberFailure,
                    g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0])
                summaryFile.write(eachTest)
                print("Intermittent: {0}, Last failed: {1}, Failed {2} times since "
                      "{3}".format(testName, recentFailStr, numberFailure, firstFailedStr))
[ "def", "extractPrintSaveIntermittens", "(", ")", ":", "# extract intermittents from collected failed tests", "global", "g_summary_dict_intermittents", "localtz", "=", "time", ".", "tzname", "[", "0", "]", "for", "ind", "in", "range", "(", "len", "(", "g_summary_dict_all", "[", "\"TestName\"", "]", ")", ")", ":", "if", "g_summary_dict_all", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"FailureCount\"", "]", ">=", "g_threshold_failure", ":", "addFailedTests", "(", "g_summary_dict_intermittents", ",", "g_summary_dict_all", ",", "ind", ")", "# save dict in file", "if", "len", "(", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", ")", ">", "0", ":", "json", ".", "dump", "(", "g_summary_dict_intermittents", ",", "open", "(", "g_summary_dict_name", ",", "'w'", ")", ")", "with", "open", "(", "g_summary_csv_filename", ",", "'w'", ")", "as", "summaryFile", ":", "for", "ind", "in", "range", "(", "len", "(", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", ")", ")", ":", "testName", "=", "g_summary_dict_intermittents", "[", "\"TestName\"", "]", "[", "ind", "]", "numberFailure", "=", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"FailureCount\"", "]", "firstFailedTS", "=", "parser", ".", "parse", "(", "time", ".", "ctime", "(", "min", "(", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"Timestamp\"", "]", ")", ")", "+", "' '", "+", "localtz", ")", "firstFailedStr", "=", "firstFailedTS", ".", "strftime", "(", "\"%a %b %d %H:%M:%S %Y %Z\"", ")", "recentFail", "=", "parser", ".", "parse", "(", "time", ".", "ctime", "(", "max", "(", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"Timestamp\"", "]", ")", ")", "+", "' '", "+", "localtz", ")", "recentFailStr", "=", "recentFail", ".", "strftime", "(", "\"%a %b %d %H:%M:%S %Y %Z\"", ")", "eachTest", "=", "\"{0}, {1}, {2}, {3}\\n\"", ".", "format", "(", "testName", ",", "recentFailStr", ",", "numberFailure", ",", "g_summary_dict_intermittents", "[", "\"TestInfo\"", "]", "[", "ind", "]", "[", "\"TestCategory\"", "]", "[", "0", "]", ")", "summaryFile", ".", "write", "(", "eachTest", ")", "print", "(", "\"Intermittent: {0}, Last failed: {1}, Failed {2} times since \"", "\"{3}\"", ".", "format", "(", "testName", ",", "recentFailStr", ",", "numberFailure", ",", "firstFailedStr", ")", ")" ]
This function will print out the intermittents onto the screen for casual
viewing. It will also print out where the giant summary dictionary is going
to be stored.

:return: None
[ "This", "function", "will", "print", "out", "the", "intermittents", "onto", "the", "screen", "for", "casual", "viewing", ".", "It", "will", "also", "print", "out", "where", "the", "giant", "summary", "dictionary", "is", "going", "to", "be", "stored", "." ]
python
test
55.333333
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/geometry/rect.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/rect.py#L115-L129
def padded(self, padding):
    """Return a new Rect padded (smaller) by padding on all sides

    Parameters
    ----------
    padding : float
        The padding.

    Returns
    -------
    rect : instance of Rect
        The padded rectangle.
    """
    return Rect(pos=(self.pos[0]+padding, self.pos[1]+padding),
                size=(self.size[0]-2*padding, self.size[1]-2*padding))
[ "def", "padded", "(", "self", ",", "padding", ")", ":", "return", "Rect", "(", "pos", "=", "(", "self", ".", "pos", "[", "0", "]", "+", "padding", ",", "self", ".", "pos", "[", "1", "]", "+", "padding", ")", ",", "size", "=", "(", "self", ".", "size", "[", "0", "]", "-", "2", "*", "padding", ",", "self", ".", "size", "[", "1", "]", "-", "2", "*", "padding", ")", ")" ]
Return a new Rect padded (smaller) by padding on all sides

Parameters
----------
padding : float
    The padding.

Returns
-------
rect : instance of Rect
    The padded rectangle.
[ "Return", "a", "new", "Rect", "padded", "(", "smaller", ")", "by", "padding", "on", "all", "sides" ]
python
train
28.266667
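Shrinking a 100x100 rect at the origin by 10 on every side:

r = Rect(pos=(0, 0), size=(100, 100))
p = r.padded(10)
# p.pos -> (10, 10), p.size -> (80, 80)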
ronaldguillen/wave
wave/fields.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/fields.py#L1018-L1029
def enforce_timezone(self, value):
    """
    When `self.default_timezone` is `None`, always return naive datetimes.
    When `self.default_timezone` is not `None`, always return aware datetimes.
    """
    field_timezone = getattr(self, 'timezone', self.default_timezone())
    if (field_timezone is not None) and not timezone.is_aware(value):
        return timezone.make_aware(value, field_timezone)
    elif (field_timezone is None) and timezone.is_aware(value):
        return timezone.make_naive(value, timezone.UTC())
    return value
[ "def", "enforce_timezone", "(", "self", ",", "value", ")", ":", "field_timezone", "=", "getattr", "(", "self", ",", "'timezone'", ",", "self", ".", "default_timezone", "(", ")", ")", "if", "(", "field_timezone", "is", "not", "None", ")", "and", "not", "timezone", ".", "is_aware", "(", "value", ")", ":", "return", "timezone", ".", "make_aware", "(", "value", ",", "field_timezone", ")", "elif", "(", "field_timezone", "is", "None", ")", "and", "timezone", ".", "is_aware", "(", "value", ")", ":", "return", "timezone", ".", "make_naive", "(", "value", ",", "timezone", ".", "UTC", "(", ")", ")", "return", "value" ]
When `self.default_timezone` is `None`, always return naive datetimes.
When `self.default_timezone` is not `None`, always return aware datetimes.
[ "When", "self", ".", "default_timezone", "is", "None", "always", "return", "naive", "datetimes", ".", "When", "self", ".", "default_timezone", "is", "not", "None", "always", "return", "aware", "datetimes", "." ]
python
train
47.75
opendatateam/udata
udata/models/slug_fields.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/slug_fields.py#L113-L193
def populate_slug(instance, field):
    '''
    Populate a slug field if needed.
    '''
    value = getattr(instance, field.db_field)

    try:
        previous = instance.__class__.objects.get(id=instance.id)
    except Exception:
        previous = None

    # Field value has changed
    changed = field.db_field in instance._get_changed_fields()
    # Field initial value has been manually set
    manual = not previous and value or changed

    if not manual and field.populate_from:
        # value to slugify is extracted from populate_from parameter
        value = getattr(instance, field.populate_from)
        if previous and value == getattr(previous, field.populate_from):
            return value

    if previous and getattr(previous, field.db_field) == value:
        # value is unchanged from DB
        return value

    if previous and not changed and not field.update:
        # Field is not manually set and slug should not update on change
        return value

    slug = field.slugify(value)

    # This can happen when serializing an object which does not contain
    # the properties used to generate the slug. Typically, when such
    # an object is passed to one of the Celery workers (see issue #20).
    if slug is None:
        return

    old_slug = getattr(previous, field.db_field, None)

    if slug == old_slug:
        return slug

    # Ensure uniqueness
    if field.unique:
        base_slug = slug
        index = 1
        qs = instance.__class__.objects
        if previous:
            qs = qs(id__ne=previous.id)

        def exists(s):
            return qs(
                class_check=False,
                **{field.db_field: s}
            ).limit(1).count(True) > 0

        while exists(slug):
            slug = '{0}-{1}'.format(base_slug, index)
            index += 1

    # Track old slugs for this class
    if field.follow and old_slug != slug:
        ns = instance.__class__.__name__
        # Destroy redirections from this new slug
        SlugFollow.objects(namespace=ns, old_slug=slug).delete()

        if old_slug:
            # Create a redirect for previous slug
            slug_follower, created = SlugFollow.objects.get_or_create(
                namespace=ns,
                old_slug=old_slug,
                auto_save=False,
            )
            slug_follower.new_slug = slug
            slug_follower.save()

            # Maintain previous redirects
            SlugFollow.objects(namespace=ns, new_slug=old_slug).update(new_slug=slug)

    setattr(instance, field.db_field, slug)
    return slug
[ "def", "populate_slug", "(", "instance", ",", "field", ")", ":", "value", "=", "getattr", "(", "instance", ",", "field", ".", "db_field", ")", "try", ":", "previous", "=", "instance", ".", "__class__", ".", "objects", ".", "get", "(", "id", "=", "instance", ".", "id", ")", "except", "Exception", ":", "previous", "=", "None", "# Field value has changed", "changed", "=", "field", ".", "db_field", "in", "instance", ".", "_get_changed_fields", "(", ")", "# Field initial value has been manually set", "manual", "=", "not", "previous", "and", "value", "or", "changed", "if", "not", "manual", "and", "field", ".", "populate_from", ":", "# value to slugify is extracted from populate_from parameter", "value", "=", "getattr", "(", "instance", ",", "field", ".", "populate_from", ")", "if", "previous", "and", "value", "==", "getattr", "(", "previous", ",", "field", ".", "populate_from", ")", ":", "return", "value", "if", "previous", "and", "getattr", "(", "previous", ",", "field", ".", "db_field", ")", "==", "value", ":", "# value is unchanged from DB", "return", "value", "if", "previous", "and", "not", "changed", "and", "not", "field", ".", "update", ":", "# Field is not manually set and slug should not update on change", "return", "value", "slug", "=", "field", ".", "slugify", "(", "value", ")", "# This can happen when serializing an object which does not contain", "# the properties used to generate the slug. Typically, when such", "# an object is passed to one of the Celery workers (see issue #20).", "if", "slug", "is", "None", ":", "return", "old_slug", "=", "getattr", "(", "previous", ",", "field", ".", "db_field", ",", "None", ")", "if", "slug", "==", "old_slug", ":", "return", "slug", "# Ensure uniqueness", "if", "field", ".", "unique", ":", "base_slug", "=", "slug", "index", "=", "1", "qs", "=", "instance", ".", "__class__", ".", "objects", "if", "previous", ":", "qs", "=", "qs", "(", "id__ne", "=", "previous", ".", "id", ")", "def", "exists", "(", "s", ")", ":", "return", "qs", "(", "class_check", "=", "False", ",", "*", "*", "{", "field", ".", "db_field", ":", "s", "}", ")", ".", "limit", "(", "1", ")", ".", "count", "(", "True", ")", ">", "0", "while", "exists", "(", "slug", ")", ":", "slug", "=", "'{0}-{1}'", ".", "format", "(", "base_slug", ",", "index", ")", "index", "+=", "1", "# Track old slugs for this class", "if", "field", ".", "follow", "and", "old_slug", "!=", "slug", ":", "ns", "=", "instance", ".", "__class__", ".", "__name__", "# Destroy redirections from this new slug", "SlugFollow", ".", "objects", "(", "namespace", "=", "ns", ",", "old_slug", "=", "slug", ")", ".", "delete", "(", ")", "if", "old_slug", ":", "# Create a redirect for previous slug", "slug_follower", ",", "created", "=", "SlugFollow", ".", "objects", ".", "get_or_create", "(", "namespace", "=", "ns", ",", "old_slug", "=", "old_slug", ",", "auto_save", "=", "False", ",", ")", "slug_follower", ".", "new_slug", "=", "slug", "slug_follower", ".", "save", "(", ")", "# Maintain previous redirects", "SlugFollow", ".", "objects", "(", "namespace", "=", "ns", ",", "new_slug", "=", "old_slug", ")", ".", "update", "(", "new_slug", "=", "slug", ")", "setattr", "(", "instance", ",", "field", ".", "db_field", ",", "slug", ")", "return", "slug" ]
Populate a slug field if needed.
[ "Populate", "a", "slug", "field", "if", "needed", "." ]
python
train
30.654321
shoebot/shoebot
shoebot/grammar/livecode.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/livecode.py#L66-L74
def reload_functions(self):
    """
    Replace functions in namespace with functions from edited_source.
    """
    with LiveExecution.lock:
        if self.edited_source:
            tree = ast.parse(self.edited_source)
            for f in [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]:
                self.ns[f.name].__code__ = meta.decompiler.compile_func(f, self.filename, self.ns).__code__
[ "def", "reload_functions", "(", "self", ")", ":", "with", "LiveExecution", ".", "lock", ":", "if", "self", ".", "edited_source", ":", "tree", "=", "ast", ".", "parse", "(", "self", ".", "edited_source", ")", "for", "f", "in", "[", "n", "for", "n", "in", "ast", ".", "walk", "(", "tree", ")", "if", "isinstance", "(", "n", ",", "ast", ".", "FunctionDef", ")", "]", ":", "self", ".", "ns", "[", "f", ".", "name", "]", ".", "__code__", "=", "meta", ".", "decompiler", ".", "compile_func", "(", "f", ",", "self", ".", "filename", ",", "self", ".", "ns", ")", ".", "__code__" ]
Replace functions in namespace with functions from edited_source.
[ "Replace", "functions", "in", "namespace", "with", "functions", "from", "edited_source", "." ]
python
valid
48.666667
budacom/trading-bots
trading_bots/contrib/clients.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/contrib/clients.py#L241-L243
def fetch_trades_since(self, since: int) -> List[Trade]:
    """Fetch trades since given timestamp."""
    return self._fetch_since('trades', self.market.code)(self._trades_since)(since)
[ "def", "fetch_trades_since", "(", "self", ",", "since", ":", "int", ")", "->", "List", "[", "Trade", "]", ":", "return", "self", ".", "_fetch_since", "(", "'trades'", ",", "self", ".", "market", ".", "code", ")", "(", "self", ".", "_trades_since", ")", "(", "since", ")" ]
Fetch trades since given timestamp.
[ "Fetch", "trades", "since", "given", "timestamp", "." ]
python
train
64
Becksteinlab/GromacsWrapper
gromacs/fileformats/blocks.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/blocks.py#L168-L179
def renumber_atoms(self):
    """Reset the molecule's atoms :attr:`number` to be 1-indexed"""
    if self.atoms:
        # reset the mapping
        self._anumb_to_atom = {}
        for i, atom in enumerate(self.atoms):
            atom.number = i + 1   # starting from 1
    else:
        self.logger("the number of atoms is zero - no renumbering")
[ "def", "renumber_atoms", "(", "self", ")", ":", "if", "self", ".", "atoms", ":", "# reset the mapping", "self", ".", "_anumb_to_atom", "=", "{", "}", "for", "i", ",", "atom", "in", "enumerate", "(", "self", ".", "atoms", ")", ":", "atom", ".", "number", "=", "i", "+", "1", "# starting from 1", "else", ":", "self", ".", "logger", "(", "\"the number of atoms is zero - no renumbering\"", ")" ]
Reset the molecule's atoms :attr:`number` to be 1-indexed
[ "Reset", "the", "molecule", "s", "atoms", ":", "attr", ":", "number", "to", "be", "1", "-", "indexed" ]
python
valid
30.833333
pre-commit/pre-commit
pre_commit/store.py
https://github.com/pre-commit/pre-commit/blob/72f98d26e690da11dc2e41861d14c58eb21930cb/pre_commit/store.py#L156-L176
def clone(self, repo, ref, deps=()):
    """Clone the given url and checkout the specific ref."""
    if os.path.isdir(repo):
        repo = os.path.abspath(repo)

    def clone_strategy(directory):
        env = git.no_git_env()

        def _git_cmd(*args):
            cmd_output('git', *args, cwd=directory, env=env)

        _git_cmd('init', '.')
        _git_cmd('remote', 'add', 'origin', repo)

        try:
            self._shallow_clone(ref, _git_cmd)
        except CalledProcessError:
            self._complete_clone(ref, _git_cmd)

    return self._new_repo(repo, ref, deps, clone_strategy)
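A hedged usage sketch, assuming an initialized pre_commit.store.Store instance; the repository URL and ref below are placeholders:

# store = Store()  # uses pre-commit's cache directory
repo_dir = store.clone(
    'https://github.com/pre-commit/pre-commit-hooks',  # placeholder URL
    'v2.1.0',                                          # placeholder ref
)
# a shallow clone of the ref is attempted first; a complete clone is the fallback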
[ "def", "clone", "(", "self", ",", "repo", ",", "ref", ",", "deps", "=", "(", ")", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "repo", ")", ":", "repo", "=", "os", ".", "path", ".", "abspath", "(", "repo", ")", "def", "clone_strategy", "(", "directory", ")", ":", "env", "=", "git", ".", "no_git_env", "(", ")", "def", "_git_cmd", "(", "*", "args", ")", ":", "cmd_output", "(", "'git'", ",", "*", "args", ",", "cwd", "=", "directory", ",", "env", "=", "env", ")", "_git_cmd", "(", "'init'", ",", "'.'", ")", "_git_cmd", "(", "'remote'", ",", "'add'", ",", "'origin'", ",", "repo", ")", "try", ":", "self", ".", "_shallow_clone", "(", "ref", ",", "_git_cmd", ")", "except", "CalledProcessError", ":", "self", ".", "_complete_clone", "(", "ref", ",", "_git_cmd", ")", "return", "self", ".", "_new_repo", "(", "repo", ",", "ref", ",", "deps", ",", "clone_strategy", ")" ]
Clone the given url and checkout the specific ref.
[ "Clone", "the", "given", "url", "and", "checkout", "the", "specific", "ref", "." ]
python
train
30.571429
uw-it-aca/uw-restclients-sws
uw_sws/term.py
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/term.py#L74-L88
def get_term_after(aterm):
    """
    Returns a uw_sws.models.Term object, for the term after the term given.
    """
    next_year = aterm.year
    if aterm.quarter == "autumn":
        next_quarter = QUARTER_SEQ[0]
    else:
        next_quarter = QUARTER_SEQ[QUARTER_SEQ.index(aterm.quarter) + 1]

    if next_quarter == "winter":
        next_year += 1

    return get_term_by_year_and_quarter(next_year, next_quarter)
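The wraparound is easiest to see with concrete values; this assumes QUARTER_SEQ is ordered as below (an assumption for illustration, not quoted from the module):

QUARTER_SEQ = ['winter', 'spring', 'summer', 'autumn']  # assumed ordering

# spring 2013 -> next quarter is 'summer', same year
# autumn 2013 -> sequence wraps to QUARTER_SEQ[0] == 'winter', so the year becomes 2014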
[ "def", "get_term_after", "(", "aterm", ")", ":", "next_year", "=", "aterm", ".", "year", "if", "aterm", ".", "quarter", "==", "\"autumn\"", ":", "next_quarter", "=", "QUARTER_SEQ", "[", "0", "]", "else", ":", "next_quarter", "=", "QUARTER_SEQ", "[", "QUARTER_SEQ", ".", "index", "(", "aterm", ".", "quarter", ")", "+", "1", "]", "if", "next_quarter", "==", "\"winter\"", ":", "next_year", "+=", "1", "return", "get_term_by_year_and_quarter", "(", "next_year", ",", "next_quarter", ")" ]
Returns a uw_sws.models.Term object, for the term after the term given.
[ "Returns", "a", "uw_sws", ".", "models", ".", "Term", "object", "for", "the", "term", "after", "the", "term", "given", "." ]
python
train
27.533333
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/handlers.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/handlers.py#L866-L902
def _attempt_slice_retry(self, shard_state, tstate):
    """Attempt to retry this slice.

    This method may modify shard_state and tstate to prepare for retry or
    fail.

    Args:
        shard_state: model.ShardState for current shard.
        tstate: model.TransientShardState for current shard.

    Returns:
        A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
        RETRY_SHARD if shard retry should be attempted.
    """
    if (shard_state.slice_retries + 1 <
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
        logging.warning(
            "Slice %s %s failed for the %s of up to %s attempts "
            "(%s of %s taskqueue execution attempts). "
            "Will retry now.",
            tstate.shard_id,
            tstate.slice_id,
            shard_state.slice_retries + 1,
            parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
            self.task_retry_count() + 1,
            parameters.config.TASK_MAX_ATTEMPTS)
        # Clear info related to current exception. Otherwise, the real
        # callstack that includes a frame for this method will show up
        # in log.
        sys.exc_clear()
        self._try_free_lease(shard_state, slice_retry=True)
        return self._TASK_DIRECTIVE.RETRY_SLICE

    if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
        logging.warning("Slice attempt %s exceeded %s max attempts.",
                        self.task_retry_count() + 1,
                        parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
    return self._TASK_DIRECTIVE.RETRY_SHARD
[ "def", "_attempt_slice_retry", "(", "self", ",", "shard_state", ",", "tstate", ")", ":", "if", "(", "shard_state", ".", "slice_retries", "+", "1", "<", "parameters", ".", "config", ".", "TASK_MAX_DATA_PROCESSING_ATTEMPTS", ")", ":", "logging", ".", "warning", "(", "\"Slice %s %s failed for the %s of up to %s attempts \"", "\"(%s of %s taskqueue execution attempts). \"", "\"Will retry now.\"", ",", "tstate", ".", "shard_id", ",", "tstate", ".", "slice_id", ",", "shard_state", ".", "slice_retries", "+", "1", ",", "parameters", ".", "config", ".", "TASK_MAX_DATA_PROCESSING_ATTEMPTS", ",", "self", ".", "task_retry_count", "(", ")", "+", "1", ",", "parameters", ".", "config", ".", "TASK_MAX_ATTEMPTS", ")", "# Clear info related to current exception. Otherwise, the real", "# callstack that includes a frame for this method will show up", "# in log.", "sys", ".", "exc_clear", "(", ")", "self", ".", "_try_free_lease", "(", "shard_state", ",", "slice_retry", "=", "True", ")", "return", "self", ".", "_TASK_DIRECTIVE", ".", "RETRY_SLICE", "if", "parameters", ".", "config", ".", "TASK_MAX_DATA_PROCESSING_ATTEMPTS", ">", "0", ":", "logging", ".", "warning", "(", "\"Slice attempt %s exceeded %s max attempts.\"", ",", "self", ".", "task_retry_count", "(", ")", "+", "1", ",", "parameters", ".", "config", ".", "TASK_MAX_DATA_PROCESSING_ATTEMPTS", ")", "return", "self", ".", "_TASK_DIRECTIVE", ".", "RETRY_SHARD" ]
Attempt to retry this slice.

This method may modify shard_state and tstate to prepare for retry or
fail.

Args:
    shard_state: model.ShardState for current shard.
    tstate: model.TransientShardState for current shard.

Returns:
    A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
    RETRY_SHARD if shard retry should be attempted.
[ "Attempt", "to", "retry", "this", "slice", "." ]
python
train
40.567568
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L1905-L1976
def length(self, threshold=0.2, phys=False, ang=False, tdisrupt=None,
           **kwargs):
    """
    NAME:

       length

    PURPOSE:

       calculate the length of the stream

    INPUT:

       threshold - threshold down from the density near the progenitor
                   at which to define the 'end' of the stream

       phys= (False) if True, return the length in physical kpc

       ang= (False) if True, return the length in sky angular arc length
            in degree

       coord - coordinate to return the density in ('apar' [default],
               'll','ra','customra','phi')

    OUTPUT:

       length (rad for parallel angle; kpc for physical length; deg for
       sky arc length)

    HISTORY:

       2015-12-22 - Written - Bovy (UofT)

    """
    peak_dens = self.density_par(0.1, tdisrupt=tdisrupt, **kwargs)  # assume that this is the peak
    try:
        result = \
            optimize.brentq(lambda x: self.density_par(x,
                                                       tdisrupt=tdisrupt,
                                                       **kwargs)
                            - peak_dens * threshold,
                            0.1, self._deltaAngleTrack)
    except RuntimeError:  # pragma: no cover
        raise RuntimeError('Length could not be returned, because length method failed to find the threshold value')
    except ValueError:
        raise ValueError('Length could not be returned, because length method failed to initialize')
    if phys:
        # Need to now integrate length
        dXda = self._interpTrackX.derivative()
        dYda = self._interpTrackY.derivative()
        dZda = self._interpTrackZ.derivative()
        result = integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.
                                                      + dYda(da)**2.
                                                      + dZda(da)**2.),
                                0., result)[0] * self._ro
    elif ang:
        # Need to now integrate length
        if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:, 0], -1)
                        - self._interpolatedObsTrackLB[:, 0]) > 0.:
            ll = dePeriod(self._interpolatedObsTrackLB[:, 0][:, numpy.newaxis].T * numpy.pi / 180.).T * 180. / numpy.pi
        else:
            ll = dePeriod(self._interpolatedObsTrackLB[::-1, 0][:, numpy.newaxis].T * numpy.pi / 180.).T[::-1] * 180. / numpy.pi
        if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:, 1], -1)
                        - self._interpolatedObsTrackLB[:, 1]) > 0.:
            bb = dePeriod(self._interpolatedObsTrackLB[:, 1][:, numpy.newaxis].T * numpy.pi / 180.).T * 180. / numpy.pi
        else:
            bb = dePeriod(self._interpolatedObsTrackLB[::-1, 1][:, numpy.newaxis].T * numpy.pi / 180.).T[::-1] * 180. / numpy.pi
        dlda = interpolate.InterpolatedUnivariateSpline(
            self._interpolatedThetasTrack, ll, k=3).derivative()
        dbda = interpolate.InterpolatedUnivariateSpline(
            self._interpolatedThetasTrack, bb, k=3).derivative()
        result = integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.
                                                      + dbda(da)**2.),
                                0., result)[0]
    return result
[ "def", "length", "(", "self", ",", "threshold", "=", "0.2", ",", "phys", "=", "False", ",", "ang", "=", "False", ",", "tdisrupt", "=", "None", ",", "*", "*", "kwargs", ")", ":", "peak_dens", "=", "self", ".", "density_par", "(", "0.1", ",", "tdisrupt", "=", "tdisrupt", ",", "*", "*", "kwargs", ")", "# assume that this is the peak", "try", ":", "result", "=", "optimize", ".", "brentq", "(", "lambda", "x", ":", "self", ".", "density_par", "(", "x", ",", "tdisrupt", "=", "tdisrupt", ",", "*", "*", "kwargs", ")", "-", "peak_dens", "*", "threshold", ",", "0.1", ",", "self", ".", "_deltaAngleTrack", ")", "except", "RuntimeError", ":", "#pragma: no cover", "raise", "RuntimeError", "(", "'Length could not be returned, because length method failed to find the threshold value'", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Length could not be returned, because length method failed to initialize'", ")", "if", "phys", ":", "# Need to now integrate length", "dXda", "=", "self", ".", "_interpTrackX", ".", "derivative", "(", ")", "dYda", "=", "self", ".", "_interpTrackY", ".", "derivative", "(", ")", "dZda", "=", "self", ".", "_interpTrackZ", ".", "derivative", "(", ")", "result", "=", "integrate", ".", "quad", "(", "lambda", "da", ":", "numpy", ".", "sqrt", "(", "dXda", "(", "da", ")", "**", "2.", "+", "dYda", "(", "da", ")", "**", "2.", "+", "dZda", "(", "da", ")", "**", "2.", ")", ",", "0.", ",", "result", ")", "[", "0", "]", "*", "self", ".", "_ro", "elif", "ang", ":", "# Need to now integrate length", "if", "numpy", ".", "median", "(", "numpy", ".", "roll", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", ",", "-", "1", ")", "-", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", ")", ">", "0.", ":", "ll", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "0", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "*", "180.", "/", "numpy", ".", "pi", "else", ":", "ll", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ":", "-", "1", ",", "0", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "[", ":", ":", "-", "1", "]", "*", "180.", "/", "numpy", ".", "pi", "if", "numpy", ".", "median", "(", "numpy", ".", "roll", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", ",", "-", "1", ")", "-", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", ")", ">", "0.", ":", "bb", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ",", "1", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "*", "180.", "/", "numpy", ".", "pi", "else", ":", "bb", "=", "dePeriod", "(", "self", ".", "_interpolatedObsTrackLB", "[", ":", ":", "-", "1", ",", "1", "]", "[", ":", ",", "numpy", ".", "newaxis", "]", ".", "T", "*", "numpy", ".", "pi", "/", "180.", ")", ".", "T", "[", ":", ":", "-", "1", "]", "*", "180.", "/", "numpy", ".", "pi", "dlda", "=", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "self", ".", "_interpolatedThetasTrack", ",", "ll", ",", "k", "=", "3", ")", ".", "derivative", "(", ")", "dbda", "=", "interpolate", ".", "InterpolatedUnivariateSpline", "(", "self", ".", "_interpolatedThetasTrack", ",", "bb", ",", "k", "=", "3", ")", ".", "derivative", "(", ")", "result", "=", "integrate", ".", "quad", "(", "lambda", "da", ":", "numpy", ".", "sqrt", "(", "dlda", "(", "da", 
")", "**", "2.", "+", "dbda", "(", "da", ")", "**", "2.", ")", ",", "0.", ",", "result", ")", "[", "0", "]", "return", "result" ]
NAME:

   length

PURPOSE:

   calculate the length of the stream

INPUT:

   threshold - threshold down from the density near the progenitor at
               which to define the 'end' of the stream

   phys= (False) if True, return the length in physical kpc

   ang= (False) if True, return the length in sky angular arc length in
        degree

   coord - coordinate to return the density in ('apar' [default],
           'll','ra','customra','phi')

OUTPUT:

   length (rad for parallel angle; kpc for physical length; deg for sky
   arc length)

HISTORY:

   2015-12-22 - Written - Bovy (UofT)
[ "NAME", ":" ]
python
train
46.25
msmbuilder/msmbuilder
msmbuilder/msm/msm.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/msm.py#L212-L281
def eigtransform(self, sequences, right=True, mode='clip'):
    r"""Transform a list of sequences by projecting the sequences onto
    the first `n_timescales` dynamical eigenvectors.

    Parameters
    ----------
    sequences : list of array-like
        List of sequences, or a single sequence. Each sequence should be
        a 1D iterable of state labels. Labels can be integers, strings,
        or other orderable objects.
    right : bool
        Which eigenvectors to map onto. Both the left (:math:`\Phi`) and
        the right (:math:`\Psi`) eigenvectors of the transition matrix
        are commonly used, and differ in their normalization. The two
        sets of eigenvectors are related by the stationary distribution ::

            \Phi_i(x) = \Psi_i(x) * \mu(x)

        In the MSM literature, the right vectors (default here) are
        approximations to the transfer operator eigenfunctions, whereas
        the left eigenfunctions are approximations to the propagator
        eigenfunctions. For more details, refer to reference [1].
    mode : {'clip', 'fill'}
        Method by which to treat labels in `sequences` which do not have
        a corresponding index. This can be due, for example, to the
        ergodic trimming step.

        ``clip``
            Unmapped labels are removed during transform. If they occur
            at the beginning or end of a sequence, the resulting
            transformed sequence will be shortened. If they occur in the
            middle of a sequence, that sequence will be broken into two
            (or more) sequences. (Default)
        ``fill``
            Unmapped labels will be replaced with NaN, to signal missing
            data. [The use of NaN to signal missing data is not
            fantastic, but it's consistent with current behavior of the
            ``pandas`` library.]

    Returns
    -------
    transformed : list of 2d arrays
        Each element of transformed is an array of shape
        ``(n_samples, n_timescales)`` containing the transformed data.

    References
    ----------
    .. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular
       kinetics: Generation and validation." J. Chem. Phys. 134.17
       (2011): 174105.
    """
    result = []
    for y in self.transform(sequences, mode=mode):
        if right:
            op = self.right_eigenvectors_[:, 1:]
        else:
            op = self.left_eigenvectors_[:, 1:]

        is_finite = np.isfinite(y)
        if not np.all(is_finite):
            value = np.empty((y.shape[0], op.shape[1]))
            value[is_finite, :] = np.take(op, y[is_finite].astype(np.int), axis=0)
            value[~is_finite, :] = np.nan
        else:
            value = np.take(op, y, axis=0)

        result.append(value)

    return result
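A sketch of typical use, with names as documented by msmbuilder; the input sequences are placeholder data:

from msmbuilder.msm import MarkovStateModel

# sequences: list of 1D arrays of state labels (placeholder data)
msm = MarkovStateModel(n_timescales=3)
msm.fit(sequences)
projected = msm.eigtransform(sequences, right=True, mode='clip')
# each projected[i] has shape (n_samples_i, n_timescales)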
[ "def", "eigtransform", "(", "self", ",", "sequences", ",", "right", "=", "True", ",", "mode", "=", "'clip'", ")", ":", "result", "=", "[", "]", "for", "y", "in", "self", ".", "transform", "(", "sequences", ",", "mode", "=", "mode", ")", ":", "if", "right", ":", "op", "=", "self", ".", "right_eigenvectors_", "[", ":", ",", "1", ":", "]", "else", ":", "op", "=", "self", ".", "left_eigenvectors_", "[", ":", ",", "1", ":", "]", "is_finite", "=", "np", ".", "isfinite", "(", "y", ")", "if", "not", "np", ".", "all", "(", "is_finite", ")", ":", "value", "=", "np", ".", "empty", "(", "(", "y", ".", "shape", "[", "0", "]", ",", "op", ".", "shape", "[", "1", "]", ")", ")", "value", "[", "is_finite", ",", ":", "]", "=", "np", ".", "take", "(", "op", ",", "y", "[", "is_finite", "]", ".", "astype", "(", "np", ".", "int", ")", ",", "axis", "=", "0", ")", "value", "[", "~", "is_finite", ",", ":", "]", "=", "np", ".", "nan", "else", ":", "value", "=", "np", ".", "take", "(", "op", ",", "y", ",", "axis", "=", "0", ")", "result", ".", "append", "(", "value", ")", "return", "result" ]
r"""Transform a list of sequences by projecting the sequences onto the first `n_timescales` dynamical eigenvectors. Parameters ---------- sequences : list of array-like List of sequences, or a single sequence. Each sequence should be a 1D iterable of state labels. Labels can be integers, strings, or other orderable objects. right : bool Which eigenvectors to map onto. Both the left (:math:`\Phi`) and the right (:math`\Psi`) eigenvectors of the transition matrix are commonly used, and differ in their normalization. The two sets of eigenvectors are related by the stationary distribution :: \Phi_i(x) = \Psi_i(x) * \mu(x) In the MSM literature, the right vectors (default here) are approximations to the transfer operator eigenfunctions, whereas the left eigenfunction are approximations to the propagator eigenfunctions. For more details, refer to reference [1]. mode : {'clip', 'fill'} Method by which to treat labels in `sequences` which do not have a corresponding index. This can be due, for example, to the ergodic trimming step. ``clip`` Unmapped labels are removed during transform. If they occur at the beginning or end of a sequence, the resulting transformed sequence will be shorted. If they occur in the middle of a sequence, that sequence will be broken into two (or more) sequences. (Default) ``fill`` Unmapped labels will be replaced with NaN, to signal missing data. [The use of NaN to signal missing data is not fantastic, but it's consistent with current behavior of the ``pandas`` library.] Returns ------- transformed : list of 2d arrays Each element of transformed is an array of shape ``(n_samples, n_timescales)`` containing the transformed data. References ---------- .. [1] Prinz, Jan-Hendrik, et al. "Markov models of molecular kinetics: Generation and validation." J. Chem. Phys. 134.17 (2011): 174105.
[ "r", "Transform", "a", "list", "of", "sequences", "by", "projecting", "the", "sequences", "onto", "the", "first", "n_timescales", "dynamical", "eigenvectors", "." ]
python
train
41.985714
lingfeiwang/findr-python
findr/pij.py
https://github.com/lingfeiwang/findr-python/blob/417f163e658fee6ef311571f7048f96069a0cf1f/findr/pij.py#L551-L578
def cassist(self, dc, dt, dt2, nodiag=False, memlimit=-1):
    """Calculates probability of gene i regulating gene j with continuous
    data assisted method, with the recommended combination of multiple
    tests. Probabilities are converted from likelihood ratios separately
    for each A. This gives better predictions when the number of secondary
    targets (dt2) is large. (Check program warnings.)

    dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default))
        Continuous anchor data. Entry dc[i,j] is anchor i's value for
        sample j. Anchor i is used to infer the probability of
        gene i -> any other gene.
    dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default))
        Gene expression data for A. Entry dt[i,j] is gene i's expression
        level for sample j.
    dt2: numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default))
        Gene expression data for B. dt2 has the same format as dt, and can
        be identical with, different from, or a superset of dt. When dt2
        is a superset of (or identical with) dt, dt2 must be arranged to
        be identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt,
        and set parameter nodiag = 1.
    nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
        This should be set to True when A is a subset of B and aligned
        correspondingly.
    memlimit: The approximate memory usage limit in bytes for the library.
        For datasets requiring a larger memory, calculation will be split
        into smaller chunks. If the memory limit is smaller than the
        minimum required, calculation can fail with an error message.
        memlimit=0 defaults to unlimited memory usage.

    Return: dictionary with the following keys:
        ret: 0 iff execution succeeded.
        p:   numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
             Probability function for the recommended combination of
             multiple tests.

    For more information on tests, see paper.
    ftype can be found in auto.py.
    Example: see findr.examples.geuvadis5
    """
    return _cassist_any(self, dc, dt, dt2, "pij_cassist",
                        nodiag=nodiag, memlimit=memlimit)
[ "def", "cassist", "(", "self", ",", "dc", ",", "dt", ",", "dt2", ",", "nodiag", "=", "False", ",", "memlimit", "=", "-", "1", ")", ":", "return", "_cassist_any", "(", "self", ",", "dc", ",", "dt", ",", "dt2", ",", "\"pij_cassist\"", ",", "nodiag", "=", "nodiag", ",", "memlimit", "=", "memlimit", ")" ]
Calculates probability of gene i regulating gene j with continuous data
assisted method, with the recommended combination of multiple tests.
Probabilities are converted from likelihood ratios separately for each A.
This gives better predictions when the number of secondary targets (dt2)
is large. (Check program warnings.)

dc: numpy.ndarray(nt,ns,dtype=ftype(='f4' by default))
    Continuous anchor data. Entry dc[i,j] is anchor i's value for
    sample j. Anchor i is used to infer the probability of
    gene i -> any other gene.
dt: numpy.ndarray(nt,ns,dtype=ftype(='=f4' by default))
    Gene expression data for A. Entry dt[i,j] is gene i's expression
    level for sample j.
dt2: numpy.ndarray(nt2,ns,dtype=ftype(='=f4' by default))
    Gene expression data for B. dt2 has the same format as dt, and can be
    identical with, different from, or a superset of dt. When dt2 is a
    superset of (or identical with) dt, dt2 must be arranged to be
    identical with dt at its upper submatrix, i.e. dt2[:nt,:]=dt, and set
    parameter nodiag = 1.
nodiag: skip diagonal regulations, i.e. regulation A->B for A=B.
    This should be set to True when A is a subset of B and aligned
    correspondingly.
memlimit: The approximate memory usage limit in bytes for the library.
    For datasets requiring a larger memory, calculation will be split
    into smaller chunks. If the memory limit is smaller than the minimum
    required, calculation can fail with an error message. memlimit=0
    defaults to unlimited memory usage.

Return: dictionary with the following keys:
    ret: 0 iff execution succeeded.
    p:   numpy.ndarray((nt,nt2),dtype=ftype(='=f4' by default)).
         Probability function for the recommended combination of
         multiple tests.

For more information on tests, see paper.
ftype can be found in auto.py.
Example: see findr.examples.geuvadis5
[ "Calculates", "probability", "of", "gene", "i", "regulating", "gene", "j", "with", "continuous", "data", "assisted", "method", "with", "the", "recommended", "combination", "of", "multiple", "tests", ".", "Probabilities", "are", "converted", "from", "likelihood", "ratios", "separately", "for", "each", "A", ".", "This", "gives", "better", "predictions", "when", "the", "number", "of", "secondary", "targets", "(", "dt2", ")", "is", "large", ".", "(", "Check", "program", "warnings", ".", ")", "dc", ":", "numpy", ".", "ndarray", "(", "nt", "ns", "dtype", "=", "ftype", "(", "=", "f4", "by", "default", "))", "Continuous", "anchor", "data", ".", "Entry", "dc", "[", "i", "j", "]", "is", "anchor", "i", "s", "value", "for", "sample", "j", ".", "Anchor", "i", "is", "used", "to", "infer", "the", "probability", "of", "gene", "i", "-", ">", "any", "other", "gene", ".", "dt", ":", "numpy", ".", "ndarray", "(", "nt", "ns", "dtype", "=", "ftype", "(", "=", "=", "f4", "by", "default", "))", "Gene", "expression", "data", "for", "A", "Entry", "dt", "[", "i", "j", "]", "is", "gene", "i", "s", "expression", "level", "for", "sample", "j", ".", "dt2", ":", "numpy", ".", "ndarray", "(", "nt2", "ns", "dtype", "=", "ftype", "(", "=", "=", "f4", "by", "default", "))", "Gene", "expression", "data", "for", "B", ".", "dt2", "has", "the", "same", "format", "as", "dt", "and", "can", "be", "identical", "with", "different", "from", "or", "a", "superset", "of", "dt", ".", "When", "dt2", "is", "a", "superset", "of", "(", "or", "identical", "with", ")", "dt", "dt2", "must", "be", "arranged", "to", "be", "identical", "with", "dt", "at", "its", "upper", "submatrix", "i", ".", "e", ".", "dt2", "[", ":", "nt", ":", "]", "=", "dt", "and", "set", "parameter", "nodiag", "=", "1", ".", "nodiag", ":", "skip", "diagonal", "regulations", "i", ".", "e", ".", "regulation", "A", "-", ">", "B", "for", "A", "=", "B", ".", "This", "should", "be", "set", "to", "True", "when", "A", "is", "a", "subset", "of", "B", "and", "aligned", "correspondingly", ".", "memlimit", ":", "The", "approximate", "memory", "usage", "limit", "in", "bytes", "for", "the", "library", ".", "For", "datasets", "require", "a", "larger", "memory", "calculation", "will", "be", "split", "into", "smaller", "chunks", ".", "If", "the", "memory", "limit", "is", "smaller", "than", "minimum", "required", "calculation", "can", "fail", "with", "an", "error", "message", ".", "memlimit", "=", "0", "defaults", "to", "unlimited", "memory", "usage", ".", "Return", ":", "dictionary", "with", "following", "keys", ":", "ret", ":", "0", "iff", "execution", "succeeded", ".", "p", ":", "numpy", ".", "ndarray", "((", "nt", "nt2", ")", "dtype", "=", "ftype", "(", "=", "=", "f4", "by", "default", "))", ".", "Probability", "function", "from", "for", "recommended", "combination", "of", "multiple", "tests", ".", "For", "more", "information", "on", "tests", "see", "paper", ".", "ftype", "can", "be", "found", "in", "auto", ".", "py", ".", "Example", ":", "see", "findr", ".", "examples", ".", "geuvadis5" ]
python
train
68.714286
pseudo-lang/pseudo
pseudo/code_generator.py
https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/code_generator.py#L216-L387
def _parse_template(self, code, label):
    '''
    Parse smart indented templates

    Takes a template and returns a list of sub-templates, taking into
    account the indentation of the original code based on the first line
    indentation(0)

    Special treatment of whitespace: returns special Offset and
    INTERNAL_WHITESPACE, so the generation can be configurable

    It auto detects the indentation width used, as the indent of the
    first indented line

    >>> indented("""
      def %<code>
        e =
        %<code2>
    """)
    ['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
     Offset(1), 'e', INTERNAL_WHITESPACE, '=', NEWLINE,
     Placeholder('code2', 1), NEWLINE]
    '''
    if isinstance(code, tuple):
        return tuple(self._parse_template(c, label) for c in code)
    elif isinstance(code, dict):
        return {k: self._parse_template(v, label) if k != '_key' else v
                for k, v in code.items()}
    elif not isinstance(code, str):
        return []

    lines = code.split('\n')
    parsed = []
    if len(lines) == 1:
        i = re.match(r'^( +)', lines[0])
        indent_size = len(i.group()) if i else 0
        indent = 1 if i else 0
        actual = lines
        base = 0
    else:
        base = len(re.match(r'^( *)', lines[1]).group())
        rebased = [line[base:] for line in lines]
        for line in rebased:
            i = re.match(r'^( +)', line)
            if i:
                indent_size = len(i.group())
                break
        else:
            indent_size = 0
        actual = rebased[1:]

    for line in actual:
        j = LINE_FIRS.match(line)
        indent = len(j.group()) // indent_size if j else 0
        if parsed:
            parsed.append(Offset(indent))
        in_placeholder = False
        in_action = False
        in_args = False
        in_string_arg = False
        in_double_arg = False
        in_type = False
        c = int(indent * indent_size)
        m = c
        placeholder = ''
        while m < len(line):
            # print(m, line[m], 'place:', in_placeholder, 'act:', in_action, 'a:', in_args, 's:', in_string_arg, yaml.dump(parsed))
            f = line[m]
            next_f = line[m + 1] if m < len(line) - 1 else None
            if f == '%' and not in_placeholder and next_f == '<':
                m += 2
                in_placeholder = True
                placeholder = ''
                continue
            elif f == ':' and in_placeholder:
                m += 1
                in_placeholder = False
                in_action = True
                action = ''
                continue
            elif f == ' ' and in_placeholder:
                m += 1
                continue
            elif f == ' ' and in_action:
                m += 1
                in_action = False
                in_args = True
                args = ['']
                continue
            elif f == ' ' and (in_string_arg or in_double_arg):
                args[-1] += f
                m += 1
                continue
            elif f == ' ' and in_args:
                m += 1
                args.append('')
                continue
            elif f == '\'' and in_args:
                m += 1
                if in_string_arg:
                    in_string_arg = False
                    if args[-1] == '\\n':
                        args[-1] = '\n'
                    args[-1] += f
                elif in_double_arg:
                    args[-1] += f
                else:
                    in_string_arg = True
                continue
            elif f == '"' and in_args:
                m += 1
                if in_double_arg:
                    in_double_arg = False
                    if args[-1] == '\\n':
                        args[-1] = '\n'
                    args[-1] += f
                elif in_string_arg:
                    args[-1] += f
                else:
                    in_double_arg = True  # open a double-quoted argument
                continue
            elif f == '>' and in_args and not in_string_arg and not in_double_arg:
                m += 1
                if args[-1] == '':
                    args = args[:-1]
                args = [arg[:-1] if arg[-1] == '\'' else int(arg) for arg in args]
                in_args = False
                parsed.append(Action(placeholder, action, args))
                continue
            elif f == '>' and in_action:
                m += 1
                in_action = False
                parsed.append(Action(placeholder, action, []))
            elif f == '>' and in_placeholder:
                m += 1
                q = None
                # if '.' in placeholder[1:]:
                #     input(placeholder)
                if placeholder[0] == '#':
                    q = Function(placeholder[1:])
                elif placeholder[0] == '@':
                    q = PseudoType(placeholder[1:].split('.'))
                elif placeholder[0] == '.':
                    q = SubTemplate(label, placeholder[1:])
                elif '.' in placeholder:
                    q = SubElement(placeholder.split('.'))
                else:
                    q = Placeholder(placeholder)
                in_placeholder = False
                parsed.append(q)
            elif f == ' ':
                m += 1
                parsed.append(INTERNAL_WHITESPACE)
            elif in_placeholder:
                m += 1
                placeholder += f
            elif in_action:
                m += 1
                action += f
            elif in_args:
                m += 1
                args[-1] += f
            else:
                m += 1
                if parsed and isinstance(parsed[-1], str):
                    parsed[-1] += f
                else:
                    parsed.append(f)
        if len(actual) > 1:
            parsed.append(NEWLINE)
    return parsed
[ "def", "_parse_template", "(", "self", ",", "code", ",", "label", ")", ":", "if", "isinstance", "(", "code", ",", "tuple", ")", ":", "return", "tuple", "(", "self", ".", "_parse_template", "(", "c", ",", "label", ")", "for", "c", "in", "code", ")", "elif", "isinstance", "(", "code", ",", "dict", ")", ":", "return", "{", "k", ":", "self", ".", "_parse_template", "(", "v", ",", "label", ")", "if", "k", "!=", "'_key'", "else", "v", "for", "k", ",", "v", "in", "code", ".", "items", "(", ")", "}", "elif", "not", "isinstance", "(", "code", ",", "str", ")", ":", "return", "[", "]", "lines", "=", "code", ".", "split", "(", "'\\n'", ")", "parsed", "=", "[", "]", "if", "len", "(", "lines", ")", "==", "1", ":", "i", "=", "re", ".", "match", "(", "r'^( +)'", ",", "lines", "[", "0", "]", ")", "indent_size", "=", "len", "(", "i", ".", "group", "(", ")", ")", "if", "i", "else", "0", "indent", "=", "1", "if", "i", "else", "0", "actual", "=", "lines", "base", "=", "0", "else", ":", "base", "=", "len", "(", "re", ".", "match", "(", "r'^( *)'", ",", "lines", "[", "1", "]", ")", ".", "group", "(", ")", ")", "rebased", "=", "[", "line", "[", "base", ":", "]", "for", "line", "in", "lines", "]", "for", "line", "in", "rebased", ":", "i", "=", "re", ".", "match", "(", "r'^( +)'", ",", "line", ")", "if", "i", ":", "indent_size", "=", "len", "(", "i", ".", "group", "(", ")", ")", "break", "else", ":", "indent_size", "=", "0", "actual", "=", "rebased", "[", "1", ":", "]", "for", "line", "in", "actual", ":", "j", "=", "LINE_FIRS", ".", "match", "(", "line", ")", "indent", "=", "len", "(", "j", ".", "group", "(", ")", ")", "//", "indent_size", "if", "j", "else", "0", "if", "parsed", ":", "parsed", ".", "append", "(", "Offset", "(", "indent", ")", ")", "in_placeholder", "=", "False", "in_action", "=", "False", "in_args", "=", "False", "in_string_arg", "=", "False", "in_double_arg", "=", "False", "in_type", "=", "False", "c", "=", "int", "(", "indent", "*", "indent_size", ")", "m", "=", "c", "placeholder", "=", "''", "while", "m", "<", "len", "(", "line", ")", ":", "# print(m, line[m], 'place:', in_placeholder, 'act:', in_action, 'a:', in_args, 's:', in_string_arg, yaml.dump(parsed))", "f", "=", "line", "[", "m", "]", "next_f", "=", "line", "[", "m", "+", "1", "]", "if", "m", "<", "len", "(", "line", ")", "-", "1", "else", "None", "if", "f", "==", "'%'", "and", "not", "in_placeholder", "and", "next_f", "==", "'<'", ":", "m", "+=", "2", "in_placeholder", "=", "True", "placeholder", "=", "''", "continue", "elif", "f", "==", "':'", "and", "in_placeholder", ":", "m", "+=", "1", "in_placeholder", "=", "False", "in_action", "=", "True", "action", "=", "''", "continue", "elif", "f", "==", "' '", "and", "in_placeholder", ":", "m", "+=", "1", "continue", "elif", "f", "==", "' '", "and", "in_action", ":", "m", "+=", "1", "in_action", "=", "False", "in_args", "=", "True", "args", "=", "[", "''", "]", "continue", "elif", "f", "==", "' '", "and", "(", "in_string_arg", "or", "in_double_arg", ")", ":", "args", "[", "-", "1", "]", "+=", "f", "m", "+=", "1", "continue", "elif", "f", "==", "' '", "and", "in_args", ":", "m", "+=", "1", "args", ".", "append", "(", "''", ")", "continue", "elif", "f", "==", "'\\''", "and", "in_args", ":", "m", "+=", "1", "if", "in_string_arg", ":", "in_string_arg", "=", "False", "if", "args", "[", "-", "1", "]", "==", "'\\\\n'", ":", "args", "[", "-", "1", "]", "=", "'\\n'", "args", "[", "-", "1", "]", "+=", "f", "elif", "in_double_arg", ":", "args", "[", "-", "1", "]", "+=", "f", "else", ":", 
"in_string_arg", "=", "True", "continue", "elif", "f", "==", "'\"'", "and", "in_args", ":", "m", "+=", "1", "if", "in_double_arg", ":", "in_double_arg", "=", "False", "if", "args", "[", "-", "1", "]", "==", "'\\\\n'", ":", "args", "[", "-", "1", "]", "=", "'\\n'", "args", "[", "-", "1", "]", "+=", "f", "elif", "in_string_arg", ":", "args", "[", "-", "1", "]", "+=", "f", "else", ":", "in_string_arg", "=", "True", "continue", "elif", "f", "==", "'>'", "and", "in_args", "and", "not", "in_string_arg", "and", "not", "in_double_arg", ":", "m", "+=", "1", "if", "args", "[", "-", "1", "]", "==", "''", ":", "args", "=", "args", "[", ":", "-", "1", "]", "args", "=", "[", "arg", "[", ":", "-", "1", "]", "if", "arg", "[", "-", "1", "]", "==", "'\\''", "else", "int", "(", "arg", ")", "for", "arg", "in", "args", "]", "in_args", "=", "False", "parsed", ".", "append", "(", "Action", "(", "placeholder", ",", "action", ",", "args", ")", ")", "continue", "elif", "f", "==", "'>'", "and", "in_action", ":", "m", "+=", "1", "in_action", "=", "False", "parsed", ".", "append", "(", "Action", "(", "placeholder", ",", "action", ",", "[", "]", ")", ")", "elif", "f", "==", "'>'", "and", "in_placeholder", ":", "m", "+=", "1", "q", "=", "None", "# if '.' in placeholder[1:]:", "# input(placeholder)", "if", "placeholder", "[", "0", "]", "==", "'#'", ":", "q", "=", "Function", "(", "placeholder", "[", "1", ":", "]", ")", "elif", "placeholder", "[", "0", "]", "==", "'@'", ":", "q", "=", "PseudoType", "(", "placeholder", "[", "1", ":", "]", ".", "split", "(", "'.'", ")", ")", "elif", "placeholder", "[", "0", "]", "==", "'.'", ":", "q", "=", "SubTemplate", "(", "label", ",", "placeholder", "[", "1", ":", "]", ")", "elif", "'.'", "in", "placeholder", ":", "q", "=", "SubElement", "(", "placeholder", ".", "split", "(", "'.'", ")", ")", "else", ":", "q", "=", "Placeholder", "(", "placeholder", ")", "in_placeholder", "=", "False", "parsed", ".", "append", "(", "q", ")", "elif", "f", "==", "' '", ":", "m", "+=", "1", "parsed", ".", "append", "(", "INTERNAL_WHITESPACE", ")", "elif", "in_placeholder", ":", "m", "+=", "1", "placeholder", "+=", "f", "elif", "in_action", ":", "m", "+=", "1", "action", "+=", "f", "elif", "in_args", ":", "m", "+=", "1", "args", "[", "-", "1", "]", "+=", "f", "else", ":", "m", "+=", "1", "if", "parsed", "and", "isinstance", "(", "parsed", "[", "-", "1", "]", ",", "str", ")", ":", "parsed", "[", "-", "1", "]", "+=", "f", "else", ":", "parsed", ".", "append", "(", "f", ")", "if", "len", "(", "actual", ")", ">", "1", ":", "parsed", ".", "append", "(", "NEWLINE", ")", "return", "parsed" ]
Parse smart indented templates

Takes a template and returns a list of sub-templates, taking into account
the indentation of the original code based on the first line indentation(0)

Special treatment of whitespace: returns special Offset and
INTERNAL_WHITESPACE, so the generation can be configurable

It auto detects the indentation width used, as the indent of the first
indented line

>>> indented("""
  def %<code>
    e =
    %<code2>
""")
['def', INTERNAL_WHITESPACE, Placeholder('code', 0), NEWLINE,
 Offset(1), 'e', INTERNAL_WHITESPACE, '=', NEWLINE,
 Placeholder('code2', 1), NEWLINE]
[ "Pare", "smart", "indented", "templates" ]
python
train
37.180233
googleapis/oauth2client
oauth2client/contrib/django_util/__init__.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/django_util/__init__.py#L398-L413
def _redirect_with_params(url_name, *args, **kwargs):
    """Helper method to create a redirect response with URL params.

    This builds a redirect string that converts kwargs into a
    query string.

    Args:
        url_name: The name of the url to redirect to.
        kwargs: the query string param and their values to build.

    Returns:
        A properly formatted redirect string.
    """
    url = urlresolvers.reverse(url_name, args=args)
    params = parse.urlencode(kwargs, True)
    return "{0}?{1}".format(url, params)
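For illustration, with a hypothetical URL name 'oauth2:return' that resolves to /oauth2/return/:

redirect = _redirect_with_params('oauth2:return',
                                 return_url='/profile/', state='abc123')
# -> a redirect string like '/oauth2/return/?return_url=%2Fprofile%2F&state=abc123'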
[ "def", "_redirect_with_params", "(", "url_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url", "=", "urlresolvers", ".", "reverse", "(", "url_name", ",", "args", "=", "args", ")", "params", "=", "parse", ".", "urlencode", "(", "kwargs", ",", "True", ")", "return", "\"{0}?{1}\"", ".", "format", "(", "url", ",", "params", ")" ]
Helper method to create a redirect response with URL params.

This builds a redirect string that converts kwargs into a
query string.

Args:
    url_name: The name of the url to redirect to.
    kwargs: the query string param and their values to build.

Returns:
    A properly formatted redirect string.
[ "Helper", "method", "to", "create", "a", "redirect", "response", "with", "URL", "params", "." ]
python
valid
32.625
fhcrc/seqmagick
seqmagick/transform.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L310-L327
def _reverse_annotations(old_record, new_record):
    """
    Copy annotations from old_record to new_record, reversing any
    lists / tuples / strings.
    """
    # Copy the annotations over
    for k, v in list(old_record.annotations.items()):
        # Trim if appropriate
        if isinstance(v, (tuple, list)) and len(v) == len(old_record):
            assert len(v) == len(old_record)
            v = v[::-1]
        new_record.annotations[k] = v

    # Letter annotations must be lists / tuples / strings of the same
    # length as the sequence
    for k, v in list(old_record.letter_annotations.items()):
        assert len(v) == len(old_record)
        new_record.letter_annotations[k] = v[::-1]
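A minimal check of the per-letter reversal, assuming Biopython's SeqRecord (the record type this module transforms):

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

old = SeqRecord(Seq('ACGT'), id='r1',
                letter_annotations={'phred_quality': [30, 31, 32, 33]})
new = SeqRecord(old.seq.reverse_complement(), id='r1')
_reverse_annotations(old, new)
assert new.letter_annotations['phred_quality'] == [33, 32, 31, 30]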
[ "def", "_reverse_annotations", "(", "old_record", ",", "new_record", ")", ":", "# Copy the annotations over", "for", "k", ",", "v", "in", "list", "(", "old_record", ".", "annotations", ".", "items", "(", ")", ")", ":", "# Trim if appropriate", "if", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "v", ")", "==", "len", "(", "old_record", ")", ":", "assert", "len", "(", "v", ")", "==", "len", "(", "old_record", ")", "v", "=", "v", "[", ":", ":", "-", "1", "]", "new_record", ".", "annotations", "[", "k", "]", "=", "v", "# Letter annotations must be lists / tuples / strings of the same", "# length as the sequence", "for", "k", ",", "v", "in", "list", "(", "old_record", ".", "letter_annotations", ".", "items", "(", ")", ")", ":", "assert", "len", "(", "v", ")", "==", "len", "(", "old_record", ")", "new_record", ".", "letter_annotations", "[", "k", "]", "=", "v", "[", ":", ":", "-", "1", "]" ]
Copy annotations from old_record to new_record, reversing any lists / tuples / strings.
[ "Copy", "annotations", "form", "old_record", "to", "new_record", "reversing", "any", "lists", "/", "tuples", "/", "strings", "." ]
python
train
38.388889
projectshift/shift-schema
shiftschema/result.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/result.py#L298-L312
def get_messages(self, locale=None):
    """ Get a dictionary of translated messages """
    if locale is None:
        locale = self.locale

    if self.translator:
        def translate(error):
            return self.translator.translate(error, locale)
    else:
        def translate(error):
            return error

    errors = deepcopy(self.errors)
    errors = self._translate_errors(errors, translate)
    return errors
[ "def", "get_messages", "(", "self", ",", "locale", "=", "None", ")", ":", "if", "locale", "is", "None", ":", "locale", "=", "self", ".", "locale", "if", "self", ".", "translator", ":", "def", "translate", "(", "error", ")", ":", "return", "self", ".", "translator", ".", "translate", "(", "error", ",", "locale", ")", "else", ":", "def", "translate", "(", "error", ")", ":", "return", "error", "errors", "=", "deepcopy", "(", "self", ".", "errors", ")", "errors", "=", "self", ".", "_translate_errors", "(", "errors", ",", "translate", ")", "return", "errors" ]
Get a dictionary of translated messages
[ "Get", "a", "dictionary", "of", "translated", "messages" ]
python
train
30.866667
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L293-L318
def _euler_to_q(self, euler):
    """
    Create q array from euler angles

    :param euler: array [roll, pitch, yaw] in rad
    :returns: array q which represents a quaternion [w, x, y, z]
    """
    assert(len(euler) == 3)
    phi = euler[0]
    theta = euler[1]
    psi = euler[2]
    c_phi_2 = np.cos(phi / 2)
    s_phi_2 = np.sin(phi / 2)
    c_theta_2 = np.cos(theta / 2)
    s_theta_2 = np.sin(theta / 2)
    c_psi_2 = np.cos(psi / 2)
    s_psi_2 = np.sin(psi / 2)
    q = np.zeros(4)
    q[0] = (c_phi_2 * c_theta_2 * c_psi_2 +
            s_phi_2 * s_theta_2 * s_psi_2)
    q[1] = (s_phi_2 * c_theta_2 * c_psi_2 -
            c_phi_2 * s_theta_2 * s_psi_2)
    q[2] = (c_phi_2 * s_theta_2 * c_psi_2 +
            s_phi_2 * c_theta_2 * s_psi_2)
    q[3] = (c_phi_2 * c_theta_2 * s_psi_2 -
            s_phi_2 * s_theta_2 * c_psi_2)
    return q
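A quick numeric sanity check of the half-angle formula (pure NumPy, independent of the class):

import numpy as np

# roll = pitch = 0, yaw = pi/2: only the psi terms survive, giving
# q = [cos(psi/2), 0, 0, sin(psi/2)] ~= [0.7071, 0, 0, 0.7071]
psi = np.pi / 2
expected = np.array([np.cos(psi / 2), 0.0, 0.0, np.sin(psi / 2)])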
[ "def", "_euler_to_q", "(", "self", ",", "euler", ")", ":", "assert", "(", "len", "(", "euler", ")", "==", "3", ")", "phi", "=", "euler", "[", "0", "]", "theta", "=", "euler", "[", "1", "]", "psi", "=", "euler", "[", "2", "]", "c_phi_2", "=", "np", ".", "cos", "(", "phi", "/", "2", ")", "s_phi_2", "=", "np", ".", "sin", "(", "phi", "/", "2", ")", "c_theta_2", "=", "np", ".", "cos", "(", "theta", "/", "2", ")", "s_theta_2", "=", "np", ".", "sin", "(", "theta", "/", "2", ")", "c_psi_2", "=", "np", ".", "cos", "(", "psi", "/", "2", ")", "s_psi_2", "=", "np", ".", "sin", "(", "psi", "/", "2", ")", "q", "=", "np", ".", "zeros", "(", "4", ")", "q", "[", "0", "]", "=", "(", "c_phi_2", "*", "c_theta_2", "*", "c_psi_2", "+", "s_phi_2", "*", "s_theta_2", "*", "s_psi_2", ")", "q", "[", "1", "]", "=", "(", "s_phi_2", "*", "c_theta_2", "*", "c_psi_2", "-", "c_phi_2", "*", "s_theta_2", "*", "s_psi_2", ")", "q", "[", "2", "]", "=", "(", "c_phi_2", "*", "s_theta_2", "*", "c_psi_2", "+", "s_phi_2", "*", "c_theta_2", "*", "s_psi_2", ")", "q", "[", "3", "]", "=", "(", "c_phi_2", "*", "c_theta_2", "*", "s_psi_2", "-", "s_phi_2", "*", "s_theta_2", "*", "c_psi_2", ")", "return", "q" ]
Create q array from euler angles

:param euler: array [roll, pitch, yaw] in rad
:returns: array q which represents a quaternion [w, x, y, z]
[ "Create", "q", "array", "from", "euler", "angles", ":", "param", "euler", ":", "array", "[", "roll", "pitch", "yaw", "]", "in", "rad", ":", "returns", ":", "array", "q", "which", "represents", "a", "quaternion", "[", "w", "x", "y", "z", "]" ]
python
train
35.692308
tensorflow/lucid
lucid/optvis/objectives.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/optvis/objectives.py#L214-L224
def direction_cossim(layer, vec, batch=None):
    """Visualize a direction (cosine similarity)"""
    def inner(T):
        act_mags = tf.sqrt(tf.reduce_sum(T(layer)**2, -1, keepdims=True))
        vec_mag = tf.sqrt(tf.reduce_sum(vec**2))
        mags = act_mags * vec_mag
        if batch is None:
            return tf.reduce_mean(T(layer) * vec.reshape([1, 1, 1, -1]) / mags)
        else:
            return tf.reduce_mean(T(layer)[batch] * vec.reshape([1, 1, -1]) / mags)
    return inner
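A hedged sketch of how such an objective is consumed; the layer name and channel count are placeholders, and the commented render call follows lucid's documented pattern:

import numpy as np

vec = np.random.randn(508).astype(np.float32)  # placeholder: must match the layer's channel count
obj = direction_cossim('mixed4a', vec)         # placeholder layer name
# images = render.render_vis(model, obj)       # with a loaded lucid modelzoo model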
[ "def", "direction_cossim", "(", "layer", ",", "vec", ",", "batch", "=", "None", ")", ":", "def", "inner", "(", "T", ")", ":", "act_mags", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "T", "(", "layer", ")", "**", "2", ",", "-", "1", ",", "keepdims", "=", "True", ")", ")", "vec_mag", "=", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "vec", "**", "2", ")", ")", "mags", "=", "act_mags", "*", "vec_mag", "if", "batch", "is", "None", ":", "return", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "*", "vec", ".", "reshape", "(", "[", "1", ",", "1", ",", "1", ",", "-", "1", "]", ")", "/", "mags", ")", "else", ":", "return", "tf", ".", "reduce_mean", "(", "T", "(", "layer", ")", "[", "batch", "]", "*", "vec", ".", "reshape", "(", "[", "1", ",", "1", ",", "-", "1", "]", ")", "/", "mags", ")", "return", "inner" ]
Visualize a direction (cosine similarity)
[ "Visualize", "a", "direction", "(", "cossine", "similarity", ")" ]
python
train
40.545455
zarr-developers/zarr
zarr/core.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2244-L2302
def astype(self, dtype):
    """Returns a view that does on the fly type conversion of the
    underlying data.

    Parameters
    ----------
    dtype : string or dtype
        NumPy dtype.

    Notes
    -----
    This method returns a new Array object which is a view on the same
    underlying chunk data. Modifying any data via the view is currently
    not permitted and will result in an error. This is an experimental
    feature and its behavior is subject to change in the future.

    See Also
    --------
    Array.view

    Examples
    --------
    >>> import zarr
    >>> import numpy as np
    >>> data = np.arange(100, dtype=np.uint8)
    >>> a = zarr.array(data, chunks=10)
    >>> a[:]
    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
           16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
           32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
           48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
           64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
           80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
           96, 97, 98, 99], dtype=uint8)
    >>> v = a.astype(np.float32)
    >>> v.is_view
    True
    >>> v[:]
    array([  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.,
            10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.,
            20.,  21.,  22.,  23.,  24.,  25.,  26.,  27.,  28.,  29.,
            30.,  31.,  32.,  33.,  34.,  35.,  36.,  37.,  38.,  39.,
            40.,  41.,  42.,  43.,  44.,  45.,  46.,  47.,  48.,  49.,
            50.,  51.,  52.,  53.,  54.,  55.,  56.,  57.,  58.,  59.,
            60.,  61.,  62.,  63.,  64.,  65.,  66.,  67.,  68.,  69.,
            70.,  71.,  72.,  73.,  74.,  75.,  76.,  77.,  78.,  79.,
            80.,  81.,  82.,  83.,  84.,  85.,  86.,  87.,  88.,  89.,
            90.,  91.,  92.,  93.,  94.,  95.,  96.,  97.,  98.,  99.],
          dtype=float32)
    """
    dtype = np.dtype(dtype)

    filters = []
    if self._filters:
        filters.extend(self._filters)
    filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))

    return self.view(filters=filters, dtype=dtype, read_only=True)
[ "def", "astype", "(", "self", ",", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "filters", "=", "[", "]", "if", "self", ".", "_filters", ":", "filters", ".", "extend", "(", "self", ".", "_filters", ")", "filters", ".", "insert", "(", "0", ",", "AsType", "(", "encode_dtype", "=", "self", ".", "_dtype", ",", "decode_dtype", "=", "dtype", ")", ")", "return", "self", ".", "view", "(", "filters", "=", "filters", ",", "dtype", "=", "dtype", ",", "read_only", "=", "True", ")" ]
Returns a view that does on the fly type conversion of the underlying data.

Parameters
----------
dtype : string or dtype
    NumPy dtype.

Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.

See Also
--------
Array.view

Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
       48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
       80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
       96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.,
        10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.,
        20.,  21.,  22.,  23.,  24.,  25.,  26.,  27.,  28.,  29.,
        30.,  31.,  32.,  33.,  34.,  35.,  36.,  37.,  38.,  39.,
        40.,  41.,  42.,  43.,  44.,  45.,  46.,  47.,  48.,  49.,
        50.,  51.,  52.,  53.,  54.,  55.,  56.,  57.,  58.,  59.,
        60.,  61.,  62.,  63.,  64.,  65.,  66.,  67.,  68.,  69.,
        70.,  71.,  72.,  73.,  74.,  75.,  76.,  77.,  78.,  79.,
        80.,  81.,  82.,  83.,  84.,  85.,  86.,  87.,  88.,  89.,
        90.,  91.,  92.,  93.,  94.,  95.,  96.,  97.,  98.,  99.],
      dtype=float32)
[ "Returns", "a", "view", "that", "does", "on", "the", "fly", "type", "conversion", "of", "the", "underlying", "data", "." ]
python
train
40.745763
dougalsutherland/skl-groups
skl_groups/kernels/transform.py
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L400-L421
def fit(self, X, y=None):
    '''
    Learn the linear transformation to flipped eigenvalues.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    # TODO: only get negative eigs somehow?
    memory = get_memory(self.memory)
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    vals = vals[:, None]
    self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
    return self
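The learned map is flip_ = V sign(L) V.T for X = V L V.T, so flip_ . X equals V |L| V.T: the same eigenvectors with all eigenvalues made non-negative. A standalone NumPy check of that identity:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
X = (A + A.T) / 2                                 # symmetric, generally indefinite
vals, vecs = np.linalg.eigh(X)
flip = vecs.dot(np.sign(vals)[:, None] * vecs.T)  # V sign(L) V.T
flipped = flip.dot(X)                             # equals V |L| V.T
assert np.allclose(np.linalg.eigvalsh(flipped), np.sort(np.abs(vals)))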
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "n", "=", "X", ".", "shape", "[", "0", "]", "if", "X", ".", "shape", "!=", "(", "n", ",", "n", ")", ":", "raise", "TypeError", "(", "\"Input must be a square matrix.\"", ")", "# TODO: only get negative eigs somehow?", "memory", "=", "get_memory", "(", "self", ".", "memory", ")", "vals", ",", "vecs", "=", "memory", ".", "cache", "(", "scipy", ".", "linalg", ".", "eigh", ",", "ignore", "=", "[", "'overwrite_a'", "]", ")", "(", "X", ",", "overwrite_a", "=", "not", "self", ".", "copy", ")", "vals", "=", "vals", "[", ":", ",", "None", "]", "self", ".", "flip_", "=", "np", ".", "dot", "(", "vecs", ",", "np", ".", "sign", "(", "vals", ")", "*", "vecs", ".", "T", ")", "return", "self" ]
Learn the linear transformation to flipped eigenvalues.

Parameters
----------
X : array, shape [n, n]
    The *symmetric* input similarities. If X is asymmetric, it will be
    treated as if it were symmetric based on its lower-triangular part.
[ "Learn", "the", "linear", "transformation", "to", "flipped", "eigenvalues", "." ]
python
valid
34.318182
CivicSpleen/ambry
ambry/library/search_backends/postgres_backend.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search_backends/postgres_backend.py#L525-L536
def all(self):
    """ Returns list with vids of all indexed partitions. """
    partitions = []

    query = text("""
        SELECT dataset_vid, vid
        FROM partition_index;""")

    for result in self.execute(query):
        dataset_vid, vid = result
        partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
    return partitions
[ "def", "all", "(", "self", ")", ":", "partitions", "=", "[", "]", "query", "=", "text", "(", "\"\"\"\n SELECT dataset_vid, vid\n FROM partition_index;\"\"\"", ")", "for", "result", "in", "self", ".", "execute", "(", "query", ")", ":", "dataset_vid", ",", "vid", "=", "result", "partitions", ".", "append", "(", "PartitionSearchResult", "(", "dataset_vid", "=", "dataset_vid", ",", "vid", "=", "vid", ",", "score", "=", "1", ")", ")", "return", "partitions" ]
Returns list with vids of all indexed partitions.
[ "Returns", "list", "with", "vids", "of", "all", "indexed", "partitions", "." ]
python
train
33.083333
tornadoweb/tornado
tornado/escape.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/escape.py#L186-L196
def utf8(value: Union[None, str, bytes]) -> Optional[bytes]:  # noqa: F811
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if not isinstance(value, unicode_type):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.encode("utf-8")
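Behaviour at a glance (these three cases are the whole contract):

assert utf8('caf\u00e9') == b'caf\xc3\xa9'     # str is encoded as UTF-8
assert utf8(b'caf\xc3\xa9') == b'caf\xc3\xa9'  # bytes pass through unchanged
assert utf8(None) is None                      # None passes through unchanged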
[ "def", "utf8", "(", "value", ":", "Union", "[", "None", ",", "str", ",", "bytes", "]", ")", "->", "Optional", "[", "bytes", "]", ":", "# noqa: F811", "if", "isinstance", "(", "value", ",", "_UTF8_TYPES", ")", ":", "return", "value", "if", "not", "isinstance", "(", "value", ",", "unicode_type", ")", ":", "raise", "TypeError", "(", "\"Expected bytes, unicode, or None; got %r\"", "%", "type", "(", "value", ")", ")", "return", "value", ".", "encode", "(", "\"utf-8\"", ")" ]
Converts a string argument to a byte string.

If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
[ "Converts", "a", "string", "argument", "to", "a", "byte", "string", "." ]
python
train
44.545455
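Usage sketch for tornado.escape.utf8, exercising all three documented paths:

from tornado.escape import utf8

assert utf8(u"caf\u00e9") == b"caf\xc3\xa9"        # str is encoded as UTF-8
assert utf8(b"already-bytes") == b"already-bytes"  # bytes pass through
assert utf8(None) is None                          # None passes through
# utf8(123) would raise TypeError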
MaxHalford/prince
prince/ca.py
https://github.com/MaxHalford/prince/blob/714c9cdfc4d9f8823eabf550a23ad01fe87c50d7/prince/ca.py#L82-L85
def eigenvalues_(self):
        """The eigenvalues associated with each principal component."""
        utils.validation.check_is_fitted(self, 's_')
        return np.square(self.s_).tolist()
[ "def", "eigenvalues_", "(", "self", ")", ":", "utils", ".", "validation", ".", "check_is_fitted", "(", "self", ",", "'s_'", ")", "return", "np", ".", "square", "(", "self", ".", "s_", ")", ".", "tolist", "(", ")" ]
The eigenvalues associated with each principal component.
[ "The", "eigenvalues", "associated", "with", "each", "principal", "component", "." ]
python
train
47
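A hedged NumPy sketch of the relation this property encodes: the eigenvalues of a correspondence analysis are the squares of the singular values s_ kept from the underlying SVD:

import numpy as np

S = np.linalg.svd(np.random.rand(5, 4), compute_uv=False)  # singular values
eigenvalues = np.square(S).tolist()  # mirrors what eigenvalues_ returns
print(eigenvalues)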
StorjOld/pyp2p
pyp2p/nat_pmp.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/nat_pmp.py#L361-L381
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
                 retry=9, use_exception=True):
    """A high-level wrapper to map_port() that requests a mapping for a
    public UDP port on the NAT to a private UDP port on this host.
    Returns the complete response on success.

        public_port - the public port of the mapping requested
        private_port - the private port of the mapping requested
        lifetime - the duration of the mapping in seconds.
                   Defaults to 3600, per specification.
        gateway_ip - the IP to the NAT-PMP compatible gateway.
                     Defaults to using auto-detection function
                     get_gateway_addr()
        retry - the number of times to retry the request if unsuccessful.
                Defaults to 9 as per specification.
        use_exception - throw an exception if an error result is received
                        from the gateway. Defaults to True.
    """
    return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
                    gateway_ip=gateway_ip, retry=retry,
                    use_exception=use_exception)
[ "def", "map_udp_port", "(", "public_port", ",", "private_port", ",", "lifetime", "=", "3600", ",", "gateway_ip", "=", "None", ",", "retry", "=", "9", ",", "use_exception", "=", "True", ")", ":", "return", "map_port", "(", "NATPMP_PROTOCOL_UDP", ",", "public_port", ",", "private_port", ",", "lifetime", ",", "gateway_ip", "=", "gateway_ip", ",", "retry", "=", "retry", ",", "use_exception", "=", "use_exception", ")" ]
A high-level wrapper to map_port() that requests a mapping for a public UDP
port on the NAT to a private UDP port on this host. Returns the complete
response on success.

public_port - the public port of the mapping requested
private_port - the private port of the mapping requested
lifetime - the duration of the mapping in seconds. Defaults to 3600,
           per specification.
gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using
             auto-detection function get_gateway_addr()
retry - the number of times to retry the request if unsuccessful.
        Defaults to 9 as per specification.
use_exception - throw an exception if an error result is received from
                the gateway. Defaults to True.
[ "A", "high", "-", "level", "wrapper", "to", "map_port", "()", "that", "requests", "a", "mapping", "for", "a", "public", "UDP", "port", "on", "the", "NAT", "to", "a", "private", "UDP", "port", "on", "this", "host", ".", "Returns", "the", "complete", "response", "on", "success", ".", "public_port", "-", "the", "public", "port", "of", "the", "mapping", "requested", "private_port", "-", "the", "private", "port", "of", "the", "mapping", "requested", "lifetime", "-", "the", "duration", "of", "the", "mapping", "in", "seconds", ".", "Defaults", "to", "3600", "per", "specification", ".", "gateway_ip", "-", "the", "IP", "to", "the", "NAT", "-", "PMP", "compatible", "gateway", ".", "Defaults", "to", "using", "auto", "-", "detection", "function", "get_gateway_addr", "()", "retry", "-", "the", "number", "of", "times", "to", "retry", "the", "request", "if", "unsuccessful", ".", "Defaults", "to", "9", "as", "per", "specification", ".", "use_exception", "-", "throw", "an", "exception", "if", "an", "error", "result", "is", "received", "from", "the", "gateway", ".", "Defaults", "to", "True", "." ]
python
train
58.380952
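A hedged usage sketch: this needs a NAT-PMP capable gateway on the local network, so it will typically raise elsewhere. The port numbers are arbitrary and the import path is assumed from the record's module path:

from pyp2p import nat_pmp

try:
    response = nat_pmp.map_udp_port(60009, 60009, lifetime=3600)
    print(response)
except Exception as exc:  # no gateway found or mapping refused
    print("NAT-PMP mapping failed:", exc)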
chewse/djangorestframework-signed-permissions
signedpermissions/signing.py
https://github.com/chewse/djangorestframework-signed-permissions/blob/b1cc4c57999fc5be8361f60f0ada1d777b27feab/signedpermissions/signing.py#L35-L38
def unsign_filters_and_actions(sign, dotted_model_name):
    """Return the list of filters and actions for dotted_model_name."""
    permissions = signing.loads(sign)
    return permissions.get(dotted_model_name, [])
[ "def", "unsign_filters_and_actions", "(", "sign", ",", "dotted_model_name", ")", ":", "permissions", "=", "signing", ".", "loads", "(", "sign", ")", "return", "permissions", ".", "get", "(", "dotted_model_name", ",", "[", "]", ")" ]
Return the list of filters and actions for dotted_model_name.
[ "Return", "the", "list", "of", "filters", "and", "actions", "for", "dotted_model_name", "." ]
python
train
53.25
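A hedged round-trip sketch using Django's signing module, which this helper wraps; the settings.configure() call and key are illustrative, for running outside a project:

from django.conf import settings
from django.core import signing

settings.configure(SECRET_KEY='not-a-real-key')

perms = {'app.Model': [{'filters': {'owner': 1}, 'actions': ['retrieve']}]}
sign = signing.dumps(perms)
# unsign_filters_and_actions(sign, 'app.Model') would return the list below
assert signing.loads(sign).get('app.Model', []) == perms['app.Model']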
Ehco1996/lazySpider
lazyspider/lazystore.py
https://github.com/Ehco1996/lazySpider/blob/6ae43fec7f784d7e515379e79bcaff06b7fd5ade/lazyspider/lazystore.py#L183-L199
def find_by_fields(self, table, queryset={}):
        '''
        Query the database for records matching multiple conditions.

        Args:
            table: table name (str)
            queryset: dict mapping field names (keys) to values

        Returns:
            on success: [dict] the stored records
            on failure: -1, with the returned error message printed
        '''
        querys = ""
        for k, v in queryset.items():
            querys += "{} = '{}' and ".format(k, v)
        sql = "select * from {} where {} ".format(
            table, querys[:-4])
        res = self.query(sql)
        return res
[ "def", "find_by_fields", "(", "self", ",", "table", ",", "queryset", "=", "{", "}", ")", ":", "querys", "=", "\"\"", "for", "k", ",", "v", "in", "queryset", ".", "items", "(", ")", ":", "querys", "+=", "\"{} = '{}' and \"", ".", "format", "(", "k", ",", "v", ")", "sql", "=", "\"select * from {} where {} \"", ".", "format", "(", "table", ",", "querys", "[", ":", "-", "4", "]", ")", "res", "=", "self", ".", "query", "(", "sql", ")", "return", "res" ]
Query the database for records matching multiple conditions.

Args:
    table: table name (str)
    queryset: dict mapping field names (keys) to values

Returns:
    on success: [dict] the stored records
    on failure: -1, with the returned error message printed
[ "从数据库里查询", "符合多个条件的记录", "Args", ":", "table", ":", "表名字", "str", "queryset", ":", "key", "字段", "value", "值", "dict", "return", ":", "成功:", "[", "dict", "]", "保存的记录", "失败:", "-", "1", "并打印返回报错信息" ]
python
train
29.529412
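A hedged sketch of the SQL string the method assembles (querys[:-4] trims the trailing " and "). Because values are interpolated directly into the string, untrusted input could inject SQL; a parameterized query passed to the driver would be the safer construction:

queryset = {'name': 'bob', 'age': 20}
querys = ""
for k, v in queryset.items():
    querys += "{} = '{}' and ".format(k, v)
sql = "select * from {} where {} ".format('users', querys[:-4])
print(sql)  # select * from users where name = 'bob' and age = '20'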
djaodjin/djaodjin-deployutils
deployutils/apps/django/management/commands/pullapp.py
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/django/management/commands/pullapp.py#L200-L217
def migrate_all():
    """
    Create schema migrations for all apps specified in INSTALLED_APPS,
    then run a migrate command.
    """
    if 'south' in settings.INSTALLED_APPS:
        return _south_migrate_all()
    from django.core.management.commands import makemigrations, migrate
    schema_args = [sys.executable, 'makemigrations']
    for app in settings.INSTALLED_APPS:
        if not app.startswith('django'):
            schema_args += [app]
    schema_cmd = makemigrations.Command()
    schema_cmd.run_from_argv(schema_args)
    migrate_cmd = migrate.Command()
    sys.stderr.write("MIGRATE ALL!\n")
    return migrate_cmd.run_from_argv([sys.executable, 'migrate'])
[ "def", "migrate_all", "(", ")", ":", "if", "'south'", "in", "settings", ".", "INSTALLED_APPS", ":", "return", "_south_migrate_all", "(", ")", "from", "django", ".", "core", ".", "management", ".", "commands", "import", "makemigrations", ",", "migrate", "schema_args", "=", "[", "sys", ".", "executable", ",", "'makemigrations'", "]", "for", "app", "in", "settings", ".", "INSTALLED_APPS", ":", "if", "not", "app", ".", "startswith", "(", "'django'", ")", ":", "schema_args", "+=", "[", "app", "]", "schema_cmd", "=", "makemigrations", ".", "Command", "(", ")", "schema_cmd", ".", "run_from_argv", "(", "schema_args", ")", "migrate_cmd", "=", "migrate", ".", "Command", "(", ")", "sys", ".", "stderr", ".", "write", "(", "\"MIGRATE ALL!\\n\"", ")", "return", "migrate_cmd", ".", "run_from_argv", "(", "[", "sys", ".", "executable", ",", "'migrate'", "]", ")" ]
Create schema migrations for all apps specified in INSTALLED_APPS,
then run a migrate command.
[ "Create", "schema", "migrations", "for", "all", "apps", "specified", "in", "INSTALLED_APPS", "then", "run", "a", "migrate", "command", "." ]
python
train
36.888889
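A hedged note on the argv lists: run_from_argv treats them like a command line, so inside a configured project the equivalent and more direct calls would use Django's call_command (the app name below is illustrative):

from django.core.management import call_command

call_command('makemigrations', 'myapp')  # one entry per non-django app
call_command('migrate')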
gem/oq-engine
openquake/calculators/classical_risk.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/classical_risk.py#L28-L74
def classical_risk(riskinputs, riskmodel, param, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param param:
        dictionary of extra parameters
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    """
    result = dict(loss_curves=[], stat_curves=[])
    weights = [w['default'] for w in param['weights']]
    statnames, stats = zip(*param['stats'])
    for ri in riskinputs:
        A = len(ri.assets)
        L = len(riskmodel.lti)
        R = ri.hazard_getter.num_rlzs
        loss_curves = numpy.zeros((R, L, A), object)
        avg_losses = numpy.zeros((R, L, A))
        for out in riskmodel.gen_outputs(ri, monitor):
            r = out.rlzi
            for l, loss_type in enumerate(riskmodel.loss_types):
                # loss_curves has shape (A, C)
                for i, asset in enumerate(ri.assets):
                    loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
                    aid = asset['ordinal']
                    avg = scientific.average_loss(lc)
                    avg_losses[r, l, i] = avg
                    lcurve = (lc['loss'], lc['poe'], avg)
                    result['loss_curves'].append((l, r, aid, lcurve))

        # compute statistics
        for l, loss_type in enumerate(riskmodel.loss_types):
            for i, asset in enumerate(ri.assets):
                avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
                losses = loss_curves[0, l, i]['loss']
                all_poes = numpy.array(
                    [loss_curves[r, l, i]['poe'] for r in range(R)])
                poes_stats = compute_stats(all_poes, stats, weights)
                result['stat_curves'].append(
                    (l, asset['ordinal'], losses, poes_stats, avg_stats))
    if R == 1:  # the realization is the same as the mean
        del result['loss_curves']
    return result
[ "def", "classical_risk", "(", "riskinputs", ",", "riskmodel", ",", "param", ",", "monitor", ")", ":", "result", "=", "dict", "(", "loss_curves", "=", "[", "]", ",", "stat_curves", "=", "[", "]", ")", "weights", "=", "[", "w", "[", "'default'", "]", "for", "w", "in", "param", "[", "'weights'", "]", "]", "statnames", ",", "stats", "=", "zip", "(", "*", "param", "[", "'stats'", "]", ")", "for", "ri", "in", "riskinputs", ":", "A", "=", "len", "(", "ri", ".", "assets", ")", "L", "=", "len", "(", "riskmodel", ".", "lti", ")", "R", "=", "ri", ".", "hazard_getter", ".", "num_rlzs", "loss_curves", "=", "numpy", ".", "zeros", "(", "(", "R", ",", "L", ",", "A", ")", ",", "object", ")", "avg_losses", "=", "numpy", ".", "zeros", "(", "(", "R", ",", "L", ",", "A", ")", ")", "for", "out", "in", "riskmodel", ".", "gen_outputs", "(", "ri", ",", "monitor", ")", ":", "r", "=", "out", ".", "rlzi", "for", "l", ",", "loss_type", "in", "enumerate", "(", "riskmodel", ".", "loss_types", ")", ":", "# loss_curves has shape (A, C)", "for", "i", ",", "asset", "in", "enumerate", "(", "ri", ".", "assets", ")", ":", "loss_curves", "[", "out", ".", "rlzi", ",", "l", ",", "i", "]", "=", "lc", "=", "out", "[", "loss_type", "]", "[", "i", "]", "aid", "=", "asset", "[", "'ordinal'", "]", "avg", "=", "scientific", ".", "average_loss", "(", "lc", ")", "avg_losses", "[", "r", ",", "l", ",", "i", "]", "=", "avg", "lcurve", "=", "(", "lc", "[", "'loss'", "]", ",", "lc", "[", "'poe'", "]", ",", "avg", ")", "result", "[", "'loss_curves'", "]", ".", "append", "(", "(", "l", ",", "r", ",", "aid", ",", "lcurve", ")", ")", "# compute statistics", "for", "l", ",", "loss_type", "in", "enumerate", "(", "riskmodel", ".", "loss_types", ")", ":", "for", "i", ",", "asset", "in", "enumerate", "(", "ri", ".", "assets", ")", ":", "avg_stats", "=", "compute_stats", "(", "avg_losses", "[", ":", ",", "l", ",", "i", "]", ",", "stats", ",", "weights", ")", "losses", "=", "loss_curves", "[", "0", ",", "l", ",", "i", "]", "[", "'loss'", "]", "all_poes", "=", "numpy", ".", "array", "(", "[", "loss_curves", "[", "r", ",", "l", ",", "i", "]", "[", "'poe'", "]", "for", "r", "in", "range", "(", "R", ")", "]", ")", "poes_stats", "=", "compute_stats", "(", "all_poes", ",", "stats", ",", "weights", ")", "result", "[", "'stat_curves'", "]", ".", "append", "(", "(", "l", ",", "asset", "[", "'ordinal'", "]", ",", "losses", ",", "poes_stats", ",", "avg_stats", ")", ")", "if", "R", "==", "1", ":", "# the realization is the same as the mean", "del", "result", "[", "'loss_curves'", "]", "return", "result" ]
Compute and return the average losses for each asset.

:param riskinputs:
    :class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
    a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
    dictionary of extra parameters
:param monitor:
    :class:`openquake.baselib.performance.Monitor` instance
[ "Compute", "and", "return", "the", "average", "losses", "for", "each", "asset", "." ]
python
train
43.638298
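A hedged NumPy sketch of the statistics step: per-realization average losses for one asset/loss type are collapsed into weighted statistics (here just a weighted mean; openquake's compute_stats supports more statistics than this):

import numpy as np

avg_losses = np.array([0.10, 0.12, 0.20])  # one value per realization (R = 3)
weights = np.array([0.5, 0.3, 0.2])        # realization weights
print(np.average(avg_losses, weights=weights))  # weighted mean across rlzs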
pyvisa/pyvisa
pyvisa/ctwrapper/functions.py
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/ctwrapper/functions.py#L466-L486
def find_resources(library, session, query):
    """Queries a VISA system to locate the resources associated with a
    specified interface.

    Corresponds to viFindRsrc function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session (unused, just to
                    uniform signatures).
    :param query: A regular expression followed by an optional logical
                  expression. Use '?*' for all.
    :return: find_list, return_counter, instrument_description,
             return value of the library call.
    :rtype: ViFindList, int, unicode (Py2) or str (Py3),
            :class:`pyvisa.constants.StatusCode`
    """
    find_list = ViFindList()
    return_counter = ViUInt32()
    instrument_description = create_string_buffer(constants.VI_FIND_BUFLEN)
    # [ViSession, ViString, ViPFindList, ViPUInt32, ViAChar]
    # ViString converts from (str, unicode, bytes) to bytes
    ret = library.viFindRsrc(session, query, byref(find_list),
                             byref(return_counter), instrument_description)
    return (find_list, return_counter.value,
            buffer_to_text(instrument_description), ret)
[ "def", "find_resources", "(", "library", ",", "session", ",", "query", ")", ":", "find_list", "=", "ViFindList", "(", ")", "return_counter", "=", "ViUInt32", "(", ")", "instrument_description", "=", "create_string_buffer", "(", "constants", ".", "VI_FIND_BUFLEN", ")", "# [ViSession, ViString, ViPFindList, ViPUInt32, ViAChar]", "# ViString converts from (str, unicode, bytes) to bytes", "ret", "=", "library", ".", "viFindRsrc", "(", "session", ",", "query", ",", "byref", "(", "find_list", ")", ",", "byref", "(", "return_counter", ")", ",", "instrument_description", ")", "return", "find_list", ",", "return_counter", ".", "value", ",", "buffer_to_text", "(", "instrument_description", ")", ",", "ret" ]
Queries a VISA system to locate the resources associated with a
specified interface.

Corresponds to viFindRsrc function of the VISA library.

:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session (unused, just to
                uniform signatures).
:param query: A regular expression followed by an optional logical
              expression. Use '?*' for all.
:return: find_list, return_counter, instrument_description, return value
         of the library call.
:rtype: ViFindList, int, unicode (Py2) or str (Py3),
        :class:`pyvisa.constants.StatusCode`
[ "Queries", "a", "VISA", "system", "to", "locate", "the", "resources", "associated", "with", "a", "specified", "interface", "." ]
python
train
54.619048
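A hedged sketch of the high-level counterpart: pyvisa's ResourceManager drives the same viFindRsrc machinery without manual ctypes buffers. It needs an installed VISA backend, so it may raise on a bare machine:

import pyvisa

rm = pyvisa.ResourceManager()
print(rm.list_resources('?*'))  # same '?*' catch-all query as above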
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/interface/ve/ipv6/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/interface/ve/ipv6/__init__.py#L347-L368
def _set_ipv6_ve_intf_cmds(self, v, load=False):
    """
    Setter method for ipv6_ve_intf_cmds, mapped from YANG variable
    /rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_ve_intf_cmds() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_ve_intf_cmds must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name="ipv6-ve-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)""",
        })

    self.__ipv6_ve_intf_cmds = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_ipv6_ve_intf_cmds", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "ipv6_ve_intf_cmds", ".", "ipv6_ve_intf_cmds", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"ipv6-ve-intf-cmds\"", ",", "rest_name", "=", "\"\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'ipv6 mlds ve interface commands'", ",", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'MldsVe'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mld-snooping'", ",", "defining_module", "=", "'brocade-mld-snooping'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ipv6_ve_intf_cmds must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=ipv6_ve_intf_cmds.ipv6_ve_intf_cmds, is_container='container', presence=False, yang_name=\"ipv6-ve-intf-cmds\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ipv6 mlds ve interface commands', u'cli-drop-node-name': None, u'callpoint': u'MldsVe'}}, namespace='urn:brocade.com:mgmt:brocade-mld-snooping', defining_module='brocade-mld-snooping', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ipv6_ve_intf_cmds", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for ipv6_ve_intf_cmds, mapped from YANG variable
/rbridge_id/interface/ve/ipv6/ipv6_ve_intf_cmds (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_ve_intf_cmds is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_ve_intf_cmds() directly.
[ "Setter", "method", "for", "ipv6_ve_intf_cmds", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "interface", "/", "ve", "/", "ipv6", "/", "ipv6_ve_intf_cmds", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_ipv6_ve_intf_cmds", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_ipv6_ve_intf_cmds", "()", "directly", "." ]
python
train
83.090909
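A hedged usage sketch: assigning an incompatible value through the setter surfaces as a ValueError carrying the generated error dictionary. Here ipv6_obj stands in for a node built from the brocade-mld-snooping bindings:

try:
    ipv6_obj._set_ipv6_ve_intf_cmds("not-a-container")
except ValueError as err:
    print(err.args[0]['error-string'])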