Dataset preview (~389k rows of code/docstring pairs). Columns:
  Unnamed: 0 — int64, row index, 0 to 389k
  code — string, lengths 26 to 79.6k characters
  docstring — string, lengths 1 to 46.9k characters
385,500
def expand_abbreviations(template, abbreviations):
    if template in abbreviations:
        return abbreviations[template]
    # Split on the first colon, e.g. "gh:user/repo" -> ("gh", ":", "user/repo").
    prefix, sep, rest = template.partition(':')
    if prefix in abbreviations:
        return abbreviations[prefix].format(rest)
    return template
Expand abbreviations in a template name. :param template: The project template name. :param abbreviations: Abbreviation definitions.
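A quick usage sketch of the function above; the 'gh' abbreviation mapping is an illustrative example, not part of this dataset entry:

    abbreviations = {'gh': 'https://github.com/{0}.git'}
    print(expand_abbreviations('gh:audreyr/cookiecutter-pypackage', abbreviations))
    # https://github.com/audreyr/cookiecutter-pypackage.git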
385,501
def kill(self, exc=None):
    # `exc` appears in the original log call, so it is assumed to have been
    # a parameter; the log message strings were lost in extraction and are
    # reconstructed guesses.
    if self._gt is not None and not self._gt.dead:
        _log.warn('killing %s (%s)', self, exc)
        super(QueueConsumer, self).kill()
        _log.debug('killed %s', self)
Kill the queue-consumer. Unlike `stop()` any pending message ack or requeue-requests, requests to remove providers, etc are lost and the consume thread is asked to terminate as soon as possible.
385,502
def __set_log_file_name(self):
    dir, _ = os.path.split(self.__logFileBasename)
    if len(dir) and not os.path.exists(dir):
        os.makedirs(dir)
    self.__logFileName = self.__logFileBasename + "." + self.__logFileExtension
    number = 0
    # Roll over to a numbered file once the current one exceeds the size limit.
    while os.path.isfile(self.__logFileName):
        if os.stat(self.__logFileName).st_size / 1e6 < self.__maxlogFileSize:
            break
        number += 1
        self.__logFileName = self.__logFileBasename + "_" + str(number) + "." + self.__logFileExtension
    self.__logFileStream = None
Automatically set logFileName attribute
385,503
def create_project(self, project_name, desc):
    data = {
        "name": project_name,
        "description": desc
    }
    return self._post("/projects", data)
Send POST to /projects creating a new project with the specified name and desc. Raises DataServiceError on error. :param project_name: str name of the project :param desc: str description of the project :return: requests.Response containing the successful result
385,504
def sanitize_win_path(winpath):
    intab = '<>:|?*'  # illegal Windows path characters; literal reconstructed
    if isinstance(winpath, six.text_type):
        winpath = winpath.translate(dict((ord(c), '_') for c in intab))
    elif isinstance(winpath, six.string_types):
        outtab = '_' * len(intab)
        trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab)
        winpath = winpath.translate(trantab)
    return winpath
Remove illegal path characters for windows
385,505
async def get_agents(self, addr=True, agent_cls=None):
    # The parameters are deliberately not forwarded; see the note in the
    # docstring below.
    return await self.menv.get_agents(addr=True, agent_cls=None, as_coro=True)
Get addresses of all agents in all the slave environments. This is a managing function for :meth:`creamas.mp.MultiEnvironment.get_agents`. .. note:: Since :class:`aiomas.rpc.Proxy` objects do not seem to handle (re)serialization, ``addr`` and ``agent_cls`` parameters are omitted from the call to underlying multi-environment's :meth:`get_agents`. If :class:`aiomas.rpc.Proxy` objects from all the agents are needed, call each slave environment manager's :meth:`get_agents` directly.
385,506
def add_success(self, group=None, type_=None, field=None, description=None):
    # The default literals and the error message below were lost in
    # extraction and are reconstructed guesses.
    group = group or '(200)'
    group = int(group.lower()[1:-1])  # strip the surrounding parentheses
    self.retcode = self.retcode or group
    if group != self.retcode:
        raise ValueError('success group conflicts with existing retcode')
    type_ = type_ or 'String'
    p = Param(type_, field, description)
    self.params['success'][p.field] = p
parse and append a success data param
385,507
def unpack_rsp(cls, rsp_pb):
    if rsp_pb.retType != RET_OK:
        return RET_ERROR, rsp_pb.retMsg, None
    order_id = str(rsp_pb.s2c.orderID)
    # Key literals reconstructed from the surrounding futu-api conventions.
    modify_order_list = [{
        'trd_env': TRADE.REV_TRD_ENV_MAP[rsp_pb.s2c.header.trdEnv],
        'order_id': order_id
    }]
    return RET_OK, "", modify_order_list
Convert from PLS response to user response
385,508
def get_enterprise_program_enrollment_page(self, request, enterprise_customer, program_details):
    # Most dictionary-key literals and translatable message strings in this
    # function were lost in extraction; the reconstructed names below are
    # plausible guesses and only the call structure is original.
    organizations = program_details['organizations']
    organization = organizations[0] if organizations else {}
    platform_name = get_configuration_value('PLATFORM_NAME', settings.PLATFORM_NAME)
    program_title = program_details['title']
    program_type_details = program_details['type_details']
    program_type = program_type_details['name']
    program_courses = program_details['courses']
    course_count = len(program_courses)
    course_count_text = ungettext(
        '{count} Course', '{count} Courses', course_count,
    ).format(count=course_count)
    effort_info_text = ungettext_min_max(
        '{} hour per week', '{} hours per week', _('{}-{} hours per week'),
        program_details.get('min_hours_effort_per_week'),
        program_details.get('max_hours_effort_per_week'),
    )
    length_info_text = ungettext_min_max(
        '{} week', '{} weeks', _('{}-{} weeks'),
        program_details.get('weeks_to_complete_min'),
        program_details.get('weeks_to_complete_max'),
    )
    if program_details['is_learner_eligible_for_one_click_purchase']:
        purchase_action = _('Purchase all unenrolled courses')
        item = _('program')
    else:
        purchase_action = _('Pursue the program')
        item = _('course')
    program_data_sharing_consent = get_data_sharing_consent(
        request.user.username,
        enterprise_customer.uuid,
        program_uuid=program_details['uuid'],
    )
    if program_data_sharing_consent.exists and not program_data_sharing_consent.granted:
        messages.add_consent_declined_message(request, enterprise_customer, program_title)
    discount_data = program_details.get('discount_data', {})
    one_click_purchase_eligibility = program_details.get('is_learner_eligible_for_one_click_purchase', False)
    # The origin of context_data and dozens of its key/label literals were
    # lost in extraction; the recoverable values are grouped as in the
    # original update() call.
    context_data = get_global_context(request, enterprise_customer)  # assumed helper
    context_data.update({
        'platform_name': platform_name,
        'organization_name': organization.get('name'),
        'organization_logo': organization.get('logo_image_url'),
        'program_type': program_type,
        'program_type_description': get_program_type_description(program_type),
        'program_title': program_title,
        'program_courses': program_courses,
        'item': item,
        'purchase_text': _('{purchase_action} for').format(purchase_action=purchase_action),
        'full_price': get_price_text(discount_data.get('total_incl_tax_excl_discounts', 0), request),
        'discounted_price': get_price_text(discount_data.get('total_incl_tax', 0), request),
        'is_discounted': discount_data.get('is_discounted', False),
        'course_count_text': course_count_text,
        'length_info_text': length_info_text,
        'effort_info_text': effort_info_text,
        'one_click_purchase_eligibility': one_click_purchase_eligibility,
    })
    return render(request, 'enterprise/enterprise_program_enrollment_page.html', context=context_data)
Render Enterprise-specific program enrollment page.
385,509
def generate_html_report(self, include_turtle=False, exclude_warning=False,
                         list_auxiliary_line=False) -> str:
    import os
    # The template filename and the {{placeholder}} tokens were lost in
    # extraction; the names below are reconstructed guesses.
    template = os.path.dirname(os.path.abspath(__file__)) + '/ontology_report_template.html'
    with open(template) as f:
        content = f.read().replace('\r\n', '\n')
    content = content.replace('{{{classes_hierarchy}}}', self.__html_entities_hierarchy(self.classes))
    content = content.replace('{{{data_properties_hierarchy}}}', self.__html_entities_hierarchy(self.data_properties))
    content = content.replace('{{{object_properties_hierarchy}}}', self.__html_entities_hierarchy(self.object_properties))
    content = content.replace('{{{classes}}}', self.__html_classes(include_turtle))
    properties = self.__html_properties(include_turtle)
    content = content.replace('{{{data_properties}}}', properties[0])
    content = content.replace('{{{object_properties}}}', properties[1])
    content = content.replace('{{{warning_display}}}', 'none' if exclude_warning else 'block')
    logs = '' if exclude_warning else self.ontology.log_stream.getvalue()
    content = content.replace('{{{logs}}}', '<pre>{}</pre>'.format(logs))
    content = content.replace('{{{auxiliary_line}}}', self.__show_list_auxiliary_line(list_auxiliary_line))
    return content
Shows links to all classes and properties, a nice hierarchy of the classes, and then a nice description of all the classes with all the properties that apply to it. Example: http://www.cidoc-crm.org/sites/default/files/Documents/cidoc_crm_version_5.0.4.html :param include_turtle: include turtle related to this entity. :param exclude_warning: Exclude warning messages in HTML report :return: HTML in raw string
385,510
def uninstall_hook(ctx):
    try:
        lint_config = ctx.obj[0]
        hooks.GitHookInstaller.uninstall_commit_msg_hook(lint_config)
        hook_path = hooks.GitHookInstaller.commit_msg_hook_path(lint_config)
        click.echo(u"Successfully uninstalled gitlint commit-msg hook from {0}".format(hook_path))
        ctx.exit(0)
    except hooks.GitHookInstallerError as e:
        click.echo(ustr(e), err=True)
        ctx.exit(GIT_CONTEXT_ERROR_CODE)
Uninstall gitlint commit-msg hook.
385,511
def confirm_login_allowed(self, user):
    if not user.is_active:
        raise forms.ValidationError(
            self.error_messages['inactive'],
            code='inactive',
        )
Controls whether the given User may log in. This is a policy setting, independent of end-user authentication. This default behavior is to allow login by active users, and reject login by inactive users. If the given user cannot log in, this method should raise a ``forms.ValidationError``. If the given user may log in, this method should return None.
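For context, a subclass sketch showing how this hook is typically overridden, adapted from the Django documentation; the second rule is purely illustrative:

    from django import forms
    from django.contrib.auth.forms import AuthenticationForm

    class PickyAuthenticationForm(AuthenticationForm):
        def confirm_login_allowed(self, user):
            if not user.is_active:
                raise forms.ValidationError("This account is inactive.", code='inactive')
            if user.username.startswith('b'):
                raise forms.ValidationError(
                    "Sorry, accounts starting with 'b' aren't welcome here.",
                    code='no_b_users',
                )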
385,512
def global_position_int_cov_encode(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt,
                                   relative_alt, vx, vy, vz, covariance):
    return MAVLink_global_position_int_cov_message(time_boot_ms, time_utc, estimator_type, lat, lon,
                                                   alt, relative_alt, vx, vy, vz, covariance)
The filtered global position (e.g. fused GPS and accelerometers). The position is in GPS-frame (right-handed, Z-up). It is designed as scaled integer message since the resolution of float is not sufficient. NOTE: This message is intended for onboard networks / companion computers and higher-bandwidth links and optimized for accuracy and completeness. Please use the GLOBAL_POSITION_INT message for a minimal subset. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t) estimator_type : Class id of the estimator this estimate originated from. (uint8_t) lat : Latitude, expressed as degrees * 1E7 (int32_t) lon : Longitude, expressed as degrees * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t) relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s (float) vy : Ground Y Speed (Longitude), expressed as m/s (float) vz : Ground Z Speed (Altitude), expressed as m/s (float) covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
385,513
def load_alias_hash(self):
    # Mode literals reconstructed from the docstring: read if the file
    # exists, otherwise create it.
    open_mode = 'r' if os.path.exists(GLOBAL_ALIAS_HASH_PATH) else 'w+'
    with open(GLOBAL_ALIAS_HASH_PATH, open_mode) as alias_config_hash_file:
        self.alias_config_hash = alias_config_hash_file.read()
Load (create, if not exist) the alias hash file.
385,514
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
                      PathsToInputs=None, PathToOutput=None,
                      PathToStderr='/dev/null', PathToStdout='/dev/null',
                      UniqueOutputs=False, InputParam=None, OutputParam=None):
    # The shell-redirection and join literals were lost in extraction and
    # are reconstructed as standard '<', '>' and '2>' forms.
    if not PathsToInputs:
        raise ValueError("No input file(s) specified.")
    if not PathToOutput:
        raise ValueError("No output file specified.")
    if not isinstance(PathsToInputs, list):
        PathsToInputs = [PathsToInputs]
    if PathToBin is None:
        PathToBin = ''
    if PathToCmd is None:
        PathToCmd = ''
    if PathToStdout is None:
        stdout_ = ''
    else:
        stdout_ = '> %s' % PathToStdout
    if PathToStderr is None:
        stderr_ = ''
    else:
        stderr_ = '2> %s' % PathToStderr
    if OutputParam is None:
        output = '> %s' % PathToOutput
        stdout_ = ''
    else:
        output_param = param_iter.AppParams[OutputParam]
        output_param.on('"%s"' % PathToOutput)
        output = str(output_param)
        output_param.off()
    output_count = 0
    base_command = ' '.join([PathToBin, PathToCmd])
    for params in param_iter:
        for inputfile in PathsToInputs:
            cmdline = [base_command]
            cmdline.extend(sorted(filter(None, map(str, params.values()))))
            if InputParam is None:
                input = '< %s' % inputfile
            else:
                input_param = params[InputParam]
                input_param.on('"%s"' % inputfile)
                input = str(input_param)
                input_param.off()
            cmdline.append(input)
            if UniqueOutputs:
                cmdline.append(''.join([output, str(output_count)]))
                output_count += 1
            else:
                cmdline.append(output)
            cmdline.append(stdout_)
            cmdline.append(stderr_)
            yield ' '.join(cmdline)
Generates command lines that can be used in a cluster environment param_iter : ParameterIterBase subclass instance PathToBin : Absolute location primary command (i.e. Python) PathToCmd : Absolute location of the command PathsToInputs : Absolute location(s) of input file(s) PathToOutput : Absolute location of output file PathToStderr : Path to stderr PathToStdout : Path to stdout UniqueOutputs : Generate unique tags for output files InputParam : Application input parameter (if not specified, assumes stdin is to be used) OutputParam : Application output parameter (if not specified, assumes stdout is to be used)
385,515
def _set_show_bare_metal_state(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # The u'...' extension-dict literals were lost in extraction and are
        # omitted here; namespace/module names are reconstructed guesses.
        t = YANGDynClass(v, base=show_bare_metal_state.show_bare_metal_state, is_leaf=True,
                         yang_name="show-bare-metal-state", rest_name="show-bare-metal-state",
                         parent=self, path_helper=self._path_helper, extmethods=self._extmethods,
                         register_paths=False, extensions=None,
                         namespace='urn:brocade.com:mgmt:brocade-preprovision',
                         defining_module='brocade-preprovision', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'show_bare_metal_state must be of a type compatible with rpc',
            'defined-type': "rpc",
            'generated-type': 'YANGDynClass(...)',
        })
    self.__show_bare_metal_state = t
    if hasattr(self, '_set'):
        self._set()
Setter method for show_bare_metal_state, mapped from YANG variable /brocade_preprovision_rpc/show_bare_metal_state (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_bare_metal_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_bare_metal_state() directly.
385,516
def sync_focus(self, *_):
    # Focus the popup, if any.
    if self.display_popup:
        self.app.layout.focus(self.layout_manager.popup_dialog)
        return
    if self.confirm_text:
        return
    if self.prompt_command:
        return
    if self.command_mode:
        return
    if not self.pymux.arrangement.windows:
        return
    pane = self.pymux.arrangement.get_active_pane()
    self.app.layout.focus(pane.terminal)
Focus the focused window from the pymux arrangement.
385,517
def from_dict(cls, d):
    if d["normalized_angle_distance_weight"] is not None:
        nad_w = NormalizedAngleDistanceNbSetWeight.from_dict(d["normalized_angle_distance_weight"])
    else:
        nad_w = None
    return cls(additional_condition=d["additional_condition"],
               symmetry_measure_type=d["symmetry_measure_type"],
               dist_ang_area_weight=DistanceAngleAreaNbSetWeight.from_dict(d["dist_ang_area_weight"])
               if d["dist_ang_area_weight"] is not None else None,
               self_csm_weight=SelfCSMNbSetWeight.from_dict(d["self_csm_weight"])
               if d["self_csm_weight"] is not None else None,
               delta_csm_weight=DeltaCSMNbSetWeight.from_dict(d["delta_csm_weight"])
               if d["delta_csm_weight"] is not None else None,
               cn_bias_weight=CNBiasNbSetWeight.from_dict(d["cn_bias_weight"])
               if d["cn_bias_weight"] is not None else None,
               angle_weight=AngleNbSetWeight.from_dict(d["angle_weight"])
               if d["angle_weight"] is not None else None,
               normalized_angle_distance_weight=nad_w,
               ce_estimator=d["ce_estimator"])
Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the MultipleAbundanceChemenvStrategy object created using the as_dict method. :param d: dict representation of the MultiWeightsChemenvStrategy object :return: MultiWeightsChemenvStrategy object
385,518
def get_http_info_with_retriever(self, request, retriever):
    urlparts = urlparse.urlsplit(request.url)
    try:
        data = retriever(request)
    except Exception:
        data = {}
    # Key literals reconstructed from the standard Sentry HTTP event schema.
    return {
        'url': '{}://{}{}'.format(urlparts.scheme, urlparts.netloc, urlparts.path),
        'query_string': urlparts.query,
        'method': request.method,
        'data': data,
        'cookies': request.cookies,
        'headers': request.headers,
        'env': {
            'REMOTE_ADDR': request.remote_addr,
        }
    }
Exact method for getting http_info but with form data work around.
385,519
def normal_cdf(x, mu=0, sigma=1):
    arg = (x - mu) / (sigma * numpy.sqrt(2))
    res = (1 + erf(arg)) / 2
    return res
Cumulative Normal Distribution Function. :param x: scalar or array of real numbers. :type x: numpy.ndarray, float :param mu: Mean value. Default 0. :type mu: float, numpy.ndarray :param sigma: Standard deviation. Default 1. :type sigma: float :returns: An approximation of the cdf of the normal. :rtype: numpy.ndarray Note: CDF of the normal distribution is defined as \frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R Source: http://en.wikipedia.org/wiki/Normal_distribution
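A minimal standalone check of the formula above (requires numpy and scipy):

    import numpy
    from scipy.special import erf

    def normal_cdf(x, mu=0, sigma=1):
        arg = (x - mu) / (sigma * numpy.sqrt(2))
        return (1 + erf(arg)) / 2

    print(normal_cdf(0.0))   # 0.5 exactly, by symmetry
    print(normal_cdf(1.96))  # ~0.975, the familiar two-sided 95% bound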
385,520
def in_cwd():
    configs = []
    for filename in os.listdir(os.getcwd()):
        # Prefix reconstructed from the docstring: .tmuxp.py/.json/.yaml
        if filename.startswith('.tmuxp') and is_config_file(filename):
            configs.append(filename)
    return configs
Return list of configs in current working directory. If filename is ``.tmuxp.py``, ``.tmuxp.json``, ``.tmuxp.yaml``. Returns ------- list configs in current working directory
385,521
def new(cls, user, provider, federated_id):
    if cls.get_user(provider, federated_id):
        raise exceptions.AuthError("Federation already exists")
    return cls.create(user_id=user.id,
                      provider=provider,
                      federated_id=federated_id)
Create a new login :param user: AuthUser :param provider: str - ie: facebook, twitter, ... :param federated_id: str - an id associated to provider :return:
385,522
def reset_next_ids(classes):
    for cls in classes:
        if cls.next_id is not None:
            cls.set_next_id(type(cls.next_id)(0))
For each class in the list, if the .next_id attribute is not None (meaning the table has an ID generator associated with it), set .next_id to 0. This has the effect of reseting the ID generators, and is useful in applications that process multiple documents and add new rows to tables in those documents. Calling this function between documents prevents new row IDs from growing continuously from document to document. There is no need to do this, it's purpose is merely aesthetic, but it can be confusing to open a document and find process ID 300 in the process table and wonder what happened to the other 299 processes. Example: >>> import lsctables >>> reset_next_ids(lsctables.TableByName.values())
385,523
def stop_processes(self):
    # The log message literals were lost in extraction; the texts below are
    # reconstructed guesses.
    self.set_state(self.STATE_SHUTTING_DOWN)
    LOGGER.info('Stopping consumer processes')
    signal.signal(signal.SIGABRT, signal.SIG_IGN)
    signal.signal(signal.SIGALRM, signal.SIG_IGN)
    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    signal.signal(signal.SIGPROF, signal.SIG_IGN)
    signal.setitimer(signal.ITIMER_REAL, 0, 0)
    LOGGER.info('Sending SIGABRT to active children')
    for proc in multiprocessing.active_children():
        if int(proc.pid) != os.getpid():
            try:
                os.kill(int(proc.pid), signal.SIGABRT)
            except OSError:
                pass
    for iteration in range(0, self.MAX_SHUTDOWN_WAIT):
        processes = len(self.active_processes(False))
        if not processes:
            break
        LOGGER.info('Waiting on %i active processes to shut down (%i/%i)',
                    processes, iteration, self.MAX_SHUTDOWN_WAIT)
        try:
            time.sleep(0.5)
        except KeyboardInterrupt:
            break
    if len(self.active_processes(False)):
        self.kill_processes()
    LOGGER.debug('All consumer processes stopped')
    self.set_state(self.STATE_STOPPED)
Iterate through all of the consumer processes shutting them down.
385,524
def _make_params_pb(params, param_types):
    if params is not None:
        if param_types is None:
            raise ValueError("Specify 'param_types' when passing 'params'.")
        return Struct(
            fields={key: _make_value_pb(value) for key, value in params.items()}
        )
    else:
        if param_types is not None:
            raise ValueError("Specify 'params' when passing 'param_types'.")
    return None
Helper for :meth:`execute_update`. :type params: dict, {str -> column value} :param params: values for parameter replacement. Keys must match the names used in ``dml``. :type param_types: dict[str -> Union[dict, .types.Type]] :param param_types: (Optional) maps explicit types for one or more param values; required if parameters are passed. :rtype: Union[None, :class:`Struct`] :returns: a struct message for the passed params, or None :raises ValueError: If ``param_types`` is None but ``params`` is not None. :raises ValueError: If ``params`` is None but ``param_types`` is not None.
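For reference, a sketch of the kind of protobuf Struct this helper builds, using plain google.protobuf types; the library's own _make_value_pb handles more value types and is not reproduced here:

    from google.protobuf.struct_pb2 import Struct, Value

    params_pb = Struct(fields={
        'user_id': Value(number_value=42),
        'name': Value(string_value='alice'),
    })
    print(params_pb)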
385,525
def setup_packages():
    # The "CC=/path/to/compiler" fragments that preceded this body were
    # docstring residue, not code, and have been dropped. Many string
    # literals (config-var names, glob patterns, PyPI classifiers, author
    # and description metadata) were lost in extraction; reconstructed
    # values below are guesses and lost metadata kwargs are omitted.
    src_path = dirname(abspath(sys.argv[0]))
    old_path = os.getcwd()
    os.chdir(src_path)
    sys.path.insert(0, src_path)
    python_dirs = ["theory/python_bindings", "mocks/python_bindings"]
    extensions = generate_extensions(python_dirs)
    common_dict = requirements_check()
    if install_required():
        from distutils.sysconfig import get_config_var
        if get_config_var('EXT_SUFFIX') != '.so' and version_info[0] == 2:
            msg = "The extensions all get the `.so` automatically. "\
                  "However, python expects the extension to be `{0}`"\
                  .format(get_config_var('EXT_SUFFIX'))
            raise ValueError(msg)
        extra_string = ''
        if compiler != '':
            extra_string = 'CC={0}'.format(compiler)
        command = "make libs {0}".format(extra_string)
        run_command(command)
    else:
        if 'clean' in sys.argv:
            command = "make distclean"
            run_command(command)
    dirs_patterns = {
        # directory -> list of glob patterns; the literals were lost in
        # extraction
    }
    data_files = []
    for d in dirs_patterns:
        patterns = dirs_patterns[d]
        f = recursive_glob(d, patterns)
        data_files.extend(f)
    data_files = ["../{0}".format(d) for d in data_files]
    long_description = read_text_file('README.rst')  # filename assumed
    min_np_major = int(common_dict['numpy'][0])  # key name assumed
    min_np_minor = int(common_dict['numpy'][0])  # key name assumed
    classifiers = []  # PyPI trove classifier strings lost in extraction
    metadata = dict(
        name=projectname,
        version=version,
        url=base_url,
        download_url='{0}/archive/{1}-{2}.tar.gz'.format(base_url, projectname, version),
        long_description=long_description,
        classifiers=classifiers,
        platforms=["Linux", "Mac OSX", "Unix"],
        provides=[projectname],
        packages=find_packages(),
        ext_package=projectname,
        ext_modules=extensions,
        package_data={'': data_files},
        include_package_data=True,
        setup_requires=['setuptools',
                        'numpy>={0}.{1}'.format(min_np_major, min_np_minor)],
        install_requires=['numpy>={0}.{1}'.format(min_np_major, min_np_minor)],
        zip_safe=False,
        cmdclass={'build_ext': BuildExtSubclass})
    try:
        setup(**metadata)
    finally:
        del sys.path[0]
        os.chdir(old_path)
    return
Custom setup for Corrfunc package. Optional: Set compiler via 'CC=/path/to/compiler' or 'CC /path/to/compiler' or 'CC = /path/to/compiler' All the CC options are removed from sys.argv after being parsed.
385,526
def remove(self, point, **kwargs):
    node_for_remove = None
    if 'payload' in kwargs:
        node_for_remove = self.find_node_with_payload(point, kwargs['payload'], None)
    else:
        node_for_remove = self.find_node(point, None)
    if node_for_remove is None:
        return None
    parent = node_for_remove.parent
    minimal_node = self.__recursive_remove(node_for_remove)
    if parent is None:
        self.__root = minimal_node
        if minimal_node is not None:
            minimal_node.parent = None
    else:
        if parent.left is node_for_remove:
            parent.left = minimal_node
        elif parent.right is node_for_remove:
            parent.right = minimal_node
    return self.__root
! @brief Remove specified point from kd-tree. @details It removes the first found node that satisfy to the input parameters. Make sure that pair (point, payload) is unique for each node, otherwise the first found is removed. @param[in] point (list): Coordinates of the point of removed node. @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'payload'). <b>Keyword Args:</b><br> - payload (any): Payload of the node that should be removed. @return (node) Root if node has been successfully removed, otherwise None.
385,527
def _Bern_to_Fierz_III_IV_V(C, qqqq):
    # All operator-label string literals (the sector lists and the Bern and
    # Fierz basis keys) were lost in extraction. fk()/bk() build positional
    # placeholder labels so the original coefficient structure stays
    # runnable; the real label mapping differs.
    def fk(n):
        return 'F' + qqqq + str(n)

    def bk(n):
        return str(n) + qqqq

    class_iii_sectors = []  # six sector labels lost in extraction
    class_iv_sectors = []   # three sector labels lost in extraction
    class_v_sectors = []    # eighteen sector labels lost in extraction
    if qqqq in class_iii_sectors:
        return {
            fk(1): C[bk(1)] + 16 * C[bk(2)],
            fk(2): C[bk(3)] + 16 * C[bk(4)],
            fk(3): C[bk(5)] + 4 * C[bk(6)],
            fk(4): C[bk(7)] + 4 * C[bk(8)],
            fk(5): C[bk(9)] + 64 * C[bk(10)],
            fk(6): C[bk(11)] + 64 * C[bk(12)],
            fk(7): C[bk(13)] + 16 * C[bk(14)],
            fk(8): C[bk(15)] + 16 * C[bk(16)],
            fk(9): C[bk(17)] - 16 * C[bk(18)],
            fk(10): C[bk(19)] - 16 * C[bk(20)],
        }
    if qqqq in class_iv_sectors:
        # Same coefficient pattern as Class III in the original.
        return {
            fk(1): C[bk(1)] + 16 * C[bk(2)],
            fk(2): C[bk(3)] + 16 * C[bk(4)],
            fk(3): C[bk(5)] + 4 * C[bk(6)],
            fk(4): C[bk(7)] + 4 * C[bk(8)],
            fk(5): C[bk(9)] + 64 * C[bk(10)],
            fk(6): C[bk(11)] + 64 * C[bk(12)],
            fk(7): C[bk(13)] + 16 * C[bk(14)],
            fk(8): C[bk(15)] + 16 * C[bk(16)],
            fk(9): C[bk(17)] - 16 * C[bk(18)],
            fk(10): C[bk(19)] - 16 * C[bk(20)],
        }
    if qqqq in class_v_sectors:
        # Each row keeps its original numeric coefficients; the bk() indices
        # are positional placeholders, not the true Bern labels.
        return {
            fk(1): C[bk(1)] - C[bk(2)] / 6 + 16 * C[bk(3)] - (8 * C[bk(4)]) / 3,
            fk(2): -8 * C[bk(5)] + C[bk(6)] / 2,
            fk(3): -8 * C[bk(7)] + C[bk(8)] / 2,
            fk(4): C[bk(9)] - C[bk(10)] / 6 + 16 * C[bk(11)] - (8 * C[bk(12)]) / 3,
            fk(5): C[bk(13)] / 2 + 8 * C[bk(14)],
            fk(6): C[bk(15)] / 2 + 8 * C[bk(16)],
            fk(7): C[bk(17)] - C[bk(18)] / 6 + 4 * C[bk(19)] - (2 * C[bk(20)]) / 3,
            fk(8): C[bk(21)] - C[bk(22)] / 6 + 4 * C[bk(23)] - (2 * C[bk(24)]) / 3,
            fk(9): C[bk(25)] / 2 + 2 * C[bk(26)],
            fk(10): C[bk(27)] / 2 + 2 * C[bk(28)],
            fk(11): -((32 * C[bk(29)]) / 3) + C[bk(30)] - C[bk(31)] / 6 + 64 * C[bk(32)],
            fk(12): -((32 * C[bk(33)]) / 3) + C[bk(34)] - C[bk(35)] / 6 + 64 * C[bk(36)],
            fk(13): 32 * C[bk(37)] + C[bk(38)] / 2,
            fk(14): 32 * C[bk(39)] + C[bk(40)] / 2,
            fk(15): -((8 * C[bk(41)]) / 3) + C[bk(42)] - C[bk(43)] / 6 + 16 * C[bk(44)],
            fk(16): -((8 * C[bk(45)]) / 3) + C[bk(46)] - C[bk(47)] / 6 + 16 * C[bk(48)],
            fk(17): 8 * C[bk(49)] + C[bk(50)] / 2,
            fk(18): 8 * C[bk(51)] + C[bk(52)] / 2,
            fk(19): (8 * C[bk(53)]) / 3 + C[bk(54)] - C[bk(55)] / 6 - 16 * C[bk(56)],
            fk(20): (8 * C[bk(57)]) / 3 + C[bk(58)] - C[bk(59)] / 6 - 16 * C[bk(60)],
        }
    raise ValueError("Case not implemented: {}".format(qqqq))
From Bern to 4-quark Fierz basis for Classes III, IV and V. `qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc.
385,528
def clean_all(ctx, dry_run=False):
    cleanup_dirs(ctx.clean_all.directories or [], dry_run=dry_run)
    cleanup_dirs(ctx.clean_all.extra_directories or [], dry_run=dry_run)
    cleanup_files(ctx.clean_all.files or [], dry_run=dry_run)
    cleanup_files(ctx.clean_all.extra_files or [], dry_run=dry_run)
    execute_cleanup_tasks(ctx, cleanup_all_tasks, dry_run=dry_run)
    clean(ctx, dry_run=dry_run)
Clean up everything, even the precious stuff. NOTE: clean task is executed first.
385,529
def __lookup_builtin(name):
    global __builtin_functions
    if __builtin_functions is None:
        builtins = dict()
        for proto in __builtins:
            pos = proto.find('(')
            name, params, defaults = proto[:pos], list(), dict()
            for param in proto[pos + 1:-1].split(','):
                pos = param.find('=')
                if not pos < 0:
                    param, value = param[:pos], param[pos + 1:]
                    try:
                        defaults[param] = __builtin_defaults[value]
                    except KeyError:
                        # error message reconstructed
                        raise ValueError(
                            'builtin %s: parameter %s: unknown default %r'
                            % (name, param, value))
                params.append(param)
            builtins[name] = (params, defaults)
        __builtin_functions = builtins
    try:
        params, defaults = __builtin_functions[name]
    except KeyError:
        params, defaults = tuple(), dict()
        __builtin_functions[name] = (params, defaults)
        print(
            "Warning: builtin function %r is missing prototype" % name,
            file=sys.stderr)
    return len(params), params, defaults
Lookup the parameter name and default parameter values for builtin functions.
385,530
def post_build_time_coverage(self):
    from ambry.util.datestimes import expand_to_years
    years = set()
    if self.metadata.about.time:
        for year in expand_to_years(self.metadata.about.time):
            years.add(year)
    if self.identity.btime:
        for year in expand_to_years(self.identity.btime):
            years.add(year)
    for p in self.partitions:
        years |= set(p.time_coverage)
Collect all of the time coverage for the bundle.
385,531
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
    raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
Make an errorbar along the xaxis for points at (X,Y) on the canvas. if error is two dimensional, the lower error is error[:,0] and the upper error is error[:,1] the kwargs are plotting library specific kwargs!
385,532
def sources(verbose=False):
    ret = {}
    # Command and key literals reconstructed from salt's imgadm conventions;
    # a stray '*' (CLI-example residue) was dropped.
    cmd = 'imgadm sources -j'
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = _exit_status(retcode)
        return ret
    for src in salt.utils.json.loads(res['stdout']):
        ret[src['url']] = src
        del src['url']
    if not verbose:
        ret = list(ret)
    return ret
Return a list of available sources verbose : boolean (False) toggle verbose output .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' imgadm.sources
385,533
def apply(self, img):
    yup, uup, vup = self.getUpLimit()
    ydwn, udwn, vdwn = self.getDownLimit()
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    minValues = np.array([ydwn, udwn, vdwn], dtype=np.uint8)
    maxValues = np.array([yup, uup, vup], dtype=np.uint8)
    mask = cv2.inRange(yuv, minValues, maxValues)
    res = cv2.bitwise_and(img, img, mask=mask)
    return res
We convert using OpenCV's BGR channel order rather than RGB, because converting from RGB would yield YVU instead of YUV
385,534
def get_string(strings: Sequence[str],
               prefix: str,
               ignoreleadingcolon: bool = False,
               precedingline: str = "") -> Optional[str]:
    s = get_what_follows(strings, prefix, precedingline=precedingline)
    if ignoreleadingcolon:
        f = s.find(":")
        if f != -1:
            s = s[f + 1:].strip()
    if len(s) == 0:
        return None
    return s
Find a string as per :func:`get_what_follows`. Args: strings: see :func:`get_what_follows` prefix: see :func:`get_what_follows` ignoreleadingcolon: if ``True``, restrict the result to what comes after its first colon (and whitespace-strip that) precedingline: see :func:`get_what_follows` Returns: the line fragment
385,535
def change_weibo_header(uri, headers, body):
    auth = headers.get('Authorization')
    if auth:
        auth = auth.replace('Bearer', 'OAuth2')
        headers['Authorization'] = auth
    return uri, headers, body
Since weibo is a rubbish server, it does not follow the standard, we need to change the authorization header for it.
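A small demonstration of the header rewrite, assuming the function above is in scope; the token value is illustrative:

    headers = {'Authorization': 'Bearer abc123'}
    uri, headers, body = change_weibo_header(
        'https://api.weibo.com/2/statuses/update.json', headers, None)
    print(headers['Authorization'])  # OAuth2 abc123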
385,536
def _setEndpoint(self, location):
    try:
        self._sforce.set_options(location=location)
    except:
        # Fall back for suds versions without set_options(location=...).
        self._sforce.wsdl.service.setlocation(location)
    self._location = location
Set the endpoint to the URL that Salesforce returns after a successful login()
385,537
def get_market_summary(self, market):
    # Endpoint paths taken from the docstring below.
    return self._api_query(
        path_dict={
            API_V1_1: '/public/getmarketsummary',
            API_V2_0: '/pub/Market/GetMarketSummary'
        },
        options={'market': market, 'marketname': market},
        protection=PROTECTION_PUB)
Used to get the last 24-hour summary of all active exchanges for a specific coin Endpoint: 1.1 /public/getmarketsummary 2.0 /pub/Market/GetMarketSummary :param market: String literal for the market(ex: BTC-XRP) :type market: str :return: Summaries of active exchanges of a coin in JSON :rtype : dict
385,538
def RENEWING(self):
    logger.debug('state: RENEWING')  # message text reconstructed
    self.current_state = STATE_RENEWING
    if self.script is not None:
        self.script.script_init(self.client.lease, self.current_state)
        self.script.script_go()
    else:
        set_net(self.client.lease)
RENEWING state.
385,539
def contains(self, key_or_keypath):
    if isinstance(key_or_keypath, list):
        if len(key_or_keypath) == 0:
            return False
        val = self
        next_key = None
        for next_key in key_or_keypath:
            if next_key in val:
                val = val[next_key]
            else:
                return False
        return True
    else:
        # The attribute-name literal was lost in extraction; '_data' is a
        # reconstructed guess for the backing dict.
        return key_or_keypath in self.__dict__['_data']
Allows the 'in' operator to work for checking if a particular key (or keypath) is inside the dictionary.
385,540
def save(self, filename, binary=True):
    filename = os.path.abspath(os.path.expanduser(filename))
    ext = vtki.get_ext(filename)
    if ext in ['.vtm', '.vtmb']:
        writer = vtk.vtkXMLMultiBlockDataWriter()
    else:
        raise Exception('File extension must be either "vtm" or "vtmb"')
    writer.SetFileName(filename)
    writer.SetInputDataObject(self)
    if binary:
        writer.SetDataModeToBinary()
    else:
        writer.SetDataModeToAscii()
    writer.Write()
    return
Writes a ``MultiBlock`` dataset to disk. Written file may be an ASCII or binary vtm file. Parameters ---------- filename : str Filename of mesh to be written. File type is inferred from the extension of the filename unless overridden with ftype. Can be one of the following types (.vtm or .vtmb) binary : bool, optional Writes the file as binary when True and ASCII when False. Notes ----- Binary files write much faster than ASCII and have a smaller file size.
385,541
def create_keep_package(cls, package_name, recursive=True):
    return cls.create_keep(cls._format_package_glob(package_name, recursive))
Convenience constructor for a package keep rule. Essentially equivalent to just using ``shading_keep('package_name.**')``. :param string package_name: Package name to keep (eg, ``org.pantsbuild.example``). :param bool recursive: Whether to keep everything under any subpackage of ``package_name``, or just direct children of the package. (Defaults to True).
385,542
def _certify_int_param(value, negative=True, required=False):
    if value is None and not required:
        return
    if not isinstance(value, int):
        raise CertifierTypeError(
            message="expected integer, but value is of type {cls!r}".format(
                cls=value.__class__.__name__),
            value=value,
            required=required,
        )
    if not negative and value < 0:
        raise CertifierValueError(
            message="expected positive integer, but value is negative")
A private certifier (to `certifiable`) to certify integers from `certify_int`. :param int value: The value to certify is an integer. :param bool negative: If the value can be negative. Default=True. :param bool required: If the value is required. Default=False. :raises CertifierParamError: Value was not an integer (if required and non-None).
385,543
def to_python(value, seen=None):
    seen = seen or set()
    if isinstance(value, framework.TupleLike):
        if value.ident in seen:
            # message text reconstructed
            raise RecursionException('to_python: infinite recursion through %r' % value)
        new_seen = seen.union([value.ident])
        return {k: to_python(value[k], seen=new_seen) for k in value.exportable_keys()}
    if isinstance(value, dict):
        return {k: to_python(value[k], seen=seen) for k in value.keys()}
    if isinstance(value, list):
        return [to_python(x, seen=seen) for x in value]
    return value
Reify values to their Python equivalents. Does recursion detection, failing when that happens.
385,544
def update_metadata(self, scaling_group, metadata):
    if not isinstance(scaling_group, ScalingGroup):
        scaling_group = self.get(scaling_group)
    curr_meta = scaling_group.metadata
    curr_meta.update(metadata)
    return self.update(scaling_group, metadata=curr_meta)
Adds the given metadata dict to the existing metadata for the scaling group.
385,545
def rotate(l, steps=1):
    # A stray 'r' (residue of the r""" docstring prefix) was dropped.
    if len(l):
        steps %= len(l)
        if steps:
            res = l[steps:]
            res.extend(l[:steps])
            return res
    return l  # assumed fallback (lost in extraction): no rotation needed
r"""Rotates a list `l` `steps` to the left. Accepts `steps` > `len(l)` or < 0. >>> rotate([1,2,3]) [2, 3, 1] >>> rotate([1,2,3,4],-2) [3, 4, 1, 2] >>> rotate([1,2,3,4],-5) [4, 1, 2, 3] >>> rotate([1,2,3,4],1) [2, 3, 4, 1] >>> l = [1,2,3]; rotate(l) is not l True
385,546
def get_columns(self, df, usage, columns=None):
    columns_excluded = pd.Index([])
    columns_included = df.columns
    if usage == self.INCLUDE:
        try:
            columns_included = columns_included.intersection(pd.Index(columns))
        except TypeError:
            pass
    elif usage == self.EXCLUDE:
        try:
            columns_excluded = columns_excluded.union(pd.Index(columns))
        except TypeError:
            pass
    columns_included = columns_included.difference(columns_excluded)
    return columns_included.intersection(df.columns)
Returns a `data_frame.columns`. :param df: dataframe to select columns from :param usage: should be a value from [ALL, INCLUDE, EXCLUDE]. this value only makes sense if attr `columns` is also set. otherwise, should be used with default value ALL. :param columns: * if `usage` is all, this value is not used. * if `usage` is INCLUDE, the `df` is restricted to the intersection between `columns` and the `df.columns` * if usage is EXCLUDE, returns the `df.columns` excluding these `columns` :return: `data_frame` columns, excluding `target_column` and `id_column` if given. `data_frame` columns, including/excluding the `columns` depending on `usage`.
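The include/exclude semantics reduce to pandas Index set operations; a standalone sketch with toy column names:

    import pandas as pd

    df = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
    include = df.columns.intersection(pd.Index(['a', 'b', 'z']))  # INCLUDE -> ['a', 'b']
    exclude = df.columns.difference(pd.Index(['b']))              # EXCLUDE 'b' -> ['a', 'c']
    print(list(include), list(exclude))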
385,547
def remap_name(name_generator, names, table=None):
    out = ""
    for name in names:
        if table and name in table[0].keys():
            replacement = table[0][name]
        else:
            replacement = next(name_generator)
        out += "%s=%s\n" % (replacement, name)
    return out
Produces a series of variable assignments in the form of:: <obfuscated name> = <some identifier> for each item in *names* using *name_generator* to come up with the replacement names. If *table* is provided, replacements will be looked up there before generating a new unique name.
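Usage sketch, assuming remap_name above; the two-letter name generator is a hypothetical stand-in for the obfuscator's real one:

    import itertools
    import string

    def simple_names():
        for length in itertools.count(1):
            for letters in itertools.product(string.ascii_lowercase, repeat=length):
                yield ''.join(letters)

    print(remap_name(simple_names(), ['total_price', 'user_count']))
    # a=total_price
    # b=user_count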
385,548
def explain_weights_lightgbm(lgb,
                             vec=None,
                             top=20,
                             target_names=None,  # ignored
                             targets=None,  # ignored
                             feature_names=None,
                             feature_re=None,
                             feature_filter=None,
                             importance_type='gain',  # default per the docstring
                             ):
    coef = _get_lgb_feature_importances(lgb, importance_type)
    lgb_feature_names = lgb.booster_.feature_name()
    return get_feature_importance_explanation(
        lgb, vec, coef,
        feature_names=feature_names,
        estimator_feature_names=lgb_feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
        top=top,
        description=DESCRIPTION_LIGHTGBM,
        num_features=coef.shape[-1],
        is_regression=isinstance(lgb, lightgbm.LGBMRegressor),
    )
Return an explanation of an LightGBM estimator (via scikit-learn wrapper LGBMClassifier or LGBMRegressor) as feature importances. See :func:`eli5.explain_weights` for description of ``top``, ``feature_names``, ``feature_re`` and ``feature_filter`` parameters. ``target_names`` and ``targets`` parameters are ignored. Parameters ---------- importance_type : str, optional A way to get feature importance. Possible values are: - 'gain' - the average gain of the feature when it is used in trees (default) - 'split' - the number of times a feature is used to split the data across all trees - 'weight' - the same as 'split', for compatibility with xgboost
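A usage sketch for the function above; eli5 users would normally go through eli5.explain_weights, which dispatches here for LightGBM models. The dataset and hyperparameters are arbitrary:

    from sklearn.datasets import make_classification
    from lightgbm import LGBMClassifier

    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    clf = LGBMClassifier(n_estimators=20).fit(X, y)
    expl = explain_weights_lightgbm(clf, importance_type='split', top=5)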
385,549
def add_send_last_message(self, connection, send_last_message):
    self._send_last_message[connection] = send_last_message
    LOGGER.debug("Added send_last_message function "
                 "for connection %s", connection)
Adds a send_last_message function to the Dispatcher's dictionary of functions indexed by connection. Args: connection (str): A locally unique identifier provided by the receiver of messages. send_last_message (fn): The method that should be called by the dispatcher to respond to messages which arrive via connection, when the connection should be closed after the message has been sent.
385,550
def degree_elevation(degree, ctrlpts, **kwargs):
    num = kwargs.get('num', 1)  # documented keyword argument
    check_op = kwargs.get('check_op', True)  # kwarg name reconstructed
    if check_op:
        if degree + 1 != len(ctrlpts):
            raise GeomdlException("Degree elevation can only work with Bezier-type geometries")
        if num <= 0:
            raise GeomdlException("Cannot degree elevate " + str(num) + " times")
    num_pts_elev = degree + 1 + num
    pts_elev = [[0.0 for _ in range(len(ctrlpts[0]))] for _ in range(num_pts_elev)]
    for i in range(0, num_pts_elev):
        start = max(0, (i - num))
        end = min(degree, i)
        for j in range(start, end + 1):
            coeff = linalg.binomial_coefficient(degree, j) * linalg.binomial_coefficient(num, (i - j))
            coeff /= linalg.binomial_coefficient((degree + num), i)
            pts_elev[i] = [p1 + (coeff * p2) for p1, p2 in zip(pts_elev[i], ctrlpts[j])]
    return pts_elev
Computes the control points of the rational/non-rational spline after degree elevation. Implementation of Eq. 5.36 of The NURBS Book by Piegl & Tiller, 2nd Edition, p.205 Keyword Arguments: * ``num``: number of degree elevations Please note that degree elevation algorithm can only operate on Bezier shapes, i.e. curves, surfaces, volumes. :param degree: degree :type degree: int :param ctrlpts: control points :type ctrlpts: list, tuple :return: control points of the degree-elevated shape :rtype: list
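To make Eq. 5.36 concrete: for a single elevation (num=1) it reduces to Q_i = (i/(p+1)) P_{i-1} + (1 - i/(p+1)) P_i. A standalone check on a quadratic Bezier curve:

    def elevate_once(pts, p):
        # Q_i = (i/(p+1)) * P_{i-1} + (1 - i/(p+1)) * P_i, for i = 0 .. p+1
        dim = len(pts[0])
        q = []
        for i in range(p + 2):
            a = i / (p + 1)
            prev = pts[i - 1] if i > 0 else [0.0] * dim
            cur = pts[i] if i <= p else [0.0] * dim
            q.append([a * u + (1 - a) * v for u, v in zip(prev, cur)])
        return q

    print(elevate_once([[0, 0], [1, 2], [2, 0]], p=2))
    # [[0.0, 0.0], [0.666..., 1.333...], [1.333..., 1.333...], [2.0, 0.0]]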
385,551
def convert(filename, num_questions=None, solution=False,
            pages_per_q=DEFAULT_PAGES_PER_Q, folder='question_pdfs',
            output='gradescope.pdf', zoom=1):
    # The default folder/output names and the user-facing messages were lost
    # in extraction; the values below are reconstructed guesses.
    check_for_wkhtmltohtml()
    save_notebook(filename)
    nb = read_nb(filename, solution=solution)
    pdf_names = create_question_pdfs(nb, pages_per_q=pages_per_q, folder=folder, zoom=zoom)
    merge_pdfs(pdf_names, output)
    # The first PDF is the notebook header, so don't count it as a question.
    n_questions_found = len(pdf_names) - 1
    if num_questions is not None and n_questions_found != num_questions:
        logging.warning(
            'Expected {} questions but found {}.'.format(num_questions, len(pdf_names))
        )
    try:
        from IPython.display import display, HTML
        display(HTML(DOWNLOAD_HTML.format(output)))
    except ImportError:
        print('Done! The PDF was saved to {}.'.format(output))
        print()
        print('Run this in a Jupyter notebook to get a download link.')
Public method that exports nb to PDF and pads all the questions. If num_questions is specified, will also check the final PDF for missing questions. If the output font size is too small/large, increase or decrease the zoom argument until the size looks correct. If solution=True, we'll export solution cells instead of student cells. Use this option to generate the solutions to upload to Gradescope.
385,552
def add_broker(self, broker):
    if broker not in self._brokers:
        self._brokers.add(broker)
    else:
        # warning text reconstructed
        self.log.warning(
            'Broker {broker_id} already present in replication-group {rg_id}'.format(
                broker_id=broker.id,
                rg_id=self._id,
            )
        )
Add broker to current broker-list.
385,553
def _update_names(self):
    d = dict(
        table=self.table_name,
        time=self.time,
        space=self.space,
        grain=self.grain,
        variant=self.variant,
        segment=self.segment
    )
    assert self.dataset
    name = PartialPartitionName(**d).promote(self.dataset.identity.name)
    self.name = str(name.name)
    self.vname = str(name.vname)
    self.cache_key = name.cache_key
    self.fqname = str(self.identity.fqname)
Update the derived names
385,554
def store_user_documents(user_document_gen, client, mongo_database_name, mongo_collection_name):
    mongo_database = client[mongo_database_name]
    mongo_collection = mongo_database[mongo_collection_name]
    for user_twitter_id, user_document_list in user_document_gen:
        document = user_document_list
        document["_id"] = int(user_twitter_id)
        mongo_collection.update({"_id": user_twitter_id}, document, upsert=True)
Stores Twitter list objects that a Twitter user is a member of in different mongo collections. Inputs: - user_document_gen: A python generator that yields a Twitter user id and an associated document list. - client: A pymongo MongoClient object. - mongo_database_name: The name of a Mongo database as a string. - mongo_collection_name: The name of the mongo collection as a string.
385,555
def init_tasks():
    if "exists" not in env:
        env.exists = exists
    if "run" not in env:
        env.run = run
    if "cd" not in env:
        env.cd = cd
    if "max_releases" not in env:
        env.max_releases = 5
    if "public_path" in env:
        public_path = env.public_path.rstrip("/")
        env.public_path = public_path
    run_hook("init_tasks")
Performs basic setup before any of the tasks are run. All tasks needs to run this before continuing. It only fires once.
385,556
def get_ip_reports(self, ips):
    # Cache-key and endpoint literals reconstructed from the VirusTotal API
    # conventions used elsewhere in this module.
    api_name = 'virustotal-ip-address-reports'
    (all_responses, ips) = self._bulk_cache_lookup(api_name, ips)
    responses = self._request_reports("ip", ips, 'ip-address/report')
    for ip, response in zip(ips, responses):
        if self._cache:
            self._cache.cache_value(api_name, ip, response)
        all_responses[ip] = response
    return all_responses
Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value.
385,557
def is_visible(self, selector):
    self.debug_log("Is visible (%s)" % selector)
    element = self.find(
        selector,
        raise_exception=False,
        wait_until_present=False,
        wait_until_visible=False
    )
    if element:
        if element.is_displayed(raise_exception=False):
            # Config keys taken from the docstring: highlight:element_is_visible
            element.highlight(
                style=BROME_CONFIG['highlight']['element_is_visible']
            )
            self.debug_log("is visible (%s): True" % selector)
            return True
    self.debug_log("is visible (%s): False" % selector)
    return False
Check if an element is visible in the dom or not This method will check if the element is displayed or not This method might (according to the config highlight:element_is_visible) highlight the element if it is visible This method won't wait until the element is visible or present This method won't raise any exception if the element is not visible Returns: bool: True if the element is visible; False otherwise
385,558
def models(cls, api_version=DEFAULT_API_VERSION):
    if api_version == '2016-09-01':
        from .v2016_09_01 import models
        return models
    raise NotImplementedError("APIVersion {} is not available".format(api_version))
Module depends on the API version: * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.links.v2016_09_01.models>`
385,559
def to_pandas_dataframe(self, sample_column=False):
    import pandas as pd
    if sample_column:
        df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
    else:
        # The branch that rebuilds the frame from the record array was lost
        # in extraction; reconstructed from the dimod source.
        df = pd.DataFrame(self.record.sample, columns=self.variables)
        for field in sorted(self.record.dtype.names):  # sort for consistency
            if field == 'sample':
                continue
            df.loc[:, field] = self.record[field]
    return df
Convert a SampleSet to a Pandas DataFrame Returns: :obj:`pandas.DataFrame` Examples: >>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1}, ... {'a': -1, 'b': -1, 'c': +1}], ... dimod.SPIN, energy=-.5) >>> samples.to_pandas_dataframe() # doctest: +SKIP a b c energy num_occurrences 0 -1 1 -1 -0.5 1 1 -1 -1 1 -0.5 1 >>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP sample energy num_occurrences 0 {'a': -1, 'b': 1, 'c': -1} -0.5 1 1 {'a': -1, 'b': -1, 'c': 1} -0.5 1
385,560
def vim_enter(self, filename):
    success = self.setup(True, False)
    if success:
        self.editor.message("start_message")
Set up EnsimeClient when vim enters. This is useful to start the EnsimeLauncher as soon as possible.
385,561
def sample(self, withReplacement=None, fraction=None, seed=None):
    is_withReplacement_set = \
        type(withReplacement) == bool and isinstance(fraction, float)
    is_withReplacement_omitted_kwargs = \
        withReplacement is None and isinstance(fraction, float)
    is_withReplacement_omitted_args = isinstance(withReplacement, float)
    if not (is_withReplacement_set
            or is_withReplacement_omitted_kwargs
            or is_withReplacement_omitted_args):
        argtypes = [
            str(type(arg)) for arg in [withReplacement, fraction, seed]
            if arg is not None]
        raise TypeError(
            "withReplacement (optional), fraction (required) and seed (optional)"
            " should be a bool, float and number; however, "
            "got [%s]." % ", ".join(argtypes))
    if is_withReplacement_omitted_args:
        # Positional call like sample(fraction[, seed]): shift the arguments.
        if fraction is not None:
            seed = fraction
        fraction = withReplacement
        withReplacement = None
    seed = long(seed) if seed is not None else None
    args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
    jdf = self._jdf.sample(*args)
    return DataFrame(jdf, self.sql_ctx)
Returns a sampled subset of this :class:`DataFrame`. :param withReplacement: Sample with replacement or not (default False). :param fraction: Fraction of rows to generate, range [0.0, 1.0]. :param seed: Seed for sampling (default a random seed). .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. .. note:: `fraction` is required and, `withReplacement` and `seed` are optional. >>> df = spark.range(10) >>> df.sample(0.5, 3).count() 7 >>> df.sample(fraction=0.5, seed=3).count() 7 >>> df.sample(withReplacement=True, fraction=0.5, seed=3).count() 1 >>> df.sample(1.0).count() 10 >>> df.sample(fraction=1.0).count() 10 >>> df.sample(False, fraction=1.0).count() 10
385,562
def eigenvectors(T, k=None, right=True, ncv=None, reversible=False, mu=None):
    # A stray 'r' (residue of the r""" docstring prefix) was dropped.
    if k is None:
        raise ValueError("Number of eigenvectors required for decomposition of sparse matrix")
    else:
        if reversible:
            eigvec = eigenvectors_rev(T, k, right=right, ncv=ncv, mu=mu)
            return eigvec
        else:
            eigvec = eigenvectors_nrev(T, k, right=right, ncv=ncv)
            return eigvec
r"""Compute eigenvectors of given transition matrix. Parameters ---------- T : scipy.sparse matrix Transition matrix (stochastic matrix). k : int (optional) or array-like For integer k compute the first k eigenvalues of T else return those eigenvector sepcified by integer indices in k. right : bool, optional If True compute right eigenvectors, left eigenvectors otherwise ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k reversible : bool, optional Indicate that transition matrix is reversible mu : (M,) ndarray, optional Stationary distribution of T Returns ------- eigvec : numpy.ndarray, shape=(d, n) The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue. If k is None then n=d, if k is int then n=k otherwise n is the length of the given indices array. Notes ----- Eigenvectors are computed using the scipy interface to the corresponding ARPACK routines.
385,563
def path(self):
    names = []
    obj = self
    while obj:
        names.insert(0, obj.name)
        obj = obj.parent_dir
    sep = self.filesystem._path_separator(self.name)
    if names[0] == sep:
        names.pop(0)
        dir_path = sep.join(names)
        # Check for a Windows drive letter like 'C:'.
        is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
        if not is_drive:
            dir_path = sep + dir_path
    else:
        dir_path = sep.join(names)
    dir_path = self.filesystem.absnormpath(dir_path)
    return dir_path
Return the full path of the current object.
385,564
def save(self, to_save, **kwargs):
    check = kwargs.pop('check', True)  # kwarg name reconstructed
    if check:
        self._valid_record(to_save)
    if '_id' in to_save:
        self.__collect.replace_one(
            {'_id': to_save['_id']}, to_save, **kwargs)
        return to_save['_id']
    else:
        result = self.__collect.insert_one(to_save, **kwargs)
        return result.inserted_id
save method
385,565
def get_data(start, end, username=None, password=None,
             data_path=os.path.abspath(".") + '/tmp_data'):
    # Several literals (prompt text, auth realm/uri, sampling frequency,
    # path separators) were lost in extraction; reconstructed values are
    # marked below.
    dl_link = "http://data.blitzortung.org/Data_1/Protected/Strokes/"
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    if not username:
        username = input("Username to access Blitzorg with:")
    password = getpass.getpass(
        prompt='Password for {}:'.format(username))
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    auth_handler.add_password(realm='Blitzortung',  # assumed realm
                              uri=dl_link,          # assumed uri
                              user=username,
                              passwd=password)
    opener = urllib.request.build_opener(auth_handler)
    urllib.request.install_opener(opener)
    time_range = pd.date_range(start, end, freq='10min')  # 10-minute default per docstring
    for time_stamp in tqdm(time_range):
        tmp_link = dl_link + '/'.join(return_time_elements(time_stamp)) + '.json.gz'
        tmp_name = "./tmp_data/bz-" + '-'.join(return_time_elements(time_stamp)) + ".json.gz"
        if os.path.isfile(tmp_name):
            print("{0} exists. Aborting download attempt".format(tmp_name))
        else:
            try:
                urllib.request.urlretrieve(tmp_link, tmp_name)
            except Exception as inst:
                print(inst)
                print('Download failed for {}'.format(tmp_link))  # message reconstructed
**Download data (badly) from Blitzorg** Using a specified time stamp for start and end, data is downloaded at a default frequency (10 minute intervals). If a directory called data is not present, it will be added to the cwd as the target for the downloads. This is probably a bad idea however. It is much better to 1) get the data from Blitzorg directly, or 2) if you only want a small part of the data and have an account, download a csv file via their web interface. :parameter start: string :parameter end: string :parameter freq: string :Example: >>> get_data(start="2015-02-01T06:30", end="2015-02-01T10:05")
385,566
def hash_folder(folder, regex='[!_]*'):
    # Default glob pattern reconstructed from the docstring: match every
    # file except those starting with an underscore.
    file_hashes = {}
    for path in glob.glob(os.path.join(folder, regex)):
        if not os.path.isfile(path):
            continue
        with open(path, 'rb') as fileP:
            md5_hash = hashlib.md5(fileP.read()).digest()
        file_name = os.path.basename(path)
        file_hashes[file_name] = urlsafe_b64encode(md5_hash)
    return file_hashes
Get the md5 sum of each file in the folder and return to the user :param folder: the folder to compute the sums over :param regex: an expression to limit the files we match :return: Note: by default we will hash every file in the folder Note: we will not match anything that starts with an underscore
385,567
def convert_to_duckling_language_id(cls, lang):
    if lang is not None and cls.is_supported(lang):
        return lang
    elif lang is not None and cls.is_supported(lang + "$core"):
        return lang + "$core"
    else:
        # The format string was missing its first placeholder.
        raise ValueError("Unsupported language '{}'. Supported languages: {}".format(
            lang, ", ".join(cls.SUPPORTED_LANGUAGES)))
Ensure a language identifier has the correct duckling format and is supported.
385,568
def align_and_build_tree(seqs, moltype, best_tree=False, params=None):
    aln = align_unaligned_seqs(seqs, moltype=moltype, params=params)
    tree = build_tree_from_alignment(aln, moltype, best_tree, params)
    # Key literals reconstructed following the cogent convention.
    return {'Align': aln, 'Tree': tree}
Returns an alignment and a tree from Sequences object seqs. seqs: a cogent.core.alignment.SequenceCollection object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object best_tree: if True (default:False), uses a slower but more accurate algorithm to build the tree. params: dict of parameters to pass in to the Muscle app controller. The result will be a tuple containing a cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode object (or None for the alignment and/or tree if either fails).
385,569
def year(columns, name=None):
    if columns < 0:
        # message reconstructed; the original exception text was lost
        raise BaseException('Number of columns must be positive')
    field = numeric(columns, name)
    field.addParseAction(_to_year)
    return field
Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return:
385,570
def load_configuration_from_file(directory, args):
    args = copy.copy(args)
    directory_or_file = directory
    if args.config is not None:
        directory_or_file = args.config
    options = _get_options(directory_or_file, debug=args.debug)
    # Option-key literals reconstructed from the attribute names.
    args.report = options.get('report', args.report)
    threshold_dictionary = docutils.frontend.OptionParser.thresholds
    args.report = int(threshold_dictionary.get(args.report, args.report))
    args.ignore_language = get_and_split(
        options, 'ignore_language', args.ignore_language)
    args.ignore_messages = options.get(
        'ignore_messages', args.ignore_messages)
    args.ignore_directives = get_and_split(
        options, 'ignore_directives', args.ignore_directives)
    args.ignore_substitutions = get_and_split(
        options, 'ignore_substitutions', args.ignore_substitutions)
    args.ignore_roles = get_and_split(
        options, 'ignore_roles', args.ignore_roles)
    return args
Return new ``args`` with configuration loaded from file.
385,571
def _send(self):
    data = self.output_buffer.view()
    if not data:
        return
    if self.closed():
        raise self.Error("Failed to write to closed connection {!r}".format(self.server.address))
    if self.defunct():
        raise self.Error("Failed to write to defunct connection {!r}".format(self.server.address))
    self.socket.sendall(data)
    self.output_buffer.clear()
Send all queued messages to the server.
385,572
def label_contours(self, intervals, window=150, hop=30):
    window /= 1000.0
    hop /= 1000.0
    exposure = int(window / hop)
    boundary = window - hop
    final_index = utils.find_nearest_index(self.pitch_obj.timestamps,
                                           self.pitch_obj.timestamps[-1] - boundary)
    interval = np.median(np.diff(self.pitch_obj.timestamps))
    window_step = window / interval
    hop_step = hop / interval
    start_index = 0
    end_index = window_step
    contour_labels = {}
    means = []
    while end_index < final_index:
        temp = self.pitch_obj.pitch[start_index:end_index][
            self.pitch_obj.pitch[start_index:end_index] > -10000]
        means.append(np.mean(temp))
        start_index = start_index + hop_step
        end_index = start_index + window_step
    for i in xrange(exposure, len(means) - exposure + 1):
        _median = np.median(means[i - exposure:i])
        if _median < -5000:
            continue
        ind = utils.find_nearest_index(_median, intervals)
        contour_end = (i - exposure) * hop_step + window_step
        contour_start = contour_end - hop_step
        if intervals[ind] in contour_labels.keys():
            contour_labels[intervals[ind]].append([contour_start, contour_end])
        else:
            contour_labels[intervals[ind]] = [[contour_start, contour_end]]
    self.contour_labels = contour_labels
In a very flowy contour, it is not trivial to say which pitch value corresponds to what interval. This function labels pitch contours with intervals by guessing from the characteristics of the contour and its melodic context. :param window: the size of window over which the context is gauged, in milliseconds. :param hop: hop size in milliseconds.
385,573
def subdomain_row_factory(cls, cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d
Dict row factory for subdomains
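The same dict-row pattern with the sqlite3 standard library, for context; table and values are illustrative:

    import sqlite3

    def dict_row_factory(cursor, row):
        return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}

    conn = sqlite3.connect(':memory:')
    conn.row_factory = dict_row_factory
    conn.execute('CREATE TABLE subdomains (name TEXT, owner TEXT)')
    conn.execute("INSERT INTO subdomains VALUES ('foo.bar.id', 'alice')")
    print(conn.execute('SELECT * FROM subdomains').fetchone())
    # {'name': 'foo.bar.id', 'owner': 'alice'}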
385,574
def ShouldRetry(self, exception):
    if self.current_retry_attempt_count < self._max_retry_attempt_count:
        self.current_retry_attempt_count += 1
        self.retry_after_in_milliseconds = 0
        if self._fixed_retry_interval_in_milliseconds:
            self.retry_after_in_milliseconds = self._fixed_retry_interval_in_milliseconds
        elif http_constants.HttpHeaders.RetryAfterInMilliseconds in exception.headers:
            self.retry_after_in_milliseconds = int(
                exception.headers[http_constants.HttpHeaders.RetryAfterInMilliseconds])
        if self.cummulative_wait_time_in_milliseconds < self._max_wait_time_in_milliseconds:
            self.cummulative_wait_time_in_milliseconds += self.retry_after_in_milliseconds
            return True
    return False
Returns true if should retry based on the passed-in exception. :param (errors.HTTPFailure instance) exception: :rtype: boolean
385,575
def orientation_angle(im, approxangle=None, *, isshiftdft=False, truesize=None, rotateAngle=None):
    im = np.asarray(im)
    if rotateAngle is not None and not isshiftdft:
        scale = np.sqrt(.5 * (1 + (np.tan(rotateAngle) - 1)**2 / (np.tan(rotateAngle) + 1)**2))
        im = rotate_scale(im, rotateAngle, scale)
    lp = polar_fft(im, isshiftdft=isshiftdft, logoutput=False,
                   interpolation='bilinear',  # interpolation mode reconstructed
                   truesize=truesize)
    adis = lp.sum(-1)
    if approxangle is not None:
        amin = clamp_angle(approxangle - np.pi / 4 - np.pi / 2)
        amax = clamp_angle(approxangle + np.pi / 4 - np.pi / 2)
        angles = np.linspace(-np.pi / 2, np.pi / 2, lp.shape[0], endpoint=False)
        if amin > amax:
            adis[np.logical_and(angles > amax, angles < amin)] = adis.min()
        else:
            adis[np.logical_or(angles > amax, angles < amin)] = adis.min()
    ret = get_peak_pos(adis, wrap=True)
    anglestep = np.pi / lp.shape[0]
    ret = clamp_angle(ret * anglestep)
    if rotateAngle is not None:
        ret = clamp_angle(ret - rotateAngle)
    return ret
Give the highest contribution to the orientation Parameters ---------- im: 2d array The image approxangle: number, optional The approximate angle (None if unknown) isshiftdft: Boolean, default False True if the image has been processed (DFT, fftshift) truesize: 2 numbers, optional Truesize of the image if isshiftdft is True rotateAngle: number, optional The diagonals are more sensitives than the axis. rotate the image to avoid pixel orientation (flat or diagonal) Returns ------- angle: number The orientation of the image Notes ----- if approxangle is specified, search only within +- pi/4
385,576
def irods_filepath(det_id, run_id):
    data_path = "/in2p3/km3net/data/raw/sea"
    from km3pipe.db import DBManager
    if not isinstance(det_id, int):
        dts = DBManager().detectors
        det_id = int(dts[dts.OID == det_id].SERIALNUMBER.values[0])
    return data_path + "/KM3NeT_{0:08}/{2}/KM3NeT_{0:08}_{1:08}.root" \
        .format(det_id, run_id, run_id // 1000)
Generate the iRODS filepath for given detector (O)ID and run ID
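Worked example of the path construction, using an integer detector ID so the database lookup is skipped; the values are illustrative:

    print(irods_filepath(29, 6012))
    # /in2p3/km3net/data/raw/sea/KM3NeT_00000029/6/KM3NeT_00000029_00006012.root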
385,577
def restore(self, workspace_uuid):
    workspace = next((workspace for workspace in self.document_model.workspaces
                      if workspace.uuid == workspace_uuid), None)
    if workspace is None:
        workspace = self.new_workspace()
    self._change_workspace(workspace)
Restore the workspace to the given workspace_uuid. If workspace_uuid is None then create a new workspace and use it.
385,578
def create_manage_py(self, apps):
    # The log message and the MANAGE_PY template-key literals were lost in
    # extraction; the names below are reconstructed guesses.
    self.logger.debug('Creating manage.py')
    with open(self._get_manage_py_path(), mode='w') as f:
        south_migration_modules = []
        for app in apps:
            south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
        f.write(MANAGE_PY % {
            'apps': "', '".join(apps),
            'apps_path': self.apps_path,
            'south_migration_modules': ", ".join(south_migration_modules)
        })
Creates manage.py file, with a given list of installed apps.

:param list apps:
385,579
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
    subgrid_res = int(subgrid_res)
    if subgrid_res == 1:
        return kernel
    nx, ny = np.shape(kernel)
    d_x = 1. / nx
    x_in = np.linspace(d_x / 2, 1 - d_x / 2, nx)
    d_y = 1. / nx
    y_in = np.linspace(d_y / 2, 1 - d_y / 2, ny)
    nx_new = nx * subgrid_res
    ny_new = ny * subgrid_res
    if odd is True:
        if nx_new % 2 == 0:
            nx_new -= 1
        if ny_new % 2 == 0:
            ny_new -= 1
    d_x_new = 1. / nx_new
    d_y_new = 1. / ny_new
    x_out = np.linspace(d_x_new / 2., 1 - d_x_new / 2., nx_new)
    y_out = np.linspace(d_y_new / 2., 1 - d_y_new / 2., ny_new)
    kernel_input = copy.deepcopy(kernel)
    kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
    kernel_subgrid = kernel_norm(kernel_subgrid)
    for i in range(max(num_iter, 1)):
        # iteratively correct the subgridded kernel so it averages back to the input
        if subgrid_res % 2 == 0:
            kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
        else:
            kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
        delta = kernel - kernel_pixel
        temp_kernel = kernel_input + delta
        kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)
        kernel_subgrid = kernel_norm(kernel_subgrid)
        kernel_input = temp_kernel
    if subgrid_res % 2 == 0:
        return kernel_subgrid
    kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
    kernel_pixel = kernel_norm(kernel_pixel)
    delta_kernel = kernel_pixel - kernel_norm(kernel)
    id = np.ones((subgrid_res, subgrid_res))
    delta_kernel_sub = np.kron(delta_kernel, id) / subgrid_res**2
    return kernel_norm(kernel_subgrid - delta_kernel_sub)
creates a higher resolution kernel with subgrid resolution as an interpolation of the
original kernel in an iterative approach

:param kernel: initial kernel
:param subgrid_res: subgrid resolution required
:param odd: if True, forces the subgridded kernel to have an odd number of pixels per axis
:param num_iter: number of iterations of the interpolation refinement
:return: kernel with higher resolution (larger)
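A usage sketch, assuming subgrid_kernel and its helpers are importable from the module above; the Gaussian test kernel is made up:

import numpy as np

x, y = np.meshgrid(np.arange(5) - 2, np.arange(5) - 2)
kernel = np.exp(-(x**2 + y**2) / 2.)
kernel /= kernel.sum()

fine = subgrid_kernel(kernel, subgrid_res=3, odd=True, num_iter=10)
print(fine.shape)  # (15, 15): 5 * 3 per axis, already odd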
385,580
async def fetch(self, method, url, params=None, headers=None, data=None):
    # Log and error message literals were lost in extraction; the strings below
    # are assumptions.
    logger.debug('Sending request %s %s:\n%r', method, url, data)
    for retry_num in range(MAX_RETRIES):
        try:
            async with self.fetch_raw(method, url, params=params, headers=headers, data=data) as res:
                async with async_timeout.timeout(REQUEST_TIMEOUT):
                    body = await res.read()
                logger.debug('Received response %d %s:\n%r', res.status, res.reason, body)
        except asyncio.TimeoutError:
            error_msg = 'Request timed out'
        except aiohttp.ServerDisconnectedError as err:
            error_msg = 'Server disconnected error: {}'.format(err)
        except (aiohttp.ClientError, ValueError) as err:
            error_msg = 'Request connection error: {}'.format(err)
        else:
            break
        logger.info('Request attempt %d failed: %s', retry_num, error_msg)
    else:
        logger.info('Request failed after %d attempts', MAX_RETRIES)
        raise exceptions.NetworkError(error_msg)
    if res.status != 200:
        logger.info('Request returned unexpected status: %d %s', res.status, res.reason)
        raise exceptions.NetworkError(
            'Request returned unexpected status: {}: {}'.format(res.status, res.reason)
        )
    return FetchResponse(res.status, body)
Make an HTTP request.

Automatically uses configured HTTP proxy, and adds Google authorization
header and cookies.

Failures will be retried MAX_RETRIES times before raising NetworkError.

Args:
    method (str): Request method.
    url (str): Request URL.
    params (dict): (optional) Request query string parameters.
    headers (dict): (optional) Request headers.
    data (str): (optional) Request body data.

Returns:
    FetchResponse: Response data.

Raises:
    NetworkError: If the request fails.
385,581
def com_google_fonts_check_font_copyright(ttFont):
    import re
    from fontbakery.utils import get_name_entry_strings
    failed = False
    for string in get_name_entry_strings(ttFont, NameID.COPYRIGHT_NOTICE):
        # The regex and message literals were lost in extraction; the pattern and
        # strings below are assumptions reconstructed from the surviving fragments.
        does_match = re.search(r'Copyright \d{4} The .* Project Authors \([^\@]*\)', string)
        if does_match:
            yield PASS, ("Name Table entry: Copyright field '{}'"
                         " matches canonical pattern.").format(string)
        else:
            failed = True
            yield FAIL, ("Name Table entry: Copyright notices should match"
                         " a pattern similar to:"
                         " 'Copyright 2017 The Familyname Project Authors (git url)'\n"
                         "But instead we have got:"
                         " '{}'").format(string)
    if not failed:
        yield PASS, "Name table copyright entries are good"
Copyright notices match canonical pattern in fonts
385,582
def _get_name(self):
    if (self.tail_node is not None) and (self.head_node is not None):
        return "%s %s %s" % (self.tail_node.ID, self.conn, self.head_node.ID)
    else:
        return "Edge"
Property getter.
385,583
def convex_hull(features):
    points = sorted([s.point() for s in features])
    l = reduce(_keep_left, points, [])
    u = reduce(_keep_left, reversed(points), [])
    return l.extend(u[i] for i in xrange(1, len(u) - 1)) or l
Returns points on convex hull of an array of points in CCW order.
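_keep_left is not shown here, but it presumably appends the candidate point and pops while the last turn is not counter-clockwise; a self-contained sketch of the same monotone-chain idea on plain coordinate tuples:

from functools import reduce

def cross(o, a, b):
    # z-component of (a - o) x (b - o); positive means a left (CCW) turn
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def keep_left(hull, p):
    while len(hull) >= 2 and cross(hull[-2], hull[-1], p) <= 0:
        hull.pop()
    hull.append(p)
    return hull

def convex_hull_points(points):
    points = sorted(points)
    lower = reduce(keep_left, points, [])
    upper = reduce(keep_left, reversed(points), [])
    return lower + upper[1:-1]

print(convex_hull_points([(0, 0), (1, 1), (2, 0), (1, 0.2), (0, 2), (2, 2)]))
# -> [(0, 0), (2, 0), (2, 2), (0, 2)]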
385,584
def validate_event_type(sender, event, created):
    if event.code not in sender.event_codes():
        # a '{}' placeholder appears to have been lost in extraction; reinserted here
        raise ValueError("The Event.code '{}' is not a valid Event "
                         "code.".format(event.code))
Verify that the Event's code is a valid one.
385,585
def _create_buffer(self):
    python_buffer = Buffer(
        name=DEFAULT_BUFFER,
        complete_while_typing=Condition(lambda: self.complete_while_typing),
        enable_history_search=Condition(lambda: self.enable_history_search),
        tempfile_suffix='.py',  # literal lost in extraction; '.py' is an assumption
        history=self.history,
        completer=ThreadedCompleter(self._completer),
        validator=ConditionalValidator(
            self._validator,
            Condition(lambda: self.enable_input_validation)),
        auto_suggest=ConditionalAutoSuggest(
            ThreadedAutoSuggest(AutoSuggestFromHistory()),
            Condition(lambda: self.enable_auto_suggest)),
        accept_handler=self._accept_handler,
        on_text_changed=self._on_input_timeout)
    return python_buffer
Create the `Buffer` for the Python input.
385,586
def unlock(arguments):
    import redis
    u = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    db = redis.StrictRedis.from_url(u)
    db.set('site:lock', 0)  # key literal lost in extraction; 'site:lock' is an assumption
    print("Database unlocked.")
    return 0
Unlock the database.
385,587
def get_gui_hint(self, hint):
    # All string literals in this method were lost in extraction; the quoted
    # values below are reconstructions and should be treated as assumptions.
    if hint == 'type':
        if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0:
            return 'bool'
        elif self.kwargs.get('action') == 'store_const':
            return 'const'
        return self.gui_hints.get('type', 'str')
    elif hint == 'default':
        hint_type = self.get_gui_hint('type')
        hint_default = self.gui_hints.get('default', None)
        arg_default = self.kwargs.get('default', None)
        preserved_value = None
        if 'preserved' in self.kwargs:
            preserved_value = config_manager.get_config_value(self.kwargs['preserved'])
        if hint_type == 'path':
            if preserved_value is not None:
                default = preserved_value
            elif hint_default is not None:
                default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir())
            else:
                default = arg_default or '~'
            return os.path.abspath(os.path.expanduser(default))
        elif hint_type == 'bool':
            return hint_default or arg_default or False
        elif hint_type == 'const':
            return hint_default or arg_default
        else:
            if hint_default == '$(whoami)':
                hint_default = getpass.getuser()
            return preserved_value or hint_default or arg_default or ''
Returns the value for specified gui hint (or a sensible default value,
if this argument doesn't specify the hint).

Args:
    hint: name of the hint to get value for

Returns:
    value of the hint specified in yaml or a sensible default
385,588
def hessian(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0):
    x_ = x - center_x
    y_ = y - center_y
    r = np.sqrt(x_**2 + y_**2)
    if isinstance(r, int) or isinstance(r, float):
        r = max(self._s, r)
    else:
        r[r < self._s] = self._s
    d_alpha_dr = self.d_alpha_dr(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
    alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
    f_xx = -(d_alpha_dr / r + alpha / r**2) * x_**2 / r + alpha / r
    f_yy = -(d_alpha_dr / r + alpha / r**2) * y_**2 / r + alpha / r
    f_xy = -(d_alpha_dr / r + alpha / r**2) * x_ * y_ / r
    return f_xx, f_yy, f_xy
returns Hessian matrix of function d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
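A finite-difference cross-check for a hessian method of this shape; it assumes the profile also exposes a derivatives(x, y, ...) method returning the deflection angles (alpha_x, alpha_y), which lens profiles in this style commonly do:

def numerical_hessian(derivatives, x, y, eps=1e-5, **kwargs):
    # central differences of the deflection field give the hessian entries
    ax_p, ay_p = derivatives(x + eps, y, **kwargs)
    ax_m, ay_m = derivatives(x - eps, y, **kwargs)
    f_xx = (ax_p - ax_m) / (2 * eps)
    f_yx = (ay_p - ay_m) / (2 * eps)
    ax_p, ay_p = derivatives(x, y + eps, **kwargs)
    ax_m, ay_m = derivatives(x, y - eps, **kwargs)
    f_xy = (ax_p - ax_m) / (2 * eps)
    f_yy = (ay_p - ay_m) / (2 * eps)
    return f_xx, f_yy, 0.5 * (f_xy + f_yx)  # symmetrize the mixed term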
385,589
def create_release():
    # Form field names and log/message literals were lost in extraction;
    # the strings below are assumptions.
    build = g.build
    release_name = request.form.get('release_name')
    utils.jsonify_assert(release_name, 'release_name required')
    url = request.form.get('url')
    utils.jsonify_assert(url, 'url required')
    release = models.Release(
        name=release_name,
        url=url,
        number=1,
        build_id=build.id)
    last_candidate = (
        models.Release.query
        .filter_by(build_id=build.id, name=release_name)
        .order_by(models.Release.number.desc())
        .first())
    if last_candidate:
        release.number += last_candidate.number
        if last_candidate.status == models.Release.PROCESSING:
            canceled_task_count = work_queue.cancel(
                release_id=last_candidate.id)
            logging.info('Canceled %d work items for superseded release: '
                         'build_id=%r, name=%r, number=%d',
                         canceled_task_count, build.id,
                         last_candidate.name, last_candidate.number)
            last_candidate.status = models.Release.BAD
            db.session.add(last_candidate)
    db.session.add(release)
    db.session.commit()
    signals.release_updated_via_api.send(app, build=build, release=release)
    logging.info('Created release: build_id=%r, name=%r, url=%r, number=%d',
                 build.id, release.name, url, release.number)
    return flask.jsonify(
        success=True,
        build_id=build.id,
        release_name=release.name,
        release_number=release.number,
        url=url)
Creates a new release candidate for a build.
385,590
def add_event(self, rule, callback):
    self.event_manager.add_event(
        zipline.utils.events.Event(rule, callback),
    )
Adds an event to the algorithm's EventManager.

Parameters
----------
rule : EventRule
    The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
    The function to execute when the rule is triggered.
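A minimal sketch of wiring a rule to a callback; Always is one of the simple rules shipped in zipline.utils.events, and algo stands for a TradingAlgorithm instance (both assumed here):

from zipline.utils.events import Always

def log_bar(context, data):
    print('new bar')

algo.add_event(Always(), log_bar)  # fires the callback on every bar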
385,591
def _calculate_unpack_filter(cls, includes=None, excludes=None, spec=None):
    # Field-name and log-message literals were lost in extraction; the strings
    # below are assumptions.
    include_patterns = cls.compile_patterns(includes or [],
                                            field_name='include_patterns',
                                            spec=spec)
    logger.debug('include_patterns: {}'
                 .format(list(p.pattern for p in include_patterns)))
    exclude_patterns = cls.compile_patterns(excludes or [],
                                            field_name='exclude_patterns',
                                            spec=spec)
    logger.debug('exclude_patterns: {}'
                 .format(list(p.pattern for p in exclude_patterns)))
    return lambda f: cls._file_filter(f, include_patterns, exclude_patterns)
Take regex patterns and return a filter function.

:param list includes: List of include patterns to pass to _file_filter.
:param list excludes: List of exclude patterns to pass to _file_filter.
385,592
def deployAll(self):
    targets = [Target.getTarget(iid) for iid, n, p in self.db.listTargets()]
    for target in targets:
        target.deploy()
    verbose('Deployed all targets')  # message literal lost in extraction; text is an assumption
Deploys all the items from the vault. Useful after a format.
385,593
def _separate_epochs(activity_data, epoch_list):
    time1 = time.time()
    raw_data = []
    labels = []
    for sid in range(len(epoch_list)):
        epoch = epoch_list[sid]
        for cond in range(epoch.shape[0]):
            sub_epoch = epoch[cond, :, :]
            for eid in range(epoch.shape[1]):
                r = np.sum(sub_epoch[eid, :])
                if r > 0:  # this epoch is used in this condition
                    mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
                    mat = np.ascontiguousarray(mat.T)
                    mat = zscore(mat, axis=0, ddof=0)
                    # if zscore fails (constant time series), set NaNs to 0
                    mat = np.nan_to_num(mat)
                    mat = mat / math.sqrt(r)
                    raw_data.append(mat)
                    labels.append(cond)
    time2 = time.time()
    # message literal lost in extraction; text is an assumption
    logger.debug('epoch separation done, takes %.2f s' % (time2 - time1))
    return raw_data, labels
create data epoch by epoch

Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation

Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
    the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
    specification of epochs and conditions
    assuming all subjects have the same number of epochs
    len(epoch_list) equals the number of subjects

Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
    the data organized in epochs and z-scored in preparation of
    correlation computation
    len(raw_data) equals the number of epochs
labels: list of 1D array
    the condition labels of the epochs
    len(labels) equals the number of epochs
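A small synthetic run, assuming the function above is importable: two subjects, one condition, and two 5-TR epochs each.

import numpy as np

activity_data = [np.random.randn(10, 20) for _ in range(2)]  # 10 voxels, 20 TRs
epoch = np.zeros((1, 2, 20), dtype=int)  # [condition, nEpochs, nTRs]
epoch[0, 0, 0:5] = 1
epoch[0, 1, 10:15] = 1
epoch_list = [epoch, epoch]

raw_data, labels = _separate_epochs(activity_data, epoch_list)
print(len(raw_data), raw_data[0].shape, labels)
# -> 4 (5, 10) [0, 0, 0, 0]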
385,594
def fetch_url(url, method='GET', user_agent='django-oembed', timeout=SOCKET_TIMEOUT):
    # Default values, header keys and the error message were lost in extraction;
    # the strings below are assumptions.
    sock = httplib2.Http(timeout=timeout)
    request_headers = {
        'User-Agent': user_agent,
        'Accept-Encoding': 'gzip',
    }
    try:
        headers, raw = sock.request(url, headers=request_headers, method=method)
    except:
        raise OEmbedHTTPException('Error fetching %s' % url)
    return headers, raw
Fetch response headers and data from a URL, raising a generic exception for any kind of failure.
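Usage is a plain call, assuming the module above is importable; the endpoint URL here is illustrative:

headers, raw = fetch_url('https://example.com/oembed?format=json')
print(headers.get('status'), len(raw))  # httplib2 reports the HTTP status in the headers dict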
385,595
def exports(self):
    if self._exports is None:
        self._exports = ExportList(self)
    return self._exports
:rtype: twilio.rest.preview.bulk_exports.export.ExportList
385,596
def create_dashboard(self, name):
    # Dict key literals were lost in extraction; 'name'/'schema'/'items' and the
    # 'dashboard' wrapper key are assumptions.
    dashboard_configuration = {
        'name': name,
        'schema': 2,
        'items': []
    }
    res = requests.post(self.url + self._dashboards_api_endpoint,
                        headers=self.hdrs,
                        data=json.dumps({'dashboard': dashboard_configuration}),
                        verify=self.ssl_verify)
    return self._request_result(res)
**Description**
    Creates an empty dashboard. You can then add panels by using ``add_dashboard_panel``.

**Arguments**
    - **name**: the name of the dashboard that will be created.

**Success Return Value**
    A dictionary showing the details of the new dashboard.

**Example**
    `examples/dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard.py>`_
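Going by the referenced examples/dashboard.py, usage presumably looks like this; the token is a placeholder and the (ok, result) return shape is inferred from _request_result:

from sdcclient import SdcClient

sdclient = SdcClient('YOUR-SDC-API-TOKEN')
ok, res = sdclient.create_dashboard('API Test Dashboard')
if ok:
    print('created dashboard:', res)
else:
    print('error:', res)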
385,597
def wx_menu(self):
    from MAVProxy.modules.lib.wx_loader import wx
    menu = wx.Menu()
    for i in range(len(self.items)):
        m = self.items[i]
        m._append(menu)
    return menu
return a wx.Menu() for this menu
385,598
def sign_up(self):
    self.log("Bot player signing up.")
    self.subscribe_to_quorum_channel()
    while True:
        url = (
            "{host}/participant/{self.worker_id}/"
            "{self.hit_id}/{self.assignment_id}/"
            "debug?fingerprint_hash={hash}&recruiter=bots:{bot_name}".format(
                host=self.host,
                self=self,
                hash=uuid.uuid4().hex,
                bot_name=self.__class__.__name__,
            )
        )
        try:
            result = requests.post(url)
            result.raise_for_status()
        except RequestException:
            self.stochastic_sleep()
            continue
        if result.json()["status"] == "error":
            self.stochastic_sleep()
            continue
        self.on_signup(result.json())
        return True
Signs up a participant for the experiment. This is done using a POST request to the /participant/ endpoint.
385,599
def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)):
    # The default path literal was lost in extraction; './dictvtk' is an assumption.
    vs = voxel_size
    for entry in data:
        if data[entry].dtype == bool:
            data[entry] = data[entry].astype(np.int8)
        if data[entry].flags['C_CONTIGUOUS']:  # flag key lost in extraction; assumed
            data[entry] = np.ascontiguousarray(data[entry])
    imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin)
r""" Accepts multiple images as a dictionary and compiles them into a vtk file Parameters ---------- data : dict A dictionary of *key: value* pairs, where the *key* is the name of the scalar property stored in each voxel of the array stored in the corresponding *value*. path : string Path to output file voxel_size : int The side length of the voxels (voxels are cubic) origin : float data origin (according to selected voxel size) Notes ----- Outputs a vtk, vtp or vti file that can opened in ParaView