Unnamed: 0 (int64, values 0 to 389k)
code (string, lengths 26 to 79.6k)
docstring (string, lengths 1 to 46.9k)
3,400
def get_distributions(cls, ctx, extra_dist_dirs=[]): if extra_dist_dirs: raise BuildInterruptingException( 'extra_dist_dirs argument to get_distributions is not yet implemented') dist_dir = ctx.dist_dir folders = glob.glob(join(dist_dir, '*')) for dir in extra_dist_dirs: folders.extend(glob.glob(join(dir, '*'))) dists = [] for folder in folders: if exists(join(folder, 'dist_info.json')): with open(join(folder, 'dist_info.json')) as fileh: dist_info = json.load(fileh) dist = cls(ctx) dist.name = folder.split('/')[-1] dist.dist_dir = folder dist.needs_build = False dist.recipes = dist_info['recipes'] if 'archs' in dist_info: dist.archs = dist_info['archs'] if 'ndk_api' in dist_info: dist.ndk_api = dist_info['ndk_api'] else: dist.ndk_api = None warning( "Distribution {distname}: ({distdir}) has been " "built with an unknown api target, ignoring it, " "you might want to delete it".format( distname=dist.name, distdir=dist.dist_dir ) ) dists.append(dist) return dists
Returns all the distributions found locally.
3,401
def thaw_parameter(self, name): i = self.get_parameter_names(include_frozen=True).index(name) self.unfrozen_mask[i] = True
Thaw a parameter by name Args: name: The name of the parameter
3,402
def send_with_media( self, *, text: str, files: List[str], captions: List[str]=[], ) -> List[OutputRecord]: try: self.ldebug(f"Uploading files {files}.") if captions is None: captions = [] if len(files) > len(captions): captions.extend([self.default_caption_message] * (len(files) - len(captions))) media_dicts = [] for i, file in enumerate(files): caption = captions[i] media_dicts.append(self.api.media_post(file, description=caption)) self.ldebug(f"Media ids {media_dicts}") except mastodon.MastodonError as e: return [self.handle_error( f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n", e )] try: status = self.api.status_post(status=text, media_ids=media_dicts) self.ldebug(f"Status object from toot: {status}.") return [TootRecord(record_data={ "toot_id": status["id"], "text": text, "media_ids": media_dicts, "captions": captions })] except mastodon.MastodonError as e: return [self.handle_error((f"Bot {self.bot_name} encountered an error when " f"sending post {text} with media dicts {media_dicts}:" f"\n{e}\n"), e)]
Upload media to mastodon, and send status and media, and captions if present. :param text: post text. :param files: list of files to upload with post. :param captions: list of captions to include as alt-text with files. :returns: list of output records, each corresponding to either a single post, or an error.
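A minimal usage sketch (hedged; `bot` is a hypothetical configured instance exposing this method, and the file names are placeholders):
records = bot.send_with_media(text="New photos!", files=["a.png", "b.png"], captions=["first"])
# captions shorter than files are padded with the default caption message
print(records[0])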
3,403
def index_config(request): if _permission_denied_check(request): return HttpResponseForbidden('Access to this web service was denied.', content_type='text/html') content_list = getattr(settings, 'EUL_INDEXER_CONTENT_MODELS', []) if not content_list: for cls in six.itervalues(DigitalObject.defined_types): content_group = [model for model in getattr(cls, 'CONTENT_MODELS', []) if not model.startswith('info:fedora/fedora-system:')] if content_group: content_list.append(content_group) response = { 'CONTENT_MODELS': content_list, 'SOLR_URL': settings.SOLR_SERVER_URL } return HttpResponse(json.dumps(response), content_type='application/json')
This view returns the index configuration of the current application as JSON. Currently, this consists of a Solr index url and the Fedora content models that this application expects to index. .. Note:: By default, Fedora system content models (such as ``fedora-system:ContentModel-3.0``) are excluded. Any application that actually wants to index such objects will need to customize this view to include them.
3,404
def _engineServicesRunning(): process = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE) stdout = process.communicate()[0] result = process.returncode if result != 0: raise RuntimeError("Unable to check for running client job manager") running = False for line in stdout.split("\n"): if "python" in line and "clientjobmanager.client_job_manager" in line: running = True break return running
Return true if the engine services are running
3,405
def parse_parameters_from_response(self, response): lines = response.splitlines() pairs = [line.strip().split('=', 1) for line in lines if '=' in line] pairs = sorted(pairs) signature = ([unquote(v) for k, v in pairs if k == 'h'] or [None])[0] query_string = '&'.join([k + '=' + v for k, v in pairs if k != 'h']) return (signature, query_string)
Returns a response signature and query string generated from the server response. 'h' aka signature argument is stripped from the returned query string.
3,406
def TermsProcessor(instance, placeholder, rendered_content, original_context): if 'terms' in original_context: return rendered_content return mark_safe(replace_terms(rendered_content))
Adds term links to all placeholder plugins except django-terms plugins
3,407
def spans_columns(self, column_names): columns = self.get_columns() number_of_columns = len(columns) same_columns = True for i in range(number_of_columns): column = self._trim_quotes(columns[i].lower()) if i >= len(column_names) or column != self._trim_quotes( column_names[i].lower() ): same_columns = False return same_columns
Checks if this index exactly spans the given column names in the correct order. :type column_names: list :rtype: bool
3,408
def wait_until_running(self, callback=None): status = self.machine.scheduler.wait_until_running( self.job, self.worker_config.time_out) if status.running: self.online = True if callback: callback(self) else: raise TimeoutError("Timeout while waiting for worker to run: " + self.worker_config.name)
Waits until the remote worker is running, then calls the callback. Usually, this method is passed to a different thread; the callback is then a function patching results through to the result queue.
3,409
def set_zone(time_zone): if time_zone not in list_zones(): raise SaltInvocationError('Invalid Timezone: {0}'.format(time_zone)) salt.utils.mac_utils.execute_return_success( 'systemsetup -settimezone {0}'.format(time_zone)) return time_zone in get_zone()
Set the local time zone. Use ``timezone.list_zones`` to list valid time_zone arguments :param str time_zone: The time zone to apply :return: True if successful, False if not :rtype: bool :raises: SaltInvocationError on Invalid Timezone :raises: CommandExecutionError on failure CLI Example: .. code-block:: bash salt '*' timezone.set_zone America/Denver
3,410
def remove(self): if not self.can_remove(): raise AttributeError() data = self.data self.parents.remove(self) self.delete() return data
Removes the node from the graph. Note this does not remove the associated data object. See :func:`Node.can_remove` for limitations on what can be deleted. :returns: :class:`BaseNodeData` subclass associated with the deleted Node :raises AttributeError: if called on a ``Node`` that cannot be deleted
3,411
def _sim_trajectories(self, time_size, start_pos, rs, total_emission=False, save_pos=False, radial=False, wrap_func=wrap_periodic): time_size = int(time_size) num_particles = self.num_particles if total_emission: em = np.zeros(time_size, dtype=np.float32) else: em = np.zeros((num_particles, time_size), dtype=np.float32) POS = [] for i, sigma_1d in enumerate(self.sigma_1d): delta_pos = rs.normal(loc=0, scale=sigma_1d, size=3 * time_size) delta_pos = delta_pos.reshape(3, time_size) pos = np.cumsum(delta_pos, axis=-1, out=delta_pos) pos += start_pos[i] for coord in (0, 1, 2): pos[coord] = wrap_func(pos[coord], *self.box.b[coord]) Ro = sqrt(pos[0]**2 + pos[1]**2) Z = pos[2] current_em = self.psf.eval_xz(Ro, Z)**2 if total_emission: em += current_em.astype(np.float32) else: em[i] = current_em.astype(np.float32) if save_pos: pos_save = np.vstack((Ro, Z)) if radial else pos POS.append(pos_save[np.newaxis, :, :]) start_pos[i] = pos[:, -1:] return POS, em
Simulate (in-memory) `time_size` steps of trajectories. Simulate Brownian motion diffusion and emission of all the particles. Uses the attributes: num_particles, sigma_1d, box, psf. Arguments: time_size (int): number of time steps to be simulated. start_pos (array): shape (num_particles, 3), particles start positions. This array is modified to store the end position after this method is called. rs (RandomState): a `numpy.random.RandomState` object used to generate the random numbers. total_emission (bool): if True, store only the total emission array containing the sum of emission of all the particles. save_pos (bool): if True, save the particles 3D trajectories wrap_func (function): the function used to apply the boundary condition (use :func:`wrap_periodic` or :func:`wrap_mirror`). Returns: POS (list): list of 3D trajectories arrays (3 x time_size) em (array): array of emission (total or per-particle)
3,412
def fetch(dataset_uri, item_identifier): dataset = dtoolcore.DataSet.from_uri(dataset_uri) click.secho(dataset.item_content_abspath(item_identifier))
Return abspath to file with item content. Fetches the file from remote storage if required.
3,413
def parse_record( self, lines ): temp_lines = [] for line in lines: fields = line.rstrip( "\r\n" ).split( None, 1 ) if len( fields ) == 1: fields.append( "" ) temp_lines.append( fields ) lines = temp_lines motif = TransfacMotif() current_line = 0 while 1: if current_line >= len( lines ): break prefix, rest = lines[ current_line ] if prefix not in self.parse_actions: current_line += 1 continue action = self.parse_actions[ prefix ] if action[0] == "store_single": key = action[1] setattr( motif, key, rest ) current_line += 1 if action[0] == "store_single_list": key = action[1] if not getattr( motif, key ): setattr( motif, key, [] ) getattr( motif, key ).append( rest ) current_line += 1 if action[0] == "store_single_key_value": key = action[1] k, v = rest.strip().split( '=', 1 ) if not getattr( motif, key ): setattr( motif, key, {} ) getattr( motif, key )[k] = v current_line += 1 if action[0] == "store_block": key = action[1] value = [] while current_line < len( lines ) and lines[ current_line ][0] == prefix: value.append( lines[current_line][1] ) current_line += 1 setattr( motif, key, str.join( "\n", value ) ) if action[0] == "store_matrix": alphabet = rest.split() alphabet_size = len( alphabet ) rows = [] pattern = "" current_line += 1 while current_line < len( lines ): prefix, rest = lines[ current_line ] if not prefix.isdigit(): break values = rest.split() rows.append( [ float(_) for _ in values[:alphabet_size] ] ) if len( values ) > alphabet_size: pattern += values[alphabet_size] current_line += 1 if len( pattern ) != len( rows ): pattern = None matrix = FrequencyMatrix.from_rows( alphabet, rows ) setattr( motif, action[1], matrix ) if motif.id or motif.accession or motif.name: return motif
Parse a TRANSFAC record out of `lines` and return a motif.
3,414
def _weighting(weights, exponent): if np.isscalar(weights): weighting = NumpyTensorSpaceConstWeighting(weights, exponent) elif weights is None: weighting = NumpyTensorSpaceConstWeighting(1.0, exponent) else: arr = np.asarray(weights) weighting = NumpyTensorSpaceArrayWeighting(arr, exponent) return weighting
Return a weighting whose type is inferred from the arguments.
3,415
def rec_new(self, zone, record_type, name, content, ttl=1, priority=None, service=None, service_name=None, protocol=None, weight=None, port=None, target=None): params = { 'a': 'rec_new', 'z': zone, 'type': record_type, 'name': name, 'content': content, 'ttl': ttl } if priority is not None: params['prio'] = priority if service is not None: params['service'] = service if service_name is not None: params['srvname'] = service_name if protocol is not None: params['protocol'] = protocol if weight is not None: params['weight'] = weight if port is not None: params['port'] = port if target is not None: params['target'] = target return self._request(params)
Create a DNS record for the given zone :param zone: domain name :type zone: str :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC] :type record_type: str :param name: name of the DNS record :type name: str :param content: content of the DNS record :type content: str :param ttl: TTL of the DNS record in seconds. 1 = Automatic, otherwise, value must be between 120 and 4,294,967,295 seconds. :type ttl: int :param priority: [applies to MX/SRV] MX record priority. :type priority: int :param service: Service for SRV record :type service: str :param service_name: Service Name for SRV record :type service_name: str :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls]. :type protocol: str :param weight: Weight for SRV record. :type weight: int :param port: Port for SRV record :type port: int :param target: Target for SRV record :type target: str :return: :rtype: dict
3,416
def set_unit_desired_state(self, unit, desired_state): if desired_state not in self._STATES: raise ValueError('state must be one of: {0}'.format( self._STATES )) if isinstance(unit, Unit): unit = unit.name self._single_request('Units.Set', unitName=unit, body={'desiredState': desired_state}) return self.get_unit(unit)
Update the desired state of a unit running in the cluster Args: unit (str, Unit): The Unit, or name of the unit to update desired_state: State the user wishes the Unit to be in ("inactive", "loaded", or "launched") Returns: Unit: The unit that was updated Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400 ValueError: An invalid value was provided for ``desired_state``
3,417
def parse_ts(ts): dt = maya.parse(ts.strip()) return dt.datetime(naive=True)
Parse a timestamp. :param ts: timestamp in ISO8601 format :return: naive datetime object
3,418
def _conditional_toward_zero(method, sign): return method is RoundingMethods.ROUND_HALF_ZERO or \ (method is RoundingMethods.ROUND_HALF_DOWN and sign == 1) or \ (method is RoundingMethods.ROUND_HALF_UP and sign == -1)
Whether to round toward zero. :param method: rounding method :type method: element of RoundingMethods.METHODS() :param int sign: -1, 0, or 1 as appropriate Complexity: O(1)
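For illustration, a hedged sketch of how the predicate behaves for each half-rounding method (names taken from the code and docstring above):
_conditional_toward_zero(RoundingMethods.ROUND_HALF_ZERO, 1)   # True for any sign
_conditional_toward_zero(RoundingMethods.ROUND_HALF_DOWN, 1)   # True: rounding down a positive half moves toward zero
_conditional_toward_zero(RoundingMethods.ROUND_HALF_UP, -1)    # True: rounding up a negative half moves toward zero
_conditional_toward_zero(RoundingMethods.ROUND_HALF_UP, 1)     # False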
3,419
def _social_auth_login(self, request, **kwargs): if request.user.is_authenticated(): if not request.user.is_active or not request.user.is_staff: raise PermissionDenied() else: messages.add_message(request, messages.WARNING, ) return redirect_to_login(request.get_full_path())
View function that redirects to social auth login, in case the user is not logged in.
3,420
def files_mkdir(self, path, parents=False, **kwargs): kwargs.setdefault("opts", {"parents": parents}) args = (path,) return self._client.request('/files/mkdir', args, **kwargs)
Creates a directory within the MFS. .. code-block:: python >>> c.files_mkdir("/test") b'' Parameters ---------- path : str Filepath within the MFS parents : bool Create parent directories as needed and do not raise an exception if the requested directory already exists
3,421
def _onCompletionListItemSelected(self, index): model = self._widget.model() selectedWord = model.words[index] textToInsert = selectedWord[len(model.typedText()):] self._qpart.textCursor().insertText(textToInsert) self._closeCompletion()
Item selected. Insert completion to editor
3,422
def process_view(self, request, view_func, view_args, view_kwargs): if view_func == login: return cas_login(request, *view_args, **view_kwargs) elif view_func == logout: return cas_logout(request, *view_args, **view_kwargs) if settings.CAS_ADMIN_PREFIX: if not request.path.startswith(settings.CAS_ADMIN_PREFIX): return None elif not view_func.__module__.startswith('django.contrib.admin.'): return None try: is_authenticated = request.user.is_authenticated() except TypeError: is_authenticated = request.user.is_authenticated if is_authenticated: if request.user.is_staff: return None else: error = ('<h1>Forbidden</h1><p>You do not have staff ' 'membership to access this page.</p>') return HttpResponseForbidden(error) params = urlencode({REDIRECT_FIELD_NAME: request.get_full_path()}) return HttpResponseRedirect(reverse(cas_login) + '?' + params)
Forwards unauthenticated requests to the admin page to the CAS login URL, as well as calls to django.contrib.auth.views.login and logout.
3,423
def exists(name, region=None, key=None, keyid=None, profile=None): topics = get_all_topics(region=region, key=key, keyid=keyid, profile=profile) if name.startswith('arn:aws:sns:'): return name in list(topics.values()) else: return name in list(topics.keys())
Check to see if an SNS topic exists. CLI example:: salt myminion boto_sns.exists mytopic region=us-east-1
3,424
def create_parser(subparsers): parser = subparsers.add_parser( 'restart', help='Restart a topology', usage="%(prog)s [options] cluster/[role]/[env] <topology-name> [container-id]", add_help=True) args.add_titles(parser) args.add_cluster_role_env(parser) args.add_topology(parser) parser.add_argument( 'container-id', nargs='?', type=int, default=-1, help='Identifier of the container to be restarted') args.add_config(parser) args.add_service_url(parser) args.add_verbose(parser) parser.set_defaults(subcommand='restart') return parser
:param subparsers: :return:
3,425
def get_filter_solvers(self, filter_): solvers_classes = [s for s in self.filter_solver_classes if s.can_solve(filter_)] if solvers_classes: solvers = [] for solver_class in solvers_classes: if solver_class not in self._filter_solvers_cache: self._filter_solvers_cache[solver_class] = solver_class(self) solvers.append(self._filter_solvers_cache[solver_class]) return solvers raise SolverNotFound(self, filter_)
Returns the filter solvers that can solve the given filter. Arguments --------- filter : dataql.resources.BaseFilter An instance of the a subclass of ``BaseFilter`` for which we want to get the solver classes that can solve it. Returns ------- list The list of filter solvers instances that can solve the given resource. Raises ------ dataql.solvers.exceptions.SolverNotFound When no solver is able to solve the given filter. Example ------- >>> from dataql.resources import Filter >>> registry = Registry() >>> registry.get_filter_solvers(Filter(name='foo')) [<FilterSolver>] >>> registry.get_filter_solvers(None) # doctest: +ELLIPSIS Traceback (most recent call last): dataql.solvers.exceptions.SolverNotFound: No solvers found for this kind of object:...
3,426
def receiver(url, **kwargs): res = url_to_resources(url) fnc = res["receiver"] return fnc(res.get("url"), **kwargs)
Return receiver instance from connection url string url <str> connection url eg. 'tcp://0.0.0.0:8080'
3,427
def set_learning_objectives(self, objective_ids): if not isinstance(objective_ids, list): raise errors.InvalidArgument() if self.get_learning_objectives_metadata().is_read_only(): raise errors.NoAccess() idstr_list = [] for object_id in objective_ids: if not self._is_valid_id(object_id): raise errors.InvalidArgument() idstr_list.append(str(object_id)) self._my_map['learningObjectiveIds'] = idstr_list
Sets the learning objectives. arg: objective_ids (osid.id.Id[]): the learning objective ``Ids`` raise: InvalidArgument - ``objective_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
3,428
def add_assay(self, name, assay): if not type(assay) is numpy.ndarray: raise Exception("Invalid assay. It must be a numpy array.") elif not assay.shape == (self.size_class_count,): raise Exception( "Invalid assay: It must have the same number of elements " "as the material has size classes.") elif name in self.assays.keys(): raise Exception( "Invalid assay: An assay with that name already exists.") self.assays[name] = assay
Add an assay to the material. :param name: The name of the new assay. :param assay: A numpy array containing the size class mass fractions for the assay. The sequence of the assay's elements must correspond to the sequence of the material's size classes.
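A short usage sketch, assuming a material `mat` (hypothetical) with three size classes:
import numpy
mat.add_assay("feed", numpy.array([0.2, 0.5, 0.3]))  # one mass fraction per size class
mat.add_assay("feed", numpy.array([0.2, 0.5, 0.3]))  # raises: an assay with that name already exists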
3,429
def get_count(self, *args, **selectors): obj = self.get_object(**selectors) return self.get_count_of_object(obj)
Return the count of UI object with *selectors* Example: | ${count} | Get Count | text=Accessibility | # Get the count of UI object text=Accessibility | | ${accessibility_text} | Get Object | text=Accessibility | # These two keywords combination | | ${count} | Get Count Of Object | ${accessibility_text} | # do the same thing. |
3,430
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None): for sample in metric.samples: val = sample[self.SAMPLE_VALUE] if not self._is_value_valid(val): self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME])) continue custom_hostname = self._get_hostname(hostname, sample, scraper_config) if sample[self.SAMPLE_NAME].endswith("_sum"): tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.sum".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, ) elif sample[self.SAMPLE_NAME].endswith("_count"): tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.count".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, ) elif ( scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"] ): sample[self.SAMPLE_LABELS]["le"] = float(sample[self.SAMPLE_LABELS]["le"]) tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname) self.gauge( "{}.{}.count".format(scraper_config['namespace'], metric_name), val, tags=tags, hostname=custom_hostname, )
Extracts metrics from a prometheus histogram and sends them as gauges
3,431
def replace_handler(logger, match_handler, reconfigure): handler, other_logger = find_handler(logger, match_handler) if handler and other_logger and reconfigure: other_logger.removeHandler(handler) logger = other_logger return handler, logger
Prepare to replace a handler. :param logger: Refer to :func:`find_handler()`. :param match_handler: Refer to :func:`find_handler()`. :param reconfigure: :data:`True` if an existing handler should be replaced, :data:`False` otherwise. :returns: A tuple of two values: 1. The matched :class:`~logging.Handler` object or :data:`None` if no handler was matched. 2. The :class:`~logging.Logger` to which the matched handler was attached or the logger given to :func:`replace_handler()`.
3,432
def setup_columns(self): tv = self.view[] tv.set_model(self.model) cell = gtk.CellRendererText() tvcol = gtk.TreeViewColumn(, cell) def cell_data_func(col, cell, mod, it): if mod[it][0]: cell.set_property(, mod[it][0].name) return tvcol.set_cell_data_func(cell, cell_data_func) tv.append_column(tvcol) return
Creates the treeview stuff
3,433
def dump_http(method, url, request_headers, response, output_stream): output_stream.write('---------START-HTTP---------\n') parsed_url = urlsplit(url) http_path = parsed_url.path if parsed_url.query: http_path = http_path + '?' + parsed_url.query output_stream.write('{0} {1} HTTP/1.1\n'.format(method, http_path)) for k, v in list(request_headers.items()): if k == 'authorization': v = re.sub(r'Signature=([0-9a-f]+)', 'Signature=*REDACTED*', v) output_stream.write('{0}: {1}\n'.format(k.title(), v)) output_stream.write('\n') output_stream.write('HTTP/1.1 {0}\n'.format(response.status)) for k, v in list(response.getheaders().items()): output_stream.write('{0}: {1}\n'.format(k.title(), v)) if response.status != 200 and \ response.status != 204 and response.status != 206: output_stream.write('{0}\n'.format(response.read())) output_stream.write('---------END-HTTP---------\n')
Dump all headers and response headers into output_stream. :param request_headers: Dictionary of HTTP request headers. :param response_headers: Dictionary of HTTP response headers. :param output_stream: Stream where the request is being dumped at.
3,434
def _gatherDataFromLookups(gpos, scriptOrder): lookupIndexes = _gatherLookupIndexes(gpos) seenLookups = set() kerningDictionaries = [] leftClassDictionaries = [] rightClassDictionaries = [] for script in scriptOrder: kerning = [] leftClasses = [] rightClasses = [] for lookupIndex in lookupIndexes[script]: if lookupIndex in seenLookups: continue seenLookups.add(lookupIndex) result = _gatherKerningForLookup(gpos, lookupIndex) if result is None: continue k, lG, rG = result kerning.append(k) leftClasses.append(lG) rightClasses.append(rG) if kerning: kerningDictionaries.append(kerning) leftClassDictionaries.append(leftClasses) rightClassDictionaries.append(rightClasses) return kerningDictionaries, leftClassDictionaries, rightClassDictionaries
Gather kerning and classes from the applicable lookups and return them in script order.
3,435
def parse_args(): description = ( "Get Wikipedia article info and Wikidata via MediaWiki APIs.\n\n" "Gets a random English Wikipedia article by default, or in the\n" "language -lang, or from the wikisite -wiki, or by specific\n" "title -title. The output is a plain text extract unless -HTML.") epilog = ("Powered by https://github.com/siznax/wptools/ %s" % wptools.__version__) argp = argparse.ArgumentParser( description=description, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) argp.add_argument("-H", "-HTML", action='store_true', help="output HTML extract") argp.add_argument("-l", "-lang", default='en', help="language code") argp.add_argument("-n", "-nowrap", action='store_true', help="do not wrap text") argp.add_argument("-q", "-query", action='store_true', help="show query and exit") argp.add_argument("-s", "-silent", action='store_true', help="quiet output to stderr") argp.add_argument("-t", "-title", help="get a specific title") argp.add_argument("-v", "-verbose", action='store_true', help="HTTP status to stderr") argp.add_argument("-w", "-wiki", help="use alternative wikisite") return argp.parse_args()
parse main() args
3,436
def first_true(iterable, default=False, pred=None): return next(filter(pred, iterable), default)
Returns the first true value in the iterable. If no true value is found, returns *default*. If *pred* is not None, returns the first item for which pred(item) is true.
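For example (a minimal sketch):
>>> first_true([0, '', 3, 4])
3
>>> first_true([0, ''], default='none')
'none'
>>> first_true(range(10), pred=lambda x: x > 5)
6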
3,437
def _back_transform(self,inplace=True): if not self.istransformed: raise Exception("ParameterEnsemble already back transformed") istransformed = self.pst.parameter_data.loc[:,"partrans"] == "log" if inplace: self.loc[:,istransformed] = 10.0**(self.loc[:,istransformed]) self.loc[:,:] = (self.loc[:,:] -\ self.pst.parameter_data.offset)/\ self.pst.parameter_data.scale self.__istransformed = False else: vals = (self.pst.parameter_data.parval1 -\ self.pst.parameter_data.offset) /\ self.pst.parameter_data.scale new_en = ParameterEnsemble(pst=self.pst.get(),data=self.loc[:,:].copy(), columns=self.columns, mean_values=vals,istransformed=False) new_en.loc[:,istransformed] = 10.0**(self.loc[:,istransformed]) new_en.loc[:,:] = (new_en.loc[:,:] -\ new_en.pst.parameter_data.offset)/\ new_en.pst.parameter_data.scale return new_en
Private method to remove log10 transformation from ensemble Parameters ---------- inplace: bool back transform self in place Returns ------ ParameterEnsemble : ParameterEnsemble if inplace if False Note ---- Don't call this method unless you know what you are doing
3,438
def reconfigure_messaging(self, msg_host, msg_port): self._messaging.create_external_route( , host=msg_host, port=msg_port)
Force the messaging reconnector to connect to the given (host, port)
3,439
def create_task_from_cu(cu, prof=None): try: logger.debug('Create Task from CU %s' % cu.name) if prof: prof.prof('create task from cu', uid=cu.name.split(',')[0].strip()) task = Task() task.uid = cu.name.split(',')[0].strip() task.name = cu.name.split(',')[1].strip() task.parent_stage['uid'] = cu.name.split(',')[2].strip() task.parent_stage['name'] = cu.name.split(',')[3].strip() task.parent_pipeline['uid'] = cu.name.split(',')[4].strip() task.parent_pipeline['name'] = cu.name.split(',')[5].strip() task.rts_uid = cu.uid if cu.state == rp.DONE: task.exit_code = 0 else: task.exit_code = 1 task.path = ru.Url(cu.sandbox).path if prof: prof.prof('task from cu created', uid=cu.name.split(',')[0].strip()) logger.debug('Task %s created from CU %s' % (task.uid, cu.name)) return task except Exception as ex: logger.exception('Task creation from CU failed, error: %s' % ex) raise
Purpose: Create a Task based on the Compute Unit. Details: Currently, only the uid, parent_stage and parent_pipeline are retrieved. The exact initial Task (that was converted to a CUD) cannot be recovered as the RP API does not provide the same attributes for a CU as for a CUD. Also, this is not required for the most part. TODO: Add exit code, stdout, stderr and path attributes to a Task. These can be extracted from a CU :arguments: :cu: RP Compute Unit :return: Task
3,440
def add_loaded_callback(self, callback): if callback not in self._cb_aldb_loaded: self._cb_aldb_loaded.append(callback)
Add a callback to be run when the ALDB load is complete.
3,441
def unchunk(self): plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split nchunks = self.getnumber(plan, vshape) full_shape = concatenate((nchunks, plan)) n = len(vshape) perm = concatenate(list(zip(range(n), range(n, 2*n)))) if self.uniform: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape) else: def _unchunk(it): ordered = sorted(it, key=lambda kv: kv[0][split:]) keys, values = zip(*ordered) k_chks = [k[split:] for k in keys] arr = empty(nchunks, dtype=) for (i, d) in zip(k_chks, values): arr[i] = d yield keys[0][:split], allstack(arr.tolist()) if self.padded: removepad = self.removepad rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n)))) else: rdd = self._rdd if array_equal(self.plan, self.vshape): rdd = rdd.map(lambda kv: (kv[0][:split], kv[1])) ordered = self._ordered else: ranges = self.kshape npartitions = int(prod(ranges)) if len(self.kshape) == 0: partitioner = lambda k: 0 else: partitioner = lambda k: ravel_multi_index(k[:split], ranges) rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk) ordered = True if array_equal(self.vshape, [1]): rdd = rdd.mapValues(lambda v: squeeze(v)) newshape = self.shape[:-1] else: newshape = self.shape return BoltArraySpark(rdd, shape=newshape, split=self._split, dtype=self.dtype, ordered=ordered)
Convert a chunked array back into a full array with (key,value) pairs where key is a tuple of indices, and value is an ndarray.
3,442
def _calculate_comparison_stats(truth_vcf): min_stat_size = 50 min_median_size = 250 sizes = [] svtypes = set([]) with utils.open_gzipsafe(truth_vcf) as in_handle: for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")): stats = _summarize_call(call) if stats["size"] > min_stat_size: sizes.append(stats["size"]) svtypes.add(stats["svtype"]) pct10 = int(np.percentile(sizes, 10)) pct25 = int(np.percentile(sizes, 25)) pct50 = int(np.percentile(sizes, 50)) pct75 = int(np.percentile(sizes, 75)) ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50), (pct50, pct75), (pct75, max(sizes))] ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))] return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05), "svtypes": svtypes, "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)), "ranges": []}
Identify calls to validate from the input truth VCF.
3,443
def open(uri, mode, kerberos=False, user=None, password=None): if mode == 'rb': return BufferedInputBase(uri, mode, kerberos=kerberos, user=user, password=password) else: raise NotImplementedError('http support for mode %r not implemented' % mode)
Implement streamed reader from a web site. Supports Kerberos and Basic HTTP authentication. Parameters ---------- url: str The URL to open. mode: str The mode to open using. kerberos: boolean, optional If True, will attempt to use the local Kerberos credentials user: str, optional The username for authenticating over HTTP password: str, optional The password for authenticating over HTTP Note ---- If neither kerberos or (user, password) are set, will connect unauthenticated.
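A hedged usage sketch (URL and credentials are placeholders):
reader = open('https://example.com/data.bin', 'rb', user='alice', password='secret')
data = reader.read()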
3,444
def find_all_matches(finder, ireq, pre=False): candidates = clean_requires_python(finder.find_all_candidates(ireq.name)) versions = {candidate.version for candidate in candidates} allowed_versions = _get_filtered_versions(ireq, versions, pre) if not pre and not allowed_versions: allowed_versions = _get_filtered_versions(ireq, versions, True) candidates = {c for c in candidates if c.version in allowed_versions} return candidates
Find all matching dependencies using the supplied finder and the given ireq. :param finder: A package finder for discovering matching candidates. :type finder: :class:`~pip._internal.index.PackageFinder` :param ireq: An install requirement. :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A list of matching candidates. :rtype: list[:class:`~pip._internal.index.InstallationCandidate`]
3,445
def set_tile(self, row, col, value): if col < 0: print("ERROR - x less than zero", col) col = 0 if col > self.grid_width - 1 : print("ERROR - x larger than grid", col) col = self.grid_width - 1 if row < 0: print("ERROR - y less than zero", row) row = 0 if row > self.grid_height - 1: print("ERROR - y larger than grid", row) row = self.grid_height - 1 self.grid[row][col] = value
Set the tile at position row, col to have the given value.
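For example, assuming a grid object `g` (hypothetical):
g.set_tile(0, 0, 2)    # set the top-left tile to 2
g.set_tile(-1, 99, 4)  # out-of-range indices are clamped to the grid edges (with an error message)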
3,446
def tarbell_configure(command, args): puts("Configuring Tarbell. Press ctrl-c to bail out!") puts("\nWriting {0}".format(colored.green(path))) settings.save() if all: puts("\n- Done configuring Tarbell. Type `{0}` for help.\n" .format(colored.green("tarbell"))) return settings
Tarbell configuration routine.
3,447
def createJobSubscriptionAsync(self, ackCallback, callback, jobExecutionType=jobExecutionTopicType.JOB_WILDCARD_TOPIC, jobReplyType=jobExecutionTopicReplyType.JOB_REQUEST_TYPE, jobId=None): topic = self._thingJobManager.getJobTopic(jobExecutionType, jobReplyType, jobId) return self._AWSIoTMQTTClient.subscribeAsync(topic, self._QoS, ackCallback, callback)
**Description** Asynchronously creates an MQTT subscription to a jobs related topic based on the provided arguments **Syntax** .. code:: python #Subscribe to notify-next topic to monitor change in job referred to by $next myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_NEXT_TOPIC) #Subscribe to notify topic to monitor changes to jobs in pending list myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_NOTIFY_TOPIC) #Subscribe to receive messages for job execution updates myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_UPDATE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE) #Subscribe to receive messages for describing a job execution myAWSIoTMQTTJobsClient.createJobSubscriptionAsync(callback, jobExecutionTopicType.JOB_DESCRIBE_TOPIC, jobExecutionTopicReplyType.JOB_ACCEPTED_REPLY_TYPE, jobId) **Parameters** *ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect request and :code:`data` is the granted QoS for this subscription. *callback* - Function to be called when a new message for the subscribed job topic comes in. Should be in form :code:`customCallback(client, userdata, message)`, where :code:`message` contains :code:`topic` and :code:`payload`. Note that :code:`client` and :code:`userdata` are here just to be aligned with the underneath Paho callback function signature. These fields are pending to be deprecated and should not be depended on. *jobExecutionType* - Member of the jobExecutionTopicType class specifying the jobs topic to subscribe to Defaults to jobExecutionTopicType.JOB_WILDCARD_TOPIC *jobReplyType* - Member of the jobExecutionTopicReplyType class specifying the (optional) reply sub-topic to subscribe to Defaults to jobExecutionTopicReplyType.JOB_REQUEST_TYPE which indicates the subscription isn't intended for a jobs reply topic *jobId* - JobId of the topic if the topic type requires one. Defaults to None **Returns** Subscribe request packet id, for tracking purpose in the corresponding callback.
3,448
def Register(self, name, constructor): precondition.AssertType(name, Text) if name in self._constructors: message = "Duplicated constructors %r and %r for name %r" message %= (constructor, self._constructors[name], name) raise ValueError(message) self._constructors[name] = constructor
Registers a new constructor in the factory. Args: name: A name associated with given constructor. constructor: A constructor function that creates instances. Raises: ValueError: If there already is a constructor associated with given name.
3,449
def insertValue(self, pos, configValue, displayValue=None): self._configValues.insert(pos, configValue) self._displayValues.insert(pos, displayValue if displayValue is not None else configValue)
Will insert the configValue in the configValues and the displayValue in the displayValues list. If displayValue is None, the configValue is set in the displayValues as well
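A small sketch, assuming a choice-item object `item` (hypothetical):
item.insertValue(0, "debug")                         # display value defaults to the config value
item.insertValue(1, "warn", displayValue="Warning")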
3,450
def _execute(self, sql, params): try: return self._execute_unsafe(sql, params) except MySQLdb.OperationalError as ex: if ex.args[0] in (2006, 2013, 2055): self._log("Connection with server is lost. Trying to reconnect.") self.connect() return self._execute_unsafe(sql, params) raise
Execute statement with reconnecting by connection closed error codes. 2006 (CR_SERVER_GONE_ERROR): MySQL server has gone away 2013 (CR_SERVER_LOST): Lost connection to MySQL server during query 2055 (CR_SERVER_LOST_EXTENDED): Lost connection to MySQL server at '%s', system error: %d
3,451
def find_field_by_name(browser, field_type, name): return ElementSelector( browser, field_xpath(field_type, 'name') % string_literal(name), filter_displayed=True, )
Locate the control input with the given ``name``. :param browser: ``world.browser`` :param string field_type: a field type (i.e. `button`) :param string name: ``name`` attribute Returns: an :class:`ElementSelector`
3,452
def table(self, name, database=None, schema=None): if database is not None and database != self.current_database: return self.database(name=database).table(name=name, schema=schema) else: alch_table = self._get_sqla_table(name, schema=schema) node = self.table_class(alch_table, self, self._schemas.get(name)) return self.table_expr_class(node)
Create a table expression that references a particular a table called `name` in a MySQL database called `database`. Parameters ---------- name : str The name of the table to retrieve. database : str, optional The database in which the table referred to by `name` resides. If ``None`` then the ``current_database`` is used. schema : str, optional The schema in which the table resides. If ``None`` then the `public` schema is assumed. Returns ------- table : TableExpr A table expression.
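A hedged usage sketch (connection and table names are placeholders):
t = con.table('users')                          # table in the current database
orders = con.table('orders', database='sales')  # table in another database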
3,453
def build(self): self.defined_gates = set(STANDARD_GATE_NAMES) prog = self._recursive_builder(self.operation, self.gate_name, self.control_qubits, self.target_qubit) return prog
Builds this controlled gate. :return: The controlled gate, defined by this object. :rtype: Program
3,454
def item_count(self, request, variant_id=None): bid = utils.basket_id(request) item = ProductVariant.objects.get(id=variant_id) try: count = BasketItem.objects.get(basket_id=bid, variant=item).quantity except BasketItem.DoesNotExist: count = 0 return Response(data={"quantity": count}, status=status.HTTP_200_OK)
Get quantity of a single item in the basket
3,455
async def download(self, resource_url): resolver_path = self.find_path_from_url(resource_url) await self.apply_resolver_path(resource_url, resolver_path)
Download given Resource URL by finding path through graph and applying each step
3,456
def build_bam_tags(): def _combine_filters(fam, paired_align, align): filters = [x.filter_value for x in [fam, align] if x and x.filter_value] if filters: return ";".join(filters).replace(, ) return None boolean_tag_value = {True:1} tags = [ BamTag("X0", "Z", ("filter (why the alignment was excluded)"), _combine_filters), BamTag("X1", "Z", ("leftmost~rightmost matched pair positions"), lambda fam, pair, align: pair.positions()), BamTag("X2", "Z", ("L~R CIGARs"), lambda fam, pair, align: pair.cigars()), BamTag("X3", "i", "unique identifier for this alignment family", lambda fam, pair, align: fam.umi_sequence), BamTag("X4", "Z", ("L~R UMT barcodes for this alignment family; because " "of fuzzy matching the family UMT may be distinct " "from the UMT of the original alignment"), lambda fam, pair, align: fam.umt()), BamTag("X5", "i", "family size (number of align pairs in this family)", lambda fam, pair, align: fam.included_pair_count), BamTag("X6", "i", ("presence of this tag signals that this alignment " "would be the template for the consensus alignment"), lambda fam, pair, align: boolean_tag_value.get(fam.is_consensus_template(align), None))] return tags
builds the list of BAM tags to be added to output BAMs
3,457
def validate(self): if self.unique_identifier is not None: if not isinstance(self.unique_identifier, attributes.UniqueIdentifier): msg = "invalid unique identifier" raise TypeError(msg) if self.compromise_occurrence_date is not None: if not isinstance(self.compromise_occurrence_date, primitives.DateTime): msg = "invalid compromise time" raise TypeError(msg) if not isinstance(self.revocation_reason, objects.RevocationReason): msg = "invalid revocation reason" raise TypeError(msg)
Error check the attributes of the ActivateRequestPayload object.
3,458
def unicode_to_hex(unicode_string): if unicode_string is None: return None acc = [] for c in unicode_string: s = hex(ord(c)).replace("0x", "").upper() acc.append("U+" + ("0" * (4 - len(s))) + s) return u" ".join(acc)
Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str
3,459
def _encode_observations(self, observations): return [ Observation( self._session.obj.run( self._encoded_image_t.obj, feed_dict={self._decoded_image_p.obj: observation} ), self._decode_png ) for observation in observations ]
Encodes observations as PNG.
3,460
def retweet(self, id): try: self._client.retweet(id=id) return True except TweepError as e: if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR: return False raise
Retweet a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise
3,461
def bytes_to_number(b, endian='big'): if endian == 'big': b = reversed(b) n = 0 for i, ch in enumerate(bytearray(b)): n ^= ch << i * 8 return n
Convert a string to an integer. :param b: String or bytearray to convert. :param endian: Byte order to convert into ('big' or 'little' endian-ness, default 'big') Assumes bytes are 8 bits. This is a special-case version of string_to_number with a full base-256 ASCII alphabet. It is the reverse of ``number_to_bytes(n)``. Examples:: >>> bytes_to_number(b'*') 42 >>> bytes_to_number(b'\\xff') 255 >>> bytes_to_number(b'\\x01\\x00') 256 >>> bytes_to_number(b'\\x00\\x01', endian='little') 256
3,462
def http_post(self, path, query_data={}, post_data={}, files=None, **kwargs): result = self.http_request('post', path, query_data=query_data, post_data=post_data, files=files, **kwargs) try: if result.headers.get('Content-Type', None) == 'application/json': return result.json() except Exception: raise GitlabParsingError( error_message="Failed to parse the server message") return result
Make a POST request to the Gitlab server. Args: path (str): Path or full URL to query ('/projects' or 'http://whatever/v4/api/projects') query_data (dict): Data to send as query parameters post_data (dict): Data to send in the body (will be converted to json) files (dict): The files to send to the server **kwargs: Extra options to send to the server (e.g. sudo) Returns: The parsed json returned by the server if json is returned, else the raw content Raises: GitlabHttpError: When the return code is not 2xx GitlabParsingError: If the json data could not be parsed
3,463
def main(): expr_list = [ "max(-_.千幸福的笑脸{घोड़ा=馬, " "dn2=dv2,千幸福的笑脸घ=千幸福的笑脸घ}) gte 100 " "times 3 && " "(min(ເຮືອນ{dn3=dv3,家=дом}) < 10 or sum(biz{dn5=dv5}) >99 and " "count(fizzle) lt 0or count(baz) > 1)".decode(), "max(foo{hostname=mini-mon,千=千}, 120) > 100 and (max(bar)>100 " " or max(biz)>100)".decode(), "max(foo)>=100", "test_metric{this=that, that = this} < 1", "max ( 3test_metric5 { this = that }) lt 5 times 3", "3test_metric5 lt 3", "ntp.offset > 1 or ntp.offset < -5", "max(3test_metric5{its it}) lt 5 times 3", "count(log.error{test=1}, deterministic) > 1.0", "count(log.error{test=1}, deterministic, 120) > 1.0", "last(test_metric{hold=here}) < 13", "count(log.error{test=1}, deterministic, 130) > 1.0", "count(log.error{test=1}, deterministic) > 1.0 times 0", ] for expr in expr_list: print(.format(expr.encode())) sub_exprs = [] try: alarm_expr_parser = AlarmExprParser(expr) sub_exprs = alarm_expr_parser.sub_expr_list except Exception as ex: print("Parse failed: {}".format(ex)) for sub_expr in sub_exprs: print(.format( sub_expr.fmtd_sub_expr_str.encode())) print(.format( sub_expr.dimensions_str.encode())) print(.format( sub_expr.deterministic)) print(.format( sub_expr.period)) print("") print("")
Used for development and testing.
3,464
def applicationpolicy(arg=None): def _mutator(func): wrapped = singledispatch(func) @wraps(wrapped) def wrapper(*args, **kwargs): event = kwargs.get('event') or args[-1] return wrapped.dispatch(type(event))(*args, **kwargs) wrapper.register = wrapped.register return wrapper assert isfunction(arg), arg return _mutator(arg)
Decorator for application policy method. Allows policy to be built up from methods registered for different event classes.
3,465
def _find_by_android(self, browser, criteria, tag, constraints): return self._filter_elements( browser.find_elements_by_android_uiautomator(criteria), tag, constraints)
Find element matches by UI Automator.
3,466
def selectlastrow(self, window_name, object_name): object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) cell = object_handle.AXRows[-1] if not cell.AXSelected: object_handle.activate() cell.AXSelected = True else: pass return 1
Select last row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
3,467
async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str): LOGGER.debug(, proof_req_json, filt) if filt is None: filt = {} rv = None creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json) creds = json.loads(creds_json) cred_ids = set() if filt: for cd_id in filt: try: json.loads(await self.get_cred_def(cd_id)) except AbsentCredDef: LOGGER.warning(, cd_id) filt.pop(cd_id) for inner_creds in {**creds[], **creds[]}.values(): for cred in inner_creds: cred_info = cred[] if filt: cred_cd_id = cred_info[] if cred_cd_id not in filt: if filt_dflt_incl: cred_ids.add(cred_info[]) continue if in (filt[cred_cd_id] or {}): if not {k: str(filt[cred_cd_id].get(, {})[k]) for k in filt[cred_cd_id].get(, {})}.items() <= cred_info[].items(): continue if in (filt[cred_cd_id] or {}): minima = filt[cred_cd_id].get(, {}) try: if any((attr not in cred_info[]) or (int(cred_info[][attr]) < int(minima[attr])) for attr in minima): continue except ValueError: continue cred_ids.add(cred_info[]) else: cred_ids.add(cred_info[]) if filt: creds = json.loads(prune_creds_json(creds, cred_ids)) rv = (cred_ids, json.dumps(creds)) LOGGER.debug(, rv) return rv
Get credentials from HolderProver wallet corresponding to proof request and filter criteria; return credential identifiers from wallet and credentials json. Return empty set and empty production for no such credentials. :param proof_req_json: proof request json as Verifier creates; has entries for proof request's nonce, name, and version; plus credential's requested attributes, requested predicates. I.e., :: { 'nonce': string, # indy-sdk makes no semantic specification on this value 'name': string, # indy-sdk makes no semantic specification on this value 'version': numeric-string, # indy-sdk makes no semantic specification on this value 'requested_attributes': { '<attr_uuid>': { # aka attr_referent, a proof-request local identifier 'name': string, # attribute name (matches case- and space-insensitively) 'restrictions' [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'requested_predicates': { '<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier 'name': string, # attribute name (matches case- and space-insensitively) 'p_type': '>=', 'p_value': int, # predicate value 'restrictions': [ # optional { "schema_id": string, # optional "schema_issuer_did": string, # optional "schema_name": string, # optional "schema_version": string, # optional "issuer_did": string, # optional "cred_def_id": string # optional }, { ... # if more than one restriction given, combined disjunctively (i.e., via OR) } ], 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': int, # optional, epoch seconds 'to': int # optional, epoch seconds } }, ... }, 'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet 'from': Optional<int>, 'to': Optional<int> } } :param filt: filter for matching attribute-value pairs and predicates; dict mapping each cred def id to dict (specify empty dict or none for no filter, matching all) mapping attributes to values to match or compare. E.g., :: { 'Vx4E82R17q...:3:CL:16:0': { 'attr-match': { 'name': 'Alex', 'sex': 'M', 'favouriteDrink': None }, 'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND) 'favouriteNumber' : 10, 'score': '100' # nicety: implementation converts to int for caller }, }, 'R17v42T4pk...:3:CL:19:0': { 'attr-match': { 'height': 175, 'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND) } }, 'Z9ccax812j...:3:CL:27:0': { 'attr-match': {} # match all attributes on this cred def } ... } :param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not identify by cred def, or to exclude (False) all such credentials :return: tuple with (set of referents, creds json for input proof request); empty set and empty production for no such credential
3,468
def get_reference_templates(self, ref_types): return OrderedDict([(x, self.get_reference_template(x)) for x in ref_types])
Return the reference templates for the types as an ordered dictionary.
3,469
def putscript(self, name, content): content = tools.to_bytes(content) content = tools.to_bytes("{%d+}" % len(content)) + CRLF + content code, data = ( self.__send_command("PUTSCRIPT", [name.encode("utf-8"), content])) if code == "OK": return True return False
Upload a script to the server See MANAGESIEVE specifications, section 2.6 :param name: script's name :param content: script's content :rtype: boolean
3,470
def svm_version_path(version): return os.path.join(Spark.HOME_DIR, Spark.SVM_DIR, .format(version))
Path to specified spark version. Accepts semantic version numbering. :param version: Spark version as String :return: String.
3,471
def insert(self, cache_key, paths, overwrite=False): missing_files = [f for f in paths if not os.path.exists(f)] if missing_files: raise ArtifactCacheError('Tried to cache nonexistent files: {0}'.format(missing_files)) if not overwrite: if self.has(cache_key): logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key)) return False try: self.try_insert(cache_key, paths) return True except NonfatalArtifactCacheError as e: logger.error('Error while writing to artifact cache: {0}'.format(e)) return False
Cache the output of a build. By default, checks cache.has(key) first, only proceeding to create and insert an artifact if it is not already in the cache (though `overwrite` can be used to skip the check and unconditionally insert). :param CacheKey cache_key: A CacheKey object. :param list<str> paths: List of absolute paths to generated dirs/files. These must be under the artifact_root. :param bool overwrite: Skip check for existing, insert even if already in cache.
3,472
def create_build_configuration_set_raw(**kwargs): config_set = _create_build_config_set_object(**kwargs) response = utils.checked_api_call(pnc_api.build_group_configs, 'create_new', body=config_set) if response: return response.content
Create a new BuildConfigurationSet.
3,473
def set_scanner (type, scanner): if __debug__: from .scanner import Scanner assert isinstance(type, basestring) assert issubclass(scanner, Scanner) validate (type) __types [type]['scanner'] = scanner
Sets a scanner class that will be used for this 'type'.
3,474
def missing_intervals(startdate, enddate, start, end, dateconverter=None, parseinterval=None, intervals=None): parseinterval = parseinterval or default_parse_interval dateconverter = dateconverter or todate startdate = dateconverter(parseinterval(startdate, 0)) enddate = max(startdate, dateconverter(parseinterval(enddate, 0))) if intervals is not None and not isinstance(intervals, Intervals): intervals = Intervals(intervals) calc_intervals = Intervals() if start: if startdate < start: calc_start = startdate calc_end = parseinterval(start, -1) if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) if enddate > end: calc_start = parseinterval(end, 1) calc_end = enddate if calc_end >= calc_start: calc_intervals.append(Interval(calc_start, calc_end)) else: start = startdate end = enddate calc_intervals.append(Interval(startdate, enddate)) if calc_intervals: if intervals: calc_intervals.extend(intervals) elif intervals: calc_intervals = intervals return calc_intervals
Given ``startdate`` and ``enddate`` dates, evaluate the date intervals from which data is not available. It returns a list of two-dimensional tuples containing start and end date for the interval. The list could contain 0, 1 or 2 tuples.
3,475
def showMetadata(dat): _tmp = rm_values_fields(copy.deepcopy(dat)) print(json.dumps(_tmp, indent=2)) return
Display the metadata of the specified LiPD record in pretty print | Example | showMetadata(D["Africa-ColdAirCave.Sundqvist.2013"]) :param dict dat: Metadata :return none:
3,476
def _latex_format(obj: Any) -> str: if isinstance(obj, float): try: return sympy.latex(symbolize(obj)) except ValueError: return "{0:.4g}".format(obj) return str(obj)
Format an object as a latex string.
3,477
def _get_required_fn(fn, root_path): if not fn.startswith(root_path): raise ValueError("Both paths have to be absolute or local!") replacer = "/" if root_path.endswith("/") else "" return fn.replace(root_path, replacer, 1)
Definition of the MD5 file requires, that all paths will be absolute for the package directory, not for the filesystem. This function converts filesystem-absolute paths to package-absolute paths. Args: fn (str): Local/absolute path to the file. root_path (str): Local/absolute path to the package directory. Returns: str: Package-absolute path to the file. Raises: ValueError: When `fn` is absolute and `root_path` relative or \ conversely.
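For example (paths are illustrative):
>>> _get_required_fn("/packages/foo/bar.txt", "/packages/foo/")
'/bar.txt'
>>> _get_required_fn("/packages/foo/bar.txt", "/tmp/other")
Traceback (most recent call last):
ValueError: Both paths have to be absolute or local!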
3,478
def write(self, data, auto_flush=True): self.temporary_file.write(data) if auto_flush: self.flush()
<Purpose> Writes a data string to the file. <Arguments> data: A string containing some data. auto_flush: Boolean argument, if set to 'True', all data will be flushed from internal buffer. <Exceptions> None. <Return> None.
3,479
def upload(self, *args, **kwargs): for job in self.jobs: job.upload(*args, **kwargs)
Runs command on every job in the run.
3,480
def load_plugins(self): logger.info("Loading plugins...") for (plugin_name, plugin_path, plugin_cfg) in self.config.plugins: logger.debug("Loading plugin %s from %s", plugin_name, plugin_path) if plugin_path == "yandextank.plugins.Overload": logger.warning( "Deprecated plugin name: 'yandextank.plugins.Overload'\n" "There is a new generic plugin now.\n" "Correcting to 'yandextank.plugins.DataUploader overload'") plugin_path = "yandextank.plugins.DataUploader overload" try: plugin = il.import_module(plugin_path) except ImportError: logger.warning('Plugin name %s path %s import error', plugin_name, plugin_path) logger.debug('Plugin name %s path %s import error', plugin_name, plugin_path, exc_info=True) raise try: instance = getattr(plugin, 'Plugin')(self, cfg=plugin_cfg, name=plugin_name) except AttributeError: logger.warning('Plugin %s classname should be `Plugin`', plugin_name) raise else: self.register_plugin(self.PLUGIN_PREFIX + plugin_name, instance) logger.debug("Plugin instances: %s", self._plugins)
Tells core to take plugin options and instantiate plugin classes
3,481
def tag(self, text): matches = self._match(text.text) matches = self._resolve_conflicts(matches) if self.return_layer: return matches else: text[self.layer_name] = matches
Retrieves list of regex_matches in text. Parameters ---------- text: Text The estnltk text object to search for events. Returns ------- list of matches
3,482
def parse_arguments(filters, arguments, modern=False): params = DotDict() for i in filters: count = len(i) param = None if count <= 1: param = arguments.get(i[0]) else: param = arguments.get(i[0], i[1]) if count >= 3: types = i[2] if modern: if isinstance(types, list) and param is not None: assert len(types) == 1 if not isinstance(param, list): param = [param] param = [check_type(x, types[0]) for x in param] else: param = check_type(param, types) else: if not isinstance(types, list): types = [types] for t in reversed(types): if t == "list" and not isinstance(param, list): if param is None or param == '': param = [] else: param = [param] elif t == "list" and isinstance(param, list): continue elif isinstance(param, list) and "list" not in types: param = " ".join(param) param = check_type(param, t) elif isinstance(param, list): param = [check_type(x, t) for x in param] else: param = check_type(param, t) params[i[0]] = param return params
Return a dict of parameters. Take a list of filters and for each try to get the corresponding value in arguments or a default value. Then check that value's type. The @modern parameter indicates how the arguments should be interpreted. The old way is that you always specify a list and in the list you write the names of types as strings. I.e. instead of `str` you write `'str'`. The modern way allows you to specify arguments by real Python types and entering it as a list means you accept and expect it to be a list. For example, using the modern way: filters = [ ("param1", "default", [str]), ("param2", None, int), ("param3", ["list", "of", 4, "values"], [str]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } And an example for the old way: filters = [ ("param1", "default", ["list", "str"]), ("param2", None, "int"), ("param3", ["list", "of", 4, "values"], ["list", "str"]) ] arguments = { "param1": "value1", "unknown": 12345 } => { "param1": ["value1"], "param2": 0, "param3": ["list", "of", "4", "values"] } The reason for having the modern and the non-modern way is transition of legacy code. One day it will all be the modern way.
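A minimal usage sketch mirroring the modern-style example above; `check_type` and `DotDict` come from the surrounding module, so the exact coercions shown are assumptions:

filters = [
    ("param1", "default", [str]),
    ("param2", 0, int),
]
arguments = {"param1": "value1", "unknown": 12345}
params = parse_arguments(filters, arguments, modern=True)
# Expected (assuming check_type passes valid values through unchanged):
#   params["param1"] == ["value1"], params["param2"] == 0, "unknown" is ignored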
3,483
def _show_doc(cls, fmt_func, keys=None, indent=0, grouped=False,
              func=None, include_links=False, *args, **kwargs):
    def titled_group(groupname):
        # the decoration literals were lost in extraction; '*' underlining and
        # newline separators are reconstructions
        bars = str_indent + '*' * len(groupname) + '\n'
        return bars + str_indent + groupname + '\n' + bars

    func = func or default_print_func
    keys = cls._enhance_keys(keys, *args, **kwargs)
    str_indent = " " * indent

    if grouped:
        grouped_keys = DefaultOrderedDict(list)
        for fmto in map(lambda key: getattr(cls, key), keys):
            grouped_keys[fmto.groupname].append(fmto.key)
        text = "\n\n".join(
            titled_group(group) + cls._show_doc(
                fmt_func, keys, indent=indent, grouped=False,
                func=str, include_links=include_links)
            for group, keys in six.iteritems(grouped_keys))
        return func(text.rstrip())

    if include_links or (include_links is None and cls.include_links):
        # the link format string was lost in extraction; a Sphinx-style
        # attribute reference is assumed here
        long_keys = list(map(lambda key: ':attr:`~%s.%s.%s`' % (
            cls.__module__, cls.__name__, key), keys))
    else:
        long_keys = keys

    # the join and separator literals were lost in extraction; newlines are assumed
    text = '\n'.join(
        str_indent + long_key + '\n' + fmt_func(
            key, long_key, getattr(cls, key).__doc__)
        for long_key, key in zip(long_keys, keys))
    return func(text)
Classmethod to print the formatoptions and their documentation This function is the basis for the :meth:`show_summaries` and :meth:`show_docs` methods Parameters ---------- fmt_func: function A function that takes the key, the key as it is printed, and the documentation of a formatoption as argument and returns what shall be printed %(Plotter.show_keys.parameters)s Other Parameters ---------------- %(Plotter.show_keys.other_parameters)s Returns ------- %(Plotter.show_keys.returns)s See Also -------- show_summaries, show_docs
3,484
def flatten(struct):
    if struct is None:
        return []
    flat = []
    if isinstance(struct, dict):
        for _, result in six.iteritems(struct):
            flat += flatten(result)
        return flat
    if isinstance(struct, six.string_types):
        return [struct]

    try:
        iterator = iter(struct)
    except TypeError:
        return [struct]

    for result in iterator:
        flat += flatten(result)
    return flat
Creates a flat list of all all items in structured output (dicts, lists, items): .. code-block:: python >>> sorted(flatten({'a': 'foo', 'b': 'bar'})) ['bar', 'foo'] >>> sorted(flatten(['foo', ['bar', 'troll']])) ['bar', 'foo', 'troll'] >>> flatten('foo') ['foo'] >>> flatten(42) [42]
3,485
def from_opcode(cls, opcode, arg=_no_arg):
    return type(cls)(opname[opcode], (cls,), {}, opcode=opcode)(arg)
Create an instruction from an opcode and raw argument. Parameters ---------- opcode : int Opcode for the instruction to create. arg : int, optional The argument for the instruction. Returns ------- instr : Instruction An instance of the instruction named by ``opcode``.
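A hedged usage sketch, assuming a codetransformer-style `Instruction` hierarchy on which this classmethod lives:

from dis import opmap

# Hypothetical: build a LOAD_CONST instruction from its raw opcode and argument.
instr = Instruction.from_opcode(opmap['LOAD_CONST'], 0)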
3,486
def toVerticalPotential(Pot, R, phi=None):
    Pot = flatten(Pot)
    if _APY_LOADED:
        if isinstance(R, units.Quantity):
            if hasattr(Pot, '_ro'):
                R = R.to(units.kpc).value / Pot._ro
            else:
                R = R.to(units.kpc).value / Pot[0]._ro
        if isinstance(phi, units.Quantity):
            phi = phi.to(units.rad).value
    if isinstance(Pot, list):
        out = []
        for pot in Pot:
            if isinstance(pot, linearPotential):
                out.append(pot)
            elif isinstance(pot, Potential):
                out.append(verticalPotential(pot, R, phi=phi))
            elif isinstance(pot, planarPotential):
                raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
            else:
                raise PotentialError("Input to 'toVerticalPotential' is neither an RZPotential-instance or a list of such instances")
        return out
    elif isinstance(Pot, Potential):
        return verticalPotential(Pot, R, phi=phi)
    elif isinstance(Pot, linearPotential):
        return Pot
    elif isinstance(Pot, planarPotential):
        raise PotentialError("Input to 'toVerticalPotential' cannot be a planarPotential")
    else:
        raise PotentialError("Input to 'toVerticalPotential' is neither an Potential-instance or a list of such instances")
NAME: toVerticalPotential PURPOSE: convert a Potential to a vertical potential at a given R INPUT: Pot - Potential instance or list of such instances R - Galactocentric radius at which to evaluate the vertical potential (can be Quantity) phi= (None) Galactocentric azimuth at which to evaluate the vertical potential (can be Quantity); required if Pot is non-axisymmetric OUTPUT: (list of) linearPotential instance(s) HISTORY: 2018-10-07 - Written - Bovy (UofT)
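A brief usage sketch with galpy (the potential and its parameters are illustrative):

from galpy.potential import MiyamotoNagaiPotential, toVerticalPotential

mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
# Vertical potential along z at R=1 (natural units); axisymmetric, so phi is not needed
zp = toVerticalPotential(mp, 1.)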
3,487
def last_commit():
    try:
        # the command literals were lost in extraction; 'git rev-parse HEAD' is
        # assumed, matching the documented behaviour (SHA1 of the last commit)
        root = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD'],
            stderr=subprocess.STDOUT).strip()
        return root.decode()
    except subprocess.CalledProcessError:
        return None
Returns the SHA1 of the last commit.
3,488
def hil_controls_send(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder,
                      throttle, aux1, aux2, aux3, aux4, mode, nav_mode,
                      force_mavlink1=False):
    return self.send(
        self.hil_controls_encode(time_usec, roll_ailerons, pitch_elevator,
                                 yaw_rudder, throttle, aux1, aux2, aux3, aux4,
                                 mode, nav_mode),
        force_mavlink1=force_mavlink1)
Sent from autopilot to simulation. Hardware in the loop control outputs time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll_ailerons : Control output -1 .. 1 (float) pitch_elevator : Control output -1 .. 1 (float) yaw_rudder : Control output -1 .. 1 (float) throttle : Throttle 0 .. 1 (float) aux1 : Aux 1, -1 .. 1 (float) aux2 : Aux 2, -1 .. 1 (float) aux3 : Aux 3, -1 .. 1 (float) aux4 : Aux 4, -1 .. 1 (float) mode : System mode (MAV_MODE) (uint8_t) nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
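A hedged usage sketch through pymavlink; the connection string and control values are illustrative, and the mode flag is only one plausible choice:

import time
from pymavlink import mavutil

master = mavutil.mavlink_connection('udpout:127.0.0.1:14560')  # assumed endpoint
master.mav.hil_controls_send(
    int(time.time() * 1e6),                      # time_usec
    0.0, 0.0, 0.0,                               # roll_ailerons, pitch_elevator, yaw_rudder
    0.5,                                         # throttle
    0.0, 0.0, 0.0, 0.0,                          # aux1..aux4
    mavutil.mavlink.MAV_MODE_FLAG_HIL_ENABLED,   # mode (assumed)
    0)                                           # nav_mode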
3,489
def _future_completed(future):
    exc = future.exception()
    if exc:
        log.debug("Failed to run task on executor", exc_info=exc)
Helper for run_in_executor()
3,490
def write_mates(self):
    if self.chrom is not None:
        U.debug("Dumping %i mates for contig %s" % (
            len(self.read1s), self.chrom))

    for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True):
        if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)):
            continue

        key = read.query_name, read.reference_name, read.reference_start
        if key in self.read1s:
            self.outfile.write(read)
            self.read1s.remove(key)

    U.debug("%i mates remaining" % len(self.read1s))
Scan the current chromosome for matches to any of the reads stored in the read1s buffer
3,491
def _add_redundancy_router_interfaces(self, context, router, itfc_info,
                                      new_port, redundancy_router_ids=None,
                                      ha_settings_db=None, create_ha_group=True):
    # NOTE: several dictionary keys and string literals below were lost in
    # extraction; the Neutron-style keys used here ('id', 'fixed_ips',
    # 'subnet_id', 'network_id', 'port_id', ...) are reconstructions.
    router_id = router['id']
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context, router_id)
    if ha_settings_db is None:
        return
    e_context = context.elevated()
    add_by_subnet = (itfc_info is not None and 'subnet_id' in itfc_info and
                     len(new_port['fixed_ips']) > 1)
    if (add_by_subnet is False or
            (itfc_info is None and create_ha_group is True)):
        self._create_ha_group(e_context, router, new_port, ha_settings_db)
    fixed_ips = self._get_fixed_ips_subnets(new_port['fixed_ips'])
    for r_id in (redundancy_router_ids or
                 self._get_redundancy_router_ids(e_context, router_id)):
        if add_by_subnet is True:
            ports = self._core_plugin.get_ports(
                e_context,
                filters={'device_id': [r_id],
                         'network_id': [new_port['network_id']]},
                fields=['fixed_ips', 'id'])
            redundancy_port = ports[0]
            fixed_ips = redundancy_port['fixed_ips']
            fixed_ip = {'subnet_id': itfc_info['subnet_id']}
            fixed_ips.append(fixed_ip)
            self._core_plugin.update_port(
                e_context, redundancy_port['id'],
                {'port': {'fixed_ips': fixed_ips}})
        else:
            redundancy_port = self._create_hidden_port(
                e_context, new_port['network_id'], '', fixed_ips)
            interface_info = {'port_id': redundancy_port['id']}
            self.add_router_interface(e_context, r_id, interface_info)
To be called in add_router_interface() AFTER interface has been added to router in DB.
3,492
def session_dump(self, cell, hash, fname_session):
    # NOTE: the original log messages and injected code literals were lost in
    # extraction; dill-based session dumping is assumed here as a reconstruction.
    logging.debug('Dumping session for cell {} to {}'.format(hash, fname_session))
    inject_code = ['import dill',
                   "dill.dump_session('{}')".format(fname_session),
                   ]
    inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code))
    reply, outputs = super().run_cell(inject_cell)

    errors = list(filter(lambda out: out.output_type == 'error', outputs))
    if len(errors):
        logging.info('Cell {}: session dump failed'.format(hash))
        logging.debug('Cell {}: {}'.format(
            hash, CellExecutionError.from_cell_and_msg(cell, errors[0])))
        self.disable_cache = True
        os.remove(fname_session)
        return False

    return True
Dump ipython session to file :param hash: cell hash :param fname_session: output filename :return:
3,493
def meanAndStdDev(self, limit=None):
    if limit is None or len(self.values) < limit:
        limit = len(self.values)
    if limit > 0:
        mean = sum(self.values[-limit:]) / float(limit)
        sumSq = 0.
        for v in self.values[-limit:]:
            sumSq += (v - mean) * (v - mean)
        return mean, math.sqrt(sumSq / limit)
    else:
        return None
Return the mean and the standard deviation, optionally limited to the last `limit` values.
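A short worked example of the computation (the values are illustrative; note this is the population standard deviation):

# values = [1.0, 2.0, 3.0], limit = None -> limit = 3
# mean   = (1.0 + 2.0 + 3.0) / 3 = 2.0
# sumSq  = (1-2)^2 + (2-2)^2 + (3-2)^2 = 2.0
# stddev = sqrt(2.0 / 3) ~= 0.816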
3,494
def add_cell_markdown(self, cell_str):
    logging.debug("add_cell_markdown: {}".format(cell_str))
    # the join separator (and possibly a split argument) was lost in extraction;
    # whitespace re-joining is assumed here
    cell = ' '.join(cell_str.split())
    cell = nbf.v4.new_markdown_cell(cell)
    self.nb['cells'].append(cell)
Add a markdown cell :param cell_str: markdown text :return:
3,495
def clear_database(engine: Connectable, schemas: Iterable[str] = ()) -> None:
    assert check_argument_types()
    metadatas = []
    all_schemas = (None,)
    all_schemas += tuple(schemas)
    for schema in all_schemas:
        metadata = MetaData()
        metadata.reflect(engine, schema=schema, views=True)
        metadatas.append(metadata)

    for metadata in metadatas:
        metadata.drop_all(engine, checkfirst=False)
Clear any tables from an existing database. :param engine: the engine or connection to use :param schemas: full list of schema names to expect (ignored for SQLite)
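A small usage sketch with SQLAlchemy (an in-memory SQLite engine, so `schemas` stays empty):

from sqlalchemy import create_engine

engine = create_engine('sqlite:///:memory:')
# ... tests create tables here ...
clear_database(engine)   # reflects and drops every table and view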
3,496
def auto_assign_decodings(self, decodings):
    nrz_decodings = [decoding for decoding in decodings
                     if decoding.is_nrz or decoding.is_nrzi]
    fallback = nrz_decodings[0] if nrz_decodings else None
    candidate_decodings = [decoding for decoding in decodings
                           if decoding not in nrz_decodings
                           and not decoding.contains_cut]

    for message in self.messages:
        decoder_found = False

        for decoder in candidate_decodings:
            if decoder.applies_for_message(message.plain_bits):
                message.decoder = decoder
                decoder_found = True
                break

        if not decoder_found and fallback:
            message.decoder = fallback
:type decodings: list of Encoding
3,497
def _split_path(path):
    # the strip/split literals were lost in extraction; '/' separators are
    # assumed, matching the documented normalisation
    path = path.strip('/')
    list_path = path.split('/')
    sentinel = list_path.pop(0)
    return sentinel, list_path, path
Split a path returned by the api. Return: - the sentinel; - the rest of the path as a list; - the original path stripped of / for normalisation.
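A short example of the returned tuple (the path is illustrative and relies on the '/' separators assumed above):

_split_path('/buckets/default/collections/tasks/')
# -> ('buckets', ['default', 'collections', 'tasks'], 'buckets/default/collections/tasks')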
3,498
def on_menu_make_MagIC_results_tables(self, event): self.on_menu_save_interpretation(None) dia = demag_dialogs.magic_pmag_specimens_table_dialog(None) CoorTypes = [] if self.test_mode: CoorTypes = [] elif dia.ShowModal() == wx.ID_OK: if dia.cb_spec_coor.GetValue() == True: CoorTypes.append() if dia.cb_geo_coor.GetValue() == True: CoorTypes.append() if dia.cb_tilt_coor.GetValue() == True: CoorTypes.append() else: self.user_warning("MagIC tables not saved") print("MagIC tables not saved") return self.PmagRecsOld = {} if self.data_model == 3.0: FILES = [] else: FILES = [] for FILE in FILES: self.PmagRecsOld[FILE] = [] meas_data = [] try: meas_data, file_type = pmag.magic_read( os.path.join(self.WD, FILE)) print(("-I- Read old magic file %s\n" % os.path.join(self.WD, FILE))) os.remove(os.path.join(self.WD, FILE)) print(("-I- Delete old magic file %s\n" % os.path.join(self.WD, FILE))) except (OSError, IOError) as e: continue for rec in meas_data: if "magic_method_codes" in list(rec.keys()): if "LP-DIR" not in rec[] and "DE-" not in rec[]: self.PmagRecsOld[FILE].append(rec) specimens_list = list(self.pmag_results_data[].keys()) specimens_list.sort() PmagSpecs = [] for specimen in specimens_list: for dirtype in CoorTypes: i = 0 for fit in self.pmag_results_data[][specimen]: mpars = fit.get(dirtype) if not mpars: mpars = self.get_PCA_parameters( specimen, fit, fit.tmin, fit.tmax, dirtype, fit.PCA_type) if not mpars or not in list(mpars.keys()): self.user_warning("Could not calculate interpretation for specimen %s and fit %s in coordinate system %s while exporting pmag tables, skipping" % ( specimen, fit.name, dirtype)) continue PmagSpecRec = {} PmagSpecRec["magic_software_packages"] = pmag.get_version( ) + PmagSpecRec["er_specimen_name"] = specimen PmagSpecRec["er_sample_name"] = self.Data_hierarchy[][specimen] PmagSpecRec["er_site_name"] = self.Data_hierarchy[][specimen] PmagSpecRec["er_location_name"] = self.Data_hierarchy[][specimen] if specimen in list(self.Data_hierarchy[].keys()): PmagSpecRec["er_expedition_name"] = self.Data_hierarchy[][specimen] PmagSpecRec["er_citation_names"] = "This study" if "magic_experiment_name" in self.Data[specimen]: PmagSpecRec["magic_experiment_names"] = self.Data[specimen]["magic_experiment_name"] if in list(self.Data[specimen].keys()): PmagSpecRec["magic_instrument_codes"] = self.Data[specimen][] PmagSpecRec[] = PmagSpecRec[] = mpars["specimen_direction_type"] PmagSpecRec[] = "%.1f" % mpars["specimen_dec"] PmagSpecRec[] = "%.1f" % mpars["specimen_inc"] PmagSpecRec[] = "g" if fit in self.bad_fits: PmagSpecRec[] = "b" if "C" in fit.tmin or "C" in fit.tmax: PmagSpecRec[] = "K" else: PmagSpecRec[] = "T" if "C" in fit.tmin: PmagSpecRec[] = "%.0f" % ( mpars["measurement_step_min"]+273.) elif "mT" in fit.tmin: PmagSpecRec[] = "%8.3e" % ( mpars["measurement_step_min"]*1e-3) else: if PmagSpecRec[] == "K": PmagSpecRec[] = "%.0f" % ( mpars["measurement_step_min"]+273.) else: PmagSpecRec[] = "%8.3e" % ( mpars["measurement_step_min"]*1e-3) if "C" in fit.tmax: PmagSpecRec[] = "%.0f" % ( mpars["measurement_step_max"]+273.) elif "mT" in fit.tmax: PmagSpecRec[] = "%8.3e" % ( mpars["measurement_step_max"]*1e-3) else: if PmagSpecRec[] == "K": PmagSpecRec[] = "%.0f" % ( mpars["measurement_step_min"]+273.) 
else: PmagSpecRec[] = "%8.3e" % ( mpars["measurement_step_min"]*1e-3) PmagSpecRec[] = "%.0f" % mpars["specimen_n"] calculation_type = mpars[] PmagSpecRec["magic_method_codes"] = self.Data[specimen][] + \ ":"+calculation_type+":"+dirtype PmagSpecRec["specimen_comp_n"] = str( len(self.pmag_results_data["specimens"][specimen])) PmagSpecRec["specimen_comp_name"] = fit.name if fit in self.bad_fits: PmagSpecRec["specimen_flag"] = "b" else: PmagSpecRec["specimen_flag"] = "g" if calculation_type in ["DE-BFL", "DE-BFL-A", "DE-BFL-O"]: PmagSpecRec[] = PmagSpecRec[] = "%.1f" % float( mpars["specimen_mad"]) PmagSpecRec[] = "%.1f" % float( mpars[]) PmagSpecRec[] = "" elif calculation_type in ["DE-BFP"]: PmagSpecRec[] = PmagSpecRec[] = "%.1f" % float( mpars[]) PmagSpecRec[] = "" PmagSpecRec[] = "" if self.data_model == 3.0: if not in list(mpars.keys()) or \ not in list(mpars.keys()): self.calculate_best_fit_vectors( high_level_type="sites", high_level_name=PmagSpecRec["er_site_name"], dirtype=dirtype) mpars = fit.get(dirtype) try: PmagSpecRec[] = "%.1f" % mpars[] PmagSpecRec[] = "%.1f" % mpars[] except KeyError: print("Error calculating BFV during export of interpretations for %s, %s, %s" % ( fit.name, specimen, dirtype)) elif calculation_type in ["DE-FM"]: PmagSpecRec[] = PmagSpecRec[] = "" PmagSpecRec[] = "" PmagSpecRec[] = "%.1f" % float( mpars[]) if dirtype == : PmagSpecRec[] = "100" elif dirtype == : PmagSpecRec[] = "0" else: PmagSpecRec[] = "-1" PmagSpecs.append(PmagSpecRec) i += 1 if in list(self.PmagRecsOld.keys()): for rec in self.PmagRecsOld[]: PmagSpecs.append(rec) PmagSpecs_fixed = self.merge_pmag_recs(PmagSpecs) if len(PmagSpecs_fixed) == 0: self.user_warning( "No data to save to MagIC tables please create some interpretations before saving") print("No data to save, MagIC tables not written") return if self.data_model == 3.0: ndf2_5 = DataFrame(PmagSpecs_fixed) if in ndf2_5.columns: print(("specimen data stored in %s\n" % os.path.join(self.WD, "pmag_specimens.txt"))) TEXT = "specimens interpretations are saved in pmag_specimens.txt.\nPress OK for pmag_samples/pmag_sites/pmag_results tables." dlg = wx.MessageDialog( self, caption="Other Pmag Tables", message=TEXT, style=wx.OK | wx.CANCEL) result = self.show_dlg(dlg) if result == wx.ID_OK: dlg.Destroy() else: dlg.Destroy() return dia = demag_dialogs.magic_pmag_tables_dialog( None, self.WD, self.Data, self.Data_info) if self.show_dlg(dia) == wx.ID_OK: self.On_close_MagIC_dialog(dia)
Creates or Updates Specimens or Pmag Specimens MagIC table, overwrites .redo file for safety, and starts User dialog to generate other MagIC tables for later contribution to the MagIC database. The following describes the steps used in the 2.5 data format to do this: 1. read pmag_specimens.txt, pmag_samples.txt, pmag_sites.txt, and sort out lines with LP-DIR in magic_codes 2. save clean pmag_*.txt files without LP-DIR stuff as pmag_*.txt.tmp 3. write a new file pmag_specimens.txt 4. merge pmag_specimens.txt and pmag_specimens.txt.tmp using combine_magic.py 5. delete pmag_specimens.txt.tmp 6. (optional) extract new pmag_*.txt files (except pmag_specimens.txt) using specimens_results_magic.py 7. if #6: merge pmag_*.txt and pmag_*.txt.tmp using combine_magic.py; if not #6: save pmag_*.txt.tmp as pmag_*.txt
3,499
def train_nn_segmentation_classifier(X, y):
    def build_mlp(input_var=None):
        n_classes = 2
        l_in = lasagne.layers.InputLayer(shape=X.shape, input_var=input_var)

        hiddens = [64, 64, 64]
        layers = [l_in]
        for n_units in hiddens:
            # the nonlinearity argument and the append of the hidden layer were
            # lost in extraction; a rectifier and a simple append are assumed
            l_hidden = lasagne.layers.DenseLayer(
                layers[-1],
                num_units=n_units,
                nonlinearity=lasagne.nonlinearities.rectify)
            layers.append(l_hidden)

        l_output = lasagne.layers.DenseLayer(layers[-1],
                                             num_units=n_classes,
                                             nonlinearity=softmax)
        return l_output

    def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
        assert len(inputs) == len(targets)
        if shuffle:
            indices = np.arange(len(inputs))
            np.random.shuffle(indices)
        for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
            if shuffle:
                excerpt = indices[start_idx:start_idx + batchsize]
            else:
                excerpt = slice(start_idx, start_idx + batchsize)
            yield inputs[excerpt], targets[excerpt]

    # symbolic input / target variables (the name literals were lost in extraction)
    input_var = T.matrix('inputs')
    target_var = T.ivector('targets')
    network = build_mlp(input_var)

    num_epochs = 7
    X_train, X_val = X[:-100], X[-100:]
    y_train, y_val = y[:-100], y[-100:]

    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()

    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params,
                                                learning_rate=0.01,
                                                momentum=0.9)

    test_prediction = lasagne.layers.get_output(network, deterministic=True)

    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    print("Starting training...")
    for epoch in range(num_epochs):
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 20, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        print("Epoch {0} of {1} took {2:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{0:.6f}".format(train_err / train_batches))

    predict_fn = theano.function([input_var], test_prediction)
    return predict_fn
Train a neural network classifier. Parameters ---------- X : numpy array A list of feature vectors y : numpy array A list of labels Returns ------- Theano expression : The trained neural network
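A hedged usage sketch with synthetic data (it assumes lasagne/theano are installed, as the function itself requires):

import numpy as np

X = np.random.rand(500, 20).astype(np.float32)          # 500 samples, 20 features
y = np.random.randint(0, 2, size=500).astype(np.int32)  # binary labels

predict_fn = train_nn_segmentation_classifier(X, y)
probs = predict_fn(X)            # per-class probabilities, shape (500, 2)
labels = probs.argmax(axis=1)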