Dataset columns:
Unnamed: 0 (int64, values 0 to 389k)
code (string, lengths 26 to 79.6k characters)
docstring (string, lengths 1 to 46.9k characters)
21,900
def genre(self):
    _genre = ""
    if " m." in self._indMorph:
        _genre += "m"
    if " f." in self._indMorph:
        _genre += "f"
    if " n." in self._indMorph:
        _genre += "n"
    _genre = _genre.strip()
    if self._renvoi and not _genre:
        lr = self._lemmatiseur.lemme(self._renvoi)
        if lr:
            return lr.genre()
    return _genre
Cette routine convertit les indications morphologiques, données dans le fichier lemmes.la, pour exprimer le genre du mot dans la langue courante. :return: Genre :rtype: str
21,901
def cli(ctx, uuid, output_format="gzip"): return ctx.gi.io.download(uuid, output_format=output_format)
Download pre-prepared data by UUID Output: The downloaded content
21,902
def cummedian(expr, sort=None, ascending=True, unique=False, preceding=None, following=None):
    data_type = _stats_type(expr)
    return _cumulative_op(expr, CumMedian, sort=sort, ascending=ascending, unique=unique,
                          preceding=preceding, following=following, data_type=data_type)
Calculate cumulative median of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :param unique: whether to eliminate duplicate entries :param preceding: the start point of a window :param following: the end point of a window :return: calculated column
21,903
def create(self, export): target_url = self.client.get_url(self._URL_KEY, , ) r = self.client.request(, target_url, json=export._serialize()) return export._deserialize(r.json(), self)
Create and start processing a new Export. :param Export export: The Export to create. :rtype: Export
21,904
def _truncate(p_str, p_repl):
    text_lim = _columns() - len(escape_ansi(p_str)) - 4
    truncated_str = re.sub(re.escape(p_repl), p_repl[:text_lim] + '...', p_str)
    return truncated_str
Returns p_str with a truncated version of p_repl that ends with '...'. The truncation point is calculated from the available column width.
21,905
def iter_tree(jottapath, JFS): filedirlist = JFS.getObject( % jottapath) log.debug("got tree: %s", filedirlist) if not isinstance(filedirlist, JFSFileDirList): yield ( , tuple(), tuple() ) for path in filedirlist.tree: yield path
Get a tree of files and folders. Used as an iterator, it yields something like os.walk.
21,906
def new(cls, alias, sealed_obj, algorithm, key, key_size):
    timestamp = int(time.time()) * 1000
    raise NotImplementedError("Creating Secret Keys not implemented")
Helper function to create a new SecretKeyEntry. :returns: A loaded :class:`SecretKeyEntry` instance, ready to be placed in a keystore.
21,907
def _get_sliced(self, slice, df=None):
    df = self.df.copy() if df is None else df
    if type(slice) not in (list, tuple):
        raise Exception()
    if len(slice) != 2:
        raise Exception()
    a, b = slice
    a = None if a in ('', None) else utils.make_string(a)
    b = None if b in ('', None) else utils.make_string(b)
    return df.ix[a:b]
Returns a sliced DataFrame Parameters ---------- slice : tuple(from,to) from : str to : str States the 'from' and 'to' values which will get rendered as df.ix[from:to] df : DataFrame If omitted then the QuantFigure.DataFrame is resampled.
21,908
def InitializeUpload(self, http_request, http=None, client=None): if self.strategy is None: raise exceptions.UserError( ) if http is None and client is None: raise exceptions.UserError() if self.strategy != RESUMABLE_UPLOAD: return http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) self.EnsureUninitialized() http_response = http_wrapper.MakeRequest(http, http_request, retries=self.num_retries) if http_response.status_code != http_client.OK: raise exceptions.HttpError.FromResponse(http_response) self.__server_chunk_granularity = http_response.info.get( ) url = http_response.info[] if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) if self.auto_transfer: return self.StreamInChunks() return http_response
Initialize this upload from the given http_request.
21,909
async def related_artists(self) -> List[Artist]:
    related = await self.__client.http.artist_related_artists(self.id)
    # the response is assumed to wrap the results in an 'artists' key, as in the Spotify Web API
    return list(Artist(self.__client, item) for item in related['artists'])
Get Spotify catalog information about artists similar to a given artist. Similarity is based on analysis of the Spotify community’s listening history. Returns ------- artists : List[Artist] The artists deemed similar.
21,910
def evaluate_impl(expression, params=None):
    # union tag names restored from the fields accessed in each branch
    which = expression.which()
    if which == 'literal':
        return capnp.Promise(expression.literal)
    elif which == 'previousResult':
        return read_value(expression.previousResult)
    elif which == 'parameter':
        assert expression.parameter < len(params)
        return capnp.Promise(params[expression.parameter])
    elif which == 'call':
        call = expression.call
        func = call.function
        paramPromises = [evaluate_impl(param, params) for param in call.params]
        joinedParams = capnp.join_promises(paramPromises)
        ret = (joinedParams
               .then(lambda vals: func.call(vals))
               .then(lambda result: result.value))
        return ret
    else:
        raise ValueError("Unknown expression type: " + which)
Implementation of CalculatorImpl::evaluate(), also shared by FunctionImpl::call(). In the latter case, `params` are the parameter values passed to the function; in the former case, `params` is just an empty list.
21,911
def commit_and_try_merge2master(git_action, file_content, study_id, auth_info, parent_sha, commit_msg=, merged_sha=None): return generic_commit_and_try_merge2master_wf(git_action, file_content, doc_id=study_id, auth_info=auth_info, parent_sha=parent_sha, commit_msg=commit_msg, merged_sha=merged_sha, doctype_display_name="study")
Actually make a local Git commit and push it to our remote
21,912
def plural(self): element = self._first() if element: if re.search(|\-|,]+)meervoud: ([\w|\s|\, element, re.U).groups()[0].split() results = [x.replace(, ).strip() for x in results] return results else: return [] return [None]
Tries to scrape the plural version from vandale.nl.
21,913
def task_verify(self, task):
    # key names restored from the docstring; elided log message strings left as gaps
    for each in ('taskid', 'project', 'url'):
        if each not in task or not task[each]:
            logger.error(, each, task)
            return False
    if task['project'] not in self.projects:
        logger.error(, task['project'])
        return False
    project = self.projects[task['project']]
    if not project.active:
        logger.error(, task['project'])
        return False
    return True
Return False if any of 'taskid', 'project', 'url' is not in the task dict, or if the project is not in task_queue.
21,914
def default_error_handler(exception): http_exception = isinstance(exception, exceptions.HTTPException) code = exception.code if http_exception else 500 if code == 500: current_app.logger.error(exception) if has_app_context() and has_request_context(): headers = request.headers if in headers and headers[] == : return json_error_handler(exception) return template_error_handler(exception)
Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string
21,915
def l2_log_loss(event_times, predicted_event_times, event_observed=None):
    if event_observed is None:
        event_observed = np.ones_like(event_times)
    ix = event_observed.astype(bool)
    return np.power(np.log(event_times[ix]) - np.log(predicted_event_times[ix]), 2).mean()
r""" Calculates the l2 log-loss of predicted event times to true event times for *non-censored* individuals only. .. math:: 1/N \sum_{i} (log(t_i) - log(q_i))**2 Parameters ---------- event_times: a (n,) array of observed survival times. predicted_event_times: a (n,) array of predicted survival times. event_observed: a (n,) array of censorship flags, 1 if observed, 0 if not. Default None assumes all observed. Returns ------- l2-log-loss: a scalar
21,916
def in6_getifaddr(): ret = [] try: fdesc = open("/proc/net/if_inet6", "rb") except IOError: return ret for line in fdesc: tmp = plain_str(line).split() addr = scapy.utils6.in6_ptop( b.join( struct.unpack(, tmp[0].encode()) ).decode() ) ret.append((addr, int(tmp[3], 16), tmp[5])) fdesc.close() return ret
Returns a list of 3-tuples of the form (addr, scope, iface) where 'addr' is the address of scope 'scope' associated to the interface 'ifcace'. This is the list of all addresses of all interfaces available on the system.
21,917
def _workflow_complete(workflow_stage_dict: dict): complete_stages = [] for _, stage_config in workflow_stage_dict.items(): complete_stages.append((stage_config[] == )) if all(complete_stages): LOG.info() return True return False
Check if the workflow is complete. This function checks if the entire workflow is complete. This function is used by `execute_processing_block`. Args: workflow_stage_dict (dict): Workflow metadata dictionary. Returns: bool, True if the workflow is complete, otherwise False.
21,918
def init(argv): config.set_default({ "driver": {}, "fs": {}, "mongodb": {}, }) init_parsecmdline(argv[1:]) fs.init(dry_run=_opt["dry_run"], **config.get_entry()) signal.signal(signal.SIGINT, _handle_signal) signal.signal(signal.SIGTERM, _handle_signal)
Bootstrap the whole thing :param argv: list of command line arguments
21,919
def printText (self, stream=None): if stream is None: stream = sys.stdout stream.write( % self.seqid ) stream.write( % self.version ) stream.write( % self.crc32 ) stream.write( % len(self.commands) ) stream.write( % self.duration ) stream.write() for line in self.lines: stream.write( str(line) ) stream.write()
Prints a text representation of this sequence to the given stream or standard output.
21,920
def nameddict(name, props): class NamedDict(object): def __init__(self, *args, **kwargs): self.__store = {}.fromkeys(props) if args: for i, k in enumerate(props[:len(args)]): self[k] = args[i] for k, v in kwargs.items(): self[k] = v def __getattr__(self, key): if key.startswith(): return self.__dict__[key] else: return self.__store[key] def __setattr__(self, key, value): if key.startswith(): object.__setattr__(self, key, value) else: self.__setitem__(key, value) def __getitem__(self, key): return self.__store[key] def __setitem__(self, key, value): if key not in props: raise AttributeError("NamedDict(%s) has no attribute %s, avaliables are %s" % ( name, key, props)) self.__store[key] = value def __dict__(self): return self.__store def __str__(self): return % (name, str(self.__store)) return NamedDict
Point = nameddict('Point', ['x', 'y']) pt = Point(x=1, y=2) pt.y = 3 print pt
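A runnable Python 3 version of the example above, illustrating the behaviour of the generated class (the printed result follows from the class definition, assuming the elided prefix check in __getattr__/__setattr__ is for a leading underscore):

```python
Point = nameddict('Point', ['x', 'y'])
pt = Point(x=1, y=2)
pt.y = 3
print(pt['x'], pt.y)  # 1 3
```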
21,921
def create_fixed_rate_shipping(cls, fixed_rate_shipping, **kwargs): kwargs[] = True if kwargs.get(): return cls._create_fixed_rate_shipping_with_http_info(fixed_rate_shipping, **kwargs) else: (data) = cls._create_fixed_rate_shipping_with_http_info(fixed_rate_shipping, **kwargs) return data
Create FixedRateShipping Create a new FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_fixed_rate_shipping(fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to create (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread.
21,922
def ggplot2_style(ax): ax.grid(True, , color=, linestyle=, linewidth=1.4) ax.grid(True, , color=, linestyle=, linewidth=0.7) ax.patch.set_facecolor() ax.set_axisbelow(True) ax.xaxis.set_minor_locator(MultipleLocator( (plt.xticks()[0][1]-plt.xticks()[0][0]) / 2.0 )) ax.yaxis.set_minor_locator(MultipleLocator( (plt.yticks()[0][1]-plt.yticks()[0][0]) / 2.0 )) for child in ax.get_children(): if isinstance(child, mpl.spines.Spine): child.set_alpha(0) for line in ax.get_xticklines() + ax.get_yticklines(): line.set_markersize(5) line.set_color("gray") line.set_markeredgewidth(1.4) for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True): line.set_markersize(0) mpl.rcParams[] = mpl.rcParams[] = ax.xaxis.set_ticks_position() ax.yaxis.set_ticks_position() if ax.legend_ <> None: lg = ax.legend_ lg.get_frame().set_linewidth(0) lg.get_frame().set_alpha(0.5)
Styles an axes to appear like ggplot2 Must be called after all plot and axis manipulation operations have been carried out (needs to know final tick spacing)
21,923
def _count(self, cmd, collation=None):
    with self._socket_for_reads() as (sock_info, slave_ok):
        res = self._command(
            sock_info, cmd, slave_ok,
            allowable_errors=["ns missing"],
            codec_options=self.__write_response_codec_options,
            read_concern=self.read_concern,
            collation=collation)
    if res.get("errmsg", "") == "ns missing":
        return 0
    return int(res["n"])
Internal count helper.
21,924
def send_last_message(self, message_type, data, connection_id, callback=None, one_way=False): if connection_id not in self._connections: raise ValueError("Unknown connection id: {}".format(connection_id)) connection_info = self._connections.get(connection_id) if connection_info.connection_type == \ ConnectionType.ZMQ_IDENTITY: message = validator_pb2.Message( correlation_id=_generate_id(), content=data, message_type=message_type) fut = future.Future(message.correlation_id, message.content, callback, timeout=self._connection_timeout) if not one_way: self._futures.put(fut) self._send_receive_thread.send_last_message( msg=message, connection_id=connection_id) return fut del self._connections[connection_id] return connection_info.connection.send_last_message( message_type, data, callback=callback)
Send a message of message_type and close the connection. :param connection_id: the identity for the connection to send to :param message_type: validator_pb2.Message.* enum value :param data: bytes serialized protobuf :return: future.Future
21,925
def add_listener(self, listener, message_type, data=None, one_shot=False):
    lst = self._one_shots if one_shot else self._listeners
    if message_type not in lst:
        lst[message_type] = []
    lst[message_type].append(Listener(listener, data))
Add a listener that will receive incoming messages.
21,926
def cull_nonmatching_trees(nexson, tree_id, curr_version=None): if curr_version is None: curr_version = detect_nexson_version(nexson) if not _is_by_id_hbf(curr_version): nexson = convert_nexson_format(nexson, BY_ID_HONEY_BADGERFISH) nexml_el = get_nexml_el(nexson) tree_groups = nexml_el[] tree_groups_to_del = [] for tgi, tree_group in tree_groups.items(): tbi = tree_group[] if tree_id in tbi: trees_to_del = [i for i in tbi.keys() if i != tree_id] for tid in trees_to_del: tree_group[].remove(tid) del tbi[tid] else: tree_groups_to_del.append(tgi) for tgid in tree_groups_to_del: nexml_el[].remove(tgid) del tree_groups[tgid] return nexson
Modifies `nexson` and returns it in version 1.2.1 with any tree that does not match the ID removed. Note that this does not search through the NexSON for every node, edge, tree that was deleted. So the resulting NexSON may have broken references !
21,927
def get_tile_locations_by_gid(self, gid):
    for l in self.visible_tile_layers:
        for x, y, _gid in [i for i in self.layers[l].iter_data() if i[2] == gid]:
            yield x, y, l
Search map for tile locations by the GID Return (int, int, int) tuples, where the layer is index of the visible tile layers. Note: Not a fast operation. Cache results if used often. :param gid: GID to be searched for :rtype: generator of tile locations
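A hedged usage sketch, assuming `tmx_data` is a loaded map object exposing the method above and `gid` is a valid tile GID; as the note says, the results are worth caching:

```python
# Materialize the generator once, since the lookup walks every visible layer.
locations = list(tmx_data.get_tile_locations_by_gid(gid))
for x, y, layer_index in locations:
    print(x, y, layer_index)
```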
21,928
def setup_daemon_log_file(cfstore):
    level = loglevel(cfstore.daemon_log_level)
    handler = logging.FileHandler(filename=cfstore.daemon_log_path)
    handler.setLevel(level)
    logger.setLevel(level)
    logger.addHandler(handler)
Attach file handler to RASH logger. :type cfstore: rash.config.ConfigStore
21,929
def p2sh_input(outpoint, stack_script, redeem_script, sequence=None):
    if sequence is None:
        sequence = guess_sequence(redeem_script)
    stack_script = script_ser.serialize(stack_script)
    redeem_script = script_ser.hex_serialize(redeem_script)
    redeem_script = script_ser.serialize(redeem_script)
    return tb.make_legacy_input(
        outpoint=outpoint,
        stack_script=stack_script,
        redeem_script=redeem_script,
        sequence=sequence)
OutPoint, str, str, int -> TxIn Create a signed legacy TxIn from a p2pkh prevout
21,930
def item_frequency(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
    if (not isinstance(sa, tc.data_structures.sarray.SArray) or
            sa.dtype != str):
        raise ValueError("turicreate.visualization.item_frequency supports " +
                         "SArrays of dtype str")
    title = _get_title(title)
    plt_ref = tc.extensions.plot_item_frequency(sa, xlabel, ylabel, title)
    return Plot(plt_ref)
Plots an item frequency of the sarray provided as input, and returns the resulting Plot object. The function supports SArrays with dtype str. Parameters ---------- sa : SArray The data to get an item frequency for. Must have dtype str xlabel : str (optional) The text label for the X axis. Defaults to "Values". ylabel : str (optional) The text label for the Y axis. Defaults to "Count". title : str (optional) The title of the plot. Defaults to LABEL_DEFAULT. If the value is LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value is None, the title will be omitted. Otherwise, the string passed in as the title will be used as the plot title. Returns ------- out : Plot A :class: Plot object that is the item frequency plot. Examples -------- Make an item frequency of an SArray. >>> x = turicreate.SArray(['a','ab','acd','ab','a','a','a','ab','cd']) >>> ifplt = turicreate.visualization.item_frequency(x)
21,931
def get_auth_token(self, user_payload): now = datetime.utcnow() payload = { : user_payload } if in self.verify_claims: payload[] = now if in self.verify_claims: payload[] = now + self.leeway if in self.verify_claims: payload[] = now + self.expiration_delta if self.audience is not None: payload[] = self.audience if self.issuer is not None: payload[] = self.issuer return jwt.encode( payload, self.secret_key, algorithm=self.algorithm, json_encoder=ExtendedJSONEncoder).decode()
Create a JWT authentication token from ``user_payload`` Args: user_payload(dict, required): A `dict` containing required information to create authentication token
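For context, a minimal sketch of how such a JWT payload is typically assembled with PyJWT. The registered claim names (iat, nbf, exp, aud, iss) follow RFC 7519; the 'user' wrapper key, the secret, and the concrete values are assumptions, not this class's actual keys:

```python
import jwt
from datetime import datetime, timedelta

now = datetime.utcnow()
payload = {
    'user': {'id': 42},                  # assumed wrapper key for user_payload
    'iat': now,                          # issued-at
    'nbf': now + timedelta(seconds=10),  # not-before, with some leeway
    'exp': now + timedelta(hours=1),     # expiry
    'aud': 'my-audience',                # audience, if configured
    'iss': 'my-issuer',                  # issuer, if configured
}
token = jwt.encode(payload, 'secret-key', algorithm='HS256')
```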
21,932
def makeWidget(self, qscreen: QtGui.QScreen): self.window = self.ContainerWindow( self.signals, self.title, self.parent) self.window.show() self.window.windowHandle().setScreen(qscreen) self.n_xscreen = self.gpu_handler.getXScreenNum(qscreen) self.main_widget = self.ContainerWidget(self.window) self.main_layout = QtWidgets.QVBoxLayout(self.main_widget) self.window.setCentralWidget(self.main_widget) self.grid_widget = self.GridWidget(self.main_widget) self.main_layout.addWidget(self.grid_widget) self.grid_layout = QtWidgets.QGridLayout(self.grid_widget) self.grid_layout.setHorizontalSpacing(2) self.grid_layout.setVerticalSpacing(2) self.grid_layout.setContentsMargins(0, 0, 0, 0) class ScreenMenu(QuickMenu): title = "Change Screen" elements = [ QuickMenuElement(title="Screen 1"), QuickMenuElement(title="Screen 2") ] if (len(self.gpu_handler.true_screens) > 1): self.button = QtWidgets.QPushButton( "Change Screen", self.main_widget) self.main_layout.addWidget(self.button) self.button.setSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) self.button.clicked.connect(self.change_xscreen_slot) self.placeChildren()
TODO: activate after gpu-hopping has been debugged self.screenmenu = ScreenMenu(self.window) self.screenmenu.screen_1.triggered.connect(self.test_slot) self.screenmenu.screen_2.triggered.connect(self.test_slot)
21,933
def _map_exercise_row_to_dict(self, row): row_cleaned = _clean_dict(row) license_id = row_cleaned[CONTENT_LICENSE_ID_KEY] if license_id: license_dict = dict( license_id=row_cleaned[CONTENT_LICENSE_ID_KEY], description=row_cleaned.get(CONTENT_LICENSE_DESCRIPTION_KEY, None), copyright_holder=row_cleaned.get(CONTENT_LICENSE_COPYRIGHT_HOLDER_KEY, None) ) else: license_dict = None randomize_raw = row_cleaned.get(EXERCISE_RANDOMIZE_KEY, None) if randomize_raw is None or randomize_raw.lower() in CSV_STR_TRUE_VALUES: randomize = True elif randomize_raw.lower() in CSV_STR_FALSE_VALUES: randomize = False else: raise ValueError( + randomize_raw + ) exercise_data = dict( mastery_model=exercises.M_OF_N, randomize=randomize, ) m_value = row_cleaned.get(EXERCISE_M_KEY, None) if m_value: exercise_data[] = m_value n_value = row_cleaned.get(EXERCISE_N_KEY, None) if n_value: exercise_data[] = n_value exercise_dict = dict( chan_path=row_cleaned[CONTENT_PATH_KEY], title=row_cleaned[CONTENT_TITLE_KEY], source_id=row_cleaned[EXERCISE_SOURCEID_KEY], description=row_cleaned.get(CONTENT_DESCRIPTION_KEY, None), author=row_cleaned.get(CONTENT_AUTHOR_KEY, None), language=row_cleaned.get(CONTENT_LANGUAGE_KEY, None), license=license_dict, exercise_data=exercise_data, thumbnail_chan_path=row_cleaned.get(CONTENT_THUMBNAIL_KEY, None) ) return exercise_dict
Convert dictionary keys from raw CSV Exercise format to ricecooker keys.
21,934
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10): if self.__num_processes > 1: process_pool = Pool(processes=self.__num_processes) members = [m.get() for m in self.__members] else: members = self.__members if len(members) == 0: raise Exception( ) selected_members = self.__select_fn(members) reproduction_probs = list(reversed(logspace(0.0, 1.0, num=len(selected_members), base=log_base))) reproduction_probs = reproduction_probs / sum(reproduction_probs) self.__members = [] for _ in range(self.__pop_size): parent_1 = nrandom.choice(selected_members, p=reproduction_probs) parent_2 = nrandom.choice(selected_members, p=reproduction_probs) feed_dict = {} for param in self.__parameters: which_parent = uniform(0, 1) if which_parent < 0.5: feed_dict[param.name] = parent_1.parameters[param.name] else: feed_dict[param.name] = parent_2.parameters[param.name] feed_dict[param.name] = self.__mutate_parameter( feed_dict[param.name], param, mut_rate, max_mut_amt ) if self.__num_processes > 1: self.__members.append(process_pool.apply_async( self._start_process, [self.__cost_fn, feed_dict, self.__cost_fn_args]) ) else: self.__members.append( Member( feed_dict, self.__cost_fn(feed_dict, self.__cost_fn_args) ) ) if self.__num_processes > 1: process_pool.close() process_pool.join() self.__determine_best_member()
Generates the next population from a previously evaluated generation Args: mut_rate (float): mutation rate for new members (0.0 - 1.0) max_mut_amt (float): how much the member is allowed to mutate (0.0 - 1.0, proportion change of mutated parameter) log_base (int): the higher this number, the more likely the first Members (chosen with supplied selection function) are chosen as parents for the next generation
21,935
def write(self, transport, protocol, *data): if not self._write: raise AttributeError() if self.protocol: protocol = self.protocol if self._write.data_type: data = _dump(self._write.data_type, data) else: data = () if isinstance(transport, SimulatedTransport): self.simulate_write(data) else: protocol.write(transport, self._write.header, *data)
Generates and sends a command message unit. :param transport: An object implementing the `.Transport` interface. It is used by the protocol to send the message. :param protocol: An object implementing the `.Protocol` interface. :param data: The program data. :raises AttributeError: if the command is not writable.
21,936
def execute(self, eopatch):
    feature_type, feature_name = next(self.feature(eopatch))
    data = eopatch[feature_type][feature_name].copy()
    reference_bands = self._get_reference_band(data)
    indices = self._get_indices(reference_bands)
    composite_image = np.empty((data.shape[1:]), np.float32)
    composite_image[:] = self.no_data_value
    for scene_id, scene in enumerate(data):
        composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
    eopatch[self.composite_type][self.composite_name] = composite_image
    return eopatch
Compute composite array merging temporal frames according to the compositing method :param eopatch: eopatch holding time-series :return: eopatch with composite image of time-series
21,937
def to_jsondict(self, encode_string=base64.b64encode):
    dict_ = {}
    encode = lambda key, val: self._encode_value(key, val, encode_string)
    for k, v in obj_attrs(self):
        dict_[k] = encode(k, v)
    return {self.__class__.__name__: dict_}
This method returns a JSON style dict to describe this object. The returned dict is compatible with json.dumps() and json.loads(). Suppose ClassName object inherits StringifyMixin. For an object like the following:: ClassName(Param1=100, Param2=200) this method would produce:: { "ClassName": {"Param1": 100, "Param2": 200} } This method takes the following arguments. .. tabularcolumns:: |l|L| ============= ===================================================== Argument Description ============= ===================================================== encode_string (Optional) specify how to encode attributes which has python 'str' type. The default is base64. This argument is used only for attributes which don't have explicit type annotations in _TYPE class attribute. ============= =====================================================
21,938
def _create_latent_variables(self): self.latent_variables.add_z(, fam.Flat(transform=), fam.Normal(0,3)) self.latent_variables.add_z(, fam.Flat(transform=None), fam.Normal(0,3)) for parm in range(1,self.ar+1): self.latent_variables.add_z( + str(parm) + , fam.Flat(transform=), fam.Normal(0,3))
Creates model latent variables Returns ---------- None (changes model attributes)
21,939
def as_tensor_dict(self,
                   padding_lengths: Dict[str, Dict[str, int]] = None) -> Dict[str, DataArray]:
    padding_lengths = padding_lengths or self.get_padding_lengths()
    tensors = {}
    for field_name, field in self.fields.items():
        tensors[field_name] = field.as_tensor(padding_lengths[field_name])
    return tensors
Pads each ``Field`` in this instance to the lengths given in ``padding_lengths`` (which is keyed by field name, then by padding key, the same as the return value in :func:`get_padding_lengths`), returning a list of torch tensors for each field. If ``padding_lengths`` is omitted, we will call ``self.get_padding_lengths()`` to get the sizes of the tensors to create.
21,940
def dotilt(dec, inc, bed_az, bed_dip):
    rad = old_div(np.pi, 180.)
    X = dir2cart([dec, inc, 1.])
    sa, ca = -np.sin(bed_az * rad), np.cos(bed_az * rad)
    cdp, sdp = np.cos(bed_dip * rad), np.sin(bed_dip * rad)
    xc = X[0] * (sa * sa + ca * ca * cdp) + X[1] * (ca * sa * (1. - cdp)) + X[2] * sdp * ca
    yc = X[0] * ca * sa * (1. - cdp) + X[1] * (ca * ca + sa * sa * cdp) - X[2] * sa * sdp
    zc = X[0] * ca * sdp - X[1] * sdp * sa - X[2] * cdp
    Dir = cart2dir([xc, yc, -zc])
    return Dir[0], Dir[1]
Does a tilt correction on a direction (dec,inc) using bedding dip direction and bedding dip. Parameters ---------- dec : declination directions in degrees inc : inclination direction in degrees bed_az : bedding dip direction bed_dip : bedding dip Returns ------- dec,inc : a tuple of rotated dec, inc values Examples ------- >>> pmag.dotilt(91.2,43.1,90.0,20.0) (90.952568837153436, 23.103411670066617)
21,941
def on_directory_button_clicked(self): self.output_directory.setText(QFileDialog.getExistingDirectory( self, self.tr()))
Show a dialog to choose directory. .. versionadded: 3.3
21,942
def get_tu(source, lang=, all_warnings=False, flags=None): args = list(flags or []) name = if lang == : name = args.append() elif lang == : name = elif lang != : raise Exception( % lang) if all_warnings: args += [, ] return TranslationUnit.from_source(name, args, unsaved_files=[(name, source)])
Obtain a translation unit from source and language. By default, the translation unit is created from source file "t.<ext>" where <ext> is the default file extension for the specified language. By default it is C, so "t.c" is the default file name. Supported languages are {c, cpp, objc}. all_warnings is a convenience argument to enable all compiler warnings.
21,943
def systemInformationType2bis():
    a = L2PseudoLength(l2pLength=0x15)
    b = TpPd(pd=0x6)
    c = MessageType(mesType=0x2)
    d = NeighbourCellsDescription()
    e = RachControlParameters()
    f = Si2bisRestOctets()
    packet = a / b / c / d / e / f
    return packet
SYSTEM INFORMATION TYPE 2bis Section 9.1.33
21,944
def changeset_info(changeset): keys = [tag.attrib.get() for tag in changeset.getchildren()] keys += [, , , , ] values = [tag.attrib.get() for tag in changeset.getchildren()] values += [ changeset.get(), changeset.get(), changeset.get(), get_bounds(changeset), changeset.get() ] return dict(zip(keys, values))
Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. Args: changeset: the XML string of the changeset.
21,945
def snapshot(name, suffix=None, connection=None, username=None, password=None): return _virt_call(name, , , , suffix=suffix, connection=connection, username=username, password=password)
Takes a snapshot of a particular VM or by a UNIX-style wildcard. .. versionadded:: 2016.3.0 :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. code-block:: yaml domain_name: virt.snapshot: - suffix: periodic domain*: virt.snapshot: - suffix: periodic
21,946
def load_auth(configfile): logging.debug( % configfile) try: cf = open(configfile) except IOError: logging.error("Unable to find ." % configfile) exit(1) config = configparser.SafeConfigParser({: False}) config.readfp(cf) cf.close() rv = {} try: rv = {: config.get(, ), : config.get(, ), : config.get(, ), : config.get(, )} except configparser.NoSectionError: logging.error("No section in " % configfile) exit(1) except configparser.NoOptionError as e: logging.error("Missing option in auth file : %s" % (configfile, e.message)) exit(1) return rv
Get authentication data from the AUTH_CONF file.
21,947
def delete(self, name=None): "Delete the shelve data file." logger.info() self.close() for path in Path(self.create_path.parent, self.create_path.name), \ Path(self.create_path.parent, self.create_path.name + ): logger.debug(f) if path.exists(): path.unlink() break
Delete the shelve data file.
21,948
def lookups(self, request, model_admin):
    qs = model_admin.get_queryset(request)
    qs.filter(id__range=(1, 99))
    for item in qs:
        dp = DeviceProtocol.objects.filter(pk=item.id).first()
        if dp:
            yield (dp.pk, dp.app_name)
Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.
21,949
def plot_mask_cells(mask_cells, padding=16): fig, axes = plt.subplots(len(mask_cells), 3, figsize=(12, 10)) for idx, (axes, mask_cell) in enumerate(zip(axes, mask_cells), 1): ax1, ax2, ax3 = axes true_mask, predicted_mask, cell = mask_cell plot_mask_cell( true_mask, predicted_mask, cell, .format(idx), ax1, ax2, ax3, padding=padding) fig.tight_layout() return fig, axes
Plots cells with their true mask, predicted mask. Parameters ---------- mask_cells: list of tuples (`true_mask`, `predicted_mask`, `cell`) padding: int (default=16) Padding around mask to remove.
21,950
def setEditorData(self, spinBox, index):
    if index.isValid():
        value = index.model().data(index, QtCore.Qt.EditRole)
        spinBox.setValue(value)
Sets the data to be displayed and edited by the editor from the data model item specified by the model index. Args: spinBox (BigIntSpinbox): editor widget. index (QModelIndex): model data index.
21,951
def run(items, background=None):
    if not background:
        background = []
    paired = vcfutils.get_paired(items + background)
    if paired:
        out = _run_paired(paired)
    else:
        out = items
        logger.warn("GATK4 CNV calling currently only available for somatic samples: %s" %
                    ", ".join([dd.get_sample_name(d) for d in items + background]))
    return out
Detect copy number variations from batched set of samples using GATK4 CNV calling. TODO: implement germline calling with DetermineGermlineContigPloidy and GermlineCNVCaller
21,952
def readSignal(self, chn, start=0, n=None):
    if start < 0:
        return np.array([])
    if n is not None and n < 0:
        return np.array([])
    nsamples = self.getNSamples()
    if chn < len(nsamples):
        if n is None:
            n = nsamples[chn]
        elif n > nsamples[chn]:
            return np.array([])
        x = np.zeros(n, dtype=np.float64)
        self.readsignal(chn, start, n, x)
        return x
    else:
        return np.array([])
Returns the physical data of signal chn. When start and n is set, a subset is returned Parameters ---------- chn : int channel number start : int start pointer (default is 0) n : int length of data to read (default is None, by which the complete data of the channel are returned) Examples -------- >>> import pyedflib >>> f = pyedflib.data.test_generator() >>> x = f.readSignal(0,0,1000) >>> int(x.shape[0]) 1000 >>> x2 = f.readSignal(0) >>> int(x2.shape[0]) 120000 >>> f._close() >>> del f
21,953
def to_json(self, path, root_array=True, mode=WRITE_MODE, compression=None):
    with universal_write_open(path, mode=mode, compression=compression) as output:
        if root_array:
            json.dump(self.to_list(), output)
        else:
            json.dump(self.to_dict(), output)
Saves the sequence to a json file. If root_array is True, then the sequence will be written to json with an array at the root. If it is False, then the sequence will be converted from a sequence of (Key, Value) pairs to a dictionary so that the json root is a dictionary. :param path: path to write file :param root_array: write json root as an array or dictionary :param mode: file open mode
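An illustrative call, assuming `seq` is a sequence object from this library and the file names are arbitrary:

```python
# Write the sequence as a JSON array into a gzip-compressed file.
seq.to_json('output.json.gz', root_array=True, compression='gzip')

# Write (key, value) pairs as a JSON object at the root instead.
seq.to_json('output.json', root_array=False)
```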
21,954
def handle_invocation(self, message): req_id = message.request_id reg_id = message.registration_id if reg_id in self._registered_calls: handler = self._registered_calls[reg_id][REGISTERED_CALL_CALLBACK] invoke = WampInvokeWrapper(self,handler,message) invoke.start() else: error_uri = self.get_full_uri() self.send_message(ERROR( request_code = WAMP_INVOCATION, request_id = req_id, details = {}, error =error_uri ))
Passes the invocation request to the appropriate callback.
21,955
def restore_db(release=None): assert "mysql_user" in env, "Missing mysqL_user in env" assert "mysql_password" in env, "Missing mysql_password in env" assert "mysql_host" in env, "Missing mysql_host in env" assert "mysql_db" in env, "Missing mysql_db in env" if not release: release = paths.get_current_release_name() if not release: raise Exception("Release %s was not found" % release) backup_file = "mysql/%s.sql.gz" % release backup_path = paths.get_backup_path(backup_file) if not env.exists(backup_path): raise Exception("Backup file %s not found" % backup_path) env.run("gunzip < %s | mysql -u %s -p%s -h %s %s" % (backup_path, env.mysql_user, env.mysql_password, env.mysql_host, env.mysql_db))
Restores backup back to version, uses current version by default.
21,956
def pca(X, n_components=2, random_state=None):
    pca_ = PCA(n_components=n_components, random_state=random_state)
    embedding = pca_.fit_transform(X)
    normalization = np.std(embedding[:, 0]) * 100
    embedding /= normalization
    return embedding
Initialize an embedding using the top principal components. Parameters ---------- X: np.ndarray The data matrix. n_components: int The dimension of the embedding space. random_state: Union[int, RandomState] If the value is an int, random_state is the seed used by the random number generator. If the value is a RandomState instance, then it will be used as the random number generator. If the value is None, the random number generator is the RandomState instance used by `np.random`. Returns ------- initialization: np.ndarray
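A quick sanity check of the helper above, using random data; the shape and the rescaled standard deviation follow directly from the code:

```python
import numpy as np

X = np.random.RandomState(42).normal(size=(200, 30))
init = pca(X, n_components=2, random_state=42)
print(init.shape)                    # (200, 2)
print(round(np.std(init[:, 0]), 4))  # 0.01, by construction of the normalization step
```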
21,957
def scourCoordinates(data, options, force_whitespace=False, control_points=[], flags=[]): if data is not None: newData = [] c = 0 previousCoord = for coord in data: is_control_point = c in control_points scouredCoord = scourUnitlessLength(coord, renderer_workaround=options.renderer_workaround, is_control_point=is_control_point) if options.renderer_workaround: if len(newData) > 0: for i in range(1, len(newData)): if newData[i][0] == and in newData[i - 1]: newData[i - 1] += return .join(newData) else: return .join(newData) return
Serializes coordinate data with some cleanups: - removes all trailing zeros after the decimal - integerize coordinates if possible - removes extraneous whitespace - adds spaces between values in a subcommand if required (or if force_whitespace is True)
21,958
async def Claim(self, claims): _params = dict() msg = dict(type=, request=, version=2, params=_params) _params[] = claims reply = await self.rpc(msg) return reply
claims : typing.Sequence[~SingularClaim] Returns -> typing.Sequence[~ErrorResult]
21,959
def index_search_document(self, *, index):
    cache_key = self.search_document_cache_key
    new_doc = self.as_search_document(index=index)
    cached_doc = cache.get(cache_key)
    if new_doc == cached_doc:
        logger.debug("Search document for %r is unchanged, ignoring update.", self)
        return []
    cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60))
    get_client().index(
        index=index,
        doc_type=self.search_doc_type,
        body=new_doc,
        id=self.pk
    )
Create or replace search document in named index. Checks the local cache to see if the document has changed, and if not aborts the update, else pushes to ES, and then resets the local cache. Cache timeout is set as "cache_expiry" in the settings, and defaults to 60s.
21,960
def validate(self, raw_data, **kwargs): try: converted_data = float(raw_data) super(FloatField, self).validate(converted_data, **kwargs) return raw_data except ValueError: raise ValidationException(self.messages[], repr(raw_data))
Convert the raw_data to a float.
21,961
def slideshow(self, **kwargs):
    for i, cycle in enumerate(self.cycles):
        cycle.plot(title="Relaxation step %s" % (i + 1),
                   tight_layout=kwargs.pop("tight_layout", True),
                   show=kwargs.pop("show", True))
Uses matplotlib to plot the evolution of the structural relaxation. Args: ax_list: List of axes. If None a new figure is produced. Returns: `matplotlib` figure
21,962
async def connect(self):
    self.pool = await aiopg.create_pool(
        loop=self.loop,
        timeout=self.timeout,
        database=self.database,
        **self.connect_params)
Create connection pool asynchronously.
21,963
def add_connector(self, connector_type, begin_x, begin_y, end_x, end_y):
    cxnSp = self._add_cxnSp(
        connector_type, begin_x, begin_y, end_x, end_y
    )
    self._recalculate_extents()
    return self._shape_factory(cxnSp)
Add a newly created connector shape to the end of this shape tree. *connector_type* is a member of the :ref:`MsoConnectorType` enumeration and the end-point values are specified as EMU values. The returned connector is of type *connector_type* and has begin and end points as specified.
21,964
def _load_key(private_object): if libcrypto_version_info < (1,) and private_object.algorithm == and private_object.hash_algo == : raise AsymmetricKeyError(pretty_message( , private_object.bit_size )) source = private_object.unwrap().dump() buffer = buffer_from_bytes(source) evp_pkey = libcrypto.d2i_AutoPrivateKey(null(), buffer_pointer(buffer), len(source)) if is_null(evp_pkey): handle_openssl_error(0) return PrivateKey(evp_pkey, private_object)
Loads a private key into a PrivateKey object :param private_object: An asn1crypto.keys.PrivateKeyInfo object :return: A PrivateKey object
21,965
def beds_to_boolean(beds, ref=None, beds_sorted=False, ref_sorted=False, **kwargs): beds = copy.deepcopy(beds) fns = [] for i,v in enumerate(beds): if type(v) == str: fns.append(v) beds[i] = pbt.BedTool(v) else: fns.append(v.fn) if not beds_sorted: beds[i] = beds[i].sort() names = _sample_names(fns, kwargs) if ref: if type(ref) == str: ref = pbt.BedTool(ref) if not ref_sorted: ref = ref.sort() else: ref = combine(beds) ind = [] for r in ref: ind.append(.format(r.chrom, r.start, r.stop)) bdf = pd.DataFrame(0, index=ind, columns=names) for i,bed in enumerate(beds): res = ref.intersect(bed, sorted=True, wa=True) ind = [] for r in res: ind.append(.format(r.chrom, r.start, r.stop)) bdf.ix[ind, names[i]] = 1 return bdf
Compare a list of bed files or BedTool objects to a reference bed file and create a boolean matrix where each row is an interval and each column is a 1 if that file has an interval that overlaps the row interval and a 0 otherwise. If no reference bed is provided, the provided bed files will be merged into a single bed and compared to that. Parameters ---------- beds : list List of paths to bed files or BedTool objects. ref : str or BedTool Reference bed file to compare against. If no reference bed is provided, the provided bed files will be merged into a single bed and compared to that. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. ref_sorted : boolean Whether the reference bed file is sorted. If False, ref will be sorted. names : list of strings Names to use for columns of output files. Overrides define_sample_name if provided. define_sample_name : function that takes string as input Function mapping filename to sample name (or basename). For instance, you may have the basename in the path and use a regex to extract it. The basenames will be used as the column names. If this is not provided, the columns will be named as the input files. Returns ------- out : pandas.DataFrame Boolean data frame indicating whether each bed file has an interval that overlaps each interval in the reference bed file.
21,966
def classifiers(self):
    objects = javabridge.get_env().get_object_array_elements(
        javabridge.call(self.jobject, "getClassifiers", "()[Lweka/classifiers/Classifier;"))
    result = []
    for obj in objects:
        result.append(Classifier(jobject=obj))
    return result
Returns the list of base classifiers. :return: the classifier list :rtype: list
21,967
def for_default_graph(*args, **kwargs): graph = tf.get_default_graph() collection = graph.get_collection(_BOOKKEEPER) if collection: if args or kwargs: raise ValueError( % (args, kwargs)) return collection[0] else: books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs) graph.add_to_collection(_BOOKKEEPER, books) return books
Creates a bookkeeper for the default graph. Args: *args: Arguments to pass into Bookkeeper's constructor. **kwargs: Arguments to pass into Bookkeeper's constructor. Returns: A new Bookkeeper. Raises: ValueError: If args or kwargs are provided and the Bookkeeper already exists.
21,968
def multihistogram(args): p = OptionParser(multihistogram.__doc__) p.add_option("--kmin", default=15, type="int", help="Minimum K-mer size, inclusive") p.add_option("--kmax", default=30, type="int", help="Maximum K-mer size, inclusive") p.add_option("--vmin", default=2, type="int", help="Minimum value, inclusive") p.add_option("--vmax", default=100, type="int", help="Maximum value, inclusive") opts, args, iopts = p.set_image_options(args, figsize="10x5", dpi=300) if len(args) < 1: sys.exit(not p.print_help()) histfiles = args[:-1] species = args[-1] fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) A = fig.add_axes([.08, .12, .38, .76]) B = fig.add_axes([.58, .12, .38, .76]) lines = [] legends = [] genomesizes = [] for histfile in histfiles: ks = KmerSpectrum(histfile) x, y = ks.get_xy(opts.vmin, opts.vmax) K = get_number(op.basename(histfile).split(".")[0].split("-")[-1]) if not opts.kmin <= K <= opts.kmax: continue line, = A.plot(x, y, , lw=1) lines.append(line) legends.append("K = {0}".format(K)) ks.analyze(K=K) genomesizes.append((K, ks.genomesize / 1e6)) leg = A.legend(lines, legends, shadow=True, fancybox=True) leg.get_frame().set_alpha(.5) title = "{0} genome K-mer histogram".format(species) A.set_title(markup(title)) xlabel, ylabel = "Coverage (X)", "Counts" A.set_xlabel(xlabel) A.set_ylabel(ylabel) set_human_axis(A) title = "{0} genome size estimate".format(species) B.set_title(markup(title)) x, y = zip(*genomesizes) B.plot(x, y, "ko", mfc=) t = np.linspace(opts.kmin - .5, opts.kmax + .5, 100) p = np.poly1d(np.polyfit(x, y, 2)) B.plot(t, p(t), "r:") xlabel, ylabel = "K-mer size", "Estimated genome size (Mb)" B.set_xlabel(xlabel) B.set_ylabel(ylabel) set_ticklabels_helvetica(B) labels = ((.04, .96, ), (.54, .96, )) panel_labels(root, labels) normalize_axes(root) imagename = species + ".multiK.pdf" savefig(imagename, dpi=iopts.dpi, iopts=iopts)
%prog multihistogram *.histogram species Plot the histogram based on a set of K-mer histograms. The method is based on Star et al.'s method (Atlantic Cod genome paper).
21,969
def eval_autoregressive(self, features=None, decode_length=50):
    results = self._slow_greedy_infer(features, decode_length=decode_length)
    return results["logits"], results["losses"]
Autoregressive eval. Quadratic time in decode_length. Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. Returns: logits: `Tensor` losses: a dictionary: {loss-name (string): floating point `Scalar`}. Contains a single key "training".
21,970
def get_simple_devices_info(self): j = self.data_request({: }).json() self.scenes = [] items = j.get() for item in items: self.scenes.append(VeraScene(item, self)) if j.get(): self.temperature_units = j.get() self.categories = {} cats = j.get() for cat in cats: self.categories[cat.get()] = cat.get() self.device_id_map = {} devs = j.get() for dev in devs: dev[] = self.categories.get(dev.get()) self.device_id_map[dev.get()] = dev
Get basic device info from Vera.
21,971
def enqueue(self, name=None, action=None, method=None, wait_url=None, wait_url_method=None, workflow_sid=None, **kwargs): return self.nest(Enqueue( name=name, action=action, method=method, wait_url=wait_url, wait_url_method=wait_url_method, workflow_sid=workflow_sid, **kwargs ))
Create a <Enqueue> element :param name: Friendly name :param action: Action URL :param method: Action URL method :param wait_url: Wait URL :param wait_url_method: Wait URL method :param workflow_sid: TaskRouter Workflow SID :param kwargs: additional attributes :returns: <Enqueue> element
21,972
def enable_servicegroup_passive_svc_checks(self, servicegroup):
    for service_id in servicegroup.get_services():
        self.enable_passive_svc_checks(self.daemon.services[service_id])
Enable passive service checks for a servicegroup Format of the line that triggers function call:: ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;<servicegroup_name> :param servicegroup: servicegroup to enable :type servicegroup: alignak.objects.servicegroup.Servicegroup :return: None
21,973
def create_random_ind_grow(self, depth=0):
    "Random individual using grow method"
    lst = []
    self._depth = depth
    self._create_random_ind_grow(depth=depth, output=lst)
    return lst
Random individual using grow method
21,974
def rsa_check_base64_sign_str(self, cipher, sign, b64=True):
    with open(self.key_file) as fp:
        key_ = RSA.importKey(fp.read())
    v = pkcs.new(key_)
    sign = base64.b64decode(sign) if b64 else sign
    cipher = helper.to_bytes(cipher)
    return v.verify(SHA.new(cipher), sign)
Verify the ``rsa`` signature of data returned by the server.
21,975
def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0, path_suffix=".index", sleep_sec=10): if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix): path_prefix += os.sep stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) tf.logging.info("Found %d files with steps: %s", len(stepfiles), ", ".join(str(x.steps) for x in reversed(stepfiles))) exit_time = time.time() + wait_minutes * 60 while True: if not stepfiles and wait_minutes: tf.logging.info( "Waiting till %s if a new file matching %s*-[0-9]*%s appears", time.asctime(time.localtime(exit_time)), path_prefix, path_suffix) while True: stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) if stepfiles or time.time() > exit_time: break time.sleep(sleep_sec) if not stepfiles: return stepfile = stepfiles.pop() exit_time, min_steps = (stepfile.ctime + wait_minutes * 60, stepfile.steps + 1) yield stepfile
Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive.
21,976
def load_external_types(self, path): folder, filename = os.path.split(path) try: fileobj, pathname, description = imp.find_module(filename, [folder]) mod = imp.load_module(filename, fileobj, pathname, description) except ImportError as exc: raise ArgumentError("could not import module in order to load external types", module_path=path, parent_directory=folder, module_name=filename, error=str(exc)) self.load_type_module(mod)
Given a path to a python package or module, load that module, search for all defined variables inside of it that do not start with _ or __ and inject them into the type system. If any of the types cannot be injected, silently ignore them unless verbose is True. If path points to a module it should not contain the trailing .py since this is added automatically by the python import system
21,977
def add(self, dimlist, dimvalues):
    for i, d in enumerate(dimlist):
        self[d] = dimvalues[i]
    self.set_ndims()
add dimensions :parameter dimlist: list of dimensions :parameter dimvalues: list of values for dimlist
21,978
def get_safe_struct(self): safe = {} main_folder = self.get_main_folder() safe[main_folder] = {} safe[main_folder][AwsConstants.AUX_DATA] = {} if self.data_source is not DataSource.SENTINEL2_L1C or self.baseline != : ecmwft_file = AwsConstants.ECMWFT if self.data_source is DataSource.SENTINEL2_L1C or \ self.safe_type is EsaSafeType.OLD_TYPE else AwsConstants.AUX_ECMWFT safe[main_folder][AwsConstants.AUX_DATA][self.get_aux_data_name()] = self.get_url(ecmwft_file) if self.is_early_compact_l2a(): safe[main_folder][AwsConstants.AUX_DATA][self.add_file_extension(AwsConstants.GIPP, remove_path=True)] =\ self.get_url(AwsConstants.GIPP) safe[main_folder][AwsConstants.IMG_DATA] = {} if self.data_source is DataSource.SENTINEL2_L1C: for band in self.bands: safe[main_folder][AwsConstants.IMG_DATA][self.get_img_name(band)] = self.get_url(band) if self.safe_type == EsaSafeType.COMPACT_TYPE: safe[main_folder][AwsConstants.IMG_DATA][self.get_img_name(AwsConstants.TCI)] =\ self.get_url(AwsConstants.TCI) else: for resolution in AwsConstants.RESOLUTIONS: safe[main_folder][AwsConstants.IMG_DATA][resolution] = {} for band_name in self.bands: resolution, band = band_name.split() if self._band_exists(band_name): safe[main_folder][AwsConstants.IMG_DATA][resolution][self.get_img_name(band, resolution)] =\ self.get_url(band_name) safe[main_folder][AwsConstants.QI_DATA] = {} safe[main_folder][AwsConstants.QI_DATA][self.get_qi_name()] = self.get_gml_url() for qi_type in AwsConstants.QI_LIST: for band in AwsConstants.S2_L1C_BANDS: safe[main_folder][AwsConstants.QI_DATA][self.get_qi_name(qi_type, band)] = self.get_gml_url(qi_type, band) if self.has_reports(): for metafile in [AwsConstants.FORMAT_CORRECTNESS, AwsConstants.GENERAL_QUALITY, AwsConstants.GEOMETRIC_QUALITY, AwsConstants.SENSOR_QUALITY]: metafile_name = self.add_file_extension(metafile, remove_path=True) safe[main_folder][AwsConstants.QI_DATA][metafile_name] = self.get_qi_url(metafile_name) if self.data_source is DataSource.SENTINEL2_L2A: for mask in AwsConstants.CLASS_MASKS: for resolution in [AwsConstants.R20m, AwsConstants.R60m]: if self.baseline <= : mask_name = self.get_img_name(mask, resolution) else: mask_name = self.get_qi_name(.format(mask), resolution.lstrip(), MimeType.JP2) safe[main_folder][AwsConstants.QI_DATA][mask_name] =\ self.get_qi_url(.format(mask, resolution.lstrip())) if self.is_early_compact_l2a(): safe[main_folder][AwsConstants.QI_DATA][self.get_img_name(AwsConstants.PVI)] = self.get_preview_url() preview_type = if self.data_source is DataSource.SENTINEL2_L2A and self.baseline >= else safe[main_folder][AwsConstants.QI_DATA][self.get_preview_name()] = self.get_preview_url(preview_type) safe[main_folder][self.get_tile_metadata_name()] = self.get_url(AwsConstants.METADATA) return safe
Describes a structure inside tile folder of ESA product .SAFE structure. :return: nested dictionaries representing .SAFE structure :rtype: dict
21,979
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'text') and self.text is not None:
        _dict['text'] = self.text
    if hasattr(self, 'tense') and self.tense is not None:
        _dict['tense'] = self.tense
    return _dict
Return a json dictionary representing this model.
21,980
def _events(self, using_url, filters=None, limit=None): if not isinstance(limit, (int, NoneType)): limit = None if filters is None: filters = [] if isinstance(filters, string_types): filters = filters.split() if not self.blocking: self.blocking = True while self.blocking: params = { : self._last_seen_id, : limit, } if filters: params[] = .join(map(str, filters)) try: data = self.get(using_url, params=params, raw_exceptions=True) except (ConnectTimeout, ConnectionError) as e: data = None except Exception as e: reraise(, e) if data: self._last_seen_id = data[-1][] for event in data: self._count += 1 yield event
A long-polling method that queries Syncthing for events.. Args: using_url (str): REST HTTP endpoint filters (List[str]): Creates an "event group" in Syncthing to only receive events that have been subscribed to. limit (int): The number of events to query in the history to catch up to the current state. Returns: generator[dict]
21,981
def sep_dist_clay(ConcClay, material): return ((material.Density/ConcClay)*((np.pi * material.Diameter ** 3)/6))**(1/3)
Return the separation distance between clay particles.
21,982
def get_edges(self, src_ids=[], dst_ids=[], fields={}, format=): if not _is_non_string_iterable(src_ids): src_ids = [src_ids] if not _is_non_string_iterable(dst_ids): dst_ids = [dst_ids] if type(src_ids) not in (list, SArray): raise TypeError() if type(dst_ids) not in (list, SArray): raise TypeError() if len(src_ids) == 0 and len(dst_ids) > 0: src_ids = [None] * len(dst_ids) if len(dst_ids) == 0 and len(src_ids) > 0: dst_ids = [None] * len(src_ids) with cython_context(): sf = SFrame(_proxy=self.__proxy__.get_edges(src_ids, dst_ids, fields)) if (format == ): return sf if (format == ): assert HAS_PANDAS, if sf.num_rows() == 0: return pd.DataFrame() else: return sf.head(sf.num_rows()).to_dataframe() elif (format == ): return _dataframe_to_edge_list(sf.to_dataframe()) else: raise ValueError("Invalid format specifier")
get_edges(self, src_ids=list(), dst_ids=list(), fields={}, format='sframe') Return a collection of edges and their attributes. This function is used to find edges by vertex IDs, filter on edge attributes, or list in-out neighbors of vertex sets. Parameters ---------- src_ids, dst_ids : list or SArray, optional Parallel arrays of vertex IDs, with each pair corresponding to an edge to fetch. Only edges in this list are returned. ``None`` can be used to designate a wild card. For instance, ``src_ids=[1, 2, None]``, ``dst_ids=[3, None, 5]`` will fetch the edge 1->3, all outgoing edges of 2 and all incoming edges of 5. src_id and dst_id may be left empty, which implies an array of all wild cards. fields : dict, optional Dictionary specifying equality constraints on field values. For example, ``{'relationship': 'following'}``, returns only edges whose 'relationship' field equals 'following'. ``None`` can be used as a value to designate a wild card. e.g. ``{'relationship': None}`` will find all edges with the field 'relationship' regardless of the value. format : {'sframe', 'list'}, optional Output format. The 'sframe' output (default) contains columns __src_id and __dst_id with edge vertex IDs and a column for each edge attribute. List output returns a list of Edge objects. Returns ------- out : SFrame | list [Edge] An SFrame or list of edges. See Also -------- edges, get_vertices Examples -------- Return all edges in the graph. >>> from turicreate import SGraph, Edge >>> g = SGraph().add_edges([Edge(0, 1, attr={'rating': 5}), Edge(0, 2, attr={'rating': 2}), Edge(1, 2)]) >>> g.get_edges(src_ids=[None], dst_ids=[None]) +----------+----------+--------+ | __src_id | __dst_id | rating | +----------+----------+--------+ | 0 | 2 | 2 | | 0 | 1 | 5 | | 1 | 2 | None | +----------+----------+--------+ Return edges with the attribute "rating" of 5. >>> g.get_edges(fields={'rating': 5}) +----------+----------+--------+ | __src_id | __dst_id | rating | +----------+----------+--------+ | 0 | 1 | 5 | +----------+----------+--------+ Return edges 0 --> 1 and 1 --> 2 (if present in the graph). >>> g.get_edges(src_ids=[0, 1], dst_ids=[1, 2]) +----------+----------+--------+ | __src_id | __dst_id | rating | +----------+----------+--------+ | 0 | 1 | 5 | | 1 | 2 | None | +----------+----------+--------+
21,983
def get_query_uri(self):
    # The original body was not preserved in the source; per the docstring
    # this method returns the uri used for queries on time series data.
    raise NotImplementedError
Return the uri used for queries on time series data.
21,984
def connectTo( self, node, cls = None ): if ( not node ): return con = self.scene().addConnection(cls) con.setOutputNode(self) con.setInputNode(node) return con
Creates a connection between this node and the given node.

:param      node | <XNode>
            cls  | <subclass of XNodeConnection> || None

:return     <XNodeConnection>
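A brief usage sketch, assuming node_a and node_b are XNode instances that already belong to the same scene.

# Assumed: node_a and node_b are XNode instances in the same scene, so
# scene().addConnection() can create the connection object.
con = node_a.connectTo(node_b)
if con is None:
    # connectTo() returns None when no target node is given.
    print('no target node supplied; nothing was connected')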
21,985
def get(self, name: str, default: Optional[Any]=None) -> Any: return self.__dict__.get(name, default)
Get a named attribute of this instance, or return the default.
21,986
def mark(self, value=1): self.counter += value self.m1_rate.update(value) self.m5_rate.update(value) self.m15_rate.update(value)
Record an event with the meter. By default it will record one event.

:param value: number of events to record
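A brief usage sketch; the Meter class name and constructor are assumptions, not taken from the source.

# Hypothetical meter instance; the real class name and constructor may differ.
meter = Meter()
meter.mark()     # record a single event
meter.mark(10)   # record ten events at once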
21,987
def _get_ssl(self): return smtplib.SMTP_SSL( self.server, self.port, context=ssl.create_default_context() )
Get an SMTP session with SSL.
21,988
def rand_imancon_NOTWORKING(x, rho):
    import numpy as np
    from scipy.stats import norm
    import warnings
    warnings.warn((
        "This implementation of the Iman-Conover method is "
        "not working properly. Please check if "
        "are close to the target correlation C. For example "
        " and . "))
    n, d = x.shape
    a = norm.ppf(np.arange(1, n + 1) / (n + 1))
    M = np.nan * np.empty((n, d))
    M[:, 0] = a
    for k in range(1, d):
        np.random.shuffle(a)
        M[:, k] = a
    E = np.corrcoef(M, rowvar=0)
    F = np.linalg.cholesky(E).T
    invF = np.linalg.inv(F)
    C = np.linalg.cholesky(rho).T
    T = np.dot(M, np.dot(invF, C))
    idx = np.argsort(T, axis=0)
    X = np.sort(x, axis=0)
    Y = np.nan * np.empty((n, d))
    for k in range(0, d):
        Y[:, k] = X[idx[:, k], k]
    return Y
Iman-Conover Method to generate random ordinal variables. Implementation from Mildenhall (2005) that is NOT working. x : ndarray <obs x cols> matrix with "cols" ordinal variables that are uncorrelated. rho : ndarray Spearman Rank Correlation Matrix Links * Iman, R.L., Conover, W.J., 1982. A distribution-free approach to inducing rank correlation among input variables. Communications in Statistics - Simulation and Computation 11, 311–334. https://doi.org/10.1080/03610918208812265 * Mildenhall, S.J., 2005. Correlation and Aggregate Loss Distributions With An Emphasis On The Iman-Conover Method 101. (Page 45-49)
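A usage sketch for the (acknowledged-as-broken) routine above: generate two independent uniform columns, impose a target Spearman correlation of 0.7, and compare the achieved rank correlation against the target, as the warning suggests.

import numpy as np
from scipy.stats import spearmanr

np.random.seed(42)
x = np.random.uniform(size=(1000, 2))          # uncorrelated input columns
rho = np.array([[1.0, 0.7],
                [0.7, 1.0]])                   # target Spearman correlation

y = rand_imancon_NOTWORKING(x, rho)

# Because the implementation is flagged as not working, always verify how
# close the achieved rank correlation is to the 0.7 target.
achieved, _ = spearmanr(y[:, 0], y[:, 1])
print('target 0.70, achieved %.3f' % achieved)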
21,989
def _get_application_tags(self): application_tags = {} if isinstance(self.Location, dict): if (self.APPLICATION_ID_KEY in self.Location.keys() and self.Location[self.APPLICATION_ID_KEY] is not None): application_tags[self._SAR_APP_KEY] = self.Location[self.APPLICATION_ID_KEY] if (self.SEMANTIC_VERSION_KEY in self.Location.keys() and self.Location[self.SEMANTIC_VERSION_KEY] is not None): application_tags[self._SAR_SEMVER_KEY] = self.Location[self.SEMANTIC_VERSION_KEY] return application_tags
Adds tags to the stack if this resource is using the serverless app repo
21,990
def show(self):
    # Label strings below are placeholders; the original literals were not
    # preserved in the source.
    msg = ''
    if self._process:
        msg += 'pid: {}\n'.format(self._process.pid)
        msg += 'poll: {}\n'.format(self._process.poll())
    msg += 'running: {}\n'.format(self.running())
    msg += 'port: {}\n'.format(self._port)
    msg += 'url: {}\n'.format(self.url())
    print(msg, end='')
Show state.
21,991
def cdl_addmon(self, source_url, save_path='/', timeout=3600):
    rpath = self.__get_cdl_dest(source_url, save_path)
    return self.__cdl_addmon(source_url, rpath, timeout)
Usage: cdl_addmon <source_url> [save_path] [timeout] - add an offline (cloud) download task and monitor the download progress source_url - the URL to download file from. save_path - path on PCS to save file to. default is to save to root directory '/'. timeout - timeout in seconds. default is 3600 seconds.
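An illustrative call mirroring the usage text above; the client object, URL and save path are placeholders.

# Placeholder client instance and URL; mirrors the usage text above.
client.cdl_addmon('http://example.com/archive.tar.gz',
                  save_path='/offline/',
                  timeout=1800)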
21,992
def parse_strike_dip(strike, dip): strike = parse_azimuth(strike) dip, direction = split_trailing_letters(dip) if direction is not None: expected_direc = strike + 90 if opposite_end(expected_direc, direction): strike += 180 if strike > 360: strike -= 360 return strike, dip
Parses strings of strike and dip and returns strike and dip measurements following the right-hand-rule. Dip directions are parsed, and if the measurement does not follow the right-hand-rule, the opposite end of the strike measurement is returned. Accepts either quadrant-formatted or azimuth-formatted strikes. For example, this would convert a strike of "N30E" and a dip of "45NW" to a strike of 210 and a dip of 45. Parameters ---------- strike : string A strike measurement. May be in azimuth or quadrant format. dip : string The dip angle and direction of a plane. Returns ------- azi : float Azimuth in degrees of the strike of the plane with dip direction indicated following the right-hand-rule. dip : float Dip of the plane in degrees.
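The docstring's own example expressed as a call, plus one azimuth-formatted case (assuming plain azimuth strings are accepted as documented).

# The example from the docstring: the dip direction violates the right-hand
# rule, so the strike is flipped to the opposite end.
print(parse_strike_dip('N30E', '45NW'))   # (210, 45) per the docstring

# Azimuth-formatted strike already consistent with the NW dip direction.
print(parse_strike_dip('210', '45NW'))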
21,993
def parseData(self, data, host, port, options):
    # Several string literals in this function were not preserved in the
    # source; the key/topic values used below ('hostip', 'event',
    # 'jnpr/syslog', '/', 'send', 'data', 'topic') and the log messages are
    # reconstructions, not verbatim originals.
    data = self.obj.parse(data)
    data['hostip'] = host
    log.debug('Junos Syslog - received %s from %s, port %s', data, host, port)
    send_this_event = True
    for key in options:
        if key in data:
            if isinstance(options[key], (six.string_types, int)):
                if six.text_type(options[key]) != six.text_type(data[key]):
                    send_this_event = False
                    break
            elif isinstance(options[key], list):
                for opt in options[key]:
                    if six.text_type(opt) == six.text_type(data[key]):
                        break
                else:
                    send_this_event = False
                    break
            else:
                raise Exception()
        else:
            raise Exception()
    if send_this_event:
        if 'event' in data:
            topic = 'jnpr/syslog'
            for i in range(2, len(self.title)):
                topic += '/' + six.text_type(data[self.title[i]])
            log.debug('Junos Syslog - sending event on the bus: %s from %s', data, host)
            result = {'send': True, 'data': data, 'topic': topic}
            return result
        else:
            raise Exception()
    else:
        result = {'send': False}
        return result
This function will parse the raw syslog data, dynamically create the topic according to the topic specified by the user (if specified) and decide whether to send the syslog data as an event on the master bus, based on the constraints given by the user. :param data: The raw syslog event data which is to be parsed. :param host: The IP of the host from where syslog is forwarded. :param port: Port of the junos device from which the data is sent :param options: kwargs provided by the user in the configuration file. :return: The result dictionary which contains the data and the topic, if the event is to be sent on the bus.
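An illustrative constraint set as it might be supplied from the engine configuration; the field names and values are placeholders, not the actual junos syslog schema.

# Placeholder constraint set (field names are illustrative only).
options = {
    'severity': [3, 4],            # list: match any of these values
    'hostname': 'router-ams-01',   # scalar: must match exactly
}
# parseData() only builds a topic and flags the event for sending when every
# constrained field is present in the parsed syslog dict and matches;
# otherwise the event is dropped.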
21,994
def _pack_prms(): config_dict = { "Paths": prms.Paths.to_dict(), "FileNames": prms.FileNames.to_dict(), "Db": prms.Db.to_dict(), "DbCols": prms.DbCols.to_dict(), "DataSet": prms.DataSet.to_dict(), "Reader": prms.Reader.to_dict(), "Instruments": prms.Instruments.to_dict(), "Batch": prms.Batch.to_dict(), } return config_dict
if you introduce new 'save-able' parameter dictionaries, then you have to include them here
21,995
def setHorCrossPlotAutoRangeOn(self, axisNumber): setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber)
Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber. :param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
21,996
def node_contents_str(tag):
    if not tag:
        return None
    tag_string = ''
    for child_tag in tag.children:
        if isinstance(child_tag, Comment):
            # Re-wrap comments so they survive the round trip to a string
            # (the '<!--%s-->' template is a reconstruction; the original
            # literal was not preserved).
            tag_string += '<!--%s-->' % unicode_value(child_tag)
        else:
            tag_string += unicode_value(child_tag)
    return tag_string if tag_string != '' else None
Return the contents of a tag, including its children, as a string. Does not include the root/parent of the tag.
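A usage sketch assuming BeautifulSoup-style tags, which the Comment check and .children iteration imply; the markup is illustrative.

from bs4 import BeautifulSoup

soup = BeautifulSoup('<p>Some <italic>emphasised</italic> text</p>', 'html.parser')
# Returns the contents of <p> (children included) as one string, or None for
# an empty/missing tag.
print(node_contents_str(soup.p))
print(node_contents_str(None))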
21,997
def update_limits(self):
    if self.limits_updated:
        # Message reconstructed; the original literal was not preserved.
        logger.debug('Limits already updated from Trusted Advisor; not polling again')
        return
    self.connect()
    ta_results = self._poll()
    self._update_services(ta_results)
    self.limits_updated = True
Poll 'Service Limits' check results from Trusted Advisor, if possible. Iterate over all :py:class:`~.AwsLimit` objects for the given services and update their limits from TA if present in TA checks. :param services: dict of service name (string) to :py:class:`~._AwsService` objects :type services: dict
21,998
def parse_int(str_num):
    str_num = (str_num or "").strip().upper()
    if not str_num:
        return None

    # Prefixes/suffix restored from the docstring's allowed formats
    # (0xnnn, $nnnn, nnnnh); the input has already been upper-cased.
    base = 10
    if str_num.startswith('0X'):
        base = 16
        str_num = str_num[2:]
    if str_num.endswith('H'):
        base = 16
        str_num = str_num[:-1]
    if str_num.startswith('$'):
        base = 16
        str_num = str_num[1:]

    try:
        return int(str_num, base)
    except ValueError:
        return None
Given a string containing an integer number, return its value, or None if it could not be parsed.

Allowed formats: DECIMAL, HEXA (0xnnn, $nnnn or nnnnh)

:param str_num: (string) the number to be parsed
:return: an integer number or None if it could not be parsed
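Quick checks of the formats the docstring lists; results assume the hex prefixes/suffix restored in the code above.

print(parse_int('42'))      # decimal        -> 42
print(parse_int('0x2A'))    # 0x-prefixed    -> 42
print(parse_int('$2A'))     # $-prefixed     -> 42
print(parse_int('2Ah'))     # h-suffixed     -> 42
print(parse_int('oops'))    # unparseable    -> None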
21,999
def modify_job(self, name, schedule, persist=True):
    if name in self.opts['schedule']:
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        # Warning text partially reconstructed; the original literal was
        # damaged in the source.
        log.warning("Cannot modify job %s, it is in the pillar!", name)
        return
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
Modify a job in the scheduler. Ignores jobs from pillar