Dataset columns: Unnamed: 0 (int64, 0 to 389k), code (string, 26 to 79.6k chars), docstring (string, 1 to 46.9k chars).
28,500
def iterate(self, max_iter=150):
    self._run_alg(max_iter)
    self.retrieve_outputs()
    self.x_final = self._x_new
r"""Iterate This method calls update until either convergence criteria is met or the maximum number of iterations is reached Parameters ---------- max_iter : int, optional Maximum number of iterations (default is ``150``)
28,501
def build_masked_loss(loss_function, mask_value):
    def masked_loss_function(y_true, y_pred):
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * mask, y_pred * mask)
    return masked_loss_function
Builds a loss function that masks based on targets Args: loss_function: The loss function to mask mask_value: The value to mask in the targets Returns: function: a loss function that acts like loss_function with masked inputs
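A minimal usage sketch of the pattern above, assuming a Keras backend imported as K; mean_absolute_error and the -1.0 sentinel for "missing label" are illustrative choices, not part of the original:

import keras.backend as K
from keras.losses import mean_absolute_error

# Targets equal to -1.0 (assumed sentinel) contribute zero loss.
masked_mae = build_masked_loss(mean_absolute_error, mask_value=-1.0)
# model.compile(optimizer="adam", loss=masked_mae)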
28,502
async def identity_of(client: Client, search: str) -> dict:
    return await client.get(MODULE + '/identity-of/%s' % search, schema=IDENTITY_OF_SCHEMA)
GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return:
28,503
def decipher(self, string, keep_punct=False):
    if self.invkey == '':
        for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            self.invkey += self.i2a(self.key.index(i))
    if not keep_punct:
        string = self.remove_punctuation(string)
    ret = ''
    for c in string.upper():
        if c.isalpha():
            ret += self.invkey[self.a2i(c)]
        else:
            ret += c
    return ret
Decipher string using Simple Substitution cipher according to initialised key. Example:: plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext) :param string: The string to decipher. :param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False. :returns: The deciphered string.
28,504
def get_meta_type_by_dir(dir_name):
    parent_data = get_default_metadata_data()
    child_data = get_child_metadata_data()
    data = parent_data['metadataObjects'] + child_data
    for item in data:
        if 'directoryName' in item and item['directoryName'].lower() == dir_name.lower():
            return item
        elif 'tagName' in item and item['tagName'].lower() == dir_name.lower():
            return item
    try:
        # quick and dirty fix for "newer" metadata types: if the project has a
        # cached .describe, use that to detect metadata types
        with open(os.path.join(config.project.location, 'config', '.describe')) as f:
            describe_data = json.load(f)
        for item in describe_data['metadataObjects']:
            if 'directoryName' in item and item['directoryName'].lower() == dir_name.lower():
                return item
            elif 'tagName' in item and item['tagName'].lower() == dir_name.lower():
                return item
    except:
        pass
> quick and dirty fix for users experiencing issues with "newer" metadata types not properly tested by mm > if the project has a cached .describe, let's use that to detect metadata types
28,505
def image_shift(xshift=0, yshift=0, axes="gca"):
    if axes == "gca":
        axes = _pylab.gca()
    e = axes.images[0].get_extent()
    e[0] = e[0] + xshift
    e[1] = e[1] + xshift
    e[2] = e[2] + yshift
    e[3] = e[3] + yshift
    axes.images[0].set_extent(e)
    _pylab.draw()
This will shift an image to a new location on x and y.
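A short usage sketch, assuming pylab is available and an image has already been drawn on the current axes (the toy 2x2 image is illustrative):

import pylab

pylab.imshow([[0, 1], [2, 3]])        # draw a toy image on the current axes
image_shift(xshift=1.5, yshift=-0.5)  # nudge its extent right and down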
28,506
def drop(self):
    api = self._instance._client.database_admin_api
    metadata = _metadata_with_prefix(self.name)
    api.drop_database(self.name, metadata=metadata)
Drop this database. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
28,507
def reset(self):
    self.clean_cache_upstream()
    self.set_mode_train()
    for step_obj in self.all_upstream_steps.values():
        step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']
        step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']
        step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']
        step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']
        step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']
    logger.info('Step {} reset to default training parameters'.format(self.name))
    return self
Reset all upstream Steps to the default training parameters and clean the cache for all upstream Steps, including this Step. Defaults are:
    'mode': 'train',
    'is_fittable': True,
    'force_fitting': True,
    'persist_output': False,
    'cache_output': False,
    'load_persisted_output': False
28,508
def _set_allowed_ouis(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("oui",allowed_ouis.allowed_ouis, yang_name="allowed-ouis", rest_name="allowed-ouis", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: u, u: None}}), is_container=, yang_name="allowed-ouis", rest_name="allowed-ouis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__allowed_ouis = t if hasattr(self, ): self._set()
Setter method for allowed_ouis, mapped from YANG variable /interface/port_channel/switchport/port_security/allowed_ouis (list) If this variable is read-only (config: false) in the source YANG file, then _set_allowed_ouis is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_allowed_ouis() directly. YANG Description: List of allowed OUIs
28,509
def add_timing(self, label, secs, is_tool=False):
    self._timings_by_path[label] += secs
    if is_tool:
        self._tool_labels.add(label)
    if self._path and os.path.exists(os.path.dirname(self._path)):
        with open(self._path, 'w') as f:
            for x in self.get_all():
                f.write('{label}: {timing}\n'.format(**x))
Aggregate timings by label. secs - a double, so fractional seconds are allowed. is_tool - whether this label represents a tool invocation.
28,510
def nvmlDeviceGetComputeMode(handle):
    c_mode = _nvmlComputeMode_t()
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeMode")
    ret = fn(handle, byref(c_mode))
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_mode.value)
r""" /** * Retrieves the current compute mode for the device. * * For all products. * * See \ref nvmlComputeMode_t for details on allowed compute modes. * * @param device The identifier of the target device * @param mode Reference in which to return the current compute mode * * @return * - \ref NVML_SUCCESS if \a mode has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceSetComputeMode() */ nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode
28,511
def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None,
          retry_log_level=logging.INFO,
          retry_log_message="Connection broken in {f} (error: {e}); "
                            "retrying with new connection.",
          max_failures=None, interval=0,
          max_failure_log_level=logging.ERROR,
          max_failure_log_message="Max retries reached for {f}. Aborting."):
    exc_classes = tuple(exc_classes)

    @wraps(f)
    def deco(*args, **kwargs):
        failures = 0
        while True:
            try:
                return f(*args, **kwargs)
            except exc_classes as e:
                if logger is not None:
                    logger.log(retry_log_level,
                               retry_log_message.format(f=f.func_name, e=e))
                gevent.sleep(interval)
                failures += 1
                if max_failures is not None and failures > max_failures:
                    if logger is not None:
                        logger.log(max_failure_log_level,
                                   max_failure_log_message.format(f=f.func_name, e=e))
                    raise
    return deco
Decorator to automatically reexecute a function if the connection is broken for any reason.
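A hedged usage sketch: note that retry() takes the function as its first positional argument, so it is applied by explicit wrapping rather than as a @decorator factory. socket.error and open_connection_and_read are illustrative placeholders:

import socket

def fetch():
    return open_connection_and_read()  # hypothetical helper that may drop the connection

fetch_with_retry = retry(fetch, exc_classes=(socket.error,),
                         max_failures=3, interval=1)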
28,512
def league(page):
    soup = BeautifulSoup(page)
    try:
        return soup.find().text.split()[0].split()[0]
    except:
        return None
Return the league name
28,513
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
    work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
    work = work_class(workdir=work_workdir, manager=manager)
    self._works.append(work)
    deps = [Dependency(node, exts) for node, exts in deps.items()]
    if not deps:
        raise ValueError("A callback must have deps!")
    work.add_deps(deps)
    cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
    self._callbacks.append(cbk)
    return work
Registers a callback function that will generate the :class:`Task` of the :class:`Work`. Args: cbk_name: Name of the callback function (must be a bound method of self) cbk_data: Additional data passed to the callback function. deps: List of :class:`Dependency` objects specifying the dependency of the work. work_class: :class:`Work` class to instantiate. manager: The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`. Returns: The :class:`Work` that will be finalized by the callback.
28,514
def update(self, value, tys=None, override=True):
    if isinstance(value, WeldObject):
        self.context.update(value.context)
    else:
        value_str = str(value)
        if value_str in WeldObject._registry:
            name = WeldObject._registry[value_str]
        else:
            name = WeldObject.generate_input_name(value_str)
        self.context[name] = value
        if tys is not None and not override:
            self.argtypes[name] = tys
        return name
Update this context. If value is another context, the names from that context are added into this one. Otherwise, a new name is assigned and returned. TODO: tys for inputs.
28,515
def set_log_level(logger, level):
    if level > 2:
        level = 2
    if level < -1:
        level = -1
    levels = {
        -1: logging.ERROR,
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG
    }
    logger.setLevel(levels[level])
Dynamic reconfiguration of the log level
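A quick usage sketch showing the clamping behaviour (verbosity counts outside [-1, 2] are clipped before the lookup):

import logging

logger = logging.getLogger("example")
set_log_level(logger, 2)    # -vv style verbosity -> DEBUG
set_log_level(logger, 99)   # clamped to 2, still DEBUG
set_log_level(logger, -1)   # ERROR only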
28,516
def connect_edges_pd(graph):
    edges = graph.dframe()
    edges.index.name = 'graph_edge_index'
    edges = edges.reset_index()
    nodes = graph.nodes.dframe()
    src, tgt = graph.kdims
    x, y, idx = graph.nodes.kdims[:3]

    df = pd.merge(edges, nodes, left_on=[src.name], right_on=[idx.name])
    df = df.rename(columns={x.name: 'src_x', y.name: 'src_y'})

    df = pd.merge(df, nodes, left_on=[tgt.name], right_on=[idx.name])
    df = df.rename(columns={x.name: 'dst_x', y.name: 'dst_y'})
    df = df.sort_values('graph_edge_index').drop(['graph_edge_index'], axis=1)

    edge_segments = []
    for i, edge in df.iterrows():
        start = edge['src_x'], edge['src_y']
        end = edge['dst_x'], edge['dst_y']
        edge_segments.append(np.array([start, end]))
    return edge_segments
Given a Graph element containing abstract edges compute edge segments directly connecting the source and target nodes. This operation depends on pandas and is a lot faster than the pure NumPy equivalent.
28,517
def getThirdpartyLibs(self, libs, configuration='Development', includePlatformDefaults=True):
    if includePlatformDefaults == True:
        libs = self._defaultThirdpartyLibs() + libs
    interrogator = self._getUE4BuildInterrogator()
    return interrogator.interrogate(self.getPlatformIdentifier(), configuration, libs, self._getLibraryOverrides())
Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries
28,518
def _init_taxid2asscs(self):
    taxid2asscs = cx.defaultdict(list)
    for ntanno in self.associations:
        taxid2asscs[ntanno.tax_id].append(ntanno)
    assert len(taxid2asscs) != 0, "**FATAL: NO TAXIDS: {F}".format(F=self.filename)
    prt = sys.stdout
    num_taxids = len(taxid2asscs)
    prt.write('{N} taxids stored\n'.format(N=num_taxids))
    if num_taxids < 5:
        prt.write('taxids: {Ts}\n'.format(Ts=', '.join(sorted(str(t) for t in taxid2asscs))))
    prt.write('\n')
    return dict(taxid2asscs)
Create dict with taxid keys and annotation namedtuple list.
28,519
def list():
    try:
        devs = hid.enumerate(VENDOR_ID, PRODUCT_ID)
        serials = list(map(lambda d: d.get('serial_number'), devs))
        return serials
    except IOError as e:
        return []
List blink(1) devices connected, by serial number :return: List of blink(1) device serial numbers
28,520
def _process(self, segments):
    mlh, mlw = self.max_line_height, self.max_line_width
    s = segments.astype(numpy.uint32)
    order = mlw * (s[:, 1] // mlh) + s[:, 0]
    sort_order = numpy.argsort(order)
    return segments[sort_order]
sort segments in read order - left to right, up to down
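To see why the key mlw * (y // mlh) + x yields read order, consider a sketch with an assumed max line height of 10 and width of 100: y // 10 picks the row band, and x breaks ties within a band:

import numpy as np

mlh, mlw = 10, 100                                  # assumed maximums
segments = np.array([[50, 22], [5, 25], [80, 3]])   # (x, y) pairs
order = mlw * (segments[:, 1] // mlh) + segments[:, 0]
print(segments[np.argsort(order)])
# [[80  3]   <- top band first
#  [ 5 25]   <- second band, leftmost first
#  [50 22]]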
28,521
def messages_from_response(response):
    messages = []
    if hasattr(response, 'context') and response.context and \
            'messages' in response.context:
        messages = response.context['messages']
    elif hasattr(response, 'cookies'):
        morsel = response.cookies.get('messages')
        if not morsel:
            return []
        from django.contrib.messages.storage.cookie import CookieStorage
        store = CookieStorage(FakeRequest())
        messages = store._decode(morsel.value)
    else:
        return []
    return [(m.message, m.level) for m in messages]
Returns a list of the messages from the django MessageMiddleware package contained within the given response. This is to be used during unit testing when trying to see if a message was set properly in a view. :param response: HttpResponse object, likely obtained through a test client.get() or client.post() call :returns: a list of tuples (message_string, message_level), one for each message in the response context
28,522
def plot_freq(self, x, y, title='', ylabel=None, scale='semilogy'):
    freq = self.frequency
    scaling = freq['scaling'].get_value()
    if ylabel is None:
        if freq['complex'].get_value():
            ylabel = 'Amplitude'
        elif 'power' == scaling:
            ylabel = 'Power spectral density'
        elif 'energy' == scaling:
            ylabel = 'Energy spectral density'
    self.parent.plot_dialog = PlotDialog(self.parent)
    self.parent.plot_dialog.canvas.plot(x, y, title, ylabel, scale=scale)
    self.parent.show_plot_dialog()
Plot mean frequency spectrum and display in dialog. Parameters ---------- x : list vector with frequencies y : ndarray vector with amplitudes title : str plot title ylabel : str plot y label scale : str semilogy, loglog or linear
28,523
def _closed_cb(self, final_frame=None):
    if final_frame:
        self._connection.send_frame(final_frame)
    try:
        self._notify_close_listeners()
    finally:
        self._pending_events = deque()
        self._frame_buffer = deque()
        for protocol_class in self._class_map.values():
            protocol_class._cleanup()
            delattr(self, protocol_class.name)
        self._connection = None
        self._class_map = None
        self._close_listeners = set()
"Private" callback from the ChannelClass when a channel is closed. Only called after broker initiated close, or we receive a close_ok. Caller has the option to send a final frame, to be used to bypass any synchronous or otherwise-pending frames so that the channel can be cleanly closed.
28,524
def _scheduled_check_for_summaries(self): if self._analysis_process is None: return timed_out = time.time() - self._analyze_start_time > self.time_limit if timed_out: self._handle_results( , ) return try: self._analysis_process.join(0.001) except AssertionError: pass if not self._analysis_process.is_alive(): self._handle_results(, ) return self._base.after(self._POLL_PERIOD_MILLISECONDS, self._scheduled_check_for_summaries)
Present the results if they have become available or timed out.
28,525
def mouseReleaseEvent(self, event):
    event.setAccepted(False)
    if self._hotspotPressed:
        event.accept()
        self._hotspotPressed = False
        return
    scene = self.scene()
    if (self.isLocked() or self._ignoreMouseEvents or
            (scene and (scene.inViewMode() or scene.isConnecting()))):
        event.ignore()
        self._ignoreMouseEvents = False
        return
    super(XNode, self).mouseReleaseEvent(event)
    self.emitGeometryChanged()
    if scene:
        scene.blockSelectionSignals(False)
        delta = datetime.datetime.now() - self._pressTime
        if not scene.signalsBlocked() and delta.seconds < 1:
            scene.nodeClicked.emit(self)
Overloads the mouse release event to ignore the event when the scene is in view mode, and release the selection block signal. :param event <QMouseReleaseEvent>
28,526
def me(self):
    self_id = self._state.user.id
    return self.get_member(self_id)
Similar to :attr:`Client.user` except an instance of :class:`Member`. This is essentially used to get the member version of yourself.
28,527
def quantile(self, prob=None, combine_method="interpolate", weights_column=None):
    if len(self) == 0:
        return self
    if prob is None:
        prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]
    if weights_column is None:
        weights_column = "_"
    else:
        assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))
        if isinstance(weights_column, H2OFrame):
            merged = self.cbind(weights_column)
            weights_column = merged.names[-1]
            return H2OFrame._expr(expr=ExprNode("quantile", merged, prob, combine_method, weights_column))
    return H2OFrame._expr(expr=ExprNode("quantile", self, prob, combine_method, weights_column))
Compute quantiles. :param List[float] prob: list of probabilities for which quantiles should be computed. :param str combine_method: for even samples this setting determines how to combine quantiles. This can be one of ``"interpolate"``, ``"average"``, ``"low"``, ``"high"``. :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal importance. This parameter can be either the name of column containing the observation weights in this frame, or a single-column separate H2OFrame of observation weights. :returns: a new H2OFrame containing the quantiles and probabilities.
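A small usage sketch, assuming a running H2O cluster (the toy column of ten values is illustrative):

import h2o

h2o.init()
frame = h2o.H2OFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
q = frame.quantile(prob=[0.25, 0.5, 0.75], combine_method="interpolate")
q.show()  # one row per probability, one quantile column per input column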
28,528
def on_sighup(self, signal_unused, frame_unused):
    for setting in self.http_config:
        if getattr(self.http_server, setting) != self.http_config[setting]:
            LOGGER.debug('Updating HTTP server setting: %s', setting)
            setattr(self.http_server, setting, self.http_config[setting])
    for setting in self.settings:
        if self.app.settings[setting] != self.settings[setting]:
            LOGGER.debug('Updating application setting: %s', setting)
            self.app.settings[setting] = self.settings[setting]
    self.app.handlers = []
    self.app.named_handlers = {}
    routes = self.namespace.config.get(config.ROUTES)
    self.app.add_handlers(".*$", self.app.prepare_routes(routes))
    LOGGER.info('Configuration reloaded')
Reload the configuration :param int signal_unused: Unused signal number :param frame frame_unused: Unused frame the signal was caught in
28,529
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
    binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device)
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
Computes and returns an element-wise dropout mask for a given tensor, where each element in the mask is dropped out with probability dropout_probability. Note that the mask is NOT applied to the tensor - the tensor is passed to retain the correct CUDA tensor type for the mask. Parameters ---------- dropout_probability : float, required. Probability of dropping a dimension of the input. tensor_for_masking : torch.Tensor, required. Returns ------- A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability). This scaling ensures expected values and variances of the output of applying this mask and the original tensor are the same.
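A brief usage sketch; note that the helper only builds the scaled mask, so applying it is the caller's job:

import torch

x = torch.ones(3, 4)
mask = get_dropout_mask(0.5, x)
dropped = x * mask  # surviving elements are scaled by 1 / (1 - p)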
28,530
def extract_features(self, text):
    try:
        return self.feature_extractor(text, self.train_set)
    except (TypeError, AttributeError):
        return self.feature_extractor(text)
Extracts features from a body of text. :rtype: dictionary of features
28,531
def pos_tag_sents(
    sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid"
) -> List[List[Tuple[str, str]]]:
    if not sentences:
        return []
    return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]
Part of Speech tagging Sentence function. :param list sentences: a list of lists of tokenized words :param str engine: * unigram - unigram tagger * perceptron - perceptron tagger (default) * artagger - RDR POS tagger :param str corpus: * orchid - annotated Thai academic articles (default) * orchid_ud - annotated Thai academic articles using Universal Dependencies Tags * pud - Parallel Universal Dependencies (PUD) treebanks :return: returns a list of labels regarding which part of speech it is
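A usage sketch; the token lists are assumed to come from a tokenizer such as pythainlp's word_tokenize, and the printed tags are indicative only:

sents = [["ผม", "รัก", "คุณ"]]
print(pos_tag_sents(sents, engine="perceptron", corpus="orchid"))
# e.g. [[('ผม', 'PPRS'), ('รัก', 'VACT'), ('คุณ', 'PPRS')]]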
28,532
def _import_bibdoc():
    try:
        from invenio.bibdocfile import BibRecDocs, BibDoc
    except ImportError:
        from invenio.legacy.bibdocfile.api import BibRecDocs, BibDoc
    return BibRecDocs, BibDoc
Import BibDocFile.
28,533
def _do_broker_main(self):
    self._waker.broker_ident = thread.get_ident()
    try:
        while self._alive:
            self._loop_once()
        fire(self, 'shutdown')
        self._broker_shutdown()
    except Exception:
        LOG.exception('_broker_main() crashed')
    self._alive = False
    self._exitted = True
    self._broker_exit()
Broker thread main function. Dispatches IO events until :meth:`shutdown` is called.
28,534
def _auth(
    self,
    username,
    password,
    pkey,
    key_filenames,
    allow_agent,
    look_for_keys,
    gss_auth,
    gss_kex,
    gss_deleg_creds,
    gss_host,
    passphrase,
):
    saved_exception = None
    two_factor = False
    allowed_types = set()
    two_factor_types = {"keyboard-interactive", "password"}
    if passphrase is None and password is not None:
        passphrase = password

    if gss_kex and self._transport.gss_kex_used:
        try:
            self._transport.auth_gssapi_keyex(username)
            return
        except Exception as e:
            saved_exception = e

    if gss_auth:
        try:
            return self._transport.auth_gssapi_with_mic(
                username, gss_host, gss_deleg_creds
            )
        except Exception as e:
            saved_exception = e

    if pkey is not None:
        try:
            self._log(
                DEBUG,
                "Trying SSH key {}".format(
                    hexlify(pkey.get_fingerprint())
                ),
            )
            allowed_types = set(
                self._transport.auth_publickey(username, pkey)
            )
            two_factor = allowed_types & two_factor_types
            if not two_factor:
                return
        except SSHException as e:
            saved_exception = e

    if not two_factor:
        for key_filename in key_filenames:
            for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
                try:
                    key = self._key_from_filepath(
                        key_filename, pkey_class, passphrase
                    )
                    allowed_types = set(
                        self._transport.auth_publickey(username, key)
                    )
                    two_factor = allowed_types & two_factor_types
                    if not two_factor:
                        return
                    break
                except SSHException as e:
                    saved_exception = e

    if not two_factor and allow_agent:
        if self._agent is None:
            self._agent = Agent()
        for key in self._agent.get_keys():
            try:
                id_ = hexlify(key.get_fingerprint())
                self._log(DEBUG, "Trying SSH agent key {}".format(id_))
                allowed_types = set(
                    self._transport.auth_publickey(username, key)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
                break
            except (SSHException, IOError) as e:
                saved_exception = e

    if password is not None:
        try:
            self._transport.auth_password(username, password)
            return
        except SSHException as e:
            saved_exception = e
    elif two_factor:
        try:
            self._transport.auth_interactive_dumb(username)
            return
        except SSHException as e:
            saved_exception = e

    if saved_exception is not None:
        raise saved_exception
    raise SSHException("No authentication methods available")
Try, in order:
- The key(s) passed in, if one was passed in.
- Any key we can find through an SSH agent (if allowed).
- Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in ~/.ssh/ (if allowed).
- Plain username/password auth, if a password was given.

(The password might be needed to unlock a private key [if 'passphrase' isn't also given], or for two-factor authentication [for which it is required].)
28,535
def me(self):
    if self._me is not None:
        return self._me
    self._me = self.Person("{username}@{server}".format(
        username=self.client.nickname,
        server=self.client.server,
    ))
    return self._me
Returns :class:`Person <pypump.models.person.Person>` instance of the logged in user. Example: >>> pump.me <Person: [email protected]>
28,536
def generate_type(self):
    types = enforce_list(self._definition['type'])
    try:
        python_types = ', '.join(JSON_TYPE_TO_PYTHON_TYPE[t] for t in types)
    except KeyError as exc:
        raise JsonSchemaDefinitionException('Unknown type: {}'.format(exc))

    extra = ''
    if 'integer' in types:
        extra += ' and not (isinstance({variable}, float) and {variable}.is_integer())'.format(
            variable=self._variable,
        )
    if ('number' in types or 'integer' in types) and 'boolean' not in types:
        extra += ' or isinstance({variable}, bool)'.format(variable=self._variable)

    with self.l('if not isinstance({variable}, ({})){}:', python_types, extra):
        self.l('raise JsonSchemaException("{name} must be {}")', ' or '.join(types))
Validation of type. Can be one type or list of types. Since draft 06 a float without fractional part is an integer. .. code-block:: python {'type': 'string'} {'type': ['string', 'number']}
28,537
def reserve_time_slot(self, calendar_event, participant_id=None, **kwargs):
    from canvasapi.calendar_event import CalendarEvent

    calendar_event_id = obj_or_id(calendar_event, "calendar_event", (CalendarEvent,))

    if participant_id:
        uri = 'calendar_events/{}/reservations/{}'.format(
            calendar_event_id, participant_id
        )
    else:
        uri = 'calendar_events/{}/reservations'.format(calendar_event_id)

    response = self.__requester.request(
        'POST',
        uri,
        _kwargs=combine_kwargs(**kwargs)
    )
    return CalendarEvent(self.__requester, response.json())
Reserve a time slot in a calendar event.

:calls: `POST /api/v1/calendar_events/:id/reservations \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.reserve>`_

:param calendar_event: The object or ID of the calendar event.
:type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int
:param participant_id: The ID of the participant, if given.
:type participant_id: str

:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
28,538
def upload_list(book_id_list, rdf_library=None):
    with open(book_id_list, 'r') as f:
        cache = {}
        for book_id in f:
            book_id = book_id.strip()
            try:
                if int(book_id) in missing_pgid:
                    print(u'{} is missing from PG, skipping'.format(book_id))
                    continue
                upload_book(book_id, rdf_library=rdf_library, cache=cache)
            except Exception as e:
                print(u'Error processing {}'.format(book_id))
                logger.error(u"Error processing: {}\r{}".format(book_id, e))
Uses the fetch, make, push subcommands to add a list of pg books
28,539
def allstats(self, approximate=False):
    statcollect = []
    for x in self.layers():
        try:
            stats = x.ComputeStatistics(approximate)
        except RuntimeError:
            stats = None
        stats = dict(zip(['min', 'max', 'mean', 'sdev'], stats))
        statcollect.append(stats)
    return statcollect
Compute some basic raster statistics Parameters ---------- approximate: bool approximate statistics from overviews or a subset of all tiles? Returns ------- list of dicts a list with a dictionary of statistics for each band. Keys: `min`, `max`, `mean`, `sdev`. See :osgeo:meth:`gdal.Band.ComputeStatistics`.
28,540
def get_by_name(account_name, bucket, region=, json_path=, alias=None): for account in get_all_accounts(bucket, region, json_path)[]: if in account[]: if account[] == account_name: return account elif alias: for a in account[]: if a == account_name: return account
Given an account name, attempts to retrieve associated account info.
28,541
def _create_dim_scales(self): dim_order = self._dim_order.maps[0] for dim in sorted(dim_order, key=lambda d: dim_order[d]): if dim not in self._h5group: size = self._current_dim_sizes[dim] kwargs = {} if self._dim_sizes[dim] is None: kwargs["maxshape"] = (None,) self._h5group.create_dataset( name=dim, shape=(size,), dtype=, **kwargs) h5ds = self._h5group[dim] h5ds.attrs[] = dim_order[dim] if len(h5ds.shape) > 1: dims = self._variables[dim].dimensions coord_ids = np.array([dim_order[d] for d in dims], ) h5ds.attrs[] = coord_ids scale_name = dim if dim in self.variables else NOT_A_VARIABLE h5ds.dims.create_scale(h5ds, scale_name) for subgroup in self.groups.values(): subgroup._create_dim_scales()
Create all necessary HDF5 dimension scale.
28,542
def future_timeout_manager(timeout=None, ioloop=None):
    ioloop = ioloop or tornado.ioloop.IOLoop.current()
    t0 = ioloop.time()

    def _remaining():
        return timeout - (ioloop.time() - t0) if timeout else None

    def maybe_timeout(f):
        if not timeout:
            return f
        else:
            remaining = _remaining()
            deadline = ioloop.time() + remaining
            return with_timeout(deadline, f, ioloop)

    maybe_timeout.remaining = _remaining
    return maybe_timeout
Create a helper function for yielding with a cumulative timeout if required.

Keeps track of time over multiple timeout calls so that a single timeout can be placed over multiple operations.

Parameters
----------
timeout : int or None
    Timeout, or None for no timeout
ioloop : IOLoop instance or None
    tornado IOloop instance to use, or None for IOLoop.current()

Return value
------------
maybe_timeout : func
    Accepts a future, and wraps it in :func:`tornado.gen.with_timeout`.
    maybe_timeout raises :class:`tornado.gen.TimeoutError` if the timeout expires.
    Has a function attribute `remaining()` that returns the remaining timeout,
    or None if timeout == None.

Example
-------
::

    @tornado.gen.coroutine
    def multi_op(timeout):
        maybe_timeout = future_timeout_manager(timeout)
        result1 = yield maybe_timeout(op1())
        result2 = yield maybe_timeout(op2())
        # If the cumulative time of op1 and op2 exceeds timeout,
        # :class:`tornado.gen.TimeoutError` is raised
28,543
def modify_subscription_status(netid, subscription_code, status):
    url = _netid_subscription_url(netid, subscription_code)
    body = {
        'action': 'modify',
        'value': str(status)
    }
    response = post_resource(url, json.dumps(body))
    return _json_to_subscriptions(response)
Post a subscription 'modify' action for the given netid and subscription_code
28,544
def _get_package(auth, owner, package_name):
    package = (
        Package.query
        .filter_by(owner=owner, name=package_name)
        .join(Package.access)
        .filter(_access_filter(auth))
        .one_or_none()
    )
    if package is None:
        raise PackageNotFoundException(owner, package_name, auth.is_logged_in)
    return package
Helper for looking up a package and checking permissions. Only useful for *_list functions; all others should use more efficient queries.
28,545
def get_student_current_grade(self, username, course_id):
    resp = self.requester.get(
        urljoin(
            self.base_url,
            '/api/grades/v1/courses/{course_key}/?username={username}'.format(
                username=username,
                course_key=course_id
            )
        )
    )
    resp.raise_for_status()
    return CurrentGrade(resp.json()[0])
Returns an CurrentGrade object for the user in a course Args: username (str): an edx user's username course_id (str): an edX course id. Returns: CurrentGrade: object representing the student current grade for a course
28,546
async def genTempCoreProxy(mods=None):
    with s_common.getTempDir() as dirn:
        async with await s_cortex.Cortex.anit(dirn) as core:
            if mods:
                for mod in mods:
                    await core.loadCoreModule(mod)
            async with core.getLocalProxy() as prox:
                object.__setattr__(prox, '_core', core)
                yield prox
Get a temporary cortex proxy.
28,547
def build_fncall( ctx, fndoc, argdocs=(), kwargdocs=(), hug_sole_arg=False, trailing_comment=None, ): if callable(fndoc): fndoc = general_identifier(fndoc) has_comment = bool(trailing_comment) argdocs = list(argdocs) kwargdocs = list(kwargdocs) kwargdocs = [ ( comment_doc( concat([ keyword_arg(binding), ASSIGN_OP, doc.doc ]), doc.annotation.value ) if is_commented(doc) else concat([ keyword_arg(binding), ASSIGN_OP, doc ]) ) for binding, doc in kwargdocs ] if not (argdocs or kwargdocs): return concat([ fndoc, LPAREN, RPAREN, ]) if ( hug_sole_arg and not kwargdocs and len(argdocs) == 1 and not is_commented(argdocs[0]) ): return group( concat([ fndoc, LPAREN, argdocs[0], RPAREN ]) ) allarg_docs = [*argdocs, *kwargdocs] if trailing_comment: allarg_docs.append(commentdoc(trailing_comment)) parts = [] for idx, doc in enumerate(allarg_docs): last = idx == len(allarg_docs) - 1 if is_commented(doc): has_comment = True comment_str = doc.annotation.value doc = doc.doc else: comment_str = None part = concat([doc, NIL if last else COMMA]) if comment_str: part = group( flat_choice( when_flat=concat([ part, , commentdoc(comment_str) ]), when_broken=concat([ commentdoc(comment_str), HARDLINE, part, ]), ) ) if not last: part = concat([part, HARDLINE if has_comment else LINE]) parts.append(part) outer = ( always_break if has_comment else group ) return outer( concat([ fndoc, LPAREN, nest( ctx.indent, concat([ SOFTLINE, concat(parts), ]) ), SOFTLINE, RPAREN ]) )
Builds a doc that looks like a function call, from docs that represent the function, arguments and keyword arguments. If ``hug_sole_arg`` is True, and the represented functional call is done with a single non-keyword argument, the function call parentheses will hug the sole argument doc without newlines and indentation in break mode. This makes a difference in calls like this:: > hug_sole_arg = False frozenset( [ 1, 2, 3, 4, 5 ] ) > hug_sole_arg = True frozenset([ 1, 2, 3, 4, 5, ]) If ``trailing_comment`` is provided, the text is rendered as a comment after the last argument and before the closing parenthesis. This will force the function call to be broken to multiple lines.
28,548
def _get_default_parameter_values(sam_template):
    default_values = {}

    parameter_definition = sam_template.get("Parameters", None)
    if not parameter_definition or not isinstance(parameter_definition, dict):
        LOG.debug("No Parameters detected in the template")
        return default_values

    for param_name, value in parameter_definition.items():
        if isinstance(value, dict) and "Default" in value:
            default_values[param_name] = value["Default"]

    LOG.debug("Collected default values for parameters: %s", default_values)
    return default_values
Method to read default values for template parameters and return it Example: If the template contains the following parameters defined Parameters: Param1: Type: String Default: default_value1 Param2: Type: String Default: default_value2 then, this method will grab default value for Param1 and return the following result: { Param1: "default_value1", Param2: "default_value2" } :param dict sam_template: SAM template :return dict: Default values for parameters
28,549
def create(self, language, tagged_text, source_channel=values.unset):
    data = values.of({
        'Language': language,
        'TaggedText': tagged_text,
        'SourceChannel': source_channel,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return SampleInstance(
        self._version,
        payload,
        assistant_sid=self._solution['assistant_sid'],
        task_sid=self._solution['task_sid'],
    )
Create a new SampleInstance :param unicode language: The ISO language-country string that specifies the language used for the new sample :param unicode tagged_text: The text example of how end users might express the task :param unicode source_channel: The communication channel from which the new sample was captured :returns: Newly created SampleInstance :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
28,550
def subcommand(self, *args):
    return Subcommand(*args, directory=self.directory, env_vars=self.env_vars)
Get subcommand acting on a service. Subcommand will run in service directory and with the environment variables used to run the service itself. Args: *args: Arguments to run command (e.g. "redis-cli", "-n", "1") Returns: Subcommand object.
28,551
def arange(start, stop, step=1, **kwargs):
    expand_value = 1 if step > 0 else -1
    return matlabarray(np.arange(start, stop + expand_value, step, **kwargs).reshape(1, -1), **kwargs)
>>> a=arange(1,10) # 1:10
>>> size(a)
matlabarray([[ 1, 10]])
28,552
def static(self, uri, file_or_directory, *args, **kwargs):
    name = kwargs.pop("name", "static")
    if not name.startswith(self.name + "."):
        name = "{}.{}".format(self.name, name)
    kwargs.update(name=name)

    strict_slashes = kwargs.get("strict_slashes")
    if strict_slashes is None and self.strict_slashes is not None:
        kwargs.update(strict_slashes=self.strict_slashes)

    static = FutureStatic(uri, file_or_directory, args, kwargs)
    self.statics.append(static)
Create a blueprint static route from a decorated function. :param uri: endpoint at which the route will be accessible. :param file_or_directory: Static asset.
28,553
def start_monitoring(self):
    logging.info("Multi-plugin health monitor: Started in thread.")
    try:
        while True:
            new_ips = self.get_new_working_set()
            if new_ips:
                logging.debug("Sending list of %d IPs to %d plugins." %
                              (len(new_ips), len(self.plugins)))
                for q in self.monitor_ip_queues.values():
                    q.put(new_ips)

            all_failed_ips = self._accumulate_ips_from_plugins(
                "failed", self.failed_queue_lookup, self.report_failed_acc)
            if all_failed_ips:
                self.q_failed_ips.put(all_failed_ips)

            all_questionable_ips = self._accumulate_ips_from_plugins(
                "questionable", self.questionable_queue_lookup,
                self.report_questionable_acc)
            if all_questionable_ips:
                self.q_questionable_ips.put(all_questionable_ips)

            time.sleep(self.get_monitor_interval())
    except common.StopReceived:
        return
Pass IP lists to monitor sub-plugins and get results from them. Override the common definition of this function, since in the multi plugin it's a little different: Instead of monitoring ourselves, we just use a number of other plugins to gather results. The multi plugin just serves as a proxy and (de)multiplexer for those other plugins. Note that we don't have to push any updates about failed IPs if nothing new was detected. Therefore, our own updates can be entirely driven by updates from the sub-plugin, which keeps our architecture simple.
28,554
def merge_elisions(elided: List[str]) -> str:
    results = list(elided[0])
    for line in elided:
        for idx, car in enumerate(line):
            if car == " ":
                results[idx] = " "
    return "".join(results)
Given a list of strings with different space swapping elisions applied, merge the elisions, taking the most without compounding the omissions.

:param elided:
:return:

>>> merge_elisions([
... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"])
'ignav agua mult hiatus'
28,555
def partial_fit(self, X, y):
    X, y = filter_by_label(X, y, self.reference_label)
    super().partial_fit(X, y)
    return self
:X: {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. :y: Healthy 'h' or 'sick_name'
28,556
def gen_add(src1, src2, dst):
    assert src1.size == src2.size
    return ReilBuilder.build(ReilMnemonic.ADD, src1, src2, dst)
Return an ADD instruction.
28,557
def seek(self, offset, whence=None, partition=None):
    if whence is None:  # set an absolute offset
        if partition is None:
            for tmp_partition in self.offsets:
                self.offsets[tmp_partition] = offset
        else:
            self.offsets[partition] = offset
    elif whence == 1:  # relative to current position
        if partition is None:
            for tmp_partition, _offset in self.offsets.items():
                self.offsets[tmp_partition] = _offset + offset
        else:
            self.offsets[partition] += offset
    elif whence in (0, 2):  # relative to earliest or latest offset
        reqs = []
        deltas = {}
        if partition is None:
            # divide the request offset evenly across all partitions
            (delta, rem) = divmod(offset, len(self.offsets))
            for tmp_partition, r in izip_longest(self.offsets.keys(),
                                                 repeat(1, rem), fillvalue=0):
                deltas[tmp_partition] = delta + r
            for tmp_partition in self.offsets.keys():
                if whence == 0:
                    reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -2, 1))
                elif whence == 2:
                    reqs.append(OffsetRequestPayload(self.topic, tmp_partition, -1, 1))
                else:
                    pass
        else:
            deltas[partition] = offset
            if whence == 0:
                reqs.append(OffsetRequestPayload(self.topic, partition, -2, 1))
            elif whence == 2:
                reqs.append(OffsetRequestPayload(self.topic, partition, -1, 1))
            else:
                pass

        resps = self.client.send_offset_request(reqs)
        for resp in resps:
            self.offsets[resp.partition] = \
                resp.offsets[0] + deltas[resp.partition]
    else:
        raise ValueError('Unexpected value for `whence`, %d' % (whence,))

    # reset the fetch offsets and queue since they are now invalid
    self.fetch_offsets = self.offsets.copy()
    self.count_since_commit += 1
    if self.auto_commit:
        self.commit()
    self.queue = queue.Queue()
Alter the current offset in the consumer, similar to fseek Arguments: offset: how much to modify the offset whence: where to modify it from, default is None * None is an absolute offset * 0 is relative to the earliest available offset (head) * 1 is relative to the current offset * 2 is relative to the latest known offset (tail) partition: modify which partition, default is None. If partition is None, would modify all partitions.
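A usage sketch of the whence semantics, assuming `consumer` is an already-connected SimpleConsumer for some topic (offsets and partition numbers are illustrative):

consumer.seek(0, whence=0)                # rewind every partition to the earliest offset
consumer.seek(-10, whence=2)              # ten messages before the latest known offset
consumer.seek(5, whence=1, partition=0)   # skip 5 forward on partition 0 only
consumer.seek(42, whence=None)            # absolute offset 42 on all partitions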
28,558
def _message_to_payload(cls, message):
    try:
        return json.loads(message.decode())
    except UnicodeDecodeError:
        message = 'messages must be encoded in UTF-8'
    except json.JSONDecodeError:
        message = 'invalid JSON'
    raise cls._error(cls.PARSE_ERROR, message, True, None)
Returns a Python object or a ProtocolError.
28,559
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    first = datetime.datetime(year, month, 1, hour, minute)
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    for n in range(whichweek):
        dt = weekdayone + (whichweek - n) * ONEWEEK
        if dt.month == month:
            return dt
dayofweek == 0 means Sunday, whichweek 5 means last instance
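A worked example of the "whichweek 5 means last instance" convention, assuming ONEWEEK is the one-week timedelta defined alongside the function:

import datetime

ONEWEEK = datetime.timedelta(days=7)
# last Sunday of March 2019 at 02:00 (dayofweek=0, whichweek=5):
# the first Sunday is Mar 3; +5 weeks overshoots into April, so the
# loop falls back to +4 weeks and returns Mar 31.
print(picknthweekday(2019, 3, 0, 2, 0, 5))  # -> 2019-03-31 02:00:00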
28,560
def cornice_enable_openapi_explorer(
        config,
        api_explorer_path='/api-explorer',
        permission=NO_PERMISSION_REQUIRED,
        route_factory=None,
        **kwargs):
    config.add_route('cornice_swagger.api_explorer', api_explorer_path,
                     factory=route_factory)
    config.add_view('cornice_swagger.views.swagger_ui_template_view',
                    permission=permission,
                    route_name='cornice_swagger.api_explorer')
:param config: Pyramid configurator object :param api_explorer_path: where to expose Swagger UI interface view :param permission: pyramid permission for those views :param route_factory: factory for context object for those routes This registers and configures the view that serves api explorer
28,561
def check_species_object(species_name_or_object):
    if isinstance(species_name_or_object, Species):
        return species_name_or_object
    elif isinstance(species_name_or_object, str):
        return find_species_by_name(species_name_or_object)
    else:
        raise ValueError("Unexpected type for species: %s : %s" % (
            species_name_or_object, type(species_name_or_object)))
Helper for validating user supplied species names or objects.
28,562
def ensemble(transform, loglikelihood, parameter_names, nsteps=40000, nburn=400, start=0.5, **problem): import emcee import progressbar if in problem: numpy.random.seed(problem[]) n_params = len(parameter_names) nwalkers = 50 + n_params * 2 if nwalkers > 200: nwalkers = 200 p0 = [numpy.random.rand(n_params) for i in xrange(nwalkers)] start = start + numpy.zeros(n_params) p0[0] = start def like(cube): cube = numpy.array(cube) if (cube <= 1e-10).any() or (cube >= 1-1e-10).any(): return -1e100 params = transform(cube) return loglikelihood(params) sampler = emcee.EnsembleSampler(nwalkers, n_params, like, live_dangerously=True) print pos, prob, state = sampler.run_mcmc(p0, nburn / nwalkers) sampler.reset() print pbar = progressbar.ProgressBar( widgets=[progressbar.Percentage(), progressbar.Counter(), progressbar.Bar(), progressbar.ETA()], maxval=nsteps).start() for results in sampler.sample(pos, iterations=nsteps / nwalkers, rstate0=state): pbar.update(pbar.currval + 1) pbar.finish() print "Mean acceptance fraction:", numpy.mean(sampler.acceptance_fraction) chain = sampler.flatchain final = chain[-1] print chain_post = numpy.array([transform(params) for params in chain]) chain_prob = sampler.flatlnprobability return dict(start=final, chain=chain_post, chain_prior=chain, chain_prob=chain_prob, method=)
**Ensemble MCMC** via `emcee <http://dan.iel.fm/emcee/>`_
28,563
def get_next_line(self):
    line = self.freq_file.readline().strip().split()
    if len(line) < 1:
        self.load_genotypes()
        line = self.freq_file.readline().strip().split()
    info_line = self.info_file.readline().strip().split()
    info = float(info_line[4])
    exp_freq = float(info_line[3])
    return line, info, exp_freq
If we reach the end of the file, we simply open the next, until we \ run out of archives to process
28,564
def complete_token_filtered_with_next(aliases, prefix, expanded, commands):
    complete_ary = list(aliases.keys())
    expanded_ary = list(expanded.keys())
    result = []
    for cmd in complete_ary:
        if cmd.startswith(prefix):
            if cmd in aliases and (
                    0 == len(set(expanded_ary) - set([aliases[cmd]]))):
                result.append([cmd, aliases[cmd]])
                pass
            pass
        pass
    return sorted(result, key=lambda pair: pair[0])
Find all starting matches in dictionary *aliases* that start with *prefix*, but filter out any matches already in *expanded*.
28,565
def validate_scopes(value_list):
    for value in value_list:
        if value not in current_oauth2server.scopes:
            raise ScopeDoesNotExists(value)
    return True
Validate if each element in a list is a registered scope. :param value_list: The list of scopes. :raises invenio_oauth2server.errors.ScopeDoesNotExists: The exception is raised if a scope is not registered. :returns: ``True`` if it's successfully validated.
28,566
def to_keypoints(self):
    from imgaug.augmentables.kps import Keypoint
    return [Keypoint(x=point[0], y=point[1]) for point in self.exterior]
Convert this polygon's `exterior` to ``Keypoint`` instances. Returns ------- list of imgaug.Keypoint Exterior vertices as ``Keypoint`` instances.
28,567
def mouse_move_event(self, event):
    self.example.mouse_position_event(event.x(), event.y())
Forward mouse cursor position events to the example
28,568
def to_transfac(self):
    m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown")
    for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):
        m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons)
    m += "XX"
    return m
Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format.
28,569
def _is_primitive(thing):
    primitive = (int, str, bool, float)
    return isinstance(thing, primitive)
Determine if the value is a primitive
28,570
def hasModule(self, mod):
    if self._modules is None:
        self._initModuleList()
    return mod in self._modules
Returns True if mod is among the loaded modules. @param mod: Module name. @return: Boolean
28,571
def validate_address(address: Any) -> None:
    if not is_address(address):
        raise ValidationError(f"Expected an address, got: {address}")
    if not is_canonical_address(address):
        raise ValidationError(
            "Py-EthPM library only accepts canonicalized addresses. "
            f"{address} is not in the accepted format."
        )
Raise a ValidationError if an address is not canonicalized.
28,572
def deleteSelected(self):
    ndeleted = self.deleteBy(self.isSelected)
    nselected = len(self._selectedRows)
    self._selectedRows.clear()
    if ndeleted != nselected:
        error('expected %s' % nselected)
Delete all selected rows.
28,573
def program_supports_compression(program, compression):
    if program in ('tar', 'star'):
        return compression in ('gzip', 'compress', 'bzip2', 'lzip', 'lzma', 'xz') + py_lzma
    elif program in ('py_tarfile', 'bsdtar', '7z'):
        return compression in ('gzip', 'bzip2') + py_lzma
    return False
Decide if the given program supports the compression natively. @return: True iff the program supports the given compression format natively, else False.
28,574
def read_int32(self, little_endian=True):
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.unpack('%si' % endian, 4)
Read 4 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
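A sketch of the struct format the method builds ("<i" vs ">i" for a 4-byte signed int), using the standard library directly:

import struct

raw = b"\x01\x00\x00\x00"
print(struct.unpack("<i", raw)[0])  # 1         (little endian, the default)
print(struct.unpack(">i", raw)[0])  # 16777216  (big endian)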
28,575
def create_objective_bank_hierarchy(self, alias, desc, genus):
    url_path = self._urls.hierarchy()
    data = {
        'id': re.sub(r'\s+', '-', alias.lower()),
        'displayName': {
            'text': alias
        },
        'description': {
            'text': desc
        },
        'genusTypeId': str(genus)
    }
    return self._post_request(url_path, data)
Create a bank hierarchy with the given alias :param alias: :return:
28,576
def MGMT_COMM_SET(self, Addr=, xCommissionerSessionID=None, xSteeringData=None, xBorderRouterLocator=None, xChannelTlv=None, ExceedMaxPayload=False): print % self.port try: cmd = WPANCTL_CMD + print "-------------------------------" print xCommissionerSessionID print xSteeringData print str(xSteeringData) + + str(hex(xSteeringData)[2:]) print xBorderRouterLocator print xChannelTlv print ExceedMaxPayload print "-------------------------------" if xCommissionerSessionID != None: cmd += + str(xCommissionerSessionID) elif xCommissionerSessionID is None: if self.isActiveCommissioner is True: cmd += + self.__getCommissionerSessionId().lstrip() else: pass if xSteeringData != None: cmd += + str(len(hex(xSteeringData)[2:])) + str(hex(xSteeringData)[2:]) if xBorderRouterLocator != None: cmd += + str(hex(xBorderRouterLocator)) if xChannelTlv != None: cmd += + hex(xChannelTlv).lstrip().zfill(4) print cmd return self.__sendCommand(cmd)[0] != except Exception, e: ModuleHelper.WriteIntoDebugLogger( + str(e))
send MGMT_COMM_SET command Returns: True: successful to send MGMT_COMM_SET False: fail to send MGMT_COMM_SET
28,577
def add_announcement_view(request):
    if request.method == "POST":
        form = AnnouncementForm(request.POST)
        logger.debug(form)
        if form.is_valid():
            obj = form.save()
            obj.user = request.user
            obj.content = safe_html(obj.content)
            obj.save()
            announcement_posted_hook(request, obj)
            messages.success(request, "Successfully added announcement.")
            return redirect("index")
        else:
            messages.error(request, "Error adding announcement")
    else:
        form = AnnouncementForm()
    return render(request, "announcements/add_modify.html", {"form": form, "action": "add"})
Add an announcement.
28,578
async def identify(self, request):
    if hasattr(request, '_session_identity'):
        return request._session_identity
    token = request.cookies.get(self._cookie_name)
    if token is None:
        token = getAuthorizationTokenFromHeader(request)
    if token is None:
        raise Unauthorized()
    identity = await self.decode_jwt(token)
    setattr(request, '_session_identity', identity)
    return identity
Get the logged-in identity from the request.
28,579
def update_allowed(self):
    return self.update_action.allowed(self.column.table.request,
                                      self.datum,
                                      self)
Determines whether update of given cell is allowed. Calls allowed action of defined UpdateAction of the Column.
28,580
def plugin_is_enabled(name, runas=None):
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    return name in list_enabled_plugins(runas)
Return whether the plugin is enabled.

CLI Example:

.. code-block:: bash

    salt '*' rabbitmq.plugin_is_enabled rabbitmq_plugin_name
28,581
def _get_line_type(line):
    stripped = line.strip()
    if not stripped:
        return 'empty'
    remainder = re.sub(r"\s+", " ", re.sub(CHORD_RE, "", stripped))
    if len(remainder) * 2 < len(re.sub(r"\s+", " ", stripped)):
        return 'chords'
    return 'lyrics'
Decide the line type in function of its contents
28,582
def _load_config(initial_namespace=None, defaults=None): if defaults: config = ConfigLoader() config.update_from_object(defaults) namespace = getattr(config, , initial_namespace) app_config = getattr(config, , None) if app_config: if namespace is None: config.update_from_object(app_config) else: _temp = ConfigLoader() _temp.update_from_object(app_config, lambda key: key.startswith(namespace)) config.update(_temp.namespace(namespace)) return config
Kwargs: initial_namespace: defaults:
28,583
def GetPossibleGroup(self):
    this_method = self._call_queue.pop()
    assert this_method == self
    group = None
    try:
        group = self._call_queue[-1]
    except IndexError:
        pass
    return group
Returns a possible group from the end of the call queue or None if no other methods are on the stack.
28,584
def url_builder(self, endpoint, params=None, url_params=None):
    if url_params is None:
        url_params = OrderedDict()
    url_params[self.AUTH_PARAM] = self.api_token
    return super().url_builder(
        endpoint,
        params=params,
        url_params=url_params,
    )
Add authentication URL parameter.
28,585
def populate_timestamps(self, update_header=False):
    ii_start, ii_stop = 0, self.n_ints_in_file
    if self.t_start:
        ii_start = self.t_start
    if self.t_stop:
        ii_stop = self.t_stop
    t0 = self.header[b'tstart']
    t_delt = self.header[b'tsamp']
    if update_header:
        timestamps = ii_start * t_delt / 24. / 60. / 60. + t0
    else:
        timestamps = np.arange(ii_start, ii_stop) * t_delt / 24. / 60. / 60. + t0
    return timestamps
Populate time axis. If update_header, only return the single start timestamp.
28,586
def persist_revision_once(tokens, revision):
    token_map = {id(token): token for token in tokens}
    for token in token_map.values():
        token.persist(revision)
This function makes sure that a revision is only marked as persisting for a token once. This is important since some diff algorithms allow tokens to be copied more than once in a revision. The id(token) should unique to the in-memory representation of any object, so we use that as unique token instance identifier.
28,587
async def start(self):
    successful = 0
    try:
        for adapter in self.adapters:
            await adapter.start()
            successful += 1
        self._started = True
    except:
        for adapter in self.adapters[:successful]:
            await adapter.stop()
        raise
Start all adapters managed by this device adapter. If there is an error starting one or more adapters, this method will stop any adapters that we successfully started and raise an exception.
28,588
def clear(self):
    try:
        delattr(self, '_memo')
    except AttributeError:
        pass
    self.includes = None
Completely clear a Node of all its cached state (so that it can be re-evaluated by interfaces that do continuous integration builds).
28,589
def filter_by_gene_expression(self, gene_expression_dict, min_expression_value=0.0):
    return self.filter_any_above_threshold(
        multi_key_fn=lambda effect: effect.gene_ids,
        value_dict=gene_expression_dict,
        threshold=min_expression_value)
Filters variants down to those which have overlap a gene whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
28,590
def get_coordination_symmetry_measures(self, only_minimum=True, all_csms=True, optimization=None): test_geometries = self.allcg.get_implemented_geometries( len(self.local_geometry.coords)) if len(self.local_geometry.coords) == 1: if len(test_geometries) == 0: return {} result_dict = {: {: 0.0, : [0], : , : {0: 0}, : {0: 0}, : None, : None, : None}} if all_csms: for csmtype in [, , , , , ]: result_dict[][.format(csmtype)] = 0.0 result_dict[][.format(csmtype)] = None result_dict[][.format(csmtype)] = None result_dict[][.format(csmtype)] = None return result_dict result_dict = {} for geometry in test_geometries: self.perfect_geometry = AbstractGeometry.from_cg(cg=geometry, centering_type=self.centering_type, include_central_site_in_centroid= self.include_central_site_in_centroid) points_perfect = self.perfect_geometry.points_wcs_ctwcc() cgsm = self.coordination_geometry_symmetry_measures(geometry, points_perfect=points_perfect, optimization=optimization) result, permutations, algos, local2perfect_maps, perfect2local_maps = cgsm if only_minimum: if len(result) > 0: imin = np.argmin([rr[] for rr in result]) if geometry.algorithms is not None: algo = algos[imin] else: algo = algos result_dict[geometry.mp_symbol] = {: result[imin][], : permutations[ imin], : algo, : local2perfect_maps[ imin], : perfect2local_maps[ imin], : 1.0 / result[imin][], : np.linalg.inv(result[imin][]), : result[imin][]} if all_csms: self._update_results_all_csms(result_dict, permutations, imin, geometry) else: result_dict[geometry.mp_symbol] = {: result, : permutations, : algos, : local2perfect_maps, : perfect2local_maps} return result_dict
Returns the continuous symmetry measures of the current local geometry in a dictionary. :return: the continuous symmetry measures of the current local geometry in a dictionary.
28,591
def handle(request, message=None, redirect=None, ignore=False, escalate=False, log_level=None, force_log=None): exc_type, exc_value, exc_traceback = sys.exc_info() log_method = getattr(LOG, log_level or "exception") force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False) force_silence = getattr(exc_value, "silence_logging", False) if wrap: raise HandledException([exc_type, exc_value, exc_traceback]) if message: ret = handle_recoverable(request, user_message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level) if ret: return ret six.reraise(exc_type, exc_value, exc_traceback)
Centralized error handling for Horizon. Because Horizon consumes so many different APIs with completely different ``Exception`` types, it's necessary to have a centralized place for handling exceptions which may be raised. Exceptions are roughly divided into 3 types: #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization problems. These result in being logged out and sent to the login screen. #. ``NOT_FOUND``: Errors resulting from objects which could not be located via the API. These generally result in a user-facing error message, but are otherwise returned to the normal code flow. Optionally a redirect value may be passed to the error handler so users are returned to a different view than the one requested in addition to the error message. #. ``RECOVERABLE``: Generic API errors which generate a user-facing message but drop directly back to the regular code flow. All other exceptions bubble the stack as normal unless the ``ignore`` argument is passed in as ``True``, in which case only unrecognized errors are bubbled. If the exception is not re-raised, an appropriate wrapper exception class indicating the type of exception that was encountered will be returned.
28,592
def ensemble_change_summary(ensemble1, ensemble2, pst,bins=10, facecolor=,logger=None,filename=None,**kwargs): if logger is None: logger=Logger(,echo=False) logger.log("plot ensemble change") if isinstance(ensemble1, str): ensemble1 = pd.read_csv(ensemble1,index_col=0) ensemble1.columns = ensemble1.columns.str.lower() if isinstance(ensemble2, str): ensemble2 = pd.read_csv(ensemble2,index_col=0) ensemble2.columns = ensemble2.columns.str.lower() unnamed1 = [col for col in ensemble1.columns if "unnamed:" in col] if len(unnamed1) != 0: ensemble1 = ensemble1.iloc[:,:-1] unnamed2 = [col for col in ensemble2.columns if "unnamed:" in col] if len(unnamed2) != 0: ensemble2 = ensemble2.iloc[:,:-1] d = set(ensemble1.columns).symmetric_difference(set(ensemble2. columns)) if len(d) != 0: logger.lraise("ensemble1 does not have the same columns as ensemble2: {0}". format(.join(d))) if "grouper" in kwargs: raise NotImplementedError() else: en_cols = set(ensemble1.columns) if len(en_cols.symmetric_difference(set(pst.par_names))) == 0: par = pst.parameter_data.loc[pst.adj_par_names,:] grouper = par.groupby(par.pargp).groups grouper["all"] = pst.adj_par_names li = par.loc[par.partrans == "log","parnme"] ensemble1.loc[:,li] = ensemble1.loc[:,li].apply(np.log10) ensemble2.loc[:, li] = ensemble2.loc[:, li].apply(np.log10) elif len(en_cols.symmetric_difference(set(pst.obs_names))) == 0: obs = pst.observation_data.loc[pst.nnz_obs_names,:] grouper = obs.groupby(obs.obgnme).groups grouper["all"] = pst.nnz_obs_names else: logger.lraise("could not match ensemble cols with par or obs...") en1_mn, en1_std = ensemble1.mean(axis=0), ensemble1.std(axis=0) en2_mn, en2_std = ensemble2.mean(axis=0), ensemble2.std(axis=0) mn_diff = -1 * (en2_mn - en1_mn) std_diff = 100 * (((en1_std - en2_std) / en1_std)) std_diff[en1_std.index[en1_std==0]] = np.nan fig = plt.figure(figsize=figsize) if "fig_title" in kwargs: plt.figtext(0.5,0.5,kwargs["fig_title"]) else: plt.figtext(0.5, 0.5, "pyemu.Pst.plot(kind=)\nfrom pest control file \n at {1}" .format(pst.filename, str(datetime.now())), ha="center") figs = [] ax_count = 0 for g, names in grouper.items(): logger.log("plotting change for {0}".format(g)) mn_g = mn_diff.loc[names] std_g = std_diff.loc[names] if mn_g.shape[0] == 0: logger.statement("no entries for group ".format(g)) logger.log("plotting change for {0}".format(g)) continue if ax_count % (nr * nc) == 0: if ax_count > 0: plt.tight_layout() figs.append(fig) fig = plt.figure(figsize=figsize) axes = get_page_axes() ax_count = 0 ax = axes[ax_count] mn_g.hist(ax=ax,facecolor=facecolor,alpha=0.5,edgecolor=None,bins=bins) ax.set_yticklabels([]) ax.set_xlabel("mean change",labelpad=0.1) ax.set_title("{0}) mean change group:{1}, {2} entries\nmax:{3:10G}, min:{4:10G}". format(abet[ax_count], g, mn_g.shape[0],mn_g.max(),mn_g.min()), loc="left") ax.grid() ax_count += 1 ax = axes[ax_count] std_g.hist(ax=ax, facecolor=facecolor, alpha=0.5, edgecolor=None, bins=bins) ax.set_yticklabels([]) ax.set_xlabel("sigma percent reduction", labelpad=0.1) ax.set_title("{0}) sigma change group:{1}, {2} entries\nmax:{3:10G}, min:{4:10G}". 
format(abet[ax_count], g, mn_g.shape[0], std_g.max(), std_g.min()), loc="left") ax.grid() ax_count += 1 logger.log("plotting change for {0}".format(g)) for a in range(ax_count, nr * nc): axes[a].set_axis_off() axes[a].set_yticks([]) axes[a].set_xticks([]) plt.tight_layout() figs.append(fig) if filename is not None: plt.tight_layout() with PdfPages(filename) as pdf: for fig in figs: pdf.savefig(fig) plt.close(fig) logger.log("plot ensemble change") else: logger.log("plot ensemble change") return figs
helper function to plot first and second moment change histograms Parameters ---------- ensemble1 : varies str or pd.DataFrames ensemble2 : varies str or pd.DataFrame pst : pyemu.Pst pst instance facecolor : str the histogram facecolor. filename : str the name of the pdf to create. If None, return figs without saving. Default is None.
28,593
def put(text, cbname):
    global _lastSel
    _checkTkInit()
    if cbname == 'CLIPBOARD':
        _theRoot.clipboard_clear()
        if text:
            _theRoot.clipboard_append(text)
        return
    if cbname == 'PRIMARY':
        _lastSel = text
        _theRoot.selection_handle(ch_handler, selection='PRIMARY')
        _theRoot.selection_own(selection='PRIMARY')
        return
    raise RuntimeError("Unexpected clipboard name: " + str(cbname))
Put the given string into the given clipboard.
28,594
def get_path_name(self):
    path = fix_raw_path(os.path.dirname(os.path.abspath(self.path)))
    name = os.path.basename(self.path)
    return path, name
Gets path and name of song :return: Name of path, name of file (or folder)
28,595
def _pdf_find_urls(bytes, mimetype): try: ascii_bytes = b.join(re.compile(b).findall(bytes)) ascii_bytes = ascii_bytes.replace(b, b) except: return [] urls = [] embedded_text = set(re.compile(b).findall(ascii_bytes)) return urls
This function finds URLs inside of PDF bytes.
28,596
def strip_tags(s):
    def handle_match(m):
        name = m.group(1)
        if name in html_entities:
            return unichr(html_entities[name])
        if name[:2] in ('#x', '#X'):
            try:
                return unichr(int(name[2:], 16))
            except ValueError:
                return u''
        elif name.startswith('#'):
            try:
                return unichr(int(name[1:]))
            except ValueError:
                return u''
        return u''
    return _entity_re.sub(handle_match, _striptags_re.sub('', s))
Resolve HTML entities and remove tags from a string.
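A usage sketch; the output assumes the module's html_entities table contains the standard named entities:

print(strip_tags(u"<p>a &amp; b &#65;</p>"))  # u'a & b A'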
28,597
def migrate(gandi, resource, force, background): oper = gandi.disk.migrate(resource, datacenter_id, background) if background: output_generic(gandi, oper, output_keys) return oper
Migrate a disk to another datacenter.
28,598
def from_directory_import(
    self, module, real_names, local_names, import_alias_mapping, skip_init=False
):
    module_path = module[1]
    init_file_location = os.path.join(module_path, '__init__.py')
    init_exists = os.path.isfile(init_file_location)

    if init_exists and not skip_init:
        package_name = os.path.split(module_path)[1]
        return self.add_module(
            module=(module[0], init_file_location),
            module_or_package_name=package_name,
            local_names=local_names,
            import_alias_mapping=import_alias_mapping,
            is_init=True,
            from_from=True
        )
    for real_name in real_names:
        full_name = os.path.join(module_path, real_name)
        if os.path.isdir(full_name):
            new_init_file_location = os.path.join(full_name, '__init__.py')
            if os.path.isfile(new_init_file_location):
                self.add_module(
                    module=(real_name, new_init_file_location),
                    module_or_package_name=real_name,
                    local_names=local_names,
                    import_alias_mapping=import_alias_mapping,
                    is_init=True,
                    from_from=True,
                    from_fdid=True
                )
            else:
                raise Exception('directory needs an __init__.py file')
        else:
            file_module = (real_name, full_name + '.py')
            self.add_module(
                module=file_module,
                module_or_package_name=real_name,
                local_names=local_names,
                import_alias_mapping=import_alias_mapping,
                from_from=True
            )
    return IgnoredNode()
Directories don't need to be packages.
28,599
def to_ndarray(self):
    assert self.indices is None, "sparseTensor to ndarray is not supported"
    return np.array(self.storage, dtype=get_dtype(self.bigdl_type)).reshape(self.shape)
Transfer JTensor to ndarray. As SparseTensor may generate an very big ndarray, so we don't support this function for SparseTensor. :return: a ndarray