Unnamed: 0 (int64): values 0 to 389k
code (string): lengths 26 to 79.6k
docstring (string): lengths 1 to 46.9k
16,400
def handle_msec_timestamp(self, m, master): if m.get_type() == : return msec = m.time_boot_ms if msec + 30000 < master.highest_msec: self.say() print(, msec, master.highest_msec) self.status.highest_msec = msec for mm in self.mpstate.mav_master: mm.link_delayed = False mm.highest_msec = msec return master.highest_msec = msec if msec > self.status.highest_msec: self.status.highest_msec = msec if msec < self.status.highest_msec and len(self.mpstate.mav_master) > 1 and self.mpstate.settings.checkdelay: master.link_delayed = True else: master.link_delayed = False
special handling for MAVLink packets with a time_boot_ms field
16,401
def atanh(x, context=None):
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_atanh,
        (BigFloat._implicit_convert(x),),
        context,
    )
Return the inverse hyperbolic tangent of x.
16,402
def mode_str_to_int(modestr):
    mode = 0
    for iteration, char in enumerate(reversed(modestr[-6:])):
        mode += int(char) << iteration * 3
    return mode
:param modestr: string like 755 or 644 or 100644 - only the last 6 chars will be used :return: String identifying a mode compatible to the mode methods ids of the stat module regarding the rwx permissions for user, group and other, special flags and file system flags, i.e. whether it is a symlink for example.
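A quick illustration may help here (not part of the dataset): the function treats each character as one octal digit and shifts it into place three bits at a time, so the string form round-trips to the usual stat-module integers. A minimal sketch, restating the function above for a self-contained run:

# Illustrative only; mirrors the mode_str_to_int row above.
def mode_str_to_int(modestr):
    mode = 0
    for iteration, char in enumerate(reversed(modestr[-6:])):
        mode += int(char) << iteration * 3  # each octal digit occupies 3 bits
    return mode

assert mode_str_to_int("644") == 0o644        # 420
assert mode_str_to_int("100644") == 0o100644  # regular file, rw-r--r--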
16,403
def regex_find(pattern, content):
    find = re.findall(pattern, content)
    if not find:
        cij.err("pattern <%r> is invalid, no matches!" % pattern)
        cij.err("content: %r" % content)
        return
    if len(find) >= 2:
        cij.err("pattern <%r> is too simple, matched more than 2!" % pattern)
        cij.err("content: %r" % content)
        return
    return find[0]
Find the given 'pattern' in 'content'.
16,404
def update(self, *args, **kwargs):
    arg = dict_arg(*args, **kwargs)
    if isinstance(arg, list):
        for key, val in arg:
            self[key] = val
    else:
        super(AssocDict, self).update(arg)
Preserves order if given an assoc list.
16,405
def directive(self, name, default=None): return getattr(self, , {}).get(name, hug.defaults.directives.get(name, default))
Returns the loaded directive with the specified name, or default if passed name is not present
16,406
def update_server_map(self, config):
    # map each replica-set member's _id to its hostname
    self.server_map = dict([(member['_id'], member['host'])
                            for member in config['members']])
update server_map ({member_id:hostname})
16,407
def create_queue(self, queue_name, queue=None, fail_on_exist=False): s resource manifest is immutable. queue_name: Name of the queue to create. queue: Queue object to create. fail_on_exist: Specify whether to throw an exception when the queue exists. queue_namePUT/' request.body = _get_request_body(_convert_queue_to_xml(queue)) request.path, request.query = self._httpclient._update_request_uri_query(request) request.headers = self._update_service_bus_header(request) if not fail_on_exist: try: self._perform_request(request) return True except AzureHttpError as ex: _dont_fail_on_exist(ex) return False else: self._perform_request(request) return True
Creates a new queue. Once created, this queue's resource manifest is immutable. queue_name: Name of the queue to create. queue: Queue object to create. fail_on_exist: Specify whether to throw an exception when the queue exists.
16,408
def dedent(s):
    head, _, tail = s.partition('\n')
    dedented_tail = textwrap.dedent(tail)
    result = "{head}\n{tail}".format(head=head, tail=dedented_tail)
    return result
Removes the hanging indent from all but the first line of a string.
16,409
def msg2agent(msg, processor=None, legacy=False, **config): if processor.agent is not NotImplemented: return processor.agent(msg, **config) else: usernames = processor.usernames(msg, **config) return None
Return the single username who is the "agent" for an event. An "agent" is the one responsible for the event taking place, for example, if one person gives karma to another, then both usernames are returned by msg2usernames, but only the one who gave the karma is returned by msg2agent. If the processor registered to handle the message does not provide an agent method, then the *first* user returned by msg2usernames is returned (whether that is correct or not). Here we assume that if a processor implements `agent`, then it knows what it is doing and we should trust that. But if it does not implement it, we'll try our best guess. If there are no users returned by msg2usernames, then None is returned.
16,410
def rate_limit_info():
    import json
    import time
    r = requests.get(gh_url + "/rate_limit", auth=login.auth())
    out = json.loads(r.text)
    mins = (out["resources"]["core"]["reset"] - time.time()) / 60
    return out["resources"]["core"]["remaining"], mins
Returns (requests_remaining, minutes_to_reset)
16,411
async def on_message(message):
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content
    data = datatools.get_data()
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return
    await _data.cache[server.id].movehere(channel)
The on_message event handler for this module Args: message (discord.Message): Input message
16,412
def stop(self):
    logger.debug("Stopping playback")
    self.clock.stop()
    self.status = READY
Stops the video stream and resets the clock.
16,413
def _tobytes(self, skipprepack=False):
    stream = BytesIO()
    self._tostream(stream, skipprepack)
    return stream.getvalue()
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes. :param skipprepack: if True, the prepack stage is skipped. For parser internal use. :returns: converted bytes
16,414
def from_json(cls, data):
    user = cls()
    user.user_id = data['user_id']
    user.username = data['username']
    user.auth_system = data['auth_system']
    user.roles = data['roles']
    return user
Return object based on JSON / dict input Args: data (dict): Dictionary containing a serialized User object Returns: :obj:`User`: User object representing the data
16,415
def get_normalized(self):
    magnitude = self.get_magnitude()
    if magnitude > 0:
        magnitude = 1.0 / magnitude
        return Point(self.x * magnitude, self.y * magnitude)
    else:
        return self
Returns a vector of unit length, unless it is the zero vector, in which case it is left as is.
16,416
def fix_e702(self, result, logical): if not logical: return [] logical_lines = logical[2] for line in logical_lines: if (result[] == and in line and STARTSWITH_DEF_REGEX.match(line)): return [] line_index = result[] - 1 target = self.source[line_index] if target.rstrip().endswith(): self.source[line_index] = target.rstrip() self.source[line_index + 1] = self.source[line_index + 1].lstrip() return [line_index + 1, line_index + 2] if target.rstrip().endswith(): self.source[line_index] = target.rstrip() + return [line_index + 1] offset = result[] - 1 first = target[:offset].rstrip().rstrip() second = (_get_indentation(logical_lines[0]) + target[offset:].lstrip().lstrip()) inline_comment = None if target[offset:].lstrip().lstrip()[:2] == : inline_comment = target[offset:].lstrip() if inline_comment: self.source[line_index] = first + inline_comment else: self.source[line_index] = first + + second return [line_index + 1]
Put semicolon-separated compound statement on separate lines.
16,417
def set_mode(self, mode):
    if mode < 0 or mode > 3:
        raise ValueError()
    self._device.mode(mode)
Set SPI mode which controls clock polarity and phase. Should be a numeric value 0, 1, 2, or 3. See wikipedia page for details on meaning: http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus
16,418
def _norm_perm_list_from_perm_dict(self, perm_dict):
    high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
    return [
        [k, list(sorted(high_perm_dict[k]))]
        for k in ORDERED_PERM_LIST
        if high_perm_dict.get(k, False)
    ]
Return a minimal, ordered, hashable list of subjects and permissions.
16,419
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None): L2Representation = self.getL2Representations() objectRepresentation = self.objectL2Representations[objectName] sdrSize = self.config["L2Params"]["sdrSize"] if minOverlap is None: minOverlap = sdrSize / 2 if maxL2Size is None: maxL2Size = 1.5*sdrSize numCorrectClassifications = 0 for col in xrange(self.numColumns): overlapWithObject = len(objectRepresentation[col] & L2Representation[col]) if ( overlapWithObject >= minOverlap and len(L2Representation[col]) <= maxL2Size ): numCorrectClassifications += 1 return numCorrectClassifications == self.numColumns
Return True if objectName is currently unambiguously classified by every L2 column. Classification is correct and unambiguous if the current L2 overlap with the true object is greater than minOverlap and if the size of the L2 representation is no more than maxL2Size :param minOverlap: min overlap to consider the object as recognized. Defaults to half of the SDR size :param maxL2Size: max size for the L2 representation Defaults to 1.5 * SDR size :return: True/False
16,420
def is_date_type(cls):
    if not isinstance(cls, type):
        return False
    return issubclass(cls, date) and not issubclass(cls, datetime)
Return True if the class is a date type.
16,421
def _from_pointer(pointer, incref): if pointer == ffi.NULL: raise ValueError() if incref: cairo.cairo_font_face_reference(pointer) self = object.__new__(FONT_TYPE_TO_CLASS.get( cairo.cairo_font_face_get_type(pointer), FontFace)) FontFace.__init__(self, pointer) return self
Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer. :type incref: bool :param incref: Whether increase the :ref:`reference count <refcounting>` now. :return: A new instance of :class:`FontFace` or one of its sub-classes, depending on the face’s type.
16,422
def ansible_inventory_temp_file( self, keys=[, , ] ): lansible = LagoAnsible(self._prefix) return lansible.get_inventory_temp_file(keys=keys)
Context manager which returns Ansible inventory written on a tempfile. This is the same as :func:`~ansible_inventory`, only the inventory file is written to a tempfile. Args: keys (list of str): Path to the keys that will be used to create groups. Yields: tempfile.NamedTemporaryFile: Temp file containing the inventory
16,423
def _deserialize_key(cls, key):
    if key in cls._KEYS_OVERLAPPING:
        return key + cls._SUFFIX_KEY_OVERLAPPING
    return key
:type key: str :rtype: str
16,424
def from_scalars(**kwargs):
    import numpy as np
    return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})
Similar to from_arrays, but convenient for a DataFrame of length 1. Example: >>> import vaex >>> df = vaex.from_scalars(x=1, y=2) :rtype: DataFrame
16,425
def _is_dtype_type(arr_or_dtype, condition): if arr_or_dtype is None: return condition(type(None)) if isinstance(arr_or_dtype, np.dtype): return condition(arr_or_dtype.type) elif isinstance(arr_or_dtype, type): if issubclass(arr_or_dtype, (PandasExtensionDtype, ExtensionDtype)): arr_or_dtype = arr_or_dtype.type return condition(np.dtype(arr_or_dtype).type) elif arr_or_dtype is None: return condition(type(None)) if hasattr(arr_or_dtype, ): arr_or_dtype = arr_or_dtype.dtype elif is_list_like(arr_or_dtype): return condition(type(None)) try: tipo = pandas_dtype(arr_or_dtype).type except (TypeError, ValueError, UnicodeEncodeError): if is_scalar(arr_or_dtype): return condition(type(None)) return False return condition(tipo)
Return a boolean if the condition is satisfied for the arr_or_dtype. Parameters ---------- arr_or_dtype : array-like The array-like or dtype object whose dtype we want to extract. condition : callable[Union[np.dtype, ExtensionDtypeType]] Returns ------- bool : if the condition is satisfied for the arr_or_dtype
16,426
def hira2kata(text, ignore=''):
    if ignore:
        h2k_map = _exclude_ignorechar(ignore, H2K_TABLE.copy())
        return _convert(text, h2k_map)
    return _convert(text, H2K_TABLE)
Convert Hiragana to Full-width (Zenkaku) Katakana. Parameters ---------- text : str Hiragana string. ignore : str Characters to be ignored in converting. Return ------ str Katakana string. Examples -------- >>> print(jaconv.hira2kata('ともえまみ')) トモエマミ >>> print(jaconv.hira2kata('まどまぎ', ignore='ど')) マどマギ
16,427
def makescacoldesc(columnname, value, datamanagertype=, datamanagergroup=, options=0, maxlen=0, comment=, valuetype=, keywords={}): vtype = valuetype if vtype == : vtype = _value_type_name(value) rec2 = {: vtype, : datamanagertype, : datamanagergroup, : options, : maxlen, : comment, : keywords} return {: columnname, : rec2}
Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan.
16,428
def process_sels(self): self.rels = [] for l in self.core_sels: if self.wght[l] == self.minw: self.garbage.add(l) self.rels.append(-l) else: self.wght[l] -= self.minw self.topv += 1 self.oracle.add_clause([l, self.topv]) self.rels.append(self.topv)
Process soft clause selectors participating in a new core. The negation :math:`\\neg{s}` of each selector literal :math:`s` participating in the unsatisfiable core is added to the list of relaxation literals, which will be later used to create a new totalizer object in :func:`create_sum`. If the weight associated with a selector is equal to the minimal weight of the core, e.g. ``self.minw``, the selector is marked as garbage and will be removed in :func:`filter_assumps`. Otherwise, the clause is split as described in [1]_.
16,429
def get_class(self): classes = {: HelloFailedCode, : BadRequestCode, : BadActionCode, : BadInstructionCode, : BadMatchCode, : FlowModFailedCode, : GroupModFailedCode, : PortModFailedCode, : QueueOpFailedCode, : SwitchConfigFailedCode, : RoleRequestFailedCode, : MeterModFailedCode, : TableModFailedCode, : TableFeaturesFailedCode} return classes.get(self.name, GenericFailedCode)
Return a Code class based on current ErrorType value. Returns: enum.IntEnum: class referenced by current error type.
16,430
def with_fakes(method):
    @wraps(method)
    def apply_clear_and_verify(*args, **kw):
        clear_calls()
        method(*args, **kw)
        verify()
    return apply_clear_and_verify
Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
16,431
def _value_to_color(value, cmap):
    cm = plt.get_cmap(cmap)
    rgba = cm(value)
    return [int(round(255 * v)) for v in rgba[0:3]]
Convert a value in the range [0,1] to an RGB tuple using a colormap.
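As a minimal usage sketch (assuming matplotlib is available and plt is matplotlib.pyplot, as the row above implies), the endpoints of a colormap map to 8-bit RGB triples like so; the exact values depend on the chosen colormap.

import matplotlib.pyplot as plt

def _value_to_color(value, cmap):
    cm = plt.get_cmap(cmap)
    rgba = cm(value)                 # RGBA floats in [0, 1]
    return [int(round(255 * v)) for v in rgba[0:3]]

print(_value_to_color(0.0, 'viridis'))  # roughly [68, 1, 84]
print(_value_to_color(1.0, 'viridis'))  # roughly [253, 231, 37]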
16,432
def base_geodetic_crs(self): base = self.element.find(GML_NS + ) href = base.attrib[XLINK_NS + ] return get(href)
The :class:`GeodeticCRS` on which this projection is based.
16,433
def get_subresource_path_by(resource, subresource_path): if isinstance(subresource_path, six.string_types): subresource_path = [subresource_path] elif not subresource_path: raise ValueError() body = resource.json for path_item in subresource_path: body = body.get(path_item, {}) if not body: raise exception.MissingAttributeError( attribute=.join(subresource_path), resource=resource.path) if not in body: raise exception.MissingAttributeError( attribute=.join(subresource_path)+, resource=resource.path) return body[]
Helper function to find the resource path :param resource: ResourceBase instance from which the path is loaded. :param subresource_path: JSON field to fetch the value from. Either a string, or a list of strings in case of a nested field. It should also include the '@odata.id' :raises: MissingAttributeError, if required path is missing. :raises: ValueError, if path is empty. :raises: AttributeError, if json attr not found in resource
16,434
def _get_format_from_style(self, token, style): result = QtGui.QTextCharFormat() items = list(style.style_for_token(token).items()) for key, value in items: if value is None and key == : value = drift_color(self.background, 1000).name() if value: if key == : result.setForeground(self._get_brush(value)) elif key == : result.setBackground(self._get_brush(value)) elif key == : result.setFontWeight(QtGui.QFont.Bold) elif key == : result.setFontItalic(value) elif key == : result.setUnderlineStyle( QtGui.QTextCharFormat.SingleUnderline) elif key == : result.setFontStyleHint(QtGui.QFont.SansSerif) elif key == : result.setFontStyleHint(QtGui.QFont.Times) elif key == : result.setFontStyleHint(QtGui.QFont.TypeWriter) if token in [Token.Literal.String, Token.Literal.String.Doc, Token.Comment]: result.setObjectType(result.UserObject) return result
Returns a QTextCharFormat for token by reading a Pygments style.
16,435
def adjoin(space: int, *lists: Sequence[str]) -> str: lengths = [max(map(len, x)) + space for x in lists[:-1]] lengths.append(max(map(len, lists[-1]))) max_len = max(map(len, lists)) chains = ( itertools.chain( (x.ljust(length) for x in lst), itertools.repeat( * length, max_len - len(lst)), ) for lst, length in zip(lists, lengths) ) return .join(map(.join, zip(*chains)))
Glue together two sets of strings using `space`.
16,436
def subjects(auth, label=None, project=None): .........AB1234C/data/experiments/XNAT_S0001AB1234CXNAT_S0001MyProject url = .format(auth.url.rstrip()) logger.debug(, url) columns = [ , , ] payload = { : .join(columns) } if label: payload[] = label if project: payload[] = project r = requests.get(url, params=payload, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE) if r.status_code != requests.codes.ok: raise AccessionError(.format(r.status_code, r.url)) try: results = r.json() __quick_validate(results) except ResultSetError as e: raise ResultSetError(.format(e.message, r.url)) results = results[] if int(results[]) == 0: raise NoSubjectsError(.format(r.url)) for item in results[]: yield Subject(uri=item[], id=item[], project=item[], label=item[])
Retrieve Subject tuples for subjects returned by this function. Example: >>> import yaxil >>> auth = yaxil.XnatAuth(url='...', username='...', password='...') >>> yaxil.subjects(auth, 'AB1234C') Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001', project=u'MyProject') :param auth: XNAT authentication :type auth: :mod:`yaxil.XnatAuth` :param label: XNAT Subject label :type label: str :param project: XNAT Subject Project :type project: str :returns: Subject objects :rtype: :mod:`yaxil.Subject`
16,437
def add_vec_to_mat(mat, vec, axis=None, inplace=False, target=None, substract=False): assert mat.flags.c_contiguous if axis is None: if vec.shape[0] == mat.shape[0]: axis = 0 elif vec.shape[0] == mat.shape[1]: axis = 1 else: raise ValueError( ) n, m = mat.shape block = (_compilation_constants[], _compilation_constants[], 1) gridx = ceil_div(n, block[0]) gridy = ceil_div(m, block[1]) grid = (gridx, gridy, 1) if inplace: target = mat elif target is None: target = gpuarray.empty_like(mat) if axis == 0: assert vec.shape[0] == mat.shape[0] add_col_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) elif axis == 1: assert vec.shape[0] == mat.shape[1] add_row_vec_kernel.prepared_call( grid, block, mat.gpudata, vec.gpudata, target.gpudata, np.uint32(n), np.uint32(m), np.int32(substract)) return target
Add a vector to a matrix
16,438
def import_or_die(module_name, entrypoint_names): log_debug("Importing {}".format(module_name)) module_name = os.path.abspath(module_name) if module_name.endswith(): module_name,ext = os.path.splitext(module_name) modname = os.path.basename(module_name) dirname = os.path.dirname(module_name) if dirname and dirname not in sys.path: sys.path.append(dirname) if modname in sys.modules: user_module = sys.modules.get(modname) user_module = importlib.reload(user_module) raise ImportError(e) if not entrypoint_names: return existing_names = dir(user_module) for method in entrypoint_names: if method in existing_names: return getattr(user_module, method) if len(entrypoint_names) > 1: entrypoints = "one of {}".format(.join(entrypoint_names)) else: entrypoints = entrypoint_names[0] raise ImportError("Required entrypoint function or symbol ({}) not found in your code".format(entrypoints))
Import user code; return reference to usercode function. (str) -> function reference
16,439
def _compute_std_dev(self, X): self._sigma = [] if X.shape[0] <= 1: self._sigma = [0.0] else: for x_mean in range(X.shape[0]): std_dev = np.sqrt(sum([np.linalg.norm(x - x_mean) ** 2 for x in X]) / float(X.shape[0]-1)) self._sigma.append(std_dev) return self._sigma
Computes the standard deviation of a Gaussian Distribution with mean vector X[i]
16,440
def click_partial_link_text(self, partial_link_text, timeout=settings.SMALL_TIMEOUT): if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT: timeout = self.__get_new_timeout(timeout) if self.browser == : if self.is_partial_link_text_visible(partial_link_text): element = self.wait_for_partial_link_text(partial_link_text) element.click() return soup = self.get_beautiful_soup() html_links = soup.fetch() for html_link in html_links: if partial_link_text in html_link.text: for html_attribute in html_link.attrs: if html_attribute[0] == : href = html_attribute[1] if href.startswith(): link = "http:" + href elif href.startswith(): url = self.driver.current_url domain_url = self.get_domain_url(url) link = domain_url + href else: link = href self.open(link) return raise Exception( % partial_link_text) raise Exception( "Partial link text {%s} was not found!" % partial_link_text) element = self.wait_for_partial_link_text( partial_link_text, timeout=timeout) self.__demo_mode_highlight_if_active( partial_link_text, by=By.PARTIAL_LINK_TEXT) pre_action_url = self.driver.current_url try: element.click() except (StaleElementReferenceException, ENI_Exception): self.wait_for_ready_state_complete() time.sleep(0.05) element = self.wait_for_partial_link_text( partial_link_text, timeout=timeout) element.click() if settings.WAIT_FOR_RSC_ON_CLICKS: self.wait_for_ready_state_complete() if self.demo_mode: if self.driver.current_url != pre_action_url: self.__demo_mode_pause_if_active() else: self.__demo_mode_pause_if_active(tiny=True)
This method clicks the partial link text on a page.
16,441
def bind(self, fn: "Callable[[Any], Reader]") -> "Reader":
    return Reader(lambda x: fn(self.run(x)).run(x))
r"""Bind a monadic function to the Reader. Haskell: Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r Function: h >>= f = \w -> f (h w) w
16,442
def add_markdown_cell(self, text):
    markdown_cell = {
        "cell_type": "markdown",
        "metadata": {},
        "source": [rst2md(text)]
    }
    self.work_notebook["cells"].append(markdown_cell)
Add a markdown cell to the notebook Parameters ---------- code : str Cell content
16,443
def mouseReleaseEvent(self, event):
    initial_state = event.isAccepted()
    event.ignore()
    self.mouse_released.emit(event)
    if not event.isAccepted():
        event.setAccepted(initial_state)
        super(CodeEdit, self).mouseReleaseEvent(event)
Emits mouse_released signal. :param event: QMouseEvent
16,444
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):
    wcs_data = np.zeros(wcs.npix)
    self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)
    return wcs_data
Creates and fills a wcs map from the hpx data using the pre-calculated mappings hpx_data : the input HEALPix data wcs : the WCS object normalize : True -> preserve integral by splitting HEALPix values between bins
16,445
def concurrent_slots(slots):
    for i, slot in enumerate(slots):
        for j, other_slot in enumerate(slots[i + 1:]):
            if slots_overlap(slot, other_slot):
                yield (i, j + i + 1)
Yields all concurrent slot indices.
16,446
def dot(self, other):
    if self.z:
        return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
    else:
        return (self.x * other.x) + (self.y * other.y)
dot (self, other) -> number Returns the dot product of this Point with another.
16,447
def yaw_pitch_roll(self):
    self._normalise()
    yaw = np.arctan2(2 * (self.q[0] * self.q[3] - self.q[1] * self.q[2]),
                     1 - 2 * (self.q[2] ** 2 + self.q[3] ** 2))
    pitch = np.arcsin(2 * (self.q[0] * self.q[2] + self.q[3] * self.q[1]))
    roll = np.arctan2(2 * (self.q[0] * self.q[1] - self.q[2] * self.q[3]),
                      1 - 2 * (self.q[1] ** 2 + self.q[2] ** 2))
    return yaw, pitch, roll
Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention Returns: yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]` pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, pi/2]` roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]` The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw) Note: This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
16,448
def ellipsize(s, max_length=60):
    if len(s) > max_length:
        ellipsis = '...'
        return s[:(max_length - len(ellipsis))] + ellipsis
    else:
        return s
>>> print(ellipsize(u'lorem ipsum dolor sit amet', 40)) lorem ipsum dolor sit amet >>> print(ellipsize(u'lorem ipsum dolor sit amet', 20)) lorem ipsum dolor...
16,449
def matches(self, txt: str) -> bool: if r in self.pattern_re.pattern: txt = txt.encode().decode() match = self.pattern_re.match(txt) return match is not None and match.end() == len(txt)
Determine whether txt matches pattern :param txt: text to check :return: True if match
16,450
def list(cls, path):
    file_info = cls.parse_remote(path)
    connection = cls.connect(path)
    bucket = connection.get_bucket(file_info.bucket)
    region = "@%s" % file_info.region if file_info.region else ""
    output = []
    for key in bucket.get_all_keys(prefix=file_info.key):
        output.append(cls._S3_FILE % {"bucket": file_info.bucket,
                                      "key": key.name,
                                      "region": region})
    return output
Return a list containing the names of the entries in the directory given by path. The list is in arbitrary order.
16,451
def closeEvent(self, event):
    lf = self.browser.get_current_selection()
    if lf:
        self.last_file.emit(lf)
    return super(GenesisWin, self).close()
Send last file signal on close event :param event: The close event :type event: :returns: None :rtype: None :raises: None
16,452
def get_remote_file(url):
    try:
        return requests.get(url)
    except requests.exceptions.ConnectionError as e:
        print("Connection error!")
        print(e.message.reason)
        exit(1)
Wrapper around ``request.get`` which nicely handles connection errors
16,453
def delete_thing(self, lid):
    logger.info("delete_thing(lid=\"%s\")", lid)
    evt = self.delete_thing_async(lid)
    self._wait_and_except_if_failed(evt)
Delete a Thing Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `lid` (required) (string) local identifier of the Thing you want to delete
16,454
def affine(self, pixelbuffer=0):
    return Affine(
        self.pixel_x_size,
        0,
        self.bounds(pixelbuffer).left,
        0,
        -self.pixel_y_size,
        self.bounds(pixelbuffer).top
    )
Return an Affine object of tile. - pixelbuffer: tile buffer in pixels
16,455
def validateDtd(self, doc, dtd): if doc is None: doc__o = None else: doc__o = doc._o if dtd is None: dtd__o = None else: dtd__o = dtd._o ret = libxml2mod.xmlValidateDtd(self._o, doc__o, dtd__o) return ret
Try to validate the document against the dtd instance Basically it does check all the definitions in the DtD. Note the the internal subset (if present) is de-coupled (i.e. not used), which could give problems if ID or IDREF is present.
16,456
def process_order(self, order): try: dt_orders = self._orders_by_modified[order.dt] except KeyError: self._orders_by_modified[order.dt] = OrderedDict([ (order.id, order), ]) self._orders_by_id[order.id] = order else: self._orders_by_id[order.id] = dt_orders[order.id] = order move_to_end(dt_orders, order.id, last=True) move_to_end(self._orders_by_id, order.id, last=True)
Keep track of an order that was placed. Parameters ---------- order : zp.Order The order to record.
16,457
def jwt_get_secret_key(payload=None):
    User = get_user_model()
    if api_settings.JWT_GET_USER_SECRET_KEY:
        user = User.objects.get(pk=payload.get('user_id'))
        key = str(api_settings.JWT_GET_USER_SECRET_KEY(user))
        return key
    return api_settings.JWT_SECRET_KEY
For enchanced security you may use secret key on user itself. This way you have an option to logout only this user if: - token is compromised - password is changed - etc.
16,458
def auto_directory(rel_name):
    dir_name = rel_path(rel_name, check=False)
    if not os.path.exists(dir_name):
        os.makedirs(dir_name, exist_ok=True)
    return dir_name
If you're using py.path you may do that as: py.path.local(full_path).ensure_dir()
16,459
def create_participant(worker_id, hit_id, assignment_id, mode): try: session.connection().execute("LOCK TABLE participant IN EXCLUSIVE MODE NOWAIT") except exc.OperationalError as e: e.orig = TransactionRollbackError() raise e missing = [p for p in (worker_id, hit_id, assignment_id) if p == "undefined"] if missing: msg = "/participant POST: required values were " return error_response(error_type=msg, status=403) fingerprint_hash = request.args.get("fingerprint_hash") try: fingerprint_found = models.Participant.query.filter_by( fingerprint_hash=fingerprint_hash ).one_or_none() except MultipleResultsFound: fingerprint_found = True if fingerprint_hash and fingerprint_found: db.logger.warning("Same browser fingerprint detected.") if mode == "live": return error_response( error_type="/participant POST: Same participant dectected.", status=403 ) already_participated = models.Participant.query.filter_by( worker_id=worker_id ).one_or_none() if already_participated: db.logger.warning("Worker has already participated.") return error_response( error_type="/participant POST: worker has already participated.", status=403 ) duplicate = models.Participant.query.filter_by( assignment_id=assignment_id, status="working" ).one_or_none() if duplicate: msg = app.logger.warning(msg.format(duplicate.id)) q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id) nonfailed_count = ( models.Participant.query.filter( (models.Participant.status == "working") | (models.Participant.status == "overrecruited") | (models.Participant.status == "submitted") | (models.Participant.status == "approved") ).count() + 1 ) recruiter_name = request.args.get("recruiter", "undefined") if not recruiter_name or recruiter_name == "undefined": recruiter = recruiters.from_config(_config()) if recruiter: recruiter_name = recruiter.nickname participant = models.Participant( recruiter_id=recruiter_name, worker_id=worker_id, assignment_id=assignment_id, hit_id=hit_id, mode=mode, fingerprint_hash=fingerprint_hash, ) exp = Experiment(session) overrecruited = exp.is_overrecruited(nonfailed_count) if overrecruited: participant.status = "overrecruited" session.add(participant) session.flush() result = {"participant": participant.__json__()} if exp.quorum: quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited} db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum)) result["quorum"] = quorum return success_response(**result)
Create a participant. This route is hit early on. Any nodes the participant creates will be defined in reference to the participant object. You must specify the worker_id, hit_id, assignment_id, and mode in the url.
16,460
def dusk(self, date=None, local=True, use_elevation=True): if local and self.timezone is None: raise ValueError("Local time requested but Location has no timezone set.") if self.astral is None: self.astral = Astral() if date is None: date = datetime.date.today() elevation = self.elevation if use_elevation else 0 dusk = self.astral.dusk_utc(date, self.latitude, self.longitude, observer_elevation=elevation) if local: return dusk.astimezone(self.tz) else: return dusk
Calculates the dusk time (the time in the evening when the sun is a certain number of degrees below the horizon. By default this is 6 degrees but can be changed by setting the :attr:`solar_depression` property.) :param date: The date for which to calculate the dusk time. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :returns: The date and time at which dusk occurs. :rtype: :class:`~datetime.datetime`
16,461
def _clear_dict(endpoint_props):
    return dict(
        (prop_name, prop_val)
        for prop_name, prop_val in six.iteritems(endpoint_props)
        if prop_val is not None
    )
Eliminates None entries from the features of the endpoint dict.
16,462
def allowed(self, method, _dict, allow):
    for key in _dict.keys():
        if key not in allow:
            raise LunrError("'%s' is not an argument for method '%s'" % (key, method))
Only these items are allowed in the dictionary
16,463
def matrix_rank(a, tol=None, validate_args=False, name=None): with tf.compat.v1.name_scope(name, , [a, tol]): a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name=) assertions = _maybe_validate_matrix(a, validate_args) if assertions: with tf.control_dependencies(assertions): a = tf.identity(a) s = tf.linalg.svd(a, compute_uv=False) if tol is None: if a.shape[-2:].is_fully_defined(): m = np.max(a.shape[-2:].as_list()) else: m = tf.reduce_max(input_tensor=tf.shape(input=a)[-2:]) eps = np.finfo(a.dtype.as_numpy_dtype).eps tol = (eps * tf.cast(m, a.dtype) * tf.reduce_max(input_tensor=s, axis=-1, keepdims=True)) return tf.reduce_sum(input_tensor=tf.cast(s > tol, tf.int32), axis=-1)
Compute the matrix rank; the number of non-zero SVD singular values. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as "zero". Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: "matrix_rank". Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values.
16,464
def get_aggs(self): res = self.fetch_aggregation_results() if in res and in res[][str(self.parent_agg_counter - 1)]: try: agg = res[][str(self.parent_agg_counter - 1)][]["50.0"] if agg == : agg = None except Exception as e: raise RuntimeError("Multivalue aggregation result not supported") elif in res and in res[][str(self.parent_agg_counter - 1)]: agg = res[][str(self.parent_agg_counter - 1)][] else: agg = res[][] return agg
Compute the values for single valued aggregations :returns: the single aggregation value
16,465
def run(self, options): self.logger.debug("debug enabled...") depends = [] nil_tools = [] self.logger.info("depends list: %s", depends) for v in depends: real_path = shutil.which(v) if real_path: self.print_message("Found {}:{}..." " {}".format(v, real_path, termcolor.colored( , color=))) else: nil_tools.append(v) self.error_message( .format(v, ), prefix=, suffix=) pass if nil_tools: self.print_message() self.error("please install missing tools...") else: self.print_message("\nNo error found," "you can use cliez in right way.") self.logger.debug("check finished...") pass pass
.. todo:: check network connection :param Namespace options: parse result from argparse :return:
16,466
def _set_ospf(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: u, u: None, u: None, u: None, u: u}}), is_container=, yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__ospf = t if hasattr(self, ): self._set()
Setter method for ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf (list) If this variable is read-only (config: false) in the source YANG file, then _set_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ospf() directly.
16,467
def join(self, timeout=None): if not self._started: raise RuntimeError("cannot join thread before it is started") if compat.getcurrent() is self._glet: raise RuntimeError("cannot join current thread") self._finished.wait(timeout)
block until this thread terminates .. note:: this method can block the calling coroutine if the thread has not yet completed. :param timeout: the maximum time to wait. with the default of ``None``, waits indefinitely :type timeout: int, float or None :raises: `RuntimeError` if called inside the thread, or it has not yet been started
16,468
def addComponentEditor(self): row = self._model.rowCount() comp_stack_editor = ExploreComponentEditor() self.ui.trackStack.addWidget(comp_stack_editor) idx_button = IndexButton(row) idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex) self.trackBtnGroup.addButton(idx_button) self.ui.trackBtnLayout.addWidget(idx_button) self.ui.trackStack.setCurrentIndex(row) comp_stack_editor.closePlease.connect(self.removeComponentEditor) delay = Silence() comp_stack_editor.delaySpnbx.setValue(delay.duration()) self._model.insertComponent(delay, row,0) self._allComponents.append([x() for x in self.stimuli_types if x.explore]) for stim in self._allComponents[row]: editor = wrapComponent(stim).showEditor() comp_stack_editor.addWidget(editor, stim.name) exvocal = comp_stack_editor.widgetForName("Vocalization") if exvocal is not None: exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) initcomp = self._allComponents[row][0] self._model.insertComponent(initcomp, row, 1) self.buttons.append(idx_button) comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x : self.setStimIndex(row, x)) comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x : self.setDelay(row, x)) comp_stack_editor.valueChanged.connect(self.valueChanged.emit) return comp_stack_editor
Adds a new component to the model, and an editor for this component to this editor
16,469
def get_queryset(self, request):
    qs = self.model._default_manager.get_queryset()
    ordering = self.get_ordering(request)
    if ordering:
        qs = qs.order_by(*ordering)
    return qs
Returns a QuerySet of all model instances that can be edited by the admin site.
16,470
def get_sequence(self):
    if not self.address:
        raise StellarAddressInvalidError()
    address = self.horizon.account(self.address)
    return int(address.get('sequence'))
Get the sequence number for a given account via Horizon. :return: The current sequence number for a given account :rtype: int
16,471
def run(self, lines): ret = [] for line in lines: while True: match = re.search(r, line) if match != None: title = match.group(1) line = re.sub(r, title, line, count=1) else: break ret.append(line) return ret
Filter method
16,472
def unpack_results(
        data: bytes,
        repetitions: int,
        key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
    bits_per_rep = sum(size for _, size in key_sizes)
    total_bits = repetitions * bits_per_rep
    byte_arr = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
    bits = np.unpackbits(byte_arr, axis=1)[:, ::-1].reshape(-1).astype(bool)
    bits = bits[:total_bits].reshape((repetitions, bits_per_rep))
    results = {}
    ofs = 0
    for key, size in key_sizes:
        results[key] = bits[:, ofs:ofs + size]
        ofs += size
    return results
Unpack data from a bitstring into individual measurement results. Args: data: Packed measurement results, in the form <rep0><rep1>... where each repetition is <key0_0>..<key0_{size0-1}><key1_0>... with bits packed in little-endian order in each byte. repetitions: number of repetitions. key_sizes: Keys and sizes of the measurements in the data. Returns: Dict mapping measurement key to a 2D array of boolean results. Each array has shape (repetitions, size) with size for that measurement.
16,473
def print_struct(struct, ident=0): if not isinstance(struct, (str, bytes, list, tuple)) and hasattr(struct, ): print() for item in struct: print(" "*ident, end=) print_struct(item, ident+1) print(" "*ident + "]") elif not hasattr(struct, ): print(struct) else: if ident: print() for name, _ in struct._fields_: print(" "*ident + "{}:".format(name), end=) print_struct(getattr(struct, name), ident+1)
>>> from ctypes import * >>> class Test(Structure): ... _fields_ = [('foo', c_int)] ... >>> class Test2(Structure): ... _fields_ = [('foo', Test), ('bar', c_int)] ... >>> t = Test2() >>> t.foo.foo = 2 >>> t.bar = 1 >>> print_struct(t) foo: foo: 2 bar: 1
16,474
def height(self):
    if self.interactive:
        if self._height is None:
            self._height = self.term.height
    return self._height
Terminal height.
16,475
def move_user_data(primary, secondary):
    submissions = Submission.objects.filter(authors__id=secondary.pk)
    for subm in submissions:
        if subm.submitter == secondary:
            subm.submitter = primary
        subm.authors.remove(secondary)
        subm.authors.add(primary)
        subm.save()
    try:
        for course in secondary.profile.courses.all():
            primary.profile.courses.add(course)
        primary.profile.save()
    except UserProfile.DoesNotExist:
        pass
Moves all submissions and other data linked to the secondary user into the primary user. Nothing is deleted here, we just modify foreign user keys.
16,476
def convert_bool(string): if string == : return True, True elif string == : return True, False else: return False, False
Check whether string is boolean.
16,477
def compose_suffix(num_docs=0, num_topics=0, suffix=None):
    if not isinstance(suffix, basestring):
        suffix = '_{}X{}'.format(num_docs, num_topics)
    return suffix
Create a short, informative, but not-so-unique identifying string for a trained model If a str suffix is provided then just pass it through. >>> compose_suffix(num_docs=100, num_topics=20) '_100X20' >>> compose_suffix(suffix='_sfx') '_sfx' >>> compose_suffix(suffix='') '' >>> compose_suffix(suffix=None) '_0X0'
16,478
def _write_coco_results(self, _coco, detections): cats = [cat[] for cat in _coco.loadCats(_coco.getCatIds())] class_to_coco_ind = dict(zip(cats, _coco.getCatIds())) results = [] for cls_ind, cls in enumerate(self.classes): if cls == : continue logger.info( % (cls, cls_ind, self.num_classes - 1)) coco_cat_id = class_to_coco_ind[cls] results.extend(self._coco_results_one_category(detections[cls_ind], coco_cat_id)) logger.info( % self._result_file) with open(self._result_file, ) as f: json.dump(results, f, sort_keys=True, indent=4)
example results [{"image_id": 42, "category_id": 18, "bbox": [258.15,41.29,348.26,243.78], "score": 0.236}, ...]
16,479
def fshdev(k):
    k = np.array(k)
    if len(k.shape) != 0:
        n = k.shape[0]
    else:
        n = 1
    R1 = random.random(size=n)
    R2 = random.random(size=n)
    L = np.exp(-2 * k)
    a = R1 * (1 - L) + L
    fac = np.sqrt(-np.log(a) / (2 * k))
    inc = 90. - np.degrees(2 * np.arcsin(fac))
    dec = np.degrees(2 * np.pi * R2)
    if n == 1:
        return dec[0], inc[0]
    else:
        return dec, inc
Generate a random draw from a Fisher distribution with mean declination of 0 and inclination of 90 with a specified kappa. Parameters ---------- k : kappa (precision parameter) of the distribution k can be a single number or an array of values Returns ---------- dec, inc : declination and inclination of random Fisher distribution draw if k is an array, dec, inc are returned as arrays, otherwise, single values
16,480
def export_to(self, appliance, location): if not isinstance(appliance, IAppliance): raise TypeError("appliance can only be an instance of type IAppliance") if not isinstance(location, basestring): raise TypeError("location can only be an instance of type basestring") description = self._call("exportTo", in_p=[appliance, location]) description = IVirtualSystemDescription(description) return description
Exports the machine to an OVF appliance. See :py:class:`IAppliance` for the steps required to export VirtualBox machines to OVF. in appliance of type :class:`IAppliance` Appliance to export this machine to. in location of type str The target location. return description of type :class:`IVirtualSystemDescription` VirtualSystemDescription object which is created for this machine.
16,481
def cat( self, source, buffersize=None, memsize=2 ** 24, compressed=False, encoding=, raw=False, ): assert self._is_s3(source) or isinstance(source, Key), key = self._get_key(source) if not isinstance(source, Key) else source compressed = (compressed or key.name.endswith()) and not raw if compressed: decompress = zlib.decompressobj(16 + zlib.MAX_WBITS) size = 0 bytes_read = 0 err = None undecoded = if key: for i in range(100): obj = self.s3.Object(key.bucket.name, key.name) buffersize = buffersize if buffersize is not None else 2 ** 20 if not size: size = obj.content_length elif size != obj.content_length: raise AwsError() r = obj.get(Range="bytes={}-".format(bytes_read)) try: while bytes_read < size: if size - bytes_read > buffersize: bytes = r[].read(amt=buffersize) else: bytes = r[].read() if compressed: s = decompress.decompress(bytes) else: s = bytes if encoding and not raw: try: decoded = undecoded + s.decode(encoding) undecoded = yield decoded except UnicodeDecodeError: undecoded += s if len(undecoded) > memsize: raise else: yield s bytes_read += len(bytes) except zlib.error: logger.error("Error while decompressing [%s]", key.name) raise except UnicodeDecodeError: raise except Exception: err = True pass if size <= bytes_read: break if size != bytes_read: if err: raise Exception else: raise AwsError( % source.name) if undecoded: assert encoding is not None decoded = undecoded.decode(encoding) yield decoded
Returns an iterator for the data in the key or nothing if the key doesn't exist. Decompresses data on the fly (if compressed is True or key ends with .gz) unless raw is True. Pass None for encoding to skip encoding.
16,482
def edit(self, entry, name, mark=False): fcid = None if isinstance(entry, File): fcid = entry.fid elif isinstance(entry, Directory): fcid = entry.cid else: raise APIError() is_mark = 0 if mark is True: is_mark = 1 if self._req_files_edit(fcid, name, is_mark): entry.reload() return True else: raise APIError()
Edit an entry (file or directory) :param entry: :class:`.BaseFile` object :param str name: new name for the entry :param bool mark: whether to bookmark the entry
16,483
def create_default_users_and_perms(): default_perms = ( ("add_user", "Add User"), ("edit_user", "Edit User"), ("add_role", "Add Role"), ("edit_role", "Edit Role"), ("add_perm", "Add Permission"), ("edit_perm", "Edit Permission"), ("add_network", "Add network"), ("edit_network", "Edit network"), ("delete_network", "Delete network"), ("share_network", "Share network"), ("edit_topology", "Edit network topology"), ("add_project", "Add Project"), ("edit_project", "Edit Project"), ("delete_project", "Delete Project"), ("share_project", "Share Project"), ("edit_data", "Edit network data"), ("view_data", "View network data"), ("add_template", "Add Template"), ("edit_template", "Edit Template"), ("add_dimension", "Add Dimension"), ("update_dimension", "Update Dimension"), ("delete_dimension", "Delete Dimension"), ("add_unit", "Add Unit"), ("update_unit", "Update Unit"), ("delete_unit", "Delete Unit") ) default_roles = ( ("admin", "Administrator"), ("dev", "Developer"), ("modeller", "Modeller / Analyst"), ("manager", "Manager"), ("grad", "Graduate"), ("developer", "Developer"), ("decision", "Decision Maker"), ) roleperms = ( (, "add_user"), (, "edit_user"), (, "add_role"), (, "edit_role"), (, "add_perm"), (, "edit_perm"), (, "add_network"), (, "edit_network"), (, "delete_network"), (, "share_network"), (, "add_project"), (, "edit_project"), (, "delete_project"), (, "share_project"), (, "edit_topology"), (, "edit_data"), (, "view_data"), (, "add_template"), (, "edit_template"), (, "add_dimension"), (, "update_dimension"), (, "delete_dimension"), (, "add_unit"), (, "update_unit"), (, "delete_unit"), ("developer", "add_network"), ("developer", "edit_network"), ("developer", "delete_network"), ("developer", "share_network"), ("developer", "add_project"), ("developer", "edit_project"), ("developer", "delete_project"), ("developer", "share_project"), ("developer", "edit_topology"), ("developer", "edit_data"), ("developer", "view_data"), ("developer", "add_template"), ("developer", "edit_template"), (, "add_dimension"), (, "update_dimension"), (, "delete_dimension"), (, "add_unit"), (, "update_unit"), (, "delete_unit"), ("modeller", "add_network"), ("modeller", "edit_network"), ("modeller", "delete_network"), ("modeller", "share_network"), ("modeller", "edit_topology"), ("modeller", "add_project"), ("modeller", "edit_project"), ("modeller", "delete_project"), ("modeller", "share_project"), ("modeller", "edit_data"), ("modeller", "view_data"), ("manager", "edit_data"), ("manager", "view_data"), ) id_maps_dict = { "perm": {}, "role": {} } perm_dict = {} for code, name in default_perms: perm = Perm(code=code, name=name) perm_dict[code] = perm perms_by_name = db.DBSession.query(Perm).filter(Perm.code==code).all() if len(perms_by_name)==0: log.debug(" db.DBSession.add(perm) db.DBSession.flush() perm_by_name = db.DBSession.query(Perm).filter(Perm.code==code).one() id_maps_dict["perm"][code] = perm_by_name.id role_dict = {} for code, name in default_roles: role = Role(code=code, name=name) role_dict[code] = role roles_by_name = db.DBSession.query(Role).filter(Role.code==code).all() if len(roles_by_name)==0: log.debug(" db.DBSession.add(role) db.DBSession.flush() role_by_name = db.DBSession.query(Role).filter(Role.code==code).one() id_maps_dict["role"][code] = role_by_name.id for role_code, perm_code in roleperms: links_found = db.DBSession.query(RolePerm).filter(RolePerm.role_id==id_maps_dict["role"][role_code]).filter(RolePerm.perm_id==id_maps_dict["perm"][perm_code]).all() if len(links_found)==0: log.debug(" 
roleperm = RolePerm() roleperm.role_id = id_maps_dict["role"][role_code] roleperm.perm_id = id_maps_dict["perm"][perm_code] db.DBSession.add(roleperm) db.DBSession.flush() db.DBSession.flush()
Adds the roles and perms to the DB. It adds only roles, perms and links between them that are not already in the DB. It is possible to add a new role or perm and connect them simply by modifying the following lists.
16,484
def version(self): this_path = os.path.dirname(os.path.realpath(__file__)) version_file = os.path.join(this_path, ) return open(version_file).read().strip()
Return the version number of the Lending Club Investor tool Returns ------- string The version number string
16,485
def _decompress_dicom(dicom_file, output_file): gdcmconv_executable = _get_gdcmconv() subprocess.check_output([gdcmconv_executable, , dicom_file, output_file])
This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress
16,486
def create_segments(self, segments): for segment in segments: s_res = MechResource(segment[], a_const.SEGMENT_RESOURCE, a_const.CREATE) self.provision_queue.put(s_res)
Enqueue segment creates
16,487
def pyephem_earthsun_distance(time):
    import ephem
    sun = ephem.Sun()
    earthsun = []
    for thetime in time:
        sun.compute(ephem.Date(thetime))
        earthsun.append(sun.earth_distance)
    return pd.Series(earthsun, index=time)
Calculates the distance from the earth to the sun using pyephem. Parameters ---------- time : pd.DatetimeIndex Returns ------- pd.Series. Earth-sun distance in AU.
16,488
def get_max(array):
    largest = -np.inf
    for i in array:
        try:
            if i > largest:
                largest = i
        except:
            pass
    if np.isinf(largest):
        raise ValueError("there's no numeric value in array!")
    else:
        return largest
Get maximum value of an array. Automatically ignore invalid data. **中文文档** 获得最大值。
16,489
def _check_convergence(current_position, next_position, current_objective,
                       next_objective, next_gradient, grad_tolerance,
                       f_relative_tolerance, x_tolerance):
    grad_converged = norm(next_gradient, dims=1) <= grad_tolerance
    x_converged = norm(next_position - current_position, dims=1) <= x_tolerance
    f_converged = (norm(next_objective - current_objective, dims=0)
                   <= f_relative_tolerance * current_objective)
    return grad_converged | x_converged | f_converged
Checks if the algorithm satisfies the convergence criteria.
16,490
def _decode_image(fobj, session, filename): buf = fobj.read() image = tfds.core.lazy_imports.cv2.imdecode( np.fromstring(buf, dtype=np.uint8), flags=3) if image is None: logging.warning( "Image %s could not be decoded by OpenCV, falling back to TF", filename) try: image = tf.image.decode_image(buf, channels=3) image = session.run(image) except tf.errors.InvalidArgumentError: logging.fatal("Image %s could not be decoded by Tensorflow", filename) if len(image.shape) == 4: image = image.reshape(image.shape[1:]) return image
Reads and decodes an image from a file object as a Numpy array. The SUN dataset contains images in several formats (despite the fact that all of them have .jpg extension). Some of them are: - BMP (RGB) - PNG (grayscale, RGBA, RGB interlaced) - JPEG (RGB) - GIF (1-frame RGB) Since TFDS assumes that all images have the same number of channels, we convert all of them to RGB. Args: fobj: File object to read from. session: TF session used to decode the images. filename: Filename of the original image in the archive. Returns: Numpy array with shape (height, width, channels).
16,491
def get_simulated_data(nmr_problems): nmr_observed_tanks = 10 nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30, ctype=) observations = uniform(nmr_problems, nmr_observed_tanks, low=0, high=nmr_tanks_ground_truth, ctype=) return observations, nmr_tanks_ground_truth
Simulate some data. This returns the simulated tank observations and the corresponding ground truth maximum number of tanks. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth)
16,492
def get_instance(page_to_consume): global _instances if isinstance(page_to_consume, basestring): uri = page_to_consume page_to_consume = page.get_instance(uri) elif isinstance(page_to_consume, page.Page): uri = page_to_consume.uri else: raise TypeError( "get_instance() expects a parker.Page or basestring derivative." ) page_to_consume.fetch() parsed_page = parser.parse(page_to_consume) try: instance = _instances[uri] except KeyError: instance = ConsumePage( parsed_page ) _instances[uri] = instance return instance
Return an instance of ConsumePage.
16,493
def _map_update_posterior(self): self.global_posterior_ = self.global_prior_.copy() prior_centers = self.get_centers(self.global_prior_) prior_widths = self.get_widths(self.global_prior_) prior_centers_mean_cov = self.get_centers_mean_cov(self.global_prior_) prior_widths_mean_var = self.get_widths_mean_var(self.global_prior_) center_size = self.K * self.n_dim posterior_size = center_size + self.K for k in np.arange(self.K): next_centers = np.zeros((self.n_dim, self.n_subj)) next_widths = np.zeros(self.n_subj) for s in np.arange(self.n_subj): center_start = s * posterior_size width_start = center_start + center_size start_idx = center_start + k * self.n_dim end_idx = center_start + (k + 1) * self.n_dim next_centers[:, s] = self.gather_posterior[start_idx:end_idx]\ .copy() next_widths[s] = self.gather_posterior[width_start + k].copy() posterior_mean, posterior_cov = self._map_update( prior_centers[k].T.copy(), from_tri_2_sym(prior_centers_mean_cov[k], self.n_dim), self.global_centers_cov_scaled, next_centers) self.global_posterior_[k * self.n_dim:(k + 1) * self.n_dim] =\ posterior_mean.T start_idx = self.map_offset[2] + k * self.cov_vec_size end_idx = self.map_offset[2] + (k + 1) * self.cov_vec_size self.global_posterior_[start_idx:end_idx] =\ from_sym_2_tri(posterior_cov) common = 1.0 /\ (prior_widths_mean_var[k] + self.global_widths_var_scaled) observation_mean = np.mean(next_widths) tmp = common * self.global_widths_var_scaled self.global_posterior_[self.map_offset[1] + k] = \ prior_widths_mean_var[k] * common * observation_mean +\ tmp * prior_widths[k] self.global_posterior_[self.map_offset[3] + k] = \ prior_widths_mean_var[k] * tmp return self
Maximum A Posterior (MAP) update of HTFA parameters Returns ------- HTFA Returns the instance itself.
16,494
def delete_all(self): try: self.conn.indices.delete_mapping( index=self.index, doc_type=self.type) except TransportError: logger.warn(, self.index, self.type, exc_info=True)
Deletes all feature collections. This does not destroy the ES index, but instead only deletes all FCs with the configured document type (defaults to ``fc``).
16,495
def highpass(cutoff):
    R = thub(exp(cutoff - pi), 2)
    return (1 - R) / (1 + R * z ** -1)
This strategy uses an exponential approximation for cut-off frequency calculation, found by matching the one-pole Laplace lowpass filter and mirroring the resulting filter to get a highpass.
16,496
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
    assert len(labels) == len(dfs)
    for label in set(labels):
        label_dfs = [s for l, s in zip(labels, dfs) if l == label]
        pohmm = self.pohmm_factory()
        pohmm.fit_df(label_dfs, pstate_col=pstate_col)
        self.pohmms[label] = pohmm
    return self
Fit the classifier with labels y and DataFrames dfs
16,497
def _request(self, url, params={}):
    r = self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN)
    return r
Makes a request using the currently open session. :param url: A url fragment to use in the creation of the master url
16,498
def cross_entropy_error(self, input_data, targets, average=True, cache=None, prediction=False, sum_errors=True): loss = [] if cache is None: cache = self.n_tasks * [None] for targets_task, cache_task, task in \ izip(targets, cache, self.tasks): loss.append(task.cross_entropy_error( input_data, targets_task, average=average, cache=cache_task, prediction=prediction)) if sum_errors: return sum(loss) else: return loss
Computes the cross-entropy error for all tasks.
16,499
def install(cls, uninstallable, prefix, path_items, root=None, warning=None): root = cls._abs_root(root) importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix)) vendor_importer = cls(root=root, importables=importables, uninstallable=uninstallable, warning=warning) sys.meta_path.insert(0, vendor_importer) _tracer().log(.format(vendor_importer), V=3) return vendor_importer
Install an importer for modules found under ``path_items`` at the given import ``prefix``. :param bool uninstallable: ``True`` if the installed importer should be uninstalled and any imports it performed be un-imported when ``uninstall`` is called. :param str prefix: The import prefix the installed importer will be responsible for. :param path_items: The paths relative to ``root`` containing modules to expose for import under ``prefix``. :param str root: The root path of the distribution containing the vendored code. NB: This is the the path to the pex code, which serves as the root under which code is vendored at ``pex/vendor/_vendored``. :param str warning: An optional warning to emit if any imports are made through the installed importer. :return: