def _get_groups(self, data):
    groups = []
    for attribute in SOURCE_KEYS:
        for k, v in data[attribute].items():
            if k is None:
                k = 'Sources'
            if k not in groups:
                groups.append(k)
    for k, v in data['include_files'].items():
        if k is None:
            k = 'Includes'
        if k not in groups:
            groups.append(k)
    return groups
Get all groups defined
def set_data_type(self, data_type):
    validate_type(data_type, type(None), *six.string_types)
    if isinstance(data_type, six.string_types):
        data_type = str(data_type).upper()
    if data_type not in ({None} | set(DSTREAM_TYPE_MAP.keys())):
        raise ValueError("Provided data type not in available set of types")
    self._data_type = data_type

Set the data type for this data point. The data type is actually associated with the stream itself and should not (generally) vary on a point-per-point basis. That said, if creating a new stream by writing a datapoint, it may be beneficial to include this information. The data type provided should be in the set of available data types: { INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }.
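A minimal usage sketch (hypothetical: the `DataPoint` class name and the values shown are illustrative, not taken from this entry):

# Hypothetical usage -- DataPoint stands in for whatever class exposes set_data_type.
point = DataPoint()
point.set_data_type("integer")   # case-insensitive input, normalized to "INTEGER"
point.set_data_type(None)        # allowed: defer typing to the stream itself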
def laplacian_eigenmaps(self, num_dims=None, normed=True, val_thresh=1e-8):
    L = self.laplacian(normed=normed)
    return _null_space(L, num_dims, val_thresh, overwrite=True)

Laplacian Eigenmaps embedding.

num_dims : dimension of embedded coordinates, defaults to input dimension
normed : used for .laplacian() calculation
val_thresh : threshold for omitting vectors with near-zero eigenvalues
def get_addr_of_native_method(self, soot_method):
    for name, symbol in self.native_symbols.items():
        if soot_method.matches_with_native_name(native_method=name):
            l.debug("Found native symbol '%s' @ %x matching Soot method '%s'",
                    name, symbol.rebased_addr, soot_method)
            return symbol.rebased_addr
    native_symbols = "\n".join(self.native_symbols.keys())
    l.warning("No native method found that matches the Soot method '%s'. "
              "Skipping statement.", soot_method.name)
    l.debug("Available symbols (prefix + encoded class path + encoded method "
            "name):\n%s", native_symbols)
    return None

Get the address of the implementation of a natively declared Java function.

:param soot_method: Method descriptor of a natively declared function.
:return: CLE address of the given method.
def generate_http_manifest(self):
    base_path = os.path.dirname(self.translate_path(self.path))
    self.dataset = dtoolcore.DataSet.from_uri(base_path)
    admin_metadata_fpath = os.path.join(base_path, ".dtool", "dtool")
    with open(admin_metadata_fpath) as fh:
        admin_metadata = json.load(fh)
    http_manifest = {
        "admin_metadata": admin_metadata,
        "manifest_url": self.generate_url(".dtool/manifest.json"),
        "readme_url": self.generate_url("README.yml"),
        "overlays": self.generate_overlay_urls(),
        "item_urls": self.generate_item_urls()
    }
    return bytes(json.dumps(http_manifest), "utf-8")
Return http manifest. The http manifest is the resource that defines a dataset as HTTP enabled (published).
def predict(model, x):
    if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"):
        x = x.to_dask_array()
    assert x.ndim == 2
    if len(x.chunks[1]) > 1:
        x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1])))
    func = partial(_predict, model)
    xx = np.zeros((1, x.shape[1]), dtype=x.dtype)
    dt = model.predict(xx).dtype
    return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze()

Predict with a scikit-learn model.

Parameters
----------
model : scikit-learn classifier
x : dask Array

See docstring for ``da.learn.fit``.
def build(self, n, vec):
    for i in range(-self.maxDisplacement, self.maxDisplacement + 1):
        next = vec + [i]
        if n == 1:
            print '{:>5}\t'.format(next), " = ", printSequence(self.encodeMotorInput(next))
        else:
            self.build(n - 1, next)
Recursive function to help print motor coding scheme.
def update_gradebook(self, gradebook_form):
    if self._catalog_session is not None:
        return self._catalog_session.update_catalog(catalog_form=gradebook_form)
    collection = JSONClientValidated('grading',
                                     collection='Gradebook',
                                     runtime=self._runtime)
    if not isinstance(gradebook_form, ABCGradebookForm):
        raise errors.InvalidArgument('argument type is not a GradebookForm')
    if not gradebook_form.is_for_update():
        raise errors.InvalidArgument('the GradebookForm is for update only, not create')
    try:
        if self._forms[gradebook_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('gradebook_form already used in an update transaction')
    except KeyError:
        raise errors.Unsupported('gradebook_form did not originate from this session')
    if not gradebook_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(gradebook_form._my_map)
    self._forms[gradebook_form.get_id().get_identifier()] = UPDATED
    return objects.Gradebook(osid_object_map=gradebook_form._my_map,
                             runtime=self._runtime,
                             proxy=self._proxy)

Updates an existing gradebook.

arg:    gradebook_form (osid.grading.GradebookForm): the form
        containing the elements to be updated
raise:  IllegalState - ``gradebook_form`` already used in an update transaction
raise:  InvalidArgument - the form contains an invalid value
raise:  NullArgument - ``gradebook_form`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
raise:  Unsupported - ``gradebook_form`` did not originate from
        ``get_gradebook_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
def stop(self, timeout_s=None):
    self.stopped.set()
    if self.thread:
        self.thread.join(timeout_s)
        return not self.thread.isAlive()
    else:
        return True

Stops the interval. If a timeout is provided and stop returns False then the
thread is effectively abandoned in whatever state it was in (presumably
dead-locked).

Args:
    timeout_s: The time in seconds to wait on the thread to finish. By
        default it's forever.

Returns:
    False if a timeout was provided and we timed out.
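A sketch of how a caller might honor the timeout contract (hypothetical: `interval` and `log` are assumed names for an instance of this class and a logger):

# Hypothetical caller of stop() with a bounded wait.
if not interval.stop(timeout_s=5.0):
    # The thread did not finish within 5 seconds; it is now abandoned.
    log.warning("interval thread failed to stop in time")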
def create(self, weight, priority, enabled, friendly_name, sip_url):
    data = values.of({
        'Weight': weight,
        'Priority': priority,
        'Enabled': enabled,
        'FriendlyName': friendly_name,
        'SipUrl': sip_url,
    })
    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )
    return OriginationUrlInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'], )

Create a new OriginationUrlInstance.

:param unicode weight: The value that determines the relative load the URI
    should receive compared to others with the same priority
:param unicode priority: The relative importance of the URI
:param bool enabled: Whether the URL is enabled
:param unicode friendly_name: A string to describe the resource
:param unicode sip_url: The SIP address you want Twilio to route your
    Origination calls to
:returns: Newly created OriginationUrlInstance
:rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
def _signal(self, sig, pid=None):
    log = self._params.get('log', self._discard)
    if pid is None:
        pids = self.get_pids()
    else:
        pids = [pid]
    for pid in pids:
        try:
            os.kill(pid, sig)
            log.debug("Signalled '%s' pid %d with %s",
                      self._name, pid, utils.signame(sig))
        except Exception as e:
            log.warning("Failed to signal '%s' pid %d with %s -- %s",
                        self._name, pid, utils.signame(sig), e)
Send a signal to one or all pids associated with this task. Never fails, but logs signalling faults as warnings.
def locked(self):
    conn = self._get_connection()
    try:
        self._lock(conn)
        yield conn
    finally:
        self._unlock(conn)

Context generator for `with` statement, yields thread-safe connection.

:return: thread-safe connection
:rtype: pydbal.connection.Connection
def get_stetson_k(self, mag, avg, err):
    residual = (mag - avg) / err
    stetson_k = np.sum(np.fabs(residual)) \
        / np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
    return stetson_k

Return Stetson K feature.

Parameters
----------
mag : array_like
    An array of magnitude.
avg : float
    An average value of magnitudes.
err : array_like
    An array of magnitude errors.

Returns
-------
stetson_k : float
    Stetson K value.
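A quick self-contained sanity check of the formula (a plain-function sketch with `self` dropped): when every residual has the same magnitude, K evaluates to exactly 1.

import numpy as np

mag = np.array([10.0, 12.0, 10.0, 12.0])
avg = 11.0
err = np.ones_like(mag)

residual = (mag - avg) / err   # [-1, 1, -1, 1]
k = np.sum(np.fabs(residual)) / np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
print(k)  # 1.0 -- constant-amplitude residuals give K = 1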
def get_unassigned_ports(self):
    uri = "{}/unassignedPortsForPortMonitor".format(self.data["uri"])
    response = self._helper.do_get(uri)
    return self._helper.get_members(response)

Gets the collection of ports from the member interconnects which are eligible
for assignment to an analyzer port.

Returns:
    dict: Collection of ports
def get_permissions_for_role(role, brain_or_object):
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    if role not in valid_roles:
        raise ValueError("The Role '{}' is invalid.".format(role))
    out = []
    for item in obj.ac_inherited_permissions(1):
        name, value = item[:2]
        permission = Permission(name, value, obj)
        if role in permission.getRoles():
            out.append(name)
    return out

Return the permissions of the role which are granted on the object.

Code extracted from `IRoleManager.permissionsOfRole`

:param role: The role to check the permission
:param brain_or_object: Catalog brain or object
:returns: List of permissions of the role
def aggregate(self, dataset_ids=None, boundary='exact', side='left', func='mean', **dim_kwargs):
    new_scn = self.copy(datasets=dataset_ids)
    for src_area, ds_ids in new_scn.iter_by_area():
        if src_area is None:
            for ds_id in ds_ids:
                new_scn.datasets[ds_id] = self[ds_id]
            continue
        if boundary != 'exact':
            raise NotImplementedError("boundary modes apart from 'exact' are not implemented yet.")
        target_area = src_area.aggregate(**dim_kwargs)
        resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)
        for ds_id in ds_ids:
            res = self[ds_id].coarsen(boundary=boundary, side=side, func=func, **dim_kwargs)
            new_scn.datasets[ds_id] = getattr(res, func)()
            new_scn.datasets[ds_id].attrs['area'] = target_area
            new_scn.datasets[ds_id].attrs['resolution'] = resolution
    return new_scn

Create an aggregated version of the Scene.

Args:
    dataset_ids (iterable): DatasetIDs to include in the returned `Scene`.
        Defaults to all datasets.
    func (string): Function to apply on each aggregation window. One of
        'mean', 'sum', 'min', 'max', 'median', 'argmin', 'argmax', 'prod',
        'std', 'var'. 'mean' is the default.
    boundary: Not implemented.
    side: Not implemented.
    dim_kwargs: the size of the windows to aggregate.

Returns:
    A new aggregated scene

See also:
    xarray.DataArray.coarsen

Example:
    `scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels by
    applying the `min` function.
def _prepare(self, data, groupname):
    if groupname in self.h5file:
        del self.h5file[groupname]
    group = self.h5file.create_group(groupname)
    group.attrs['version'] = self.version
    data.init_group(group, self.chunk_size, self.compression,
                    self.compression_opts)
    return group

Clear the group if it exists and initialize empty datasets.
def assertHeader(self, name, value=None, *args, **kwargs):
    return name in self.raw_headers and (
        True if value is None else self.raw_headers[name] == value)

Returns `True` if ``name`` is in the headers and, when ``value`` is given, the stored value matches it; returns `False` otherwise.
def name2unicode(name):
    if name in glyphname2unicode:
        return glyphname2unicode[name]
    m = STRIP_NAME.search(name)
    if not m:
        raise KeyError(name)
    return unichr(int(m.group(0)))
Converts Adobe glyph names to Unicode numbers.
def fromseconds(cls, seconds):
    try:
        seconds = int(seconds)
    except TypeError:
        seconds = int(seconds.flatten()[0])
    return cls(datetime.timedelta(0, int(seconds)))
Return a |Period| instance based on a given number of seconds.
def request(self, request_method, api_method, *args, **kwargs):
    url = self._build_url(api_method)
    resp = requests.request(request_method, url, *args, **kwargs)
    try:
        rv = resp.json()
    except ValueError:
        raise RequestFailedError(resp, 'not a json body')
    if not resp.ok:
        raise RequestFailedError(resp, rv.get('error'))
    return rv

Perform a request.

Args:
    request_method: HTTP method for this request.
    api_method: API method name for this request.
    *args: Extra arguments to pass to the request.
    **kwargs: Extra keyword arguments to pass to the request.

Returns:
    A dict containing the request response data.

Raises:
    RequestFailedError: Raised when BearyChat's OpenAPI responds with a
    status code != 2xx.
def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
    if max_vbox_id is not None:
        for i in range(1, max_vbox_id + 1):
            vbox_dir = os.path.join(output_dir, topology_name + '-files',
                                    'vbox', 'vm-%s' % i)
            os.makedirs(vbox_dir)

Create VirtualBox working directories if required.

:param int max_vbox_id: Number of directories to create
:param str output_dir: Output directory
:param str topology_name: Topology name
def csvtolist(inputstr):
    reader = csv.reader([inputstr], skipinitialspace=True)
    output = []
    for r in reader:
        output += r
    return output

converts a csv string into a list
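Because the function only needs the standard-library `csv` module, a runnable duplicate shows the quoting behavior that a naive `str.split(',')` would get wrong:

import csv

def csvtolist(inputstr):
    reader = csv.reader([inputstr], skipinitialspace=True)
    output = []
    for r in reader:
        output += r
    return output

print(csvtolist("a, b, c"))        # ['a', 'b', 'c']
print(csvtolist('a, "b, c", d'))   # ['a', 'b, c', 'd'] -- the quoted comma survives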
def translatePath(path):
    valid_dirs = ['xbmc', 'home', 'temp', 'masterprofile', 'profile',
                  'subtitles', 'userdata', 'database', 'thumbnails',
                  'recordings', 'screenshots', 'musicplaylists',
                  'videoplaylists', 'cdrips', 'skin']
    assert path.startswith('special://'), 'Not a valid special:// path.'
    parts = path.split('/')[2:]
    assert len(parts) > 1, 'Need at least a single root directory'
    assert parts[0] in valid_dirs, '%s is not a valid root dir.' % parts[0]
    _create_dir(os.path.join(TEMP_DIR, parts[0]))
    return os.path.join(TEMP_DIR, *parts)

Creates folders in the OS's temp directory. Doesn't touch any possible XBMC installation on the machine. It attempts to do as little work as possible so this function works seamlessly.
def tempoAdjust1(self, tempoFactor):
    if self.apicalIntersect.any():
        tempoFactor = tempoFactor * 0.5
    else:
        tempoFactor = tempoFactor * 2
    return tempoFactor

Adjust tempo based on recent active apical input only.

:param tempoFactor: scaling signal to MC clock from last sequence item
:return: adjusted scaling signal
def add_namespace(self, namespace):
    if namespace is None:
        raise ValueError("Namespace argument must not be None")
    namespace = namespace.strip('/')
    if namespace in self.namespaces:
        raise CIMError(
            CIM_ERR_ALREADY_EXISTS,
            _format("Namespace {0!A} already exists in the mock "
                    "repository", namespace))
    self.namespaces[namespace] = True

Add a CIM namespace to the mock repository.

The namespace must not yet exist in the mock repository. Note that the
default connection namespace is automatically added to the mock repository
upon creation of this object.

Parameters:
    namespace (:term:`string`): The name of the CIM namespace in the mock
    repository. Must not be `None`. Any leading and trailing slash
    characters are split off from the provided string.

Raises:
    ValueError: Namespace argument must not be None
    CIMError: CIM_ERR_ALREADY_EXISTS if the namespace already exists in
    the mock repository.
def setOutBoundLinkQuality(self, LinkQuality):
    print '%s call setOutBoundLinkQuality' % self.port
    print LinkQuality
    try:
        cmd = 'macfilter rss add-lqi * %s' % str(LinkQuality)
        print cmd
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setOutBoundLinkQuality() Error: " + str(e))

Set custom LinkQualityIn for all messages received from any address.

Args:
    LinkQuality: a given custom link quality

    link quality / link margin mapping table:
        3: 21 - 255 (dB)
        2: 11 - 20 (dB)
        1: 3 - 9 (dB)
        0: 0 - 2 (dB)

Returns:
    True: successfully set the link quality
    False: failed to set the link quality
def ltrim(self, key, start, stop):
    redis_list = self._get_list(key, 'LTRIM')
    if redis_list:
        start, stop = self._translate_range(len(redis_list), start, stop)
        self.redis[self._encode(key)] = redis_list[start:stop + 1]
    return True
Emulate ltrim.
def functionFactory(in_code, name, defaults, globals_, imports):
    def generatedFunction():
        pass
    generatedFunction.__code__ = marshal.loads(in_code)
    generatedFunction.__name__ = name
    generatedFunction.__defaults__ = defaults
    generatedFunction.__globals__.update(pickle.loads(globals_))
    for key, value in imports.items():
        imported_module = __import__(value)
        scoop.logger.debug("Dynamically loaded module {0}".format(value))
        generatedFunction.__globals__.update({key: imported_module})
    return generatedFunction

Creates a function at runtime from binary-compiled ``in_code``.
def remove(self, elem):
    try:
        return PDeque(self._left_list.remove(elem), self._right_list,
                      self._length - 1)
    except ValueError:
        try:
            return PDeque(self._left_list,
                          self._right_list.reverse().remove(elem).reverse(),
                          self._length - 1)
        except ValueError:
            raise ValueError('{0} not found in PDeque'.format(elem))

Return new deque with first element from left equal to elem removed. If no
such element is found a ValueError is raised.

>>> pdeque([2, 1, 2]).remove(2)
pdeque([1, 2])
def unique(values):
    ret = None
    if isinstance(values, collections.Hashable):
        ret = set(values)
    else:
        ret = []
        for value in values:
            if value not in ret:
                ret.append(value)
    return ret

Removes duplicates from a list.

.. code-block:: jinja

    {% set my_list = ['a', 'b', 'c', 'a', 'b'] -%}
    {{ my_list | unique }}

will be rendered as:

.. code-block:: text

    ['a', 'b', 'c']
def handle_legacy_tloc(line: str, position: int, tokens: ParseResults) -> ParseResults:
    log.log(5, 'legacy translocation statement: %s [%d]', line, position)
    return tokens
Handle translocations that lack the ``fromLoc`` and ``toLoc`` entries.
def bookmarks(self):
    cmd = [HG, 'bookmarks']
    output = self._command(cmd).decode(self.encoding, 'replace')
    if output.startswith('no bookmarks set'):
        return []
    results = []
    for line in output.splitlines():
        m = bookmarks_rx.match(line)
        assert m, 'unexpected output: ' + line
        results.append(m.group('name'))
    return results
Get list of bookmarks
def del_feature(self, pr_name):
    if hasattr(self, pr_name):
        delattr(self, pr_name)
        self.features.remove(pr_name)
Permanently deletes a node's feature.
def getMaskIndices(mask):
    return [
        list(mask).index(True),
        len(mask) - 1 - list(mask)[::-1].index(True)
    ]
get lower and upper index of mask
def index_raw_bulk(self, header, document):
    self.bulker.add("%s%s" % (header, document))
    return self.flush_bulk()

Helper function for fast inserting.

:param header: a string with the bulk header; must end with a newline
:param document: a JSON document string; must end with a newline
def preprocess_value(self, value, default=tuple()):
    if not value:
        return default
    if isinstance(value, (list, tuple)):
        if len(value) == 1 and not value[0]:
            return default
    if not isinstance(value, (list, tuple)):
        value = (value,)
    return value
Preprocess the value for set
def validate_identifier(self, field):
    if field.data:
        field.data = field.data.lower()
        if Community.get(field.data, with_deleted=True):
            raise validators.ValidationError(
                _('The identifier already exists. '
                  'Please choose a different one.'))
Validate field identifier.
def set_position(self, point, reset=False):
    if isinstance(point, np.ndarray):
        if point.ndim != 1:
            point = point.ravel()
    self.camera.SetPosition(point)
    if reset:
        self.reset_camera()
    self.camera_set = True
    self._render()
sets camera position to a point
def add_user(self, user, is_admin=False):
    users_count = self.users.all().count()
    if users_count == 0:
        is_admin = True
    org_user = self._org_user_model.objects.create(
        user=user, organization=self, is_admin=is_admin)
    if users_count == 0:
        self._org_owner_model.objects.create(
            organization=self, organization_user=org_user)
    user_added.send(sender=self, user=user)
    return org_user

Adds a new user; if it is the first user, makes the user an admin and the owner.
def rpc_get_names(self, filename, source, offset):
    source = get_source(source)
    if hasattr(self.backend, "rpc_get_names"):
        return self.backend.rpc_get_names(filename, source, offset)
    else:
        raise Fault("get_names not implemented by current backend", code=400)
Get all possible names
def add_petabencana_layer(self):
    from safe.gui.tools.peta_bencana_dialog import PetaBencanaDialog
    dialog = PetaBencanaDialog(self.iface.mainWindow(), self.iface)
    dialog.show()
Add petabencana layer to the map. This uses the PetaBencana API to fetch the latest floods in JK. See https://data.petabencana.id/floods
async def update_version(self):
    get_version = GetVersion(pyvlx=self)
    await get_version.do_api_call()
    if not get_version.success:
        raise PyVLXException("Unable to retrieve version")
    self.version = get_version.version
    get_protocol_version = GetProtocolVersion(pyvlx=self)
    await get_protocol_version.do_api_call()
    if not get_protocol_version.success:
        raise PyVLXException("Unable to retrieve protocol version")
    self.protocol_version = get_protocol_version.version
    PYVLXLOG.warning(
        "Connected to: %s, protocol version: %s",
        self.version, self.protocol_version)
Retrieve version and protocol version from API.
def _build_request(self, verb, verb_arguments):
    method = getattr(self._component, verb)
    method_args = {str(k): v for k, v in verb_arguments.items()}
    return method(**method_args)

Builds HttpRequest object.

Args:
    verb (str): Request verb (ex. insert, update, delete).
    verb_arguments (dict): Arguments to be passed with the request.

Returns:
    httplib2.HttpRequest: HttpRequest to be sent to the API.
def _list_records(self, rtype=None, name=None, content=None):
    if name:
        name = self._relative_name(name)
    if not rtype:
        rtype = "ANY"
    filter_query = {"rdtype": rtype, "name": name, "content": content}
    with localzone.manage(self.filename, self.origin, autosave=True) as zone:
        records = zone.find_record(**filter_query)
    result = []
    for record in records:
        rdict = {
            "type": record.rdtype,
            "name": self._full_name(record.name),
            "ttl": record.ttl,
            "content": record.content,
            "id": record.hashid,
        }
        if rdict["type"] == "TXT":
            rdict["content"] = rdict["content"].replace('"', "")
        result.append(rdict)
    LOGGER.debug("list_records: %s", result)
    return result
Return a list of records matching the supplied params. If no params are provided, then return all zone records. If no records are found, return an empty list.
def pack(args):
    " Parse file or dir, import css, js code and save with prefix "
    assert op.exists(args.source), "Does not exist: %s" % args.source
    zeta_pack(args)
Parse file or dir, import css, js code and save with prefix
def only_self(self):
    others, self.others = self.others, []
    try:
        yield
    finally:
        self.others = others + self.others
Only match in self not others.
def regs(self):
    regs = set()
    for operand in self.operands:
        if not operand.type.has_reg:
            continue
        regs.update(operand.regs)
    return regs
Names of all registers used by the instruction.
def _bsecurate_cli_component_file_refs(args):
    data = curate.component_file_refs(args.files)
    s = ''
    for cfile, cdata in data.items():
        s += cfile + '\n'
        rows = []
        for el, refs in cdata:
            rows.append((' ' + el, ' '.join(refs)))
        s += '\n'.join(format_columns(rows)) + '\n\n'
    return s
Handles the component-file-refs subcommand
def delete_lines(self):
    cursor = self.textCursor()
    self.__select_text_under_cursor_blocks(cursor)
    cursor.removeSelectedText()
    cursor.deleteChar()
    return True

Deletes the document lines under cursor.

:return: Method success.
:rtype: bool
def response(self, in_thread: Optional[bool] = None) -> "Message":
    data = {"channel": self["channel"]}
    if in_thread:
        if "message" in self:
            data["thread_ts"] = (
                self["message"].get("thread_ts") or self["message"]["ts"]
            )
        else:
            data["thread_ts"] = self.get("thread_ts") or self["ts"]
    elif in_thread is None:
        if "message" in self and "thread_ts" in self["message"]:
            data["thread_ts"] = self["message"]["thread_ts"]
        elif "thread_ts" in self:
            data["thread_ts"] = self["thread_ts"]
    return Message(data)

Create a response message.

Depending on the incoming message the response can be in a thread. By default
the response follows where the incoming message was posted.

Args:
    in_thread (boolean): Overwrite the `threading` behaviour

Returns:
    a new :class:`slack.event.Message`
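A usage sketch of the threading rules (hypothetical payload; field names follow the Slack event shapes the method itself reads):

# Hypothetical incoming event that already lives in a thread.
incoming = Message({"channel": "C123",
                    "ts": "1589000000.000100",
                    "thread_ts": "1588999999.000200"})

reply = incoming.response()                     # default: stays in the existing thread
threaded = incoming.response(in_thread=True)    # force a threaded reply
top_level = incoming.response(in_thread=False)  # force a channel-level reply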
def setup(service_manager, conf, reload_method="reload"):
    conf.register_opts(service_opts)
    _load_service_manager_options(service_manager, conf)

    def _service_manager_reload():
        _configfile_reload(conf, reload_method)
        _load_service_manager_options(service_manager, conf)

    if os.name != "posix":
        return
    service_manager.register_hooks(
        on_new_worker=functools.partial(_new_worker_hook, conf, reload_method),
        on_reload=_service_manager_reload)

Load services configuration from an oslo config object.

It reads ServiceManager and Service configuration options from an
oslo_config.ConfigOpts() object. It also registers a ServiceManager hook to
reload the configuration file on reload, in the master process and in all
children. Then, when each child starts or reloads, the configuration options
are logged if the oslo config option 'log_options' is True. On children, the
configuration file is reloaded before running the application reload method.

Options currently supported on ServiceManager and Service:

* graceful_shutdown_timeout

:param service_manager: ServiceManager instance
:type service_manager: cotyledon.ServiceManager
:param conf: Oslo Config object
:type conf: oslo_config.ConfigOpts()
:param reload_method: reload or mutate the config files
:type reload_method: str "reload/mutate"
def add_actions_to_context_menu(self, menu):
    inspect_action = create_action(
        self, _("Inspect current object"),
        QKeySequence(get_shortcut('console', 'inspect current object')),
        icon=ima.icon('MessageBoxInformation'),
        triggered=self.inspect_object)
    clear_line_action = create_action(
        self, _("Clear line or block"),
        QKeySequence(get_shortcut('console', 'clear line')),
        triggered=self.clear_line)
    reset_namespace_action = create_action(
        self, _("Remove all variables"),
        QKeySequence(get_shortcut('ipython_console', 'reset namespace')),
        icon=ima.icon('editdelete'),
        triggered=self.reset_namespace)
    clear_console_action = create_action(
        self, _("Clear console"),
        QKeySequence(get_shortcut('console', 'clear shell')),
        triggered=self.clear_console)
    quit_action = create_action(
        self, _("&Quit"), icon=ima.icon('exit'),
        triggered=self.exit_callback)
    add_actions(menu, (None, inspect_action, clear_line_action,
                       clear_console_action, reset_namespace_action,
                       None, quit_action))
    return menu
Add actions to IPython widget context menu
def update(self, quality_score, issue=values.unset):
    return self._proxy.update(quality_score, issue=issue, )

Update the FeedbackInstance.

:param unicode quality_score: The call quality expressed as an integer
    from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Updated FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
def raise_for_redefined_annotation(self, line: str, position: int, annotation: str) -> None:
    if self.disallow_redefinition and self.has_annotation(annotation):
        raise RedefinedAnnotationError(self.get_line_number(), line, position, annotation)

Raise an exception if the given annotation is already defined.

:raises: RedefinedAnnotationError
def _initSymbols(ptc):
    ptc.am = ['', '']
    ptc.pm = ['', '']
    for idx, xm in enumerate(ptc.locale.meridian[:2]):
        target = ['am', 'pm'][idx]
        setattr(ptc, target, [xm])
        target = getattr(ptc, target)
        if xm:
            lxm = xm.lower()
            target.extend((xm[0], '{0}.{1}.'.format(*xm), lxm, lxm[0],
                           '{0}.{1}.'.format(*lxm)))
Initialize symbols and single character constants.
def watch_context(keys, result, reqid, container, module='objectdb'):
    try:
        keys = [k for k, r in zip(keys, result) if r is not None]
        yield result
    finally:
        if keys:
            async def clearup():
                try:
                    await send_api(container, module, 'munwatch',
                                   {'keys': keys, 'requestid': reqid})
                except QuitException:
                    pass
            container.subroutine(clearup(), False)
DEPRECATED - use request_context for most use cases
def create_transform(ctx, transform):
    from canari.commands.create_transform import create_transform
    create_transform(ctx.project, transform)
Creates a new transform in the specified directory and auto-updates dependencies.
def add(self, metric_name, stat, config=None):
    with self._lock:
        metric = KafkaMetric(metric_name, stat, config or self._config)
        self._registry.register_metric(metric)
        self._metrics.append(metric)
        self._stats.append(stat)

Register a metric with this sensor.

Arguments:
    metric_name (MetricName): The name of the metric
    stat (AbstractMeasurableStat): The statistic to keep
    config (MetricConfig): A special configuration for this metric. If
        None, use the sensor default configuration.
def add_to_configs(self, configs):
    if len(configs) == 0:
        return None
    if self.configs is None:
        self.configs = np.atleast_2d(configs)
    else:
        configs = np.atleast_2d(configs)
        self.configs = np.vstack((self.configs, configs))
    return self.configs

Add one or more measurement configurations to the stored configurations.

Parameters
----------
configs : list or numpy.ndarray
    list or array of configurations

Returns
-------
configs : Kx4 numpy.ndarray
    array holding all configurations of this instance
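A usage sketch (hypothetical: `parman` stands for an instance of the class holding these configurations):

import numpy as np

parman.add_to_configs([1, 2, 4, 3])              # first call stores a 1x4 array
parman.add_to_configs(np.array([[1, 2, 4, 3],
                                [1, 3, 2, 4]]))  # subsequent calls stack rows
print(parman.configs.shape)                      # (3, 4)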
def validate(self, value):
    if value in self.empty_values and self.required:
        raise ValidationError(self.error_messages['required'])
This was overridden to have our own ``empty_values``.
def get_addon_name(addonxml):
    xml = parse(addonxml)
    addon_node = xml.getElementsByTagName('addon')[0]
    return addon_node.getAttribute('name')
Parses an addon name from the given addon.xml filename.
def has_namespace(self, namespace: str) -> bool:
    return self.has_enumerated_namespace(namespace) or self.has_regex_namespace(namespace)
Check that the namespace has either been defined by an enumeration or a regular expression.
def verify(self, windowSize=None):
    if self.samplerate() is None:
        return "Multiple recording files with conflicting samplerates"
    msg = self._autoParams.verify()
    if msg:
        return msg
    if self.traceCount() == 0:
        return "Test is empty"
    if windowSize is not None:
        durations = self.expandFunction(self.duration)
        if durations[0] > windowSize or durations[-1] > windowSize:
            return "Stimulus duration exceeds window duration"
    msg = self.verifyExpanded(self.samplerate())
    if msg:
        return msg
    if self.caldb is None or self.calv is None:
        return "Test reference voltage not set"
    if None in self.voltage_limits:
        return "Device voltage limits not set"
    return 0

Checks the stimulus, including expanded parameters, for invalidating conditions.

:param windowSize: acquisition (recording) window size (seconds)
:type windowSize: float
:returns: str -- error message, if any, 0 otherwise
def play_actions(self, target):
    for method_name, args, kwargs in self.actions:
        method = getattr(target, method_name)
        method(*args, **kwargs)

Play recorded actions on the target object.

:param target: the target which receives all recorded actions; normally a
    Brownant app instance.
:type target: :class:`~brownant.app.Brownant`
def redirect_response(self, url, permanent=False):
    if permanent:
        self.send_response(301)
    else:
        self.send_response(302)
    self.send_header("Location", url)
    self.end_headers()
Generate redirect response
def set_ylabels(self, label=None, **kwargs):
    if label is None:
        label = label_from_attrs(self.data[self._y_var])
    for ax in self._left_axes:
        ax.set_ylabel(label, **kwargs)
    return self
Label the y axis on the left column of the grid.
def _get_struct_encodedu32(self):
    useful = []
    while True:
        byte = ord(self._src.read(1))
        useful.append(byte)
        if byte < 0x80:  # high bit clear -> last byte of the varint
            break
    useful = ['00000000' + bin(b)[2:] for b in useful[::-1]]
    return int(''.join([b[-7:] for b in useful]), 2)

Get an EncodedU32 number.
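The scheme is a standard 7-bits-per-byte varint: the high bit of each byte marks continuation and the least significant 7-bit group arrives first. A standalone sketch of the same decode, reading from an in-memory buffer instead of `self._src`, with a worked value:

import io

def decode_u32(raw):
    # Mirrors the method above: collect bytes until the high bit is clear,
    # then stitch the low 7 bits together, most significant byte first.
    src = io.BytesIO(raw)
    useful = []
    while True:
        byte = ord(src.read(1))
        useful.append(byte)
        if byte < 0x80:
            break
    bits = ['00000000' + bin(b)[2:] for b in useful[::-1]]
    return int(''.join(b[-7:] for b in bits), 2)

print(decode_u32(b'\x96\x01'))  # 150: 0x96 supplies 0b0010110, 0x01 the high 0b0000001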
def end_state(self):
    if self.str_begin != len(self.format):
        if len(self.state) > 1 or self.state[-1] != 'string':
            self.fmt.append_text(
                "(Bad format string; ended in state %r)" % self.state[-1])
        else:
            self.fmt.append_text(self.format[self.str_begin:])
    return self.fmt
Wrap things up and add any final string content.
def _update_params(self, constants):
    constants = np.max(np.min(constants, 1))
    self.params['r']['value'] = max([self.params['r']['value'], constants])
    epsilon = constants / self.params['r']['value']
    influence = self._calculate_influence(epsilon)
    return influence * epsilon
Update the params.
def output_json(data, code, headers=None):
    settings = current_app.config.get('RESTFUL_JSON', {})
    if current_app.debug:
        settings.setdefault('indent', 4)
        settings.setdefault('sort_keys', not PY3)
    dumped = dumps(data, **settings) + "\n"
    resp = make_response(dumped, code)
    resp.headers.extend(headers or {})
    return resp
Makes a Flask response with a JSON encoded body
def get_cached_placeholder_output(parent_object, placeholder_name):
    if not PlaceholderRenderingPipe.may_cache_placeholders():
        return None
    language_code = get_parent_language_code(parent_object)
    cache_key = get_placeholder_cache_key_for_parent(
        parent_object, placeholder_name, language_code)
    return cache.get(cache_key)
Return cached output for a placeholder, if available. This avoids fetching the Placeholder object.
def get_residuals(ds, m):
    model_spectra = get_model_spectra(ds, m)
    resid = ds.test_flux - model_spectra
    return resid

Using the dataset and model object, calculate the residuals and return.

Parameters
----------
ds : dataset object
m : model object

Returns
-------
residuals : array of residuals, spec minus model spec
def sortByNamespacePrefix(urisList, nsList):
    exit = []
    urisList = sort_uri_list_by_name(urisList)
    for ns in nsList:
        innerexit = []
        for uri in urisList:
            if str(uri).startswith(str(ns)):
                innerexit += [uri]
        exit += innerexit
    for uri in urisList:
        if uri not in exit:
            exit += [uri]
    return exit

Given an ordered list of namespace prefixes, order a list of URIs based on that. E.g.

In [7]: ll
Out[7]:
[rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
 rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
 rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
 rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass')]
In [8]: sortByNamespacePrefix(ll, [OWL.OWLNS, RDFS])
Out[8]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#equivalentClass'),
 rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
 rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
 rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')]
def get_client_secret(self):
    self._client_secret = predix.config.get_env_value(
        predix.app.Manifest, 'client_secret')
    return self._client_secret
Return the client secret that should correspond with the client id.
def is_root(self):
    sub_prefix = self.reddit_session.config.by_object[Submission]
    return self.parent_id.startswith(sub_prefix)
Return True when the comment is a top level comment.
def modify_area(self, pid, xmin, xmax, zmin, zmax, value):
    area_polygon = shapgeo.Polygon(
        ((xmin, zmax), (xmax, zmax), (xmax, zmin), (xmin, zmin))
    )
    self.modify_polygon(pid, area_polygon, value)

Modify the given dataset in the rectangular area given by the parameters and
assign all parameters inside this area the given value.

Partially contained elements are treated as INSIDE the area, i.e., they are
assigned new values.

Parameters
----------
pid : int
    id of the parameter set to modify
xmin : float
    smallest x value of the area to modify
xmax : float
    largest x value of the area to modify
zmin : float
    smallest z value of the area to modify
zmax : float
    largest z value of the area to modify
value : float
    this value is assigned to all parameters of the area

Examples
--------
>>> import crtomo.tdManager as CRtdm
    tdman = CRtdm.tdMan(
        elem_file='GRID/elem.dat',
        elec_file='GRID/elec.dat',
    )
    pid = tdman.parman.add_empty_dataset(value=1)
    tdman.parman.modify_area(
        pid,
        xmin=0,
        xmax=2,
        zmin=-2,
        zmax=-0.5,
        value=2,
    )
    fig, ax = tdman.plot.plot_elements_to_ax(pid)
    fig.savefig('out.png')
def assure_check(fnc):
    @wraps(fnc)
    def _wrapped(self, check, *args, **kwargs):
        if not isinstance(check, CloudMonitorCheck):
            check = self._check_manager.get(check)
        return fnc(self, check, *args, **kwargs)
    return _wrapped

Converts a check ID passed as ``check`` to a CloudMonitorCheck object.
def send_batch(messages, api_key=None, secure=None, test=None, **request_args):
    return _default_pyst_batch_sender.send(messages=messages, api_key=api_key,
                                           secure=secure, test=test,
                                           **request_args)

Send a batch of messages.

:param messages: Messages to send.
:type messages: A list of `dict` or :class:`Message`
:param api_key: Your Postmark API key. Required, if `test` is not `True`.
:param secure: Use the https scheme for the Postmark API. Defaults to `True`.
:param test: Use the Postmark Test API. Defaults to `False`.
:param \*\*request_args: Keyword arguments to pass to :func:`requests.request`.
:rtype: :class:`BatchSendResponse`
def most_frequent(lst):
    lst = lst[:]
    highest_freq = 0
    most_freq = None
    for val in unique(lst):
        if lst.count(val) > highest_freq:
            most_freq = val
            highest_freq = lst.count(val)
    return most_freq
Returns the item that appears most frequently in the given list.
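Assuming the `unique` helper it calls behaves like the list-deduplicator shown earlier in this collection, usage is straightforward:

print(most_frequent([3, 1, 3, 2, 1, 3]))  # 3 (appears three times)
print(most_frequent(['a', 'b', 'b']))     # 'b'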
def load(self, arguments):
    "Load the values from a ServerConnection arguments list"
    features = arguments[1:-1]
    list(map(self.load_feature, features))

Load the values from a ServerConnection arguments list.
def center_line(space, line):
    line = line.strip()
    left_length = math.floor((space - len(line)) / 2)
    right_length = math.ceil((space - len(line)) / 2)
    left_space = " " * int(left_length)
    right_space = " " * int(right_length)
    line = ''.join([left_space, line, right_space])
    return line

Add leading & trailing space to text to center it within an allowed width.

Parameters
----------
space : int
    The maximum character width allowed for the text. If the length of text
    is more than this value, no space will be added.
line : str
    The text that will be centered.

Returns
-------
line : str
    The text with the leading and trailing space added to it.
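Two worked calls showing the floor/ceil split (pipes added only to make the padding visible):

print('|' + center_line(11, "abc") + '|')  # |    abc    |  (4 + 3 + 4 = 11)
print('|' + center_line(10, "abc") + '|')  # |   abc    |   (odd leftover space goes right)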
def concatenate_1d(arrays):
    if len(arrays) == 0:
        return np.array([])
    if len(arrays) == 1:
        return np.asanyarray(arrays[0])
    if any(map(np.ma.is_masked, arrays)):
        return np.ma.concatenate(arrays)
    return np.concatenate(arrays)

Concatenate 1D numpy arrays. Similar to np.concatenate but works with empty input and masked arrays.
def random_pairs_without_replacement_large_frames(n, shape, random_state=None):
    n_max = max_pairs(shape)
    sample = np.array([])
    while len(sample) < n:
        n_sample_size = (n - len(sample)) * 2
        sample = random_state.randint(n_max, size=n_sample_size)
        pairs_non_unique = np.append(sample, sample)
        sample = _unique_rows_numpy(pairs_non_unique)
    if len(shape) == 1:
        return _map_tril_1d_on_2d(sample[0:n], shape[0])
    else:
        return np.unravel_index(sample[0:n], shape)

Make a sample of random pairs without replacement.
def device_add_rule(self, direction, action, src, dst, target=None):
    value = [direction, src, dst, action]
    if target:
        value.append(target)
    self._set_aliased('device-rule', ' '.join(value), multi=True)
    return self

Adds a tuntap device rule. To be used in a vassal.

:param str|unicode direction: Direction: in, out.
:param str|unicode action: Action: allow, deny, route, gateway.
:param str|unicode src: Source/mask.
:param str|unicode dst: Destination/mask.
:param str|unicode target: Depends on action.
    * Route / Gateway: accept addr:port
def do_copy(self, args):
    parser = CommandArgumentParser("copy")
    parser.add_argument('-a', '--asg', dest='asg', nargs='+', required=False,
                        default=[], help='Copy specified ASG info.')
    parser.add_argument('-o', '--output', dest='output', nargs='+',
                        required=False, default=[],
                        help='Copy specified output info.')
    args = vars(parser.parse_args(args))
    values = []
    if args['output']:
        values.extend(self.getOutputs(args['output']))
    if args['asg']:
        for asg in args['asg']:
            try:
                index = int(asg)
                asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
            except:
                asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][asg]
            values.append(asgSummary.physical_resource_id)
    print("values:{}".format(values))
    pyperclip.copy("\n".join(values))

Copy specified stack ids to the clipboard. copy -h for detailed help.
def make_account_admin(self, user_id, account_id, role=None, role_id=None, send_confirmation=None):
    path = {}
    data = {}
    params = {}
    path["account_id"] = account_id
    data["user_id"] = user_id
    if role is not None:
        data["role"] = role
    if role_id is not None:
        data["role_id"] = role_id
    if send_confirmation is not None:
        data["send_confirmation"] = send_confirmation
    self.logger.debug("POST /api/v1/accounts/{account_id}/admins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/admins".format(**path),
                                data=data, params=params, single_item=True)
Make an account admin. Flag an existing user as an admin within the account.
def from_json(cls, data):
    optional_keys = {'wind_direction': 0, 'rain': False,
                     'snow_on_ground': False}
    assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
    for key, val in optional_keys.items():
        if key not in data:
            data[key] = val
    return cls(data['wind_speed'], data['wind_direction'], data['rain'],
               data['snow_on_ground'])

Create a Wind Condition from a dictionary.

Args:
    data = {
        "wind_speed": float,
        "wind_direction": float,
        "rain": bool,
        "snow_on_ground": bool}
def view_attr(attr_name):
    def view_attr(_value, context, **_params):
        value = getattr(context["view"], attr_name)
        return _attr(value)
    return view_attr

Creates a getter that will drop the current value and retrieve the view's
attribute with the specified name.

@param attr_name: the name of an attribute belonging to the view.
@type attr_name: str
def _build_default_options(self):
    return [
        OptionDefault('model', None, inherit=True),
        OptionDefault('abstract', False, inherit=False),
        OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
        OptionDefault('inline_args', (), inherit=True),
        OptionDefault('exclude', (), inherit=True),
        OptionDefault('rename', {}, inherit=True),
    ]
Provide the default value for all allowed fields. Custom FactoryOptions classes should override this method to update() its return value.
def format(self, info_dict, delimiter='/'):
    def dfs(father, path, acc):
        if isinstance(father, list):
            for child in father:
                dfs(child, path, acc)
        elif isinstance(father, collections.Mapping):
            for child in sorted(father.items(), key=itemgetter(0)):
                dfs(child, path, acc)
        elif isinstance(father, tuple):
            path = copy.copy(path)
            path.append(father[0])
            dfs(father[1], path, acc)
        else:
            path[-1] = '{}: {}'.format(path[-1], str(father))
            acc.append(delimiter.join(path))

    result = []
    dfs(info_dict.get('Prefix') or info_dict, [], result)
    return '\n'.join(result)

This formatter takes a data structure that represents a tree and prints all
the paths from the root to the leaves. In our case it will print each value
and the keys needed to get to it, for example:

    vm0:
        net: lago
        memory: 1024

will be output as:

    vm0/net/lago
    vm0/memory/1024

Args:
    info_dict (dict): information to reformat
    delimiter (str): a delimiter for the path components

Returns:
    str: String representing the formatted info
def is_pid_healthy(pid):
    if HAS_PSUTIL:
        try:
            proc = psutil.Process(pid)
        except psutil.NoSuchProcess:
            log.warning("PID %s is no longer running.", pid)
            return False
        return any(['salt' in cmd for cmd in proc.cmdline()])
    if salt.utils.platform.is_aix() or salt.utils.platform.is_windows():
        return True
    if not salt.utils.process.os_is_running(pid):
        log.warning("PID %s is no longer running.", pid)
        return False
    cmdline_file = os.path.join('/proc', str(pid), 'cmdline')
    try:
        with salt.utils.files.fopen(cmdline_file, 'rb') as fp_:
            return b'salt' in fp_.read()
    except (OSError, IOError) as err:
        log.error("There was a problem reading proc file: %s", err)
        return False

This is a health check that will confirm the PID is running and was executed
by salt.

If psutil is available:
    * all architectures are checked

If psutil is not available:
    * Linux/Solaris/etc: archs with `/proc/<pid>/cmdline` available are checked
    * AIX/Windows: assume PID is healthy and return True
def _add_flaky_report(self, stream):
    value = self._stream.getvalue()
    if not self._flaky_success_report and not value:
        return
    stream.write('===Flaky Test Report===\n\n')
    try:
        stream.write(value)
    except UnicodeEncodeError:
        stream.write(value.encode('utf-8', 'replace'))
    stream.write('\n===End Flaky Test Report===\n')

Baseclass override. Write details about flaky tests to the test report.

:param stream: The test stream to which the report can be written.
:type stream: `file`
def _delete_iapp(self, iapp_name, deploying_device):
    iapp = deploying_device.tm.sys.application
    iapp_serv = iapp.services.service.load(
        name=iapp_name, partition=self.partition
    )
    iapp_serv.delete()
    iapp_tmpl = iapp.templates.template.load(
        name=iapp_name, partition=self.partition
    )
    iapp_tmpl.delete()

Delete an iapp service and template on the root device.

:param iapp_name: str -- name of iapp
:param deploying_device: ManagementRoot object -- device where the iapp
    will be deleted
def date_range(field_name, **kwargs):
    for k, v in kwargs.items():
        dt = v
        if not hasattr(v, 'isoformat'):
            dt = strp_lenient(str(v))
            if dt is None:
                raise ValueError("unable to use provided time: " + str(v))
        kwargs[k] = dt.isoformat() + 'Z'
    return _filter('DateRangeFilter', config=kwargs, field_name=field_name)

Build a DateRangeFilter.

Predicate arguments accept a value str that is in ISO-8601 format or a value
that has an `isoformat` callable that returns an ISO-8601 str.

:raises: ValueError if predicate value does not parse

>>> date_range('acquired', gt='2017') == \
{'config': {'gt': '2017-01-01T00:00:00Z'}, \
'field_name': 'acquired', 'type': 'DateRangeFilter'}
True
def write_wave(path, audio, sample_rate):
    with contextlib.closing(wave.open(path, 'wb')) as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(sample_rate)
        wf.writeframes(audio)
Writes a .wav file. Takes path, PCM audio data, and sample rate.
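A usage sketch: since `setsampwidth(2)` declares 16-bit samples, the `audio` argument must be raw little-endian int16 frames. NumPy is just one convenient way to produce them (the filename is illustrative):

import numpy as np

sample_rate = 16000
pcm = np.zeros(sample_rate, dtype=np.int16).tobytes()  # one second of silence
write_wave('silence.wav', pcm, sample_rate)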
def initialize_env_specs(hparams, env_problem_name):
    if env_problem_name:
        env = registry.env_problem(env_problem_name, batch_size=hparams.batch_size)
    else:
        env = rl_utils.setup_env(hparams, hparams.batch_size,
                                 hparams.eval_max_num_noops,
                                 hparams.rl_env_max_episode_steps,
                                 env_name=hparams.rl_env_name)
        env.start_new_epoch(0)
    return rl.make_real_env_fn(env)
Initializes env_specs using the appropriate env.
def fix_partial_utf8_punct_in_1252(text):
    def latin1_to_w1252(match):
        "The function to apply when this regex matches."
        return match.group(0).encode('latin-1').decode('sloppy-windows-1252')

    def w1252_to_utf8(match):
        "The function to apply when this regex matches."
        return match.group(0).encode('sloppy-windows-1252').decode('utf-8')

    text = C1_CONTROL_RE.sub(latin1_to_w1252, text)
    return PARTIAL_UTF8_PUNCT_RE.sub(w1252_to_utf8, text)
Fix particular characters that seem to be found in the wild encoded in UTF-8 and decoded in Latin-1 or Windows-1252, even when this fix can't be consistently applied. One form of inconsistency we need to deal with is that some character might be from the Latin-1 C1 control character set, while others are from the set of characters that take their place in Windows-1252. So we first replace those characters, then apply a fix that only works on Windows-1252 characters. This is used as a transcoder within `fix_encoding`.
def read_varint64(self):
    i = self.read_var_uint64()
    if i > wire_format.INT64_MAX:
        i -= (1 << 64)
    return i
Reads a varint from the stream, interprets this varint as a signed, 64-bit integer, and returns the integer.
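The two's-complement fixup in plain numbers: an unsigned decode of 2**64 - 1 is above INT64_MAX (2**63 - 1), so subtracting 1 << 64 recovers the signed value -1.

i = 2**64 - 1
INT64_MAX = 2**63 - 1
if i > INT64_MAX:
    i -= 1 << 64
print(i)  # -1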
def save_fits(self, data, name):
    data = data.reshape(1, 1, data.shape[0], data.shape[0])
    new_file = pyfits.PrimaryHDU(data, self.img_hdu_list[0].header)
    new_file.writeto("{}".format(name), overwrite=True)

This method simply saves the model components and the residual.

INPUTS:
data    (no default)    Data which is to be saved.
name    (no default)    File name for the new .fits file. Will overwrite.