def get_status(self, instance):
    status_key, status = self._get_status(instance)
    if status['state'] in ['complete', 'error']:
        cache.delete(status_key)
    return status
Retrieves the status of a field from cache. Fields in state 'error' or 'complete' will not retain the status after the call.
def get_timestamp_expression(self, time_grain):
    label = utils.DTTM_ALIAS
    db = self.table.database
    pdf = self.python_date_format
    is_epoch = pdf in ('epoch_s', 'epoch_ms')
    if not self.expression and not time_grain and not is_epoch:
        sqla_col = column(self.column_name, type_=DateTime)
        return self.table.make_sqla_column_compatible(sqla_col, label)
    grain = None
    if time_grain:
        grain = db.grains_dict().get(time_grain)
        if not grain:
            raise NotImplementedError(
                f'No grain spec for {time_grain} for database {db.database_name}')
    col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
    expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
    sqla_col = literal_column(expr, type_=DateTime)
    return self.table.make_sqla_column_compatible(sqla_col, label)
Get the time component (timestamp expression) of the query.
def expire_hit(self, hit_id):
    try:
        self.mturk.update_expiration_for_hit(HITId=hit_id, ExpireAt=0)
    except Exception as ex:
        raise MTurkServiceException(
            "Failed to expire HIT {}: {}".format(hit_id, str(ex))
        )
    return True
Expire a HIT, which will change its status to "Reviewable", allowing it to be deleted.
def plot(self, plot_grouped=False):
    cumulative_detections(
        detections=self.detections, plot_grouped=plot_grouped)
Plot the cumulative number of detections in time.

.. rubric:: Example

>>> family = Family(
...     template=Template(name='a'), detections=[
...         Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
...                   no_chans=8, detect_val=4.2, threshold=1.2,
...                   typeofdet='corr', threshold_type='MAD',
...                   threshold_input=8.0),
...         Detection(template_name='a', detect_time=UTCDateTime(0),
...                   no_chans=8, detect_val=4.5, threshold=1.2,
...                   typeofdet='corr', threshold_type='MAD',
...                   threshold_input=8.0),
...         Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
...                   no_chans=8, detect_val=4.5, threshold=1.2,
...                   typeofdet='corr', threshold_type='MAD',
...                   threshold_input=8.0)])
>>> family.plot(plot_grouped=True)  # doctest: +SKIP

.. plot::

    from eqcorrscan.core.match_filter import Family, Template
    from eqcorrscan.core.match_filter import Detection
    from obspy import UTCDateTime
    family = Family(
        template=Template(name='a'), detections=[
            Detection(template_name='a', detect_time=UTCDateTime(0) + 200,
                      no_chans=8, detect_val=4.2, threshold=1.2,
                      typeofdet='corr', threshold_type='MAD',
                      threshold_input=8.0),
            Detection(template_name='a', detect_time=UTCDateTime(0),
                      no_chans=8, detect_val=4.5, threshold=1.2,
                      typeofdet='corr', threshold_type='MAD',
                      threshold_input=8.0),
            Detection(template_name='a', detect_time=UTCDateTime(0) + 10,
                      no_chans=8, detect_val=4.5, threshold=1.2,
                      typeofdet='corr', threshold_type='MAD',
                      threshold_input=8.0)])
    family.plot(plot_grouped=True)
def dist_sq(self, other=None):
    v = self - other if other else self
    return sum(map(lambda a: a * a, v))
For fast length comparison
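A hedged usage sketch of why squared distance is enough: comparing squared lengths gives the same ordering as comparing lengths, with no square root. The Vec class below is a hypothetical stand-in for whatever vector type defines __sub__ and is iterable.

# Sketch only: `Vec` is a hypothetical stand-in for the real vector class.
class Vec(tuple):
    def __sub__(self, other):
        return Vec(a - b for a, b in zip(self, other))

    def dist_sq(self, other=None):
        v = self - other if other else self
        return sum(map(lambda a: a * a, v))

a, b, c = Vec((0, 0)), Vec((3, 4)), Vec((1, 1))
# Pick the point nearest to `a` without ever calling sqrt:
nearest = min((b, c), key=a.dist_sq)  # -> Vec((1, 1))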
def _add_ce_record(self, curr_dr_len, thislen):
    if self.dr_entries.ce_record is None:
        self.dr_entries.ce_record = RRCERecord()
        self.dr_entries.ce_record.new()
        curr_dr_len += RRCERecord.length()
    self.dr_entries.ce_record.add_record(thislen)
    return curr_dr_len
An internal method to add a new length to a Continuation Entry. If the Continuation Entry does not yet exist, this method creates it.

Parameters:
 curr_dr_len - The current Directory Record length.
 thislen - The new length to add to the Continuation Entry.
Returns:
 An integer representing the current directory record length after adding the Continuation Entry.
def _find_image_id(self, image_id):
    if not self._images:
        connection = self._connect()
        self._images = connection.get_all_images()
    image_id_cloud = None
    for i in self._images:
        if i.id == image_id or i.name == image_id:
            image_id_cloud = i.id
            break
    if image_id_cloud:
        return image_id_cloud
    else:
        raise ImageError(
            "Could not find given image id `%s`" % image_id)
Find the image id matching a given id or name.

:param str image_id: name or id of image
:return: str - identifier of image
def new_from_url(cls, url, verify=True):
    response = requests.get(url, verify=verify, timeout=2.5)
    return cls.new_from_response(response)
Constructs a new WebPage object for the URL, using the `requests` module to fetch the HTML.

Parameters
----------
url : str
verify : bool
def set_margins(self, top=None, bottom=None):
    if (top is None or top == 0) and bottom is None:
        self.margins = None
        return
    margins = self.margins or Margins(0, self.lines - 1)
    if top is None:
        top = margins.top
    else:
        top = max(0, min(top - 1, self.lines - 1))
    if bottom is None:
        bottom = margins.bottom
    else:
        bottom = max(0, min(bottom - 1, self.lines - 1))
    if bottom - top >= 1:
        self.margins = Margins(top, bottom)
        self.cursor_position()
Select top and bottom margins for the scrolling region.

:param int top: the smallest line number that is scrolled.
:param int bottom: the biggest line number that is scrolled.
def _AbortJoin(self, timeout=None):
    for pid, process in self._processes_per_pid.items():
        logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
            process.name, pid))
        process.join(timeout=timeout)
        if not process.is_alive():
            logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
                process.name, pid))
Aborts all registered processes by joining with the parent process.

Args:
    timeout (int): number of seconds to wait for processes to join, where None represents no timeout.
def _clone(self):
    cloned_self = self.__class__(
        *self.flat_path, project=self.project, namespace=self.namespace
    )
    cloned_self._parent = self._parent
    return cloned_self
Duplicates the Key.

Most attributes are simple types, so don't require copying. Other attributes like ``parent`` are long-lived and so we re-use them.

:rtype: :class:`google.cloud.datastore.key.Key`
:returns: A new ``Key`` instance with the same data as the current one.
def set_ownership(self):
    assert self.section is not None
    for t in self.children:
        t.parent = self
        t._section = self.section
        t.doc = self.doc
        t.set_ownership()
Recursively set the parent, section and doc for all children.
def get_version_info():
    from astropy import __version__
    astropy_version = __version__
    from photutils import __version__
    photutils_version = __version__
    return 'astropy: {0}, photutils: {1}'.format(astropy_version, photutils_version)
Return astropy and photutils versions.

Returns
-------
result : str
    The astropy and photutils versions.
def is_rotation(self, other):
    if len(self) != len(other):
        return False
    for i in range(len(self)):
        if self.rotate(i) == other:
            return True
    return False
Determine whether two sequences are the same, just at different rotations.

:param other: The sequence to check for rotational equality.
:type other: coral.sequence._sequence.Sequence
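The same question can be answered on plain Python strings without the rotate-and-compare loop, using the doubling trick: b is a rotation of a iff the lengths match and b occurs in a + a. A minimal standalone sketch, not the library's method:

def is_rotation(a, b):
    # b is a rotation of a iff the lengths match and b occurs in a doubled.
    return len(a) == len(b) and b in (a + a)

assert is_rotation("ATCG", "CGAT")
assert not is_rotation("ATCG", "ATGC")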
def evaluate_binop_comparison(self, operation, left, right, **kwargs):
    if operation not in self.binops_comparison:
        raise ValueError("Invalid comparison binary operation '{}'".format(operation))
    if left is None or right is None:
        return None
    if not isinstance(left, (list, ListIP)):
        left = [left]
    if not isinstance(right, (list, ListIP)):
        right = [right]
    if not left or not right:
        return None
    if operation in ['OP_IS']:
        res = self.binops_comparison[operation](left, right)
        if res:
            return True
    elif operation in ['OP_IN']:
        for iteml in left:
            res = self.binops_comparison[operation](iteml, right)
            if res:
                return True
    else:
        for iteml in left:
            if iteml is None:
                continue
            for itemr in right:
                if itemr is None:
                    continue
                res = self.binops_comparison[operation](iteml, itemr)
                if res:
                    return True
    return False
Evaluate given comparison binary operation with given operands.
def is_locked(self, key):
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data)
Checks the lock for the specified key. If the lock is acquired, it returns ``true``. Otherwise, it returns ``false``.

**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.**

:param key: (object), the key that is checked for lock
:return: (bool), ``true`` if lock is acquired, ``false`` otherwise.
def _read_config(self):
    try:
        self.config = self.componentmodel.find_one(
            {'name': self.uniquename})
    except ServerSelectionTimeoutError:
        self.log("No database access! Check if mongodb is running "
                 "correctly.", lvl=critical)
    if self.config:
        self.log("Configuration read.", lvl=verbose)
    else:
        self.log("No configuration found.", lvl=warn)
Read this component's configuration from the database
def repr2_json(obj_, **kwargs):
    import utool as ut
    kwargs['trailing_sep'] = False
    json_str = ut.repr2(obj_, **kwargs)
    json_str = str(json_str.replace('\'', '"'))
    json_str = json_str.replace('(', '[')
    json_str = json_str.replace(')', ']')
    json_str = json_str.replace('None', 'null')
    return json_str
hack for json reprs
def rejoin(self, group_id):
    url = utils.urljoin(self.url, 'join')
    payload = {'group_id': group_id}
    response = self.session.post(url, json=payload)
    return Group(self, **response.data)
Rejoin a former group.

:param str group_id: the group_id of a group
:return: the group
:rtype: :class:`~groupy.api.groups.Group`
def _repeat_length(cls, part):
    repeat_len = len(part)
    if repeat_len == 0:
        return repeat_len
    first_digit = part[0]
    limit = repeat_len // 2 + 1
    indices = (i for i in range(1, limit) if part[i] == first_digit)
    for index in indices:
        (quot, rem) = divmod(repeat_len, index)
        if rem == 0:
            first_chunk = part[0:index]
            if all(first_chunk == part[x:x + index]
                   for x in range(index, quot * index, index)):
                return index
    return repeat_len
The length of the repeated portions of ``part``.

:param part: a number
:type part: list of int
:returns: the first index at which part repeats
:rtype: int

If part does not repeat, result is the length of part.

Complexity: O(len(part)^2)
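A quick standalone check of the algorithm, copying the body as a plain function (no class needed):

def repeat_length(part):
    repeat_len = len(part)
    if repeat_len == 0:
        return repeat_len
    first_digit = part[0]
    limit = repeat_len // 2 + 1
    # Candidate periods start at positions where the first digit recurs.
    indices = (i for i in range(1, limit) if part[i] == first_digit)
    for index in indices:
        quot, rem = divmod(repeat_len, index)
        if rem == 0:
            first_chunk = part[0:index]
            if all(first_chunk == part[x:x + index]
                   for x in range(index, quot * index, index)):
                return index
    return repeat_len

assert repeat_length([1, 4, 2, 1, 4, 2]) == 3  # "142" repeats
assert repeat_length([1, 4, 2, 1, 4]) == 5     # no clean repeat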
def _updateTargetFromNode(self):
    if not self.autoRangeCti or not self.autoRangeCti.configValue:
        padding = 0
    elif self.paddingCti.configValue == -1:
        padding = None
    else:
        padding = self.paddingCti.configValue / 100
    targetRange = self.calculateRange()
    if not np.all(np.isfinite(targetRange)):
        logger.warn("New target range is not finite. Plot range not updated")
        return
    self.setTargetRange(targetRange, padding=padding)
Applies the configuration to the target axis.
def get_archive_name(self):
    name = self.get_local_name().split('.')[0]
    case = self.case_id
    label = self.commons['cmdlineopts'].label
    date = ''
    rand = ''.join(random.choice(string.ascii_lowercase) for x in range(7))
    if self.name_pattern == 'legacy':
        nstr = "sosreport-{name}{case}{date}"
        case = '.' + case if case else ''
        date = '-%Y%m%d%H%M%S'
    elif self.name_pattern == 'friendly':
        nstr = "sosreport-{name}{label}{case}{date}-{rand}"
        case = '-' + case if case else ''
        label = '-' + label if label else ''
        date = '-%Y-%m-%d'
    else:
        nstr = self.name_pattern
    nstr = nstr.format(
        name=name,
        label=label,
        case=case,
        date=date,
        rand=rand
    )
    return self.sanitize_filename(time.strftime(nstr))
This function should return the filename of the archive without the extension.

This uses the policy's name_pattern attribute to determine the name. There are two pre-defined naming patterns - 'legacy' and 'friendly' - that give names like the following:

legacy - 'sosreport-tux.123456-20171224185433'
friendly - 'sosreport-tux-mylabel-123456-2017-12-24-ezcfcop.tar.xz'

A custom name_pattern can be used by a policy provided that it defines name_pattern using a format() style string substitution.

Usable substitutions are:

name - the short hostname of the system
label - the label given by --label
case - the case id given by --case-id or --ticket-number
rand - a random string of 7 alpha characters

Note that if a datestamp is needed, the substring should be set in the name_pattern in the format accepted by strftime().
def get_health(self, consumers=2, messages=100):
    data = {'consumers': consumers, 'messages': messages}
    try:
        self._request('GET', '/health', data=json.dumps(data))
        return True
    except SensuAPIException:
        return False
Returns health information on transport & Redis connections.
def _sanitizer(self, obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    return obj
Sanitizer method that will be passed to json.dumps.
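A sketch of how such a sanitizer is typically wired into json.dumps via the default hook; the standalone function mirrors the method's logic. One caveat: json.dumps expects default to return something serializable (or raise TypeError), so the pass-through fallback only works when every leaf ultimately becomes serializable.

import datetime
import json

def sanitizer(obj):
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    if hasattr(obj, "to_dict"):
        return obj.to_dict()
    return obj

payload = {"created": datetime.datetime(2020, 1, 1, 12, 30)}
print(json.dumps(payload, default=sanitizer))
# {"created": "2020-01-01T12:30:00"}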
def baseimage(self, new_image):
    images = self.parent_images or [None]
    images[-1] = new_image
    self.parent_images = images
Change the image of the final stage's FROM instruction.
def lock(self) -> asyncio.Lock:
    if self.lock_key not in self.request.custom_content:
        self.request.custom_content[self.lock_key] = asyncio.Lock()
    return self.request.custom_content[self.lock_key]
Return the lock for this request, generating it first if required.
def hacking_no_author_tags(physical_line):
    for regex in AUTHOR_TAG_RE:
        if regex.match(physical_line):
            physical_line = physical_line.lower()
            pos = physical_line.find('moduleauthor')
            if pos < 0:
                pos = physical_line.find('author')
            return (pos, "H105: Don't use author tags")
Check that no author tags are used. H105 don't use author tags
def stop(self) -> None:
    if self._stopped:
        return
    self._stopped = True
    for fd, sock in self._sockets.items():
        assert sock.fileno() == fd
        self._handlers.pop(fd)()
        sock.close()
Stops listening for new connections. Requests currently in progress may still continue after the server is stopped.
def filter_roidb(self):
    num_roidb = len(self._roidb)
    self._roidb = [roi_rec for roi_rec in self._roidb if len(roi_rec['gt_classes'])]
    num_after = len(self._roidb)
    logger.info('filter roidb: {} -> {}'.format(num_roidb, num_after))
Remove images without usable rois
def get_assessment_bank_assignment_session(self, proxy):
    if not self.supports_assessment_bank_assignment():
        raise errors.Unimplemented()
    return sessions.AssessmentBankAssignmentSession(proxy=proxy, runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment bank assignment service.

arg:    proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.AssessmentBankAssignmentSession) - an ``AssessmentBankAssignmentSession``
raise:  NullArgument - ``proxy`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  Unimplemented - ``supports_assessment_bank_assignment()`` is ``false``

*compliance: optional -- This method must be implemented if ``supports_assessment_bank_assignment()`` is ``true``.*
def length(cls, dataset):
    return np.product([len(d.points) for d in dataset.data.coords(dim_coords=True)],
                      dtype=np.intp)
Returns the total number of samples in the dataset.
def _filter_seqs(fn):
    out_file = op.splitext(fn)[0] + "_unique.fa"
    idx = 0
    if not file_exists(out_file):
        with open(out_file, 'w') as out_handle:
            with open(fn) as in_handle:
                for line in in_handle:
                    if line.startswith("@") or line.startswith(">"):
                        fixed_name = _make_unique(line.strip(), idx)
                        seq = next(in_handle).strip()
                        counts = _get_freq(fixed_name)
                        if len(seq) < 26 and (counts > 1 or counts == 0):
                            idx += 1
                            print(fixed_name, file=out_handle, end="\n")
                            print(seq, file=out_handle, end="\n")
                        if line.startswith("@"):
                            next(in_handle)
                            next(in_handle)
    return out_file
Convert names of sequences to unique ids
def save(self, *args, **kwargs):
    self.type = INTERFACE_TYPES.get('ethernet')
    super(Ethernet, self).save(*args, **kwargs)
automatically set Interface.type to ethernet
def observed(cls, _func):
    def wrapper(*args, **kwargs):
        self = args[0]
        assert isinstance(self, Observable)
        self._notify_method_before(self, _func.__name__, args, kwargs)
        res = _func(*args, **kwargs)
        self._notify_method_after(self, _func.__name__, res, args, kwargs)
        return res
    return wrapper
Decorate methods to be observable. If they are called on an instance stored in a property, the model will emit before and after notifications.
def table(self):
    if self._table is None:
        column_names = []
        for fileid in self.header.file_ids:
            for column_name in self.header.column_names:
                column_names.append("{}_{}".format(column_name, fileid))
            column_names.append("ZP_{}".format(fileid))
        if len(column_names) > 0:
            self._table = Table(names=column_names)
        else:
            self._table = Table()
    return self._table
The astropy.table.Table object that will contain the data result.

@rtype: Table
@return: data table
def list_nodes_min(conn=None, call=None):
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    if conn is None:
        conn = get_conn()
    ret = {}
    for node in conn.list_servers(bare=True):
        ret[node.name] = {'id': node.id, 'state': node.status}
    return ret
Return a list of VMs with minimal information.

CLI Example:

.. code-block:: bash

    salt-cloud -f list_nodes_min myopenstack
def import_certificate(self, certificate_data, bay_number=None):
    uri = "{}/https/certificaterequest".format(self.data['uri'])
    if bay_number:
        uri += "?bayNumber=%d" % (bay_number)
    headers = {'Content-Type': 'application/json'}
    return self._helper.do_put(uri, certificate_data, -1, headers)
Imports a signed server certificate into the enclosure.

Args:
    certificate_data: Dictionary with Signed certificate and type.
    bay_number: OA to which the signed certificate will be imported.

Returns:
    Enclosure.
def move_up(self):
    old_index = self.current_index
    self.current_index -= 1
    self.__wrap_index()
    self.__handle_selections(old_index, self.current_index)
Try to select the button above the currently selected one. If a button is not there, wrap down to the bottom of the menu and select the last button.
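The wrap-to-the-bottom behavior falls out of modular arithmetic: in Python, -1 % n is n - 1. A minimal sketch with a hypothetical Menu class (the selection-highlight bookkeeping of the original is omitted):

class Menu:
    def __init__(self, num_buttons):
        self.num_buttons = num_buttons
        self.current_index = 0

    def move_up(self):
        # -1 % n wraps to n - 1, selecting the last button.
        self.current_index = (self.current_index - 1) % self.num_buttons

menu = Menu(3)
menu.move_up()
assert menu.current_index == 2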
def send_audio(chat_id, audio, caption=None, duration=None, performer=None,
               title=None, reply_to_message_id=None, reply_markup=None,
               disable_notification=False, parse_mode=None, **kwargs):
    files = None
    if isinstance(audio, InputFile):
        files = [audio]
        audio = None
    elif not isinstance(audio, str):
        raise Exception('audio must be instance of InputFile or str')
    params = dict(
        chat_id=chat_id,
        audio=audio
    )
    params.update(
        _clean_params(
            caption=caption,
            duration=duration,
            performer=performer,
            title=title,
            reply_to_message_id=reply_to_message_id,
            reply_markup=reply_markup,
            disable_notification=disable_notification,
            parse_mode=parse_mode,
        )
    )
    return TelegramBotRPCRequest('sendAudio', params=params, files=files,
                                 on_result=Message.from_result, **kwargs)
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future.

For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice messages, use the sendVoice method instead.

:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data.
:param caption: Audio caption, 0-200 characters
:param duration: Duration of the audio in seconds
:param performer: Performer
:param title: Track name
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. Other apps coming soon.
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

:type chat_id: int or str
:type audio: InputFile or str
:type caption: str
:type duration: int
:type performer: str
:type title: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:type parse_mode: str

:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
def create_stream(self, uidList=[]):
    req_hook = 'pod/v1/im/create'
    req_args = json.dumps(uidList)
    status_code, response = self.__rest__.POST_query(req_hook, req_args)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
create a stream
def get_batch_unlock(
        end_state: NettingChannelEndState,
) -> Optional[MerkleTreeLeaves]:
    if len(end_state.merkletree.layers[LEAVES]) == 0:
        return None

    lockhashes_to_locks = dict()
    lockhashes_to_locks.update({
        lock.lockhash: lock
        for secrethash, lock in end_state.secrethashes_to_lockedlocks.items()
    })
    lockhashes_to_locks.update({
        proof.lock.lockhash: proof.lock
        for secrethash, proof in end_state.secrethashes_to_unlockedlocks.items()
    })
    lockhashes_to_locks.update({
        proof.lock.lockhash: proof.lock
        for secrethash, proof in end_state.secrethashes_to_onchain_unlockedlocks.items()
    })

    ordered_locks = [
        lockhashes_to_locks[LockHash(lockhash)]
        for lockhash in end_state.merkletree.layers[LEAVES]
    ]

    return cast(MerkleTreeLeaves, ordered_locks)
Unlock proof for an entire merkle tree of pending locks The unlock proof contains all the merkle tree data, tightly packed, needed by the token network contract to verify the secret expiry and calculate the token amounts to transfer.
def replace(self, year=None, month=None, day=None, hour=None, minute=None,
            second=None, microsecond=None, tzinfo=None):
    if year is None:
        year = self.year
    if month is None:
        month = self.month
    if day is None:
        day = self.day
    if hour is None:
        hour = self.hour
    if minute is None:
        minute = self.minute
    if second is None:
        second = self.second
    if microsecond is None:
        microsecond = self.microsecond
    if tzinfo is None:
        tzinfo = self.tzinfo
    if year > 0:
        cls = datetime
    else:
        cls = extended_datetime
    return cls(
        year, month, day, hour, minute, second, microsecond, tzinfo
    )
Returns a new datetime.datetime or asn1crypto.util.extended_datetime object with the specified components replaced.

:return: A datetime.datetime or asn1crypto.util.extended_datetime object
def execution_timer(value):
    def _invoke(method, key_arg_position, *args, **kwargs):
        start_time = time.time()
        result = method(*args, **kwargs)
        duration = time.time() - start_time
        key = [method.__name__]
        if key_arg_position is not None:
            key.append(args[key_arg_position])
        add_timing('.'.join(key), value=duration)
        return result

    if type(value) is types.FunctionType:
        def wrapper(*args, **kwargs):
            return _invoke(value, None, *args, **kwargs)
        return wrapper
    else:
        def duration_decorator(func):
            def wrapper(*args, **kwargs):
                return _invoke(func, value, *args, **kwargs)
            return wrapper
        return duration_decorator
The ``execution_timer`` decorator allows for easy instrumentation of the duration of function calls, using the method name in the key.

The following example would add duration timing with the key ``my_function``:

.. code:: python

    @statsd.execution_timer
    def my_function(foo):
        pass

You can also include a string argument value passed to your method as part of the key. Pass the index offset of the arguments to specify the argument number to use. In the following example, the key would be ``my_function.baz``:

.. code:: python

    @statsd.execution_timer(2)
    def my_function(foo, bar, 'baz'):
        pass
def Conditional(self, i, j, val, name=''):
    pmf = Pmf(name=name)
    for vs, prob in self.Items():
        if vs[j] != val:
            continue
        pmf.Incr(vs[i], prob)
    pmf.Normalize()
    return pmf
Gets the conditional distribution of the indicated variable.

Distribution of vs[i], conditioned on vs[j] = val.

i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have

Returns: Pmf
def gist(self, id_num):
    url = self._build_url('gists', str(id_num))
    json = self._json(self._get(url), 200)
    return Gist(json, self) if json else None
Gets the gist using the specified id number.

:param int id_num: (required), unique id of the gist
:returns: :class:`Gist <github3.gists.Gist>`
def expand(self, other):
    if not isinstance(other, Result):
        raise ValueError("Provided argument has to be instance of overpy:Result()")

    other_collection_map = {Node: other.nodes, Way: other.ways,
                            Relation: other.relations, Area: other.areas}
    for element_type, own_collection in self._class_collection_map.items():
        for element in other_collection_map[element_type]:
            if is_valid_type(element, element_type) and element.id not in own_collection:
                own_collection[element.id] = element
Add all elements from another result to the list of elements of this result object. It is used by the auto-resolve feature.

:param other: Expand the result with the elements from this result.
:type other: overpy.Result
:raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
def get_idxs(exprs):
    idxs = set()
    for expr in exprs:
        for i in expr.find(sympy.Idx):
            idxs.add(i)
    return sorted(idxs, key=str)
Finds sympy.tensor.indexed.Idx instances and returns them.
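A hedged usage sketch with sympy's indexed objects; expr.find(Idx) collects every Idx subexpression, and sorting by str gives a stable order:

import sympy
from sympy import Idx, IndexedBase

x = IndexedBase('x')
i, j = Idx('i', 10), Idx('j', 10)
exprs = [x[i] + 1, x[j] * x[i]]

idxs = set()
for expr in exprs:
    for k in expr.find(Idx):
        idxs.add(k)
print(sorted(idxs, key=str))  # [i, j]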
def ossos_release_with_metadata():
    discoveries = []
    observations = ossos_discoveries()
    for obj in observations:
        discov = [n for n in obj[0].mpc_observations if n.discovery.is_discovery][0]
        tno = parameters.tno()
        tno.dist = obj[1].distance
        tno.ra_discov = discov.coordinate.ra.degrees
        tno.mag = discov.mag
        tno.name = discov.provisional_name
        discoveries.append(tno)
    return discoveries
Wrap the objects from the Version Releases together with the objects instantiated from fitting their mpc lines
def delete_object(self, obj, view_kwargs):
    if obj is None:
        url_field = getattr(self, 'url_field', 'id')
        filter_value = view_kwargs[url_field]
        raise ObjectNotFound('{}: {} not found'.format(self.model.__name__, filter_value),
                             source={'parameter': url_field})
    self.before_delete_object(obj, view_kwargs)
    self.session.delete(obj)
    try:
        self.session.commit()
    except JsonApiException as e:
        self.session.rollback()
        raise e
    except Exception as e:
        self.session.rollback()
        raise JsonApiException("Delete object error: " + str(e))
    self.after_delete_object(obj, view_kwargs)
Delete an object through sqlalchemy.

:param DeclarativeMeta obj: an object from sqlalchemy
:param dict view_kwargs: kwargs from the resource view
def log(self, message, level=None):
    level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
    self.send_message({"command": "log", "msg": str(message), "level": level})
Log a message to Storm, optionally providing a logging level.

:param message: the log message to send to Storm.
:type message: str
:param level: the logging level that Storm should use when writing the ``message``. Can be one of: trace, debug, info, warn, or error (default: ``info``).
:type level: str

.. warning::

    This will send your message to Storm regardless of what level you specify. In almost all cases, you are better off using ``Component.logger`` and not setting ``pystorm.log.path``, because that will use a :class:`pystorm.component.StormHandler` to do the filtering on the Python side (instead of on the Java side after taking the time to serialize your message and send it to Storm).
def delete_job(self, id, jobstore=None):
    warnings.warn('delete_job has been deprecated, use remove_job instead.',
                  DeprecationWarning)
    self.remove_job(id, jobstore)
DEPRECATED, use remove_job instead.

Remove a job, preventing it from being run any more.

:param str id: the identifier of the job
:param str jobstore: alias of the job store that contains the job
def compute_tls13_traffic_secrets(self):
    hkdf = self.prcs.hkdf
    self.tls13_master_secret = hkdf.extract(self.tls13_handshake_secret, None)

    cts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"client application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["client_traffic_secrets"] = [cts0]

    sts0 = hkdf.derive_secret(self.tls13_master_secret,
                              b"server application traffic secret",
                              b"".join(self.handshake_messages))
    self.tls13_derived_secrets["server_traffic_secrets"] = [sts0]

    es = hkdf.derive_secret(self.tls13_master_secret,
                            b"exporter master secret",
                            b"".join(self.handshake_messages))
    self.tls13_derived_secrets["exporter_secret"] = es

    if self.connection_end == "server":
        self.pwcs.tls13_derive_keys(sts0)
    elif self.connection_end == "client":
        self.prcs.tls13_derive_keys(sts0)
Ciphers key and IV are updated accordingly for Application data. self.handshake_messages should be ClientHello...ServerFinished.
def get_authorization_url(self):
    return self._format_url(
        OAUTH2_ROOT + 'authorize',
        query={
            'response_type': 'code',
            'client_id': self.client.get('client_id', ''),
            'redirect_uri': self.client.get('redirect_uri', '')
        })
Get the authorization URL for the current client.
def _activate_texture(mesh, name):
    if name == True or isinstance(name, int):
        keys = list(mesh.textures.keys())
        idx = 0 if not isinstance(name, int) or name == True else name
        if idx > len(keys):
            idx = 0
        try:
            name = keys[idx]
        except IndexError:
            logging.warning('No textures associated with input mesh.')
            return None
    try:
        texture = mesh.textures[name]
    except KeyError:
        logging.warning('Texture ({}) not associated with this dataset'.format(name))
        texture = None
    else:
        if name in mesh.scalar_names:
            old_tcoord = mesh.GetPointData().GetTCoords()
            mesh.GetPointData().SetTCoords(mesh.GetPointData().GetArray(name))
            mesh.GetPointData().AddArray(old_tcoord)
            mesh.Modified()
    return texture
Grab a texture and update the active texture coordinates. This makes sure to not destroy old texture coordinates.

Parameters
----------
name : str
    The name of the texture and texture coordinates to activate

Returns
-------
vtk.vtkTexture
    The active texture
def get_user_flagger():
    user_klass = get_user_model()
    try:
        user = user_klass.objects.get(pk=COMMENT_FLAG_USER_ID)
    except user_klass.DoesNotExist:
        try:
            user = user_klass.objects.get(
                **{user_klass.USERNAME_FIELD: FLAGGER_USERNAME})
        except user_klass.DoesNotExist:
            user = user_klass.objects.create_user(FLAGGER_USERNAME)
    return user
Return a User instance used by the system when flagging a comment as trackback or pingback.
def get_by_location(cls, location, include_deactivated=False):
    if include_deactivated:
        view = views.service_location
    else:
        view = views.active_service_location
    result = yield view.first(key=location, include_docs=True)
    parent = cls.parent_resource(**result['doc'])
    raise Return(cls(parent=parent, **result['value']))
Get a service by its location.
def get_quoted_columns(self, platform):
    columns = []
    for column in self._columns.values():
        columns.append(column.get_quoted_name(platform))
    return columns
Returns the quoted representation of the column names the constraint is associated with.

But only if they were defined with one or a column name is a keyword reserved by the platform. Otherwise the plain unquoted value as inserted is returned.

:param platform: The platform to use for quotation.
:type platform: Platform

:rtype: list
def view(location, browser=None, new="same", autoraise=True):
    try:
        new = {"same": 0, "window": 1, "tab": 2}[new]
    except KeyError:
        raise RuntimeError("invalid 'new' value passed to view: %r, "
                           "valid values are: 'same', 'window', or 'tab'" % new)
    if location.startswith("http"):
        url = location
    else:
        url = "file://" + abspath(location)
    try:
        controller = get_browser_controller(browser)
        controller.open(url, new=new, autoraise=autoraise)
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        pass
Open a browser to view the specified location.

Args:
    location (str) : Location to open. If location does not begin with "http:" it is assumed to be a file path on the local filesystem.
    browser (str or None) : what browser to use (default: None). If ``None``, use the system default browser.
    new (str) : How to open the location. Valid values are:
        ``'same'`` - open in the current tab
        ``'tab'`` - open a new tab in the current window
        ``'window'`` - open in a new window
    autoraise (bool) : Whether to automatically raise the location in a new browser window (default: True)

Returns:
    None
def _init_typedef(self, typedef_curr, name, lnum):
    if typedef_curr is None:
        return TypeDef()
    msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
    self._die(msg, lnum)
Initialize new typedef and perform checks.
def tagsOf(self, obj):
    return self.store.query(
        Tag,
        AND(Tag.catalog == self,
            Tag.object == obj)).getColumn("name")
Return an iterator of unicode strings - the tag names which apply to the given object.
def benchmark():
    pool_size = multiprocessing.cpu_count() - 1
    if pool_size < 1:
        pool_size = 1
    pool = multiprocessing.Pool(processes=pool_size, maxtasksperchild=1)
    results = pool.imap_unordered(run_scenario, Benchmark.scenarii)
    pool.close()
    pool.join()

    benchmark = Benchmark()
    benchmark.load_csv()
    benchmark.add(results)
    benchmark.save_csv()
Run a benchmarking suite and measure time taken by the solver. Each scenario is run in an isolated process, and results are appended to CSV file.
def forward(self, input_tensor):
    ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
    dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
    if self.inplace:
        input_tensor *= dropout_mask.unsqueeze(1)
        return None
    else:
        return dropout_mask.unsqueeze(1) * input_tensor
Apply dropout to input tensor.

Parameters
----------
input_tensor : ``torch.FloatTensor``
    A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``

Returns
-------
output : ``torch.FloatTensor``
    A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
def is_type_I_branch(u, v, dfs_data):
    if u != a(v, dfs_data):
        return False
    if u == L2(v, dfs_data):
        return True
    return False
Determines whether a branch uv is a type I branch.
def extract_name_max_chars(name, max_chars=64, blank=" "):
    new_name = name.strip()
    if len(new_name) > max_chars:
        new_name = new_name[:max_chars]
        if new_name.rfind(blank) > 0:
            new_name = new_name[:new_name.rfind(blank)]
    return new_name
Extracts max chars in name, truncated to the nearest word.

:param name: path to edit
:param max_chars: max chars of new name
:param blank: char that represents the blank between words
:return: Name edited to contain at most max_chars
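For illustration, applied standalone (assuming the function above is in scope):

print(extract_name_max_chars("the quick brown fox", max_chars=12))
# -> "the quick"   (truncated at 12 chars, then backed up to the last full word)
print(extract_name_max_chars("  spaced  ", max_chars=64))
# -> "spaced"      (short names are only stripped)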
def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8):
    eig_diag = d - w
    if x0 is None:
        x0 = np.random.randn(len(d))
    x_prev = np.zeros_like(x0)
    norm_x = np.linalg.norm(x0)
    x0 /= norm_x
    while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:
        x_prev = x0.copy()
        tridisolve(eig_diag, e, x0)
        norm_x = np.linalg.norm(x0)
        x0 /= norm_x
    return x0
Perform an inverse iteration to find the eigenvector corresponding to the given eigenvalue in a symmetric tridiagonal system.

Parameters
----------
d : ndarray
    main diagonal of the tridiagonal system
e : ndarray
    offdiagonal stored in e[:-1]
w : float
    eigenvalue of the eigenvector
x0 : ndarray
    initial point to start the iteration
rtol : float
    tolerance for the norm of the difference of iterates

Returns
-------
x0 : ndarray
    The converged eigenvector
def process_temporary_file(self, tmp_file):
    if len(tmp_file.filename) > 100:
        base_filename = tmp_file.filename[:tmp_file.filename.rfind(".")]
        tmp_file.filename = "%s.%s" % (base_filename[:99 - len(tmp_file.extension)],
                                       tmp_file.extension)
    tmp_file.save()
    data = {
        'uuid': str(tmp_file.uuid)
    }
    response = HttpResponse(json.dumps(data), status=201)
    response['Content-type'] = "text/plain"
    return response
Truncates the filename if necessary, saves the model, and returns a response
def overlay_depth(obj):
    if isinstance(obj, DynamicMap):
        if isinstance(obj.last, CompositeOverlay):
            return len(obj.last)
        elif obj.last is None:
            return None
        return 1
    else:
        return 1
Computes the depth of a DynamicMap overlay if it can be determined; otherwise returns None.
def encode_categorical(table, columns=None, **kwargs):
    if isinstance(table, pandas.Series):
        if not is_categorical_dtype(table.dtype) and not table.dtype.char == "O":
            raise TypeError("series must be of categorical dtype, but was {}".format(table.dtype))
        return _encode_categorical_series(table, **kwargs)

    def _is_categorical_or_object(series):
        return is_categorical_dtype(series.dtype) or series.dtype.char == "O"

    if columns is None:
        columns_to_encode = {nam for nam, s in table.iteritems()
                             if _is_categorical_or_object(s)}
    else:
        columns_to_encode = set(columns)

    items = []
    for name, series in table.iteritems():
        if name in columns_to_encode:
            series = _encode_categorical_series(series, **kwargs)
            if series is None:
                continue
        items.append(series)

    new_table = pandas.concat(items, axis=1, copy=False)
    return new_table
Encode categorical columns with `M` categories into `M-1` columns according to the one-hot scheme.

Parameters
----------
table : pandas.DataFrame
    Table with categorical columns to encode.
columns : list-like, optional, default: None
    Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object` or `category` dtype will be converted.
allow_drop : boolean, optional, default: True
    Whether to allow dropping categorical columns that only consist of a single category.

Returns
-------
encoded : pandas.DataFrame
    Table with categorical columns encoded as numeric. Numeric columns in the input table remain unchanged.
def example_bigbeds():
    hits = []
    d = data_dir()
    for fn in os.listdir(d):
        fn = os.path.join(d, fn)
        if os.path.splitext(fn)[-1] == '.bigBed':
            hits.append(os.path.abspath(fn))
    return hits
Returns list of example bigBed files
def _file_model_from_path(self, path, content=False, format=None):
    model = base_model(path)
    model["type"] = "file"
    if self.fs.isfile(path):
        model["last_modified"] = model["created"] = self.fs.lstat(path)["ST_MTIME"]
    else:
        model["last_modified"] = model["created"] = DUMMY_CREATED_DATE
    if content:
        try:
            content = self.fs.read(path)
        except NoSuchFile as e:
            self.no_such_entity(e.path)
        except GenericFSError as e:
            self.do_error(str(e), 500)
        model["format"] = format or "text"
        model["content"] = content
        model["mimetype"] = mimetypes.guess_type(path)[0] or "text/plain"
        if format == "base64":
            model["format"] = format or "base64"
            from base64 import b64decode
            model["content"] = b64decode(content)
    return model
Build a file model from database record.
def cool_paginate(context, **kwargs) -> dict:
    names = (
        'size',
        'next_name',
        'previous_name',
        'elastic',
        'page_obj',
    )
    return_dict = {name: value for name, value in zip(names, map(kwargs.get, names))}

    if context.get('request'):
        return_dict['request'] = context['request']
    else:
        raise RequestNotExists(
            'Unable to find request in your template context,'
            'please make sure that you have the request context processor enabled'
        )

    if not return_dict.get('page_obj'):
        if context.get('page_obj'):
            return_dict['page_obj'] = context['page_obj']
        else:
            raise PageNotSpecified(
                'You customized paginator standard name, '
                "but haven't specified it in {% cool_paginate %} tag."
            )

    if not return_dict.get('elastic'):
        return_dict['elastic'] = getattr(settings, 'COOL_PAGINATOR_ELASTIC', 10)

    return return_dict
Main function for pagination process.
def autocommit(f):
    "A decorator to commit to the storage if autocommit is set to True."
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        if self._meta.commit_ready():
            self.commit()
        return result
    return wrapper
A decorator to commit to the storage if autocommit is set to True.
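A hedged sketch of the decorator in use; the Storage class, Meta, and its commit_ready flag below are stand-ins for the real backend:

from functools import wraps

def autocommit(f):
    "A decorator to commit to the storage if autocommit is set to True."
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        if self._meta.commit_ready():
            self.commit()
        return result
    return wrapper

class Meta:
    def commit_ready(self):
        return True  # stand-in for the real autocommit setting

class Storage:
    _meta = Meta()

    def commit(self):
        print("committed")

    @autocommit
    def put(self, key, value):
        print("put", key, value)

Storage().put("a", 1)  # prints "put a 1" then "committed"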
def check_nonstandard_section_name(self):
    std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
                    '.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
                    '.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
    for i in range(200):
        std_sections.append('/' + str(i))
    non_std_sections = []
    for section in self.pefile_handle.sections:
        name = convert_to_ascii_null_term(section.Name).lower()
        if name not in std_sections:
            non_std_sections.append(name)
    if non_std_sections:
        return {'description': 'Section(s) with a non-standard name, tamper indication',
                'severity': 3,
                'category': 'MALFORMED',
                'attributes': non_std_sections}
    return None
Check for non-standard section names.
def stderr_with_input(cmd, stdin):
    handle, gpg_stderr = stderr_handle()
    LOGGER.debug("GPG command %s", ' '.join(cmd))
    try:
        gpg_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stdin=subprocess.PIPE,
                                    stderr=gpg_stderr)
        output, _err = gpg_proc.communicate(polite_bytes(stdin))
        if handle:
            handle.close()
        return output
    except subprocess.CalledProcessError as exception:
        return gpg_error(exception, 'GPG variable encryption error')
    except OSError as exception:
        raise CryptoritoError("File %s not found" % exception.filename)
Runs a command, passing something in stdin, and returning whatever came out from stdout
def resolve_all(self, import_items):
    for import_item in import_items:
        try:
            yield self.resolve_import(import_item)
        except ImportException as err:
            logging.info('unknown module %s', err.module_name)
Resolves a list of imports. Yields filenames.
def transfers_complete(self):
    for transfer in self.transfers:
        if not transfer.is_complete:
            error = {
                'errorcode': 4003,
                'errormessage': 'You must complete transfer before logout.'
            }
            hellraiser(error)
Check if all transfers are completed.
def kill(self, dwExitCode=0):
    hThread = self.get_handle(win32.THREAD_TERMINATE)
    win32.TerminateThread(hThread, dwExitCode)

    if self.pInjectedMemory is not None:
        try:
            self.get_process().free(self.pInjectedMemory)
            self.pInjectedMemory = None
        except Exception:
            pass
Terminates the thread execution.

@note: If the C{lpInjectedMemory} member contains a valid pointer, the memory is freed.

@type dwExitCode: int
@param dwExitCode: (Optional) Thread exit code.
def prune(self, whole=False, keys=[], names=[], filters=[]):
    for node in self.climb(whole):
        if not all([key in node.data for key in keys]):
            continue
        if names and not any(
                [re.search(name, node.name) for name in names]):
            continue
        try:
            if not all([utils.filter(filter, node.data, regexp=True)
                        for filter in filters]):
                continue
        except utils.FilterError:
            continue
        yield node
Filter tree nodes based on given criteria
def transformer_image_decoder(targets, encoder_output, ed_attention_bias,
                              hparams, name=None):
    with tf.variable_scope(name, default_name="transformer_dec"):
        batch_size = common_layers.shape_list(targets)[0]
        targets = tf.reshape(targets, [batch_size,
                                       hparams.img_len,
                                       hparams.img_len,
                                       hparams.num_channels * hparams.hidden_size])
        decoder_input, _, _ = cia.prepare_decoder(targets, hparams)
        decoder_output = cia.transformer_decoder_layers(
            decoder_input,
            encoder_output,
            hparams.num_decoder_layers or hparams.num_hidden_layers,
            hparams,
            attention_type=hparams.dec_attention_type,
            encoder_decoder_attention_bias=ed_attention_bias,
            name="decoder")
        decoder_output = tf.reshape(decoder_output,
                                    [batch_size,
                                     hparams.img_len,
                                     hparams.img_len * hparams.num_channels,
                                     hparams.hidden_size])
        return decoder_output
Transformer image decoder over targets with local attention.

Args:
    targets: Tensor of shape [batch, ...], and whose size is batch * height * width * hparams.num_channels * hparams.hidden_size.
    encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size].
    ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias.
    hparams: HParams.
    name: string, variable scope.

Returns:
    Tensor of shape [batch, height, width * hparams.num_channels, hparams.hidden_size].
def s_supply(self, bus):
    Sg = array([complex(g.p, g.q) for g in self.generators
                if (g.bus == bus) and not g.is_load], dtype=complex64)
    if len(Sg):
        return sum(Sg)
    else:
        return 0 + 0j
Returns the total complex power generation capacity.
def from_string(contents):
    lines = contents.split("\n")
    num_sites = int(lines[0])
    coords = []
    sp = []
    prop = []
    coord_patt = re.compile(
        r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
        r"([0-9\-\.]+)"
    )
    for i in range(2, 2 + num_sites):
        m = coord_patt.search(lines[i])
        if m:
            sp.append(m.group(1))
            coords.append([float(j) for j in [m.group(i) for i in [3, 4, 2]]])
            prop.append(float(m.group(5)))
    return ZeoVoronoiXYZ(
        Molecule(sp, coords, site_properties={'voronoi_radius': prop})
    )
Creates Zeo++ Voronoi XYZ object from a string. from_string method of XYZ class is being redefined.

Args:
    contents: String representing Zeo++ Voronoi XYZ file.

Returns:
    ZeoVoronoiXYZ object
def disconnect_network_gateway(self, gateway_id, body=None):
    base_uri = self.network_gateway_path % gateway_id
    return self.put("%s/disconnect_network" % base_uri, body=body)
Disconnect a network from the specified gateway.
def process_data_config_section(config, data_config):
    if 'connectors' in data_config:
        for connector in data_config['connectors']:
            config.data['connectors'][
                connector['name']] = get_config_from_package(
                connector['class'])
    if 'sources' in data_config:
        if data_config['sources']:
            for source in data_config['sources']:
                config.data['sources'][source['name']] = source
                del config.data['sources'][source['name']]['name']
Processes the data configuration section from the configuration data dict.

:param config: The config reference of the object that will hold the configuration data from the config_data.
:param data_config: Data configuration section from a config data dict.
def set_status(self, status):
    if self._json_state['control_url']:
        url = CONST.BASE_URL + self._json_state['control_url']
        status_data = {
            'status': str(status)
        }
        response = self._abode.send_request(
            method="put", url=url, data=status_data)
        response_object = json.loads(response.text)
        _LOGGER.debug("Set Status Response: %s", response.text)
        if response_object['id'] != self.device_id:
            raise AbodeException((ERROR.SET_STATUS_DEV_ID))
        if response_object['status'] != str(status):
            raise AbodeException((ERROR.SET_STATUS_STATE))
        _LOGGER.info("Set device %s status to: %s", self.device_id, status)
        return True
    return False
Set device status.
def statistics(self, start=None, end=None, namespace=None):
    return self.make_context(start=start, end=end, namespace=namespace).statistics()
Get write statistics for the specified namespace and date range
def main():
    description = 'Letter - a commandline interface'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--gmail', action='store_true',
                        help='Send via Gmail')
    args = parser.parse_args()

    to = raw_input('To address > ')
    subject = raw_input('Subject > ')
    body = raw_input('Your Message > ')

    if args.gmail:
        user = fromaddr = raw_input('Gmail Address > ')
        pw = getpass.getpass()
        postie = letter.GmailPostman(user=user, pw=pw)
    else:
        postie = letter.Postman()
        fromaddr = raw_input('From address > ')

    class Message(letter.Letter):
        Postie = postie
        From = fromaddr
        To = to
        Subject = subject
        Body = body

    return 0
Do the things!

Return: 0
Exceptions:
def _check_underflow(self, n):
    if self._pos + n > self._end_pos:
        raise self.BufferUnderflow()
Raise BufferUnderflow if there's not enough bytes to satisfy the request.
def create_folder_structure(self):
    self.info_file, directories = create_folder_structure(self.project, self.name)
    self.project_dir, self.batch_dir, self.raw_dir = directories
    logger.debug("create folders:" + str(directories))
Creates a folder structure based on the project and batch name.

Project
 - Batch-name
   - Raw-data-dir

The info_df JSON-file will be stored in the Project folder. The summary-files will be saved in the Batch-name folder. The raw data (including exported cycles and ica-data) will be saved to the Raw-data-dir.
def stop_artifact_creation(self, id_or_uri, task_uri):
    data = {
        "taskUri": task_uri
    }
    uri = self.URI + '/' + extract_id_from_uri(id_or_uri) + self.STOP_CREATION_PATH
    return self._client.update(data, uri=uri)
Stops creation of the selected Artifact Bundle.

Args:
    id_or_uri: ID or URI of the Artifact Bundle.
    task_uri: Task URI associated with the Artifact Bundle.

Returns:
    string:
def exists(self, queue_name, timeout=None):
    try:
        self.get_queue_metadata(queue_name, timeout=timeout)
        return True
    except AzureHttpError as ex:
        _dont_fail_not_exist(ex)
        return False
Returns a boolean indicating whether the queue exists.

:param str queue_name: The name of queue to check for existence.
:param int timeout: The server timeout, expressed in seconds.
:return: A boolean indicating whether the queue exists.
:rtype: bool
def get_municipalities(self):
    return sorted(list(set([
        location.municipality
        for location in self.get_locations().values()
    ])))
Return the list of unique municipalities, sorted by name.
def setpurpose(self, purpose):
    if isinstance(purpose, str):
        purp_no = libcrypto.X509_PURPOSE_get_by_sname(purpose)
        if purp_no <= 0:
            raise X509Error("Invalid certificate purpose '%s'" % purpose)
    elif isinstance(purpose, int):
        purp_no = purpose
    if libcrypto.X509_STORE_set_purpose(self.store, purp_no) <= 0:
        raise X509Error("cannot set purpose")
Sets certificate purpose which verified certificate should match.

@param purpose - number from 1 to 9 or standard string defined in OpenSSL. Possible strings - sslclient, sslserver, nssslserver, smimesign, smimeencrypt, crlsign, any, ocsphelper
def _create(self, cache_file):
    conn = sqlite3.connect(cache_file)
    cur = conn.cursor()
    cur.execute("PRAGMA foreign_keys = ON")
    cur.execute()
    cur.execute()
    conn.commit()
    conn.close()
Create the tables needed to store the information.
def intersection(a, b, scale=1):
    try:
        a1, a2 = a
    except TypeError:
        a1 = a.start
        a2 = a.stop
    try:
        b1, b2 = b
    except TypeError:
        b1 = b.start
        b2 = b.stop
    if a2 <= b1:
        return None
    if a1 >= b2:
        return None
    if a2 <= b2:
        if a1 <= b1:
            return slice(b1 * scale, a2 * scale)
        else:
            return slice(a1 * scale, a2 * scale)
    else:
        if a1 <= b1:
            return slice(b1 * scale, b2 * scale)
        else:
            return slice(a1 * scale, b2 * scale)
Intersection between two segments.
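A few worked calls, assuming the function above is in scope; segments may be given as slices or as (start, stop) pairs:

print(intersection(slice(2, 5), slice(4, 8)))            # slice(4, 5, None)
print(intersection(slice(2, 5), slice(4, 8), scale=10))  # slice(40, 50, None)
print(intersection((0, 3), (5, 9)))                      # None (disjoint)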
def get_calendar(self, name):
    canonical_name = self.resolve_alias(name)
    try:
        return self._calendars[canonical_name]
    except KeyError:
        pass
    try:
        factory = self._calendar_factories[canonical_name]
    except KeyError:
        raise InvalidCalendarName(calendar_name=name)
    calendar = self._calendars[canonical_name] = factory()
    return calendar
Retrieves an instance of a TradingCalendar whose name is given.

Parameters
----------
name : str
    The name of the TradingCalendar to be retrieved.

Returns
-------
calendar : calendars.TradingCalendar
    The desired calendar.
def trsm(self, B, trans='N'):
    if trans == 'N':
        cp.trsm(self._L0, B)
        pftrsm(self._V, self._L, self._B, B, trans='N')
    elif trans == 'T':
        pftrsm(self._V, self._L, self._B, B, trans='T')
        cp.trsm(self._L0, B, trans='T')
    elif type(trans) is str:
        raise ValueError("trans must be 'N' or 'T'")
    else:
        raise TypeError("trans must be 'N' or 'T'")
    return
Solves a triangular system of equations with multiple righthand sides. Computes

.. math::

    B &:= L^{-1} B \text{ if trans is 'N'}

    B &:= L^{-T} B \text{ if trans is 'T'}
def symlink(self, source, dest):
    dest = self._adjust_cwd(dest)
    self._log(DEBUG, "symlink({!r}, {!r})".format(source, dest))
    source = b(source)
    self._request(CMD_SYMLINK, source, dest)
Create a symbolic link to the ``source`` path at ``destination``. :param str source: path of the original file :param str dest: path of the newly created symlink
def parse_html(html):
    paragraphs = re.split("</?p[^>]*>", html)
    paragraphs = [re.split("<br */?>", p) for p in paragraphs if p]
    return [[get_text(l) for l in p] for p in paragraphs]
Attempt to convert html to plain text while keeping line breaks. Returns a list of paragraphs, each being a list of lines.
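The get_text helper is not shown in this snippet; a hypothetical stand-in that strips tags and unescapes entities is assumed below, together with a sample call:

import html as html_module
import re

def get_text(fragment):
    # Hypothetical stand-in for the get_text helper used above:
    # strip remaining tags, unescape entities, trim whitespace.
    return html_module.unescape(re.sub(r"<[^>]+>", "", fragment)).strip()

# With the parse_html above in scope:
# parse_html("<p>one<br>two</p><p>three</p>")  ->  [['one', 'two'], ['three']]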
def send_confirm_password_email(person):
    url = '%s/profile/login/%s/' % (
        settings.REGISTRATION_BASE_URL, person.username)

    context = CONTEXT.copy()
    context.update({
        'url': url,
        'receiver': person,
    })

    to_email = person.email
    subject, body = render_email('confirm_password', context)

    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
Sends an email to user allowing them to confirm their password.
def to_pandas_series_rdd(self):
    pd_index = self.index().to_pandas_index()
    return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))
Returns an RDD of Pandas Series objects indexed with Pandas DatetimeIndexes