def is_same_transform(matrix0, matrix1):
    matrix0 = np.array(matrix0, dtype=np.float64, copy=True)
    matrix0 /= matrix0[3, 3]
    matrix1 = np.array(matrix1, dtype=np.float64, copy=True)
    matrix1 /= matrix1[3, 3]
    return np.allclose(matrix0, matrix1)
Return True if two matrices perform same transformation.

>>> is_same_transform(np.identity(4), np.identity(4))
True
>>> is_same_transform(np.identity(4), random_rotation_matrix())
False
def AddATR(self, readernode, atr):
    capchild = self.AppendItem(readernode, atr)
    self.SetPyData(capchild, None)
    self.SetItemImage(
        capchild, self.cardimageindex, wx.TreeItemIcon_Normal)
    self.SetItemImage(
        capchild, self.cardimageindex, wx.TreeItemIcon_Expanded)
    self.Expand(capchild)
    return capchild
Add an ATR to a reader node.
def decrease_step(self) -> str:
    if self._steps_index > 0:
        self._steps_index = self._steps_index - 1
    return 'step: {}'.format(self.current_step())
Decrease the jog resolution without overrunning the list of values
def authenticate_admin(self, transport, account_name, password):
    Authenticator.authenticate_admin(self, transport, account_name, password)
    auth_token = AuthToken()
    auth_token.account_name = account_name
    params = {sconstant.E_NAME: account_name,
              sconstant.E_PASSWORD: password}
    self.log.debug('Authenticating admin %s' % account_name)
    try:
        res = transport.invoke(zconstant.NS_ZIMBRA_ADMIN_URL,
                               sconstant.AuthRequest, params, auth_token)
    except SoapException as exc:
        raise AuthException(unicode(exc), exc)
    auth_token.token = res.authToken
    auth_token.session_id = res.sessionId
    self.log.info('Authenticated admin %s, session id %s'
                  % (account_name, auth_token.session_id))
    return auth_token
Authenticates administrator using username and password.
def AIC_compare(aic_list):
    aic_values = np.array(aic_list)
    minimum = np.min(aic_values)
    delta = aic_values - minimum
    values = np.exp(-delta / 2)
    weights = values / np.sum(values)
    return delta, weights
Calculates delta AIC and AIC weights from a list of AIC values

Parameters
----------
aic_list : iterable
    AIC values from a set of candidate models

Returns
-------
tuple
    First element contains the delta AIC values, second element contains
    the relative AIC weights.

Notes
-----
AIC weights can be interpreted as the probability that a given model is
the best model in the set.

Examples
--------
>>> # Generate random data
>>> rand_samp = md.nbinom_ztrunc.rvs(20, 0.5, size=100)
>>> # Fit Zero-truncated NBD (Full model)
>>> mle_nbd = md.nbinom_ztrunc.fit_mle(rand_samp)
>>> # Fit a logseries (limiting case of Zero-truncated NBD, reduced model)
>>> mle_logser = md.logser.fit_mle(rand_samp)
>>> # Get AIC for ztrunc_nbinom
>>> nbd_aic = comp.AIC(rand_samp, md.nbinom_ztrunc(*mle_nbd))
>>> # Get AIC for logser
>>> logser_aic = comp.AIC(rand_samp, md.logser(*mle_logser))
>>> # Make AIC list and get weights
>>> aic_list = [nbd_aic, logser_aic]
>>> comp.AIC_compare(aic_list)
(array([  0.        ,  19.11806518]),
 array([  9.99929444e-01,   7.05560486e-05]))
>>> # Zero-truncated NBD is a far superior model based on AIC weights
def getCachedOrUpdatedValue(self, key, channel=None):
    if channel:
        return self._hmchannels[channel].getCachedOrUpdatedValue(key)
    try:
        return self._VALUES[key]
    except KeyError:
        value = self._VALUES[key] = self.getValue(key)
        return value
Gets the channel's value with the given key. If the key is not found in the cache, the value is queried from the host. If 'channel' is given, the respective channel's value is returned.
async def get(self) -> InfoDict:
    if self._seen_kork:
        raise AnalysisComplete()
    info = await self._queue.get()
    if not info:
        self._seen_kork = True
        await self._finished
        raise AnalysisComplete()
    return info
Waits for the next dictionary of information from the engine and returns
it.

It might be more convenient to use ``async for info in analysis: ...``.

:raises: :exc:`chess.engine.AnalysisComplete` if the analysis is complete
    (or has been stopped) and all information has been consumed. Use
    :func:`~chess.engine.AnalysisResult.next()` if you prefer to get
    ``None`` instead of an exception.
def reload(script, input, output):
    script = Path(script).expand().abspath()
    output = Path(output).expand().abspath()
    input = input if isinstance(input, (list, tuple)) else [input]
    output.makedirs_p()
    _script_reload(script, input, output)
Reloads the generator script when the script file or the input files change.
def as_completed(jobs):
    jobs = tuple(jobs)
    event = threading.Event()
    callback = lambda f, ev: event.set()
    for job in jobs:
        job.add_listener(Job.SUCCESS, callback, once=True)
        job.add_listener(Job.ERROR, callback, once=True)
    while jobs:
        event.wait()
        event.clear()
        jobs, finished = split_list_by(jobs, lambda x: x.finished)
        for job in finished:
            yield job
Generator function that yields the jobs in order of their completion. Attaches a new listener to each job.
def read(self, pos, size, **kwargs):
    data, realsize = self.read_data(size, **kwargs)
    if not self.state.solver.is_true(realsize == 0):
        self.state.memory.store(pos, data, size=realsize)
    return realsize
Reads some data from the file, storing it into memory.

:param pos:  The address to write the read data into memory
:param size: The requested length of the read
:return:     The real length of the read
def _clear(self):
    draw = ImageDraw.Draw(self._background_image)
    draw.rectangle(self._device.bounding_box, fill="black")
    del draw
Helper that clears the composition.
def dtstr_to_datetime(dtstr, to_tz=None, fail_silently=True):
    try:
        dt = datetime.datetime.utcfromtimestamp(int(dtstr, 36) / 1e3)
        if to_tz:
            dt = timezone.make_aware(dt, timezone=pytz.UTC)
            if to_tz != pytz.UTC:
                dt = dt.astimezone(to_tz)
        return dt
    except ValueError, e:
        if not fail_silently:
            raise e
        return None
Convert result from datetime_to_dtstr to datetime in timezone UTC0.
def download_data_dictionary(request, dataset_id):
    dataset = Dataset.objects.get(pk=dataset_id)
    dataDict = dataset.data_dictionary
    fields = DataDictionaryField.objects.filter(
        parent_dict=dataDict
    ).order_by('columnIndex')
    response = HttpResponse(content_type='text/csv')
    csvName = slugify(dataset.title + ' data dict') + '.csv'
    response['Content-Disposition'] = 'attachment; filename=%s' % (csvName)
    csvWriter = writer(response)
    metaHeader = [
        'Data Dictionary for {0} prepared by {1}'.format(
            dataset.title, dataset.uploaded_by
        )
    ]
    csvWriter.writerow(metaHeader)
    trueHeader = ['Column Index', 'Heading', 'Description', 'Data Type']
    csvWriter.writerow(trueHeader)
    for field in fields:
        mappedIndex = field.COLUMN_INDEX_CHOICES[field.columnIndex - 1][1]
        csvWriter.writerow(
            [mappedIndex, field.heading, field.description, field.dataType]
        )
    return response
Generates and returns compiled data dictionary from database. Returned as a CSV response.
def adam7_generate(width, height):
    for xstart, ystart, xstep, ystep in adam7:
        if xstart >= width:
            continue
        yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
Generate the coordinates for the reduced scanlines of an Adam7 interlaced image of size `width` by `height` pixels. Yields a generator for each pass, and each pass generator yields a series of (x, y, xstep) triples, each one identifying a reduced scanline consisting of pixels starting at (x, y) and taking every xstep pixel to the right.
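A short usage sketch of the generator above (the module-level `adam7` pass table from the surrounding PNG codec is assumed to be in scope):

# Enumerate the reduced scanlines of a hypothetical 8x8 Adam7 image.
for pass_index, scanlines in enumerate(adam7_generate(8, 8), start=1):
    for x, y, xstep in scanlines:
        # Pixels of this reduced scanline: (x, y), (x + xstep, y), ...
        print(pass_index, x, y, xstep)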
def calculate_equinoxes(self, year, timezone='UTC'):
    tz = pytz.timezone(timezone)
    d1 = ephem.next_equinox(str(year))
    d = ephem.Date(str(d1))
    equinox1 = d.datetime() + tz.utcoffset(d.datetime())
    d2 = ephem.next_equinox(d1)
    d = ephem.Date(str(d2))
    equinox2 = d.datetime() + tz.utcoffset(d.datetime())
    return (equinox1.date(), equinox2.date())
Calculate both equinoxes of a year, adjusted to the given time zone.
def segment_to_vector(self, seg):
    ft_dict = {ft: val for (val, ft) in self.fts(seg)}
    return [ft_dict[name] for name in self.names]
Given a Unicode IPA segment, return a list of feature specifications in
canonical order.

Args:
    seg (unicode): IPA consonant or vowel

Returns:
    list: feature specifications ('+'/'-'/'0') in the order from
    `FeatureTable.names`
def get_prefixes(self, query):
    try:
        res = Prefix.smart_search(query, {})
    except socket.error:
        print >> sys.stderr, "Connection refused, please check hostname & port"
        sys.exit(1)
    except xmlrpclib.ProtocolError:
        print >> sys.stderr, "Authentication failed, please check your username / password"
        sys.exit(1)
    for p in res['result']:
        p.prefix_ipy = IPy.IP(p.prefix)
        self.prefixes.append(p)
Get prefix data from NIPAP
def to_paginated_list(self, result, _ns, _operation, **kwargs):
    items, context = self.parse_result(result)
    headers = dict()
    paginated_list = PaginatedList(
        items=items,
        _page=self,
        _ns=_ns,
        _operation=_operation,
        _context=context,
    )
    return paginated_list, headers
Convert a controller result to a paginated list. The result format is assumed to meet the contract of this page class's `parse_result` function.
def _create_job(self, mapping):
    job_id = self.bulk.create_insert_job(mapping["sf_object"],
                                         contentType="CSV")
    self.logger.info(" Created bulk job {}".format(job_id))
    local_ids_for_batch = {}
    for batch_file, local_ids in self._get_batches(mapping):
        batch_id = self.bulk.post_batch(job_id, batch_file)
        local_ids_for_batch[batch_id] = local_ids
        self.logger.info(" Uploaded batch {}".format(batch_id))
    self.bulk.close_job(job_id)
    return job_id, local_ids_for_batch
Initiate a bulk insert and upload batches to run in parallel.
def get_entry_view(self, key):
    check_not_none(key, "key can't be None")
    key_data = self._to_data(key)
    return self._encode_invoke_on_key(map_get_entry_view_codec, key_data,
                                      key=key_data, thread_id=thread_id())
Returns the EntryView for the specified key.

**Warning: This method returns a clone of original mapping, modifying the
returned value does not change the actual value in the map. One should put
modified value back to make changes visible to all nodes.**

**Warning 2: This method uses __hash__ and __eq__ methods of binary form
of the key, not the actual implementations of __hash__ and __eq__ defined
in key's class.**

:param key: (object), the key of the entry.
:return: (EntryView), EntryView of the specified key.

.. seealso:: :class:`~hazelcast.core.EntryView` for more info about EntryView.
def font_extents(self):
    extents = ffi.new('cairo_font_extents_t *')
    cairo.cairo_font_extents(self._pointer, extents)
    self._check_status()
    return (
        extents.ascent, extents.descent, extents.height,
        extents.max_x_advance, extents.max_y_advance)
Return the extents of the currently selected font.

Values are given in the current user-space coordinate system.

Because font metrics are in user-space coordinates, they are mostly, but
not entirely, independent of the current transformation matrix. If you
call :meth:`context.scale(2) <scale>`, text will be drawn twice as big,
but the reported text extents will not be doubled. They will change
slightly due to hinting (so you can't assume that metrics are independent
of the transformation matrix), but otherwise will remain unchanged.

:returns:
    A ``(ascent, descent, height, max_x_advance, max_y_advance)``
    tuple of floats.

:obj:`ascent`
    The distance that the font extends above the baseline. Note that this
    is not always exactly equal to the maximum of the extents of all the
    glyphs in the font, but rather is picked to express the font
    designer's intent as to how the font should align with elements above
    it.
:obj:`descent`
    The distance that the font extends below the baseline. This value is
    positive for typical fonts that include portions below the baseline.
    Note that this is not always exactly equal to the maximum of the
    extents of all the glyphs in the font, but rather is picked to
    express the font designer's intent as to how the font should align
    with elements below it.
:obj:`height`
    The recommended vertical distance between baselines when setting
    consecutive lines of text with the font. This is greater than
    ``ascent + descent`` by a quantity known as the line spacing or
    external leading. When space is at a premium, most fonts can be set
    with only a distance of ``ascent + descent`` between lines.
:obj:`max_x_advance`
    The maximum distance in the X direction that the origin is advanced
    for any glyph in the font.
:obj:`max_y_advance`
    The maximum distance in the Y direction that the origin is advanced
    for any glyph in the font. This will be zero for normal fonts used
    for horizontal writing. (The scripts of East Asia are sometimes
    written vertically.)
def get_item_query_session_for_bank(self, bank_id):
    if not self.supports_item_query():
        raise errors.Unimplemented()
    return sessions.ItemQuerySession(bank_id, runtime=self._runtime)
Gets the ``OsidSession`` associated with the item query service for the
given bank.

arg:    bank_id (osid.id.Id): the ``Id`` of the bank
return: (osid.assessment.ItemQuerySession) - ``an _item_query_session``
raise:  NotFound - ``bank_id`` not found
raise:  NullArgument - ``bank_id`` is ``null``
raise:  OperationFailed - ``unable to complete request``
raise:  Unimplemented - ``supports_item_query()`` or
        ``supports_visible_federation()`` is ``false``

*compliance: optional -- This method must be implemented if
``supports_item_query()`` and ``supports_visible_federation()`` are
``true``.*
def init_layer(self):
    self.layer = self.vector.GetLayer()
    self.__features = [None] * self.nfeatures
Initialize a layer object.
def _create_dataset(self, group, chunk_size, compression,
                    compression_opts):
    if chunk_size == 'auto':
        chunks = True
    else:
        per_chunk = (
            nb_per_chunk(20, 1, chunk_size)
            if self.dtype == np.dtype('O')
            else nb_per_chunk(
                np.dtype(self.dtype).itemsize, 1, chunk_size))
        chunks = (per_chunk,)
    shape = (0,)
    maxshape = (None,)
    group.create_dataset(
        self.name, shape, dtype=self.dtype, chunks=chunks,
        maxshape=maxshape, compression=compression,
        compression_opts=compression_opts)
Create an empty dataset in a group.
def update(self, item):
    if item.matrix not in self.data:
        self.data[item.matrix] = []
    result = Select(self.data[item.matrix]).where(
        lambda entry: entry.stage == item.stage).build()
    if len(result) > 0:
        stage = result[0]
        stage.status = item.status
        stage.add(item.timestamp, item.information)
    else:
        stage = CollectorStage(stage=item.stage, status=item.status)
        stage.add(item.timestamp, item.information)
        self.data[item.matrix].append(stage)
Add a collector item.

Args:
    item (CollectorUpdate): event data like stage, timestamp and status.
def get_mimetype(path):
    filename = os.path.split(path)[1]
    mimetype = mimetypes.guess_type(filename)[0]
    if mimetype is None:
        mimetype = 'text/x-plain'
    _logger().debug('mimetype detected: %s', mimetype)
    return mimetype
Guesses the mime type of a file. If mime type cannot be detected, plain
text is assumed.

:param path: path of the file
:return: the corresponding mime type.
def ssl_server_options():
    cafile = options.ssl_ca_cert
    keyfile = options.ssl_key
    certfile = options.ssl_cert
    verify_mode = options.ssl_cert_reqs
    try:
        context = ssl.create_default_context(
            purpose=ssl.Purpose.CLIENT_AUTH, cafile=cafile)
        context.load_cert_chain(certfile=certfile, keyfile=keyfile)
        context.verify_mode = verify_mode
        return context
    except AttributeError:
        ssl_options = {
            'ca_certs': cafile,
            'keyfile': keyfile,
            'certfile': certfile,
            'cert_reqs': verify_mode
        }
        return ssl_options
SSL options for the Tornado HTTPS server.

These options are defined in each application's default.conf file. If
left empty, use the self-generated keys and certificates included in this
package.

This function is backward compatible with Python versions lower than
2.7.9, where ssl.SSLContext is not available.
def stop_polling(self):
    if hasattr(self, '_polling') and self._polling:
        log.info('Stop polling...')
        self._polling = False
Break the long-polling process.
def click_element_at_coordinates(self, coordinate_X, coordinate_Y):
    self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y))
    driver = self._current_application()
    action = TouchAction(driver)
    action.press(x=coordinate_X, y=coordinate_Y).release().perform()
Click an element at the given coordinates.
def make_relationship(self, relator,
                      direction=RELATIONSHIP_DIRECTIONS.BIDIRECTIONAL):
    if IEntity.providedBy(relator):
        rel = DomainRelationship(relator, self, direction=direction)
    elif IResource.providedBy(relator):
        rel = ResourceRelationship(relator, self, direction=direction)
    else:
        raise ValueError('Invalid relator argument "%s" for '
                         'relationship; must provide IEntity or '
                         'IResource.' % relator)
    return rel
Create a relationship object for this attribute from the given relator and relationship direction.
def get_resource_retriever(url):
    if url.startswith('http://') or url.startswith('https://'):
        return HttpResourceRetriever(url)
    else:
        raise ValueError('Unsupported scheme in url: %s' % url)
Get the appropriate retriever object for the specified url based on url
scheme. Makes assumption that HTTP urls do not require any special
authorization.

For HTTP urls: returns HTTPResourceRetriever
For s3:// urls: returns S3ResourceRetriever

:param url: url of the resource to be retrieved
:return: ResourceRetriever object
def union(self, x, y):
    repr_x = self.find(x)
    repr_y = self.find(y)
    if repr_x == repr_y:
        return False
    if self.rank[repr_x] == self.rank[repr_y]:
        self.rank[repr_x] += 1
        self.up[repr_y] = repr_x
    elif self.rank[repr_x] > self.rank[repr_y]:
        self.up[repr_y] = repr_x
    else:
        self.up[repr_x] = repr_y
    return True
Merges the part that contains x and the part containing y

:returns: False if x, y are already in the same part
:complexity: O(inverse_ackermann(n))
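A hedged usage sketch: the `UnionFind(n)` constructor name is an assumption, while `find`, `rank` and `up` come from the surrounding class.

uf = UnionFind(5)            # hypothetical constructor over 5 elements
uf.union(0, 1)               # True: two distinct parts were merged
uf.union(1, 0)               # False: 0 and 1 already share a representative
assert uf.find(0) == uf.find(1)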
def _publish_date(self, item):
    url = item['url']
    html = deepcopy(item['spider_response'].body)
    publish_date = None
    try:
        if html is None:
            request = urllib2.Request(url)
            html = urllib2.build_opener().open(request).read()
        html = BeautifulSoup(html, "lxml")
        publish_date = self._extract_from_json(html)
        if publish_date is None:
            publish_date = self._extract_from_meta(html)
        if publish_date is None:
            publish_date = self._extract_from_html_tag(html)
        if publish_date is None:
            publish_date = self._extract_from_url(url)
    except Exception as e:
        pass
    return publish_date
Returns the publish_date of the extracted article.
def get_base(self):
    if self._type == 'query':
        return self._observable.get_query_base()
    return self._observable.get_target_base()
Get the single base at this position.

:returns: base
:rtype: char
def check_key(key, allowed):
    if key in allowed:
        return True
    for pattern in allowed:
        if fnmatch(key, pattern):
            return True
    return False
Validate that the specified key is allowed according to the provided list
of patterns.
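A few illustrative calls (the function relies on `fnmatch.fnmatch` being imported as `fnmatch`):

from fnmatch import fnmatch  # import assumed by check_key above

allowed = ['exact-key', 'metrics.*']
check_key('exact-key', allowed)    # True: literal membership
check_key('metrics.cpu', allowed)  # True: matches the 'metrics.*' pattern
check_key('logs.cpu', allowed)     # False: no literal or pattern match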
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
    random_part = u''.join(
        random.choice(
            string.ascii_letters + string.digits
        ) for _ in range(lg - len(prefix or "") - 1)
    )
    if prefix is not None:
        return u'%s-%s' % (prefix, random_part)
    else:
        return random_part
Generate a ticket with prefix ``prefix`` and length ``lg``

:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode
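Worked example of the length arithmetic: the random part is lg - len(prefix) - 1 characters, the -1 accounting for the hyphen.

ticket = _gen_ticket('ST', lg=29)  # e.g. u'ST-3kQ9...'; exact value is random
assert len(ticket) == 29           # 'ST' + '-' + 26 random characters
assert ticket.startswith('ST-')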
def by_occupied_housing_units(self,
                              lower=-1,
                              upper=2 ** 31,
                              zipcode_type=ZipcodeType.Standard,
                              sort_by=SimpleZipcode.occupied_housing_units.name,
                              ascending=False,
                              returns=DEFAULT_LIMIT):
    return self.query(
        occupied_housing_units_lower=lower,
        occupied_housing_units_upper=upper,
        sort_by=sort_by, zipcode_type=zipcode_type,
        ascending=ascending, returns=returns,
    )
Search zipcode information by the number of occupied housing units.
def add_bookmark(self, time):
    if self.annot is None:
        msg = 'No score file loaded'
        lg.debug(msg)
        error_dialog = QErrorMessage()
        error_dialog.setWindowTitle('Error adding bookmark')
        error_dialog.showMessage(msg)
        error_dialog.exec()
        return
    answer = QInputDialog.getText(self, 'New Bookmark',
                                  'Enter bookmark\'s name')
    if answer[1]:
        name = answer[0]
        self.annot.add_bookmark(name, time)
        lg.info('Added Bookmark ' + name + ' at ' + str(time))
        self.update_annotations()
Run this function when user adds a new bookmark.

Parameters
----------
time : tuple of float
    start and end of the new bookmark, in s
def fetch_cvparams_values_from_subel(base, subelname, paramnames, ns):
    sub_el = basereader.find_element_xpath(base, subelname, ns)
    cvparams = get_all_cvparams(sub_el, ns)
    output = []
    for param in paramnames:
        output.append(fetch_cvparam_value_by_name(cvparams, param))
    return output
Searches a base element for subelement by name, then takes the cvParams of that subelement and returns the values as a list for the paramnames that match. Value order in list equals input paramnames order.
def query_filter(query):
    try:
        return {'operation': int(query)}
    except ValueError:
        pass
    if isinstance(query, string_types):
        query = query.strip()
        for operation in KNOWN_OPERATIONS:
            if query.startswith(operation):
                query = "%s %s" % (operation,
                                   query[len(operation):].strip())
                return {'operation': query}
        if query.startswith('*') and query.endswith('*'):
            query = "*= %s" % query.strip('*')
        elif query.startswith('*'):
            query = "$= %s" % query.strip('*')
        elif query.endswith('*'):
            query = "^= %s" % query.strip('*')
        else:
            query = "_= %s" % query
    return {'operation': query}
Translate a query-style string to a 'filter'.

Query can be the following formats:

Case Insensitive
    'value'   OR '*= value'   Contains
    'value*'  OR '^= value'   Begins with value
    '*value'  OR '$= value'   Ends with value
    '*value*' OR '_= value'   Contains value

Case Sensitive
    '~ value'    Contains
    '!~ value'   Does not contain
    '> value'    Greater than value
    '< value'    Less than value
    '>= value'   Greater than or equal to value
    '<= value'   Less than or equal to value

:param string query: query string
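Illustrative translations, following directly from the branches of the function above:

query_filter('100')     # {'operation': 100}       numeric strings become ints
query_filter('*ring*')  # {'operation': '*= ring'}
query_filter('ring*')   # {'operation': '^= ring'}
query_filter('*ring')   # {'operation': '$= ring'}
query_filter('ring')    # {'operation': '_= ring'}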
def midPoint(self, point):
    x = (self.x + point.x) / 2.0
    y = (self.y + point.y) / 2.0
    z = (self.z + point.z) / 2.0
    return MapPoint(x, y, z)
Identify the midpoint between two MapPoints.
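A minimal sketch, assuming MapPoint(x, y, z) exposes its coordinates as .x, .y and .z (as the method body implies):

a = MapPoint(0.0, 0.0, 0.0)
b = MapPoint(2.0, 4.0, 6.0)
m = a.midPoint(b)  # m.x == 1.0, m.y == 2.0, m.z == 3.0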
def legacy_signature(**kwargs_mapping):
    def signature_decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            redirected_kwargs = {
                kwargs_mapping[k] if k in kwargs_mapping else k: v
                for k, v in kwargs.items()
            }
            return f(*args, **redirected_kwargs)
        return wrapper
    return signature_decorator
This decorator makes it possible to call a function using old argument
names when they are passed as keyword arguments.

@legacy_signature(old_arg1='arg1', old_arg2='arg2')
def func(arg1, arg2=1):
    return arg1 + arg2

func(old_arg1=1) == 2
func(old_arg1=1, old_arg2=2) == 3
def on_hover(self, callback, remove=False):
    self._hover_callbacks.register_callback(callback, remove=remove)
The hover callback takes an unpacked set of keyword arguments.
def longitude(self, longitude):
    if not (-180 <= longitude <= 180):
        raise ValueError('longitude was {}, but has to be in [-180, 180]'
                         .format(longitude))
    self._longitude = longitude
Setter for longitude.
def get_invocation_command_nodefault(
        toolset, tool, user_provided_command=[], additional_paths=[],
        path_last=False):
    assert isinstance(toolset, basestring)
    assert isinstance(tool, basestring)
    assert is_iterable_typed(user_provided_command, basestring)
    assert is_iterable_typed(additional_paths, basestring) or additional_paths is None
    assert isinstance(path_last, (int, bool))

    if not user_provided_command:
        command = find_tool(tool, additional_paths, path_last)
        if not command and __debug_configuration:
            print "warning: toolset", toolset, "initialization: can't find tool", tool
    else:
        command = check_tool(user_provided_command)
        if not command and __debug_configuration:
            print "warning: toolset", toolset, "initialization:"
            print "warning: can't find user-provided command", user_provided_command
            command = []
    if command:
        command = ' '.join(command)
    return command
A helper rule to get the command to invoke some tool. If 'user-provided-command' is not given, tries to find binary named 'tool' in PATH and in the passed 'additional-path'. Otherwise, verifies that the first element of 'user-provided-command' is an existing program. This rule returns the command to be used when invoking the tool. If we can't find the tool, a warning is issued. If 'path-last' is specified, PATH is checked after 'additional-paths' when searching for 'tool'.
def setDefaultColorRamp(self, colorRampEnum=ColorRampEnum.COLOR_RAMP_HUE):
    self._colorRamp = ColorRampGenerator.generateDefaultColorRamp(colorRampEnum)
Sets the default color ramp, stored as a list of RGB tuples
def unshare_project(project_id, usernames, **kwargs):
    user_id = kwargs.get('user_id')
    proj_i = _get_project(project_id)
    proj_i.check_share_permission(user_id)
    # NOTE: `write` and `share` were undefined names in the original body;
    # reading them from kwargs is an assumption about the intent.
    write = kwargs.get('write', 'N')
    share = kwargs.get('share', 'N')
    for username in usernames:
        user_i = _get_user(username)
        proj_i.unset_owner(user_i.id, write=write, share=share)
    db.DBSession.flush()
Un-share a project with a list of users, identified by their usernames.
def close_multicast_socket(sock, address):
    if sock is None:
        return
    if address:
        mreq = make_mreq(sock.family, address)
        if sock.family == socket.AF_INET:
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
        elif sock.family == socket.AF_INET6:
            sock.setsockopt(ipproto_ipv6(), socket.IPV6_LEAVE_GROUP, mreq)
    sock.close()
Cleans up the given multicast socket. Unregisters it from the multicast
group.

Parameters should be the result of create_multicast_socket.

:param sock: A multicast socket
:param address: The multicast address used by the socket
def get_source_by_name(self, name):
    srcs = self.get_sources_by_name(name)
    if len(srcs) == 1:
        return srcs[0]
    elif len(srcs) == 0:
        raise Exception('No source matching name: ' + name)
    elif len(srcs) > 1:
        raise Exception('Multiple sources matching name: ' + name)
Return a single source in the ROI with the given name. The input name
string can match any of the strings in the names property of the source
object. Case and whitespace are ignored when matching name strings. If no
sources are found or multiple sources match, an exception is thrown.

Parameters
----------
name : str
    Name string.

Returns
-------
srcs : `~fermipy.roi_model.Model`
    A source object.
def flush(self):
    if self.triggered and len(self.buffer) > 0:
        text = []
        for record in self.buffer:
            terminator = getattr(record, 'terminator', '\n')
            s = self.format(record)
            if terminator is not None:
                text.append(s + terminator)
            else:
                text.append(s)
        msg = MIMEText(''.join(text))
        msg['From'] = self.fromAddr
        msg['To'] = self.toAddr
        msg['Subject'] = self.subject
        smtp = smtplib.SMTP('localhost')
        smtp.sendmail(self.fromAddr, [self.toAddr], msg.as_string())
        smtp.quit()
    self.buffer = []
Send messages by e-mail. The sending of messages is suppressed if a trigger severity level has been set and none of the received messages was at that level or above. In that case the messages are discarded. Empty e-mails are discarded.
def reset_default_props(**kwargs):
    global _DEFAULT_PROPS
    pcycle = plt.rcParams['axes.prop_cycle']
    _DEFAULT_PROPS = {
        'color': itertools.cycle(_get_standard_colors(**kwargs))
        if len(kwargs) > 0
        else itertools.cycle([x['color'] for x in pcycle]),
        'marker': itertools.cycle(['o', 'x', '.', '+', '*']),
        'linestyle': itertools.cycle(['-', '--', '-.', ':']),
    }
Reset properties to initial cycle point
def reset(self):
    for key in list(self.keys()):
        self.iterators[key] = _itertools.cycle(self[key])
    return self
Resets the style cycle.
def get_object(self, view_kwargs, qs=None):
    self.before_get_object(view_kwargs)
    id_field = getattr(self, 'id_field', inspect(self.model).primary_key[0].key)
    try:
        filter_field = getattr(self.model, id_field)
    except Exception:
        raise Exception("{} has no attribute {}".format(self.model.__name__, id_field))
    url_field = getattr(self, 'url_field', 'id')
    filter_value = view_kwargs[url_field]
    query = self.retrieve_object_query(view_kwargs, filter_field, filter_value)
    if qs is not None:
        query = self.eagerload_includes(query, qs)
    try:
        obj = query.one()
    except NoResultFound:
        obj = None
    self.after_get_object(obj, view_kwargs)
    return obj
Retrieve an object through sqlalchemy

:params dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
def stop_workers(self, _join_arbiter=True):
    self._must_stop.set()
    self._workers.stop()
    self._result_notifier.join()
    self._broker.stop()
    if _join_arbiter:
        self._arbiter.join()
    self._reset()
Stop the workers and wait for them to terminate.
def resume_trial(self, trial):
    assert trial.status == Trial.PAUSED, trial.status
    self.start_trial(trial)
Resumes PAUSED trials. This is a blocking call.
def RemoveEventHandler(self, wb):
    from UcsBase import WriteUcsWarning
    if wb in self._wbs:
        self._remove_watch_block(wb)
    else:
        WriteUcsWarning("Event handler not found")
Removes an event handler.
async def shutdown(self):
    if self.log_output:
        logging.info('Shutting down ...')
    else:
        print('Shutting down ...')
    await self.send_reset()
    try:
        self.loop.stop()
    except:
        pass
    try:
        self.loop.close()
    except:
        pass
    sys.exit(0)
This method attempts an orderly shutdown.

If any exceptions are thrown, just ignore them.

:returns: No return value
def clone_repo(self):
    tempdir_path = tempfile.mkdtemp()
    if self.args.git:
        self.log.debug('Cloning git source repository from %s to %s',
                       self.source, tempdir_path)
        self.sh('git clone', self.source, tempdir_path)
    else:
        raise NotImplementedError('Unknown repo type')
    self.source = tempdir_path
Clone a repository containing the dotfiles source.
def create_pipe(backend_p):
    return Zsock(lib.zsys_create_pipe(byref(zsock_p.from_param(backend_p))),
                 False)
Create a pipe, which consists of two PAIR sockets connected over inproc.
The pipe is configured to use the zsys_pipehwm setting. Returns the
frontend socket if successful, NULL if failed.
def config():
    cfg = ConfigParser()
    cfg.read(os.path.join(os.path.dirname(os.path.realpath(ips_vagrant.__file__)),
                          'config/ipsv.conf'))
    return cfg
Load system configuration

@rtype: ConfigParser
async def sound(dev: Device, target, value):
    if target and value:
        click.echo("Setting %s to %s" % (target, value))
        click.echo(await dev.set_sound_settings(target, value))
    print_settings(await dev.get_sound_settings())
Get or set sound settings.
def Gaussian_filter(x, sigma, norm=True):
    x = check_float(x)
    sigma = check_float(sigma)
    val = np.exp(-0.5 * (x / sigma) ** 2)
    if norm:
        return val / (np.sqrt(2 * np.pi) * sigma)
    else:
        return val
r"""Gaussian filter This method implements a Gaussian filter. Parameters ---------- x : float Input data point sigma : float Standard deviation (filter scale) norm : bool Option to return normalised data. Default (norm=True) Returns ------- float Gaussian filtered data point Examples -------- >>> from modopt.signal.filter import Gaussian_filter >>> Gaussian_filter(1, 1) 0.24197072451914337 >>> Gaussian_filter(1, 1, False) 0.60653065971263342
def sample(self, bqm, beta_range=None, num_reads=10, num_sweeps=1000):
    if not isinstance(num_reads, int):
        raise TypeError("'samples' should be a positive integer")
    if num_reads < 1:
        raise ValueError("'samples' should be a positive integer")
    h, J, offset = bqm.to_ising()
    samples = []
    energies = []
    for __ in range(num_reads):
        sample, energy = ising_simulated_annealing(
            h, J, beta_range, num_sweeps)
        samples.append(sample)
        energies.append(energy)
    response = SampleSet.from_samples(samples, Vartype.SPIN, energies)
    response.change_vartype(bqm.vartype, offset, inplace=True)
    return response
Sample from low-energy spin states using simulated annealing.

Args:
    bqm (:obj:`.BinaryQuadraticModel`):
        Binary quadratic model to be sampled from.
    beta_range (tuple, optional):
        Beginning and end of the beta schedule (beta is the inverse
        temperature) as a 2-tuple. The schedule is applied linearly in
        beta. Default is chosen based on the total bias associated with
        each node.
    num_reads (int, optional, default=10):
        Number of reads. Each sample is the result of a single run of the
        simulated annealing algorithm.
    num_sweeps (int, optional, default=1000):
        Number of sweeps or steps.

Returns:
    :obj:`.SampleSet`

Note:
    This is a reference implementation, not optimized for speed and
    therefore not an appropriate sampler for benchmarking.
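A minimal usage sketch, assuming this method belongs to a dimod-style sampler class (the `SimulatedAnnealingSampler` name is an assumption):

import dimod  # dependency implied by BinaryQuadraticModel/SampleSet above

# Two-spin Ising problem with one ferromagnetic coupling.
bqm = dimod.BinaryQuadraticModel.from_ising({'a': 0.0, 'b': 0.0},
                                            {('a', 'b'): -1.0})
sampler = SimulatedAnnealingSampler()  # hypothetical class name
sampleset = sampler.sample(bqm, num_reads=10, num_sweeps=1000)
print(sampleset.first.energy)  # lowest energy found across the 10 reads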
def getPWMFrequency(self, device=DEFAULT_DEVICE_ID, message=True):
    return self._getPWMFrequency(device, message)
Get the PWM frequency stored on the hardware device.

:Keywords:
  device : `int`
    The device is the integer number of the hardware devices ID and is
    only used with the Pololu Protocol. Defaults to the hardware's
    default value.
  message : `bool`
    If set to `True` a text message will be returned, if set to `False`
    the integer stored in the Qik will be returned.

:Returns:
  A text message or an int. See the `message` parameter above.
def parse_pdb_ligand_info(self, pdb_ligand_info):
    mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
    for m in mtchs:
        if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
            ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
            if ligand_type:
                self.LigandType = ligand_type.group(1)
This only parses the ligand type as all the other information should be in the .cif file. The XML file has proper capitalization whereas the .cif file uses all caps for the ligand type.
def edit_imagefindpars():
    teal.teal(imagefindpars.__taskname__, returnAs=None,
              autoClose=True, loadOnly=False, canExecute=False)
Allows the user to edit the imagefindpars configObj in a TEAL GUI
def count(self, path):
    try:
        res = self.get_bite().count(self.list_path(path)).next()
        dir_count = res['directoryCount']
        file_count = res['fileCount']
        content_size = res['spaceConsumed']
    except StopIteration:
        dir_count = file_count = content_size = 0
    return {'content_size': content_size, 'dir_count': dir_count,
            'file_count': file_count}
Use snakebite.count, if available.

:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
def _boosted_value(name, action, key, value, boost):
    if boost is not None:
        value_key = 'query' if action in MATCH_ACTIONS else 'value'
        return {name: {'boost': boost, value_key: value}}
    return {name: value}
Boost a value, if a boost is given, for use in _process_queries
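Illustrative outputs (assuming 'match' is in MATCH_ACTIONS and 'term' is not):

_boosted_value('title', 'match', 'title', 'python', 2.0)
# -> {'title': {'boost': 2.0, 'query': 'python'}}
_boosted_value('title', 'term', 'title', 'python', None)
# -> {'title': 'python'}   no boost given, value passed through unchanged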
def search_file(search_root, search_filename, instance_relative_root=False):
    if instance_relative_root:
        search_root = os.path.join(current_app.instance_path, search_root)
    file_path = None
    file_ext = None
    for file in os.listdir(search_root):
        filename, ext = os.path.splitext(file)
        if filename == search_filename and ext and ext != '.':
            file_path = os.path.join(search_root, filename + ext)
            file_ext = ext[1:]
            break
    return file_path, file_ext
Search for a filename in a specific search root dir.

:param search_root: root dir to search
:param search_filename: filename to search (no extension)
:param instance_relative_root: search root is relative to instance path
:return: tuple(full_file_path, extension without heading dot)
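A hypothetical call, assuming a file templates/report.html exists on disk:

path, ext = search_file('templates', 'report')
# path == 'templates/report.html', ext == 'html'
# If nothing matches, both values are None.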
def map_tree(visitor, tree):
    newn = [map_tree(visitor, node) for node in tree.nodes]
    return visitor(tree, newn)
Apply function to nodes
def get_initial(self, form, name):
    if hasattr(form, 'initial'):
        return form.initial.get(name, None)
    return None
Get the initial data that got passed into the superform for this
composite field. It should return ``None`` if no initial values were
given.
def tidy(fnames):
    for fname in fnames:
        try:
            node = nrml.read(fname)
        except ValueError as err:
            print(err)
            return
        with open(fname + '.bak', 'wb') as f:
            f.write(open(fname, 'rb').read())
        with open(fname, 'wb') as f:
            nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns'])
        print('Reformatted %s, original left in %s.bak' % (fname, fname))
Reformat a NRML file in a canonical form. That also means reducing the precision of the floats to a standard value. If the file is invalid, a clear error message is shown.
def children(self):
    children = []
    child_nodes = getattr(self.parsed_response, 'Children')
    for child in getattr(child_nodes, 'BrowseNode', []):
        children.append(AmazonBrowseNode(child))
    return children
This browse node's children in the browse node tree. :return: A list of this browse node's children in the browse node tree.
def add_pr_curve(self, tag, labels, predictions, num_thresholds,
                 global_step=None, weights=None):
    if num_thresholds < 2:
        raise ValueError('num_thresholds must be >= 2')
    labels = _make_numpy_array(labels)
    predictions = _make_numpy_array(predictions)
    self._file_writer.add_summary(
        pr_curve_summary(tag, labels, predictions, num_thresholds, weights),
        global_step)
Adds precision-recall curve.

Note: This function internally calls `asnumpy()` for MXNet `NDArray`
inputs. Since `asnumpy()` is a blocking function call, this function
would block the main thread till it returns. It may consequently affect
the performance of async execution of the MXNet engine.

Parameters
----------
tag : str
    A tag attached to the summary. Used by TensorBoard for organization.
labels : MXNet `NDArray` or `numpy.ndarray`.
    The ground truth values. A tensor of 0/1 values with arbitrary shape.
predictions : MXNet `NDArray` or `numpy.ndarray`.
    A float32 tensor whose values are in the range `[0, 1]`. Dimensions
    must match those of `labels`.
num_thresholds : int
    Number of thresholds, evenly distributed in `[0, 1]`, to compute PR
    metrics for. Should be `>= 2`. This value should be a constant
    integer value, not a tensor that stores an integer. The thresholds
    for computing the pr curves are calculated in the following way:
    `width = 1.0 / (num_thresholds - 1),
    thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`.
global_step : int
    Global step value to record.
weights : MXNet `NDArray` or `numpy.ndarray`.
    Optional float32 tensor. Individual counts are multiplied by this
    value. This tensor must be either the same shape as or broadcastable
    to the `labels` tensor.
def get_folders(self):
    endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/'
    r = requests.get(endpoint, headers=self._headers)
    if check_response(r):
        return Folder._json_to_folders(self, r.json())
Returns a list of all folders for this account

Returns:
    List[:class:`Folder <pyOutlook.core.folder.Folder>`]
def error(message, code=1):
    if message:
        print('ERROR: {0}'.format(message), file=sys.stderr)
    else:
        print(file=sys.stderr)
    sys.exit(code)
Prints an error message to stderr and exits with a status of 1 by default.
def open(self, page, parms=None, payload=None, HTTPrequest=None):
    response = self.open_raw(page, parms, payload, HTTPrequest)
    return response.read()
Opens a page from the server with optional content. Returns the string response.
def default_preference_list(self, prefs):
    prefs = _check_preferences(prefs)
    if prefs is not None:
        self._prefs = prefs
Set the default preference list.

:param str prefs: A string containing the default preferences for
    ciphers, digests, and compression algorithms.
def createStaticLibBuilder(env):
    try:
        static_lib = env['BUILDERS']['StaticLibrary']
    except KeyError:
        action_list = [SCons.Action.Action("$ARCOM", "$ARCOMSTR")]
        if env.get('RANLIB', False) or env.Detect('ranlib'):
            ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
            action_list.append(ranlib_action)
        static_lib = SCons.Builder.Builder(action=action_list,
                                           emitter='$LIBEMITTER',
                                           prefix='$LIBPREFIX',
                                           suffix='$LIBSUFFIX',
                                           src_suffix='$OBJSUFFIX',
                                           src_builder='StaticObject')
        env['BUILDERS']['StaticLibrary'] = static_lib
        env['BUILDERS']['Library'] = static_lib
    return static_lib
This is a utility function that creates the StaticLibrary Builder in an Environment if it is not there already. If it is already there, we return the existing one.
def raise_for_missing_namespace(self, line: str, position: int,
                                namespace: str, name: str) -> None:
    if not self.has_namespace(namespace):
        raise UndefinedNamespaceWarning(self.get_line_number(), line,
                                        position, namespace, name)
Raise an exception if the namespace is not defined.
def is_empty(value, msg=None, except_=None, inc_zeros=True):
    if hasattr(value, 'empty'):
        value = not bool(value.empty)
    elif inc_zeros and value in ZEROS:
        value = True
    else:
        pass
    _is_null = is_null(value, except_=False)
    result = bool(_is_null or not value)
    if except_:
        return is_true(result, msg=msg, except_=except_)
    else:
        return bool(result)
Check whether the value is defined but null or empty-like.
def update(self, other):
    if not isinstance(other, CtsTextgroupMetadata):
        raise TypeError("Cannot add %s to CtsTextgroupMetadata" % type(other))
    elif str(self.urn) != str(other.urn):
        raise InvalidURN("Cannot add CtsTextgroupMetadata %s to CtsTextgroupMetadata %s " % (self.urn, other.urn))
    for urn, work in other.works.items():
        if urn in self.works:
            self.works[urn].update(deepcopy(work))
        else:
            self.works[urn] = deepcopy(work)
            self.works[urn].parent = self
        self.works[urn].resource = None
    return self
Merge two Textgroup Objects.

- Original (left Object) keeps its parent.
- Added document merges with work if it already exists

:param other: Textgroup object
:type other: CtsTextgroupMetadata
:return: Textgroup Object
:rtype: CtsTextgroupMetadata
def discover() -> List[Tuple[str, str]]:
    if IS_ROBOT and os.path.isdir('/dev/modules'):
        devices = os.listdir('/dev/modules')
    else:
        devices = []
    discovered_modules = []
    module_port_regex = re.compile('|'.join(MODULE_TYPES.keys()), re.I)
    for port in devices:
        match = module_port_regex.search(port)
        if match:
            name = match.group().lower()
            if name not in MODULE_TYPES:
                log.warning("Unexpected module connected: {} on {}"
                            .format(name, port))
                continue
            absolute_port = '/dev/modules/{}'.format(port)
            discovered_modules.append((absolute_port, name))
    log.info('Discovered modules: {}'.format(discovered_modules))
    return discovered_modules
Scan for connected modules and instantiate handler classes
def run_pipeline(pipeline, context, pipeline_context_input=None,
                 parse_input=True):
    logger.debug("starting")
    try:
        if parse_input:
            logger.debug("executing context_parser")
            prepare_context(pipeline=pipeline,
                            context_in_string=pipeline_context_input,
                            context=context)
        else:
            logger.debug("skipping context_parser")
        pypyr.stepsrunner.run_step_group(
            pipeline_definition=pipeline,
            step_group_name='steps',
            context=context)
        logger.debug("pipeline steps complete. Running on_success steps now.")
        pypyr.stepsrunner.run_step_group(
            pipeline_definition=pipeline,
            step_group_name='on_success',
            context=context)
    except Exception:
        logger.error("Something went wrong. Will now try to run on_failure.")
        pypyr.stepsrunner.run_failure_step_group(
            pipeline=pipeline,
            context=context)
        logger.debug("Raising original exception to caller.")
        raise
    logger.debug("done")
Run the specified pypyr pipeline.

This function runs the actual pipeline. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.

Pipeline and context should be already loaded.

Args:
    pipeline (dict): Dictionary representing the pipeline.
    context (pypyr.context.Context): Reusable context object.
    pipeline_context_input (str): Initialize the pypyr context with this
        string.
    parse_input (bool): run context_parser in pipeline.

Returns:
    None
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
    plt = pretty_plot(**kwargs)
    pp = np.polyfit(x, y, deg)
    xp = np.linspace(min(x), max(x), 200)
    plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o')
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    return plt
Convenience method to plot data with trend lines based on polynomial fit.

Args:
    x: Sequence of x data.
    y: Sequence of y data.
    deg (int): Degree of polynomial. Defaults to 1.
    xlabel (str): Label for x-axis.
    ylabel (str): Label for y-axis.
    \\*\\*kwargs: Keyword args passed to pretty_plot.

Returns:
    matplotlib.pyplot object.
def DEFAULT_NULLVALUE(test):
    return (False if isinstance(test, bool)
            else 0 if isinstance(test, int)
            else 0.0 if isinstance(test, float)
            else '')
Returns a null value for each of various kinds of test values.

**Parameters**

    **test** : bool, int, float or string

        Value to test.

**Returns**

    **null** : element in `[False, 0, 0.0, '']`

        Null value corresponding to the given test value:

        * if `test` is a `bool`, return `False`
        * else if `test` is an `int`, return `0`
        * else if `test` is a `float`, return `0.0`
        * else `test` is a `str`, return `''`
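Concrete calls, following directly from the chained conditional (the bool check must come first, since isinstance(True, int) is also true in Python):

DEFAULT_NULLVALUE(True)    # False
DEFAULT_NULLVALUE(42)      # 0
DEFAULT_NULLVALUE(3.14)    # 0.0
DEFAULT_NULLVALUE('text')  # ''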
def fit(self, sequences, y=None):
    super(BACE, self).fit(sequences, y=y)
    if self.n_macrostates is not None:
        self._do_lumping()
    else:
        raise RuntimeError('n_macrostates must not be None to fit')
    return self
Fit a BACE lumping model using a sequence of cluster assignments.

Parameters
----------
sequences : list(np.ndarray(dtype='int'))
    List of arrays of cluster assignments
y : None
    Unused, present for sklearn compatibility only.

Returns
-------
self
def _handle_join_dags(self, request):
    if request.payload['names'] is None:
        send_response = len(self._dags_running) <= 1
    else:
        send_response = all([name not in self._dags_running.keys()
                             for name in request.payload['names']])
    if send_response:
        return Response(success=True, uid=request.uid)
    else:
        return None
The handler for the join_dags request.

If dag names are given in the payload only return a valid Response if
none of the dags specified by the names are running anymore. If no dag
names are given, wait for all dags except one, which by design is the one
that issued the request, to be finished.

Args:
    request (Request): Reference to a request object containing the
        incoming request.

Returns:
    Response: A response object containing the following fields:
        - success: True if all dags the request was waiting for have
          completed.
def as_dict(self):
    out = {}
    for prop in self:
        propval = getattr(self, prop)
        if hasattr(propval, 'for_json'):
            out[prop] = propval.for_json()
        elif isinstance(propval, list):
            out[prop] = [getattr(x, 'for_json', lambda: x)() for x in propval]
        elif isinstance(propval, (ProtocolBase, LiteralValue)):
            out[prop] = propval.as_dict()
        elif propval is not None:
            out[prop] = propval
    return out
Return a dictionary containing the current values of the object.

Returns:
    (dict): The object represented as a dictionary
def list_udfs(self, database=None, like=None):
    if not database:
        database = self.current_database
    statement = ddl.ListFunction(database, like=like, aggregate=False)
    with self._execute(statement, results=True) as cur:
        result = self._get_udfs(cur, udf.ImpalaUDF)
    return result
Lists all UDFs associated with given database

Parameters
----------
database : string
like : string for searching (optional)
def get_query_cache_key(compiler):
    sql, params = compiler.as_sql()
    check_parameter_types(params)
    cache_key = '%s:%s:%s' % (compiler.using, sql,
                              [text_type(p) for p in params])
    return sha1(cache_key.encode('utf-8')).hexdigest()
Generates a cache key from a SQLCompiler.

This cache key is specific to the SQL query and its context (which
database is used). The same query in the same context (= the same
database) must generate the same cache key.

:arg compiler: A SQLCompiler that will generate the SQL query
:type compiler: django.db.models.sql.compiler.SQLCompiler
:return: A cache key
:rtype: str
def extract_feature_dependent_feature(self, extractor, force_extraction=False,
                                      verbose=0, add_args=None,
                                      custom_name=None):
    if self._prepopulated is False:
        raise errors.EmptyDatabase(self.dbpath)
    else:
        return extract_feature_dependent_feature_base(
            self.dbpath, self.path_to_set, self._set_object, extractor,
            force_extraction, verbose, add_args, custom_name)
Extracts a feature which may be dependent on other features and stores it
in the database

Parameters
----------
extractor : function
    Takes the path of a data point, a dictionary of all other features
    and *args as parameters and returns a feature
force_extraction : boolean
    If True - will re-extract feature even if a feature with this name
    already exists in the database, otherwise, will only extract if the
    feature doesn't exist in the database. default value: False
verbose : int
    If bigger than 0, will print the current number of the file for which
    data is being extracted
add_args : optional arguments for the extractor
    (list/dictionary/tuple/whatever). if None, the extractor should take
    only one input argument - the file path. default value: None
custom_name : string
    Optional name for the feature (it will be stored in the database with
    the custom_name instead of extractor function name). if None, the
    extractor function name will be used. default value: None

Returns
-------
None
def empty_bar_plot(ax):
    plt.sca(ax)
    plt.setp(plt.gca(), xticks=[], xticklabels=[])
    return ax
Delete all axis ticks and labels
def register_file(self, filepath, creator, status=FileStatus.no_file,
                  flags=FileFlags.no_flags):
    try:
        file_handle = self.get_handle(filepath)
    except KeyError:
        pass
    else:
        # Only raised when get_handle succeeds, i.e. the file exists.
        raise KeyError("File %s already exists in archive" % filepath)
    localpath = self._get_localpath(filepath)
    if status == FileStatus.exists:
        fullpath = self._get_fullpath(filepath)
        if not os.path.exists(fullpath):
            print("register_file called on missing file %s" % fullpath)
            status = FileStatus.missing
            timestamp = 0
        else:
            timestamp = int(os.stat(fullpath).st_mtime)
    else:
        timestamp = 0
    key = len(self._table) + 1
    file_handle = FileHandle(path=localpath, key=key, creator=creator,
                             timestamp=timestamp, status=status, flags=flags)
    file_handle.append_to_table(self._table)
    self._cache[localpath] = file_handle
    return file_handle
Register a file in the archive.

If the file already exists, this raises a `KeyError`

Parameters
----------
filepath : str
    The path to the file
creator : int
    A unique key for the job that created this file
status : `FileStatus`
    Enumeration giving current status of file
flags : `FileFlags`
    Enumeration giving flags set on this file

Returns `FileHandle`
def describe(self):
    return {
        "name": self.name,
        "params": self.params,
        "returns": self.returns,
        "description": self.description,
    }
Describes the method.

:return: Description
:rtype: dict[str, object]
def calc(self, x: Image, *args: Any, **kwargs: Any) -> Image:
    "Apply to image `x`, wrapping it if necessary."
    if self._wrap:
        return getattr(x, self._wrap)(self.func, *args, **kwargs)
    else:
        return self.func(x, *args, **kwargs)
Apply to image `x`, wrapping it if necessary.
def _verify_credentials(self):
    r = requests.get(self.apiurl + "account/verify_credentials.xml",
                     auth=HTTPBasicAuth(self._username, self._password),
                     headers=self.header)
    if r.status_code != 200:
        raise UserLoginFailed("Username or Password incorrect.")
An internal method that verifies the credentials given at instantiation.

:raises: :class:`Pymoe.errors.UserLoginFailed`
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
                 service="hostd", adapter="SOAP", namespace=None,
                 path="/sdk", version=None, keyFile=None, certFile=None,
                 thumbprint=None, b64token=None, mechanism='userpass'):
    if hasattr(ssl, '_create_unverified_context'):
        sslContext = ssl._create_unverified_context()
    else:
        sslContext = None
    return Connect(host=host,
                   port=port,
                   user=user,
                   pwd=pwd,
                   service=service,
                   adapter=adapter,
                   namespace=namespace,
                   path=path,
                   version=version,
                   keyFile=keyFile,
                   certFile=certFile,
                   thumbprint=thumbprint,
                   sslContext=sslContext,
                   b64token=b64token,
                   mechanism=mechanism)
Provides a standard method for connecting to a specified server without SSL verification. Useful when connecting to servers with self-signed certificates or when you wish to ignore SSL altogether. Will attempt to create an unverified SSL context and then connect via the Connect method.
def load_and_print_resfile(filename, info_dict=None):
    if info_dict is None:
        info_dict = dict()
        info_dict["mass"] = 1.23
        info_dict["nom_cap"] = 3600
        info_dict["tot_mass"] = 2.33
    d = CellpyData()
    print("filename:", filename)
    print("info_dict in:", end=' ')
    print(info_dict)
    d.from_raw(filename)
    d.set_mass(info_dict["mass"])
    d.make_step_table()
    d.make_summary()
    for test in d.datasets:
        print("newtest")
        print(test)
    return info_dict
Load a raw data file and print information.

Args:
    filename (str): name of the resfile.
    info_dict (dict):

Returns:
    info (str): string describing something.
def is_templatetags_module_valid_constant(node):
    if node.name not in ('register',):
        return False
    parent = node.parent
    while not isinstance(parent, Module):
        parent = parent.parent
    if "templatetags." not in parent.name:
        return False
    return True
Suppress warnings for valid constants in templatetags module.