code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def inherit_docstrings(cls):
    """Class decorator that copies missing method docstrings from bases.

    Each method of the decorated class that lacks a ``__doc__`` inherits
    the docstring of the same-named method on the first base that has one.
    """
    @functools.wraps(cls)
    def _inherit_docstrings(cls):
        if not isinstance(cls, (type, colorise.compat.ClassType)):
            raise RuntimeError("Type is not a class")
        for name, value in colorise.compat.iteritems(vars(cls)):
            if isinstance(getattr(cls, name), types.MethodType):
                if not getattr(value, '__doc__', None):
                    for base in cls.__bases__:
                        basemethod = getattr(base, name, None)
                        # NOTE(review): checks base.__doc__, not
                        # basemethod.__doc__ — looks suspicious; confirm.
                        if basemethod and getattr(base, '__doc__', None):
                            value.__doc__ = basemethod.__doc__
        return cls
    return _inherit_docstrings(cls)
Class decorator for inheriting docstrings. Automatically inherits base class doc-strings if not present in the derived class.
def upload(config):
    """Upload the built documentation site to LSST the Docs.

    Parameters
    ----------
    config : `lander.config.Configuration`
        Site configuration, including upload information and credentials.
    """
    token = get_keeper_token(config['keeper_url'],
                             config['keeper_user'],
                             config['keeper_password'])
    build_resource = register_build(config, token)
    ltdconveyor.upload_dir(
        build_resource['bucket_name'],
        build_resource['bucket_root_dir'],
        config['build_dir'],
        aws_access_key_id=config['aws_id'],
        aws_secret_access_key=config['aws_secret'],
        surrogate_key=build_resource['surrogate_key'],
        cache_control='max-age=31536000',
        surrogate_control=None,
        upload_dir_redirect_objects=True)
    confirm_build(config, token, build_resource)
Upload the build documentation site to LSST the Docs. Parameters ---------- config : `lander.config.Configuration` Site configuration, which includes upload information and credentials.
def get_keeper_token(base_url, username, password):
    """Get a temporary auth token from LTD Keeper.

    Raises RuntimeError when authentication fails (non-200 response).
    """
    token_endpoint = base_url + '/token'
    response = requests.get(token_endpoint, auth=(username, password))
    if response.status_code != 200:
        raise RuntimeError(
            'Could not authenticate to {0}: error {1:d}\n{2}'.format(
                base_url, response.status_code, response.json()))
    return response.json()['token']
Get a temporary auth token from LTD Keeper.
def get_product(config):
    """Get the ``/products/<product>`` resource from LTD Keeper.

    Raises RuntimeError on any non-200 response, with the JSON body as
    the error payload.
    """
    product_url = config['keeper_url'] + '/products/{p}'.format(
        p=config['ltd_product'])
    response = requests.get(product_url)
    if response.status_code != 200:
        raise RuntimeError(response.json())
    return response.json()
Get the /products/<product> resource from LTD Keeper.
def install(
        board_id='atmega88',
        mcu='atmega88',
        f_cpu=20000000,
        upload='usbasp',
        core='arduino',
        replace_existing=True,
):
    """Install an atmega88 board definition."""
    board = AutoBunch()
    board.name = TEMPL.format(mcu=mcu, f_cpu=f_cpu, upload=upload)
    board.upload.using = upload
    board.upload.maximum_size = 8 * 1024
    board.build.mcu = mcu
    board.build.f_cpu = str(f_cpu) + 'L'
    board.build.core = core
    # for 1.0
    board.build.variant = 'standard'
    install_board(board_id, board, replace_existing=replace_existing)
install atmega88 board.
def _compute_bgid(self, bg=None):
    """Return a unique identifier for the background data.

    Handles a single QPImage, a list of QPImage, or a SeriesData whose
    length is 1 or matches ``len(self)``.
    """
    if bg is None:
        bg = self._bgdata
    if isinstance(bg, qpimage.QPImage):
        # Single QPImage: use its stored identifier when present,
        # otherwise hash amplitude, phase, and sorted metadata.
        if "identifier" in bg:
            return bg["identifier"]
        data = [bg.amp, bg.pha]
        for key in sorted(list(bg.meta.keys())):
            data.append("{}={}".format(key, bg.meta[key]))
        return hash_obj(data)
    elif (isinstance(bg, list)
            and isinstance(bg[0], qpimage.QPImage)):
        # List of QPImage: hash the per-image identifiers.
        data = [self._compute_bgid(bgii) for bgii in bg]
        return hash_obj(data)
    elif (isinstance(bg, SeriesData)
            and (len(bg) == 1 or len(bg) == len(self))):
        # DataSet: delegate to its own identifier.
        return bg.identifier
    else:
        raise ValueError("Unknown background data type: {}".format(bg))
Return a unique identifier for the background data
def identifier(self):
    """Return a unique identifier for the given data set.

    Folds in the background identifier when background data is set.
    """
    if self.background_identifier is None:
        return self._identifier_data()
    return hash_obj([self._identifier_data(),
                     self.background_identifier])
Return a unique identifier for the given data set
def get_time(self, idx):
    """Return the time of the data at index `idx`.

    Returns nan when the raw image metadata defines no time.
    """
    meta = self.get_qpimage_raw(idx).meta
    return meta["time"] if "time" in meta else np.nan
Return time of data at index `idx` Returns nan if the time is not defined
def get_qpimage(self, idx):
    """Return the background-corrected QPImage of the data at index `idx`.

    Raises KeyError when the subclass's `get_qpimage_raw` forgot to set
    an 'identifier' on the raw image.
    """
    # raw data
    qpi = self.get_qpimage_raw(idx)
    if "identifier" not in qpi:
        msg = "`get_qpimage_raw` does not set 'identifier' " \
              + "in class '{}'!".format(self.__class__)
        raise KeyError(msg)
    # bg data
    if self._bgdata:
        # One background for all, or one per image.
        bgidx = 0 if len(self._bgdata) == 1 else idx
        if isinstance(self._bgdata, SeriesData):
            # `get_qpimage` does take `idx`
            bg = self._bgdata.get_qpimage_raw(bgidx)
        else:
            # `self._bgdata` is a QPImage
            bg = self._bgdata[bgidx]
        qpi.set_bg_data(bg_data=bg)
    return qpi
Return background-corrected QPImage of data at index `idx`
def set_bg(self, dataset):
    """Set background data.

    Parameters
    ----------
    dataset: `DataSet`, `qpimage.QPImage`, or list thereof
        When ``len(dataset)`` matches ``len(self)``, correction is
        element-wise; otherwise ``len(dataset)`` must be one and that
        background is used for all data.
    """
    if isinstance(dataset, qpimage.QPImage):
        # Single QPImage: normalize to a one-element list.
        self._bgdata = [dataset]
    elif (isinstance(dataset, list)
            and len(dataset) == len(self)
            and isinstance(dataset[0], qpimage.QPImage)):
        # List of QPImage
        self._bgdata = dataset
    elif (isinstance(dataset, SeriesData)
            and (len(dataset) == 1 or len(dataset) == len(self))):
        # DataSet
        self._bgdata = dataset
    else:
        raise ValueError("Bad length or type for bg: {}".format(dataset))
    self.background_identifier = self._compute_bgid()
Set background data Parameters ---------- dataset: `DataSet`, `qpimage.QPImage`, or int If the ``len(dataset)`` matches ``len(self)``, then background correction is performed element-wise. Otherwise, ``len(dataset)`` must be one and is used for all data of ``self``. See Also -------- get_qpimage: obtain the background corrected QPImage
def get_time(self, idx=0):
    """Return the time of the (single) data image; nan when undefined.

    NOTE(review): `idx` is accepted but 0 is always passed to the parent
    — presumably intentional for single-image data; confirm.
    """
    return super(SingleData, self).get_time(idx=0)
Time of the data Returns nan if the time is not defined
def do(self, fn, message=None, *args, **kwargs):
    """Queue a 'do' step executing `fn`; returns self for chaining.

    :param fn: A function to execute.
    :param message: Debug label shown when a later assertion fails.
    """
    self.items.put(ChainItem(fn, self.do, message, *args, **kwargs))
    return self
Add a 'do' action to the steps. This is a function to execute :param fn: A function :param message: Message indicating what this function does (used for debugging if assertions fail)
def expect(self, value,
           message='Failed: "{actual} {operator} {expected}" after step '
                   '"{step}"',
           operator='=='):
    """Queue an assertion comparing the last 'do' result to `value`.

    :param value: The expected value.
    :param message: Error message template; may use {actual}, {expected},
        {step}, and {operator} placeholders.
    :param operator: Comparison operator; must be in self.valid_operators.
    :raises ValueError: If `operator` is not a valid operator.
    """
    if operator not in self.valid_operators:
        # BUG FIX: the original message was truncated
        # ('Illegal operator specified for '); name the offender.
        raise ValueError(
            'Illegal operator specified for expect: {0!r}'.format(operator))
    self.items.put(ChainItem(value, self.expect, message, operator=operator))
    return self
Add an 'assertion' action to the steps. This will evaluate the return value of the last 'do' step and compare it to the value passed here using the specified operator. Checks that the first function will return 2 >>> AssertionChain().do(lambda: 1 + 1, 'add 1 + 1').expect(2) This will check that your function does not return None >>> AssertionChain().do(lambda: myfunction(), 'call my function').expect(None, operator='is not') :param value: The expected value :param message: The error message to raise if the assertion fails. You can access the variables: {actual} -- The actual value {expected} -- The expected value {step} -- The step just performed, which did not meet the expectation {operator} -- The operator used to make the comparison
def perform(self):
    """Run all queued steps in sequence.

    'do' steps are executed and their result remembered; 'expect' steps
    compare the remembered result against the expectation and raise an
    AssertionError with a formatted message on mismatch.

    :return: The value from the last 'do' step performed.
    """
    last_value = None
    last_step = None
    while self.items.qsize():
        item = self.items.get()
        if item.flag == self.do:
            last_value = item.item(*item.args, **item.kwargs)
            last_step = item.message
        elif item.flag == self.expect:
            message = item.message
            local = {'value': last_value, 'expectation': item.item}
            expression = 'value {operator} expectation'.format(
                operator=item.operator)
            # NOTE: the operator was validated against valid_operators in
            # `expect`, which constrains what this eval can run.
            result = eval(expression, local)
            # Format the error message.
            format_vars = {
                'actual': last_value,
                'expected': item.item,
                'step': last_step,
                'operator': item.operator,
            }
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # items() behaves the same on both Python 2 and 3 here.
            for var, val in format_vars.items():
                message = message.replace('{' + str(var) + '}', str(val))
            assert result, message
    return last_value
Runs through all of the steps in the chain and runs each of them in sequence. :return: The value from the last "do" step performed
def get_volumes(self):
    """Return a list of all Volumes in this Storage Pool."""
    return [self.find_volume(name) for name in self.virsp.listVolumes()]
Return a list of all Volumes in this Storage Pool
def create_backed_vol(self, name, backer, _format='qcow2'):
    """Create a new volume backed by the existing volume `backer`.

    Libvirt offers no direct call for this, so the volume XML is built
    by hand and passed to `createXML`.

    :param name: Base name for the new volume (extension is appended).
    :param backer: Existing Volume used as the backing store.
    :param _format: Image format of the new volume (default 'qcow2').
    :returns: The newly created Volume.
    """
    vol_xml = ElementTree.Element('volume')
    vol_name = ElementTree.SubElement(vol_xml, 'name')
    name = '{0}.{1}'.format(name, _format)
    vol_name.text = name

    target = ElementTree.SubElement(vol_xml, 'target')
    target_format = ElementTree.SubElement(target, 'format')
    target_format.set('type', _format)

    vol_cap = ElementTree.SubElement(vol_xml, 'capacity')
    vol_cap.set('unit', 'bytes')
    # @TODO(rdelinger) this should be dynamic
    vol_cap.text = backer.capacity

    backing_store = ElementTree.SubElement(vol_xml, 'backingStore')
    bs_path = ElementTree.SubElement(backing_store, 'path')
    bs_path.text = backer.path
    bs_format = ElementTree.SubElement(backing_store, 'format')
    bs_format.set('type', backer.format)

    XMLString = ElementTree.tostring(vol_xml)
    self.virsp.createXML(XMLString, 0)
    return self.find_volume(name)
TODO(rdelinger) think about changing _format This is a pretty specialized function. It takes an existing volume, and creates a new volume that is backed by the existing volume Sadly there is no easy way to do this in libvirt, the best way I've found is to just create some xml and use the createXML function
def find_volume(self, name):
    """Find a storage volume by its name.

    :param name: The name of the volume.
    :type name: str
    :returns: The Volume, or None when it does not exist.
    """
    try:
        return Volume(self.virsp.storageVolLookupByName(name), self)
    except libvirtError:
        return None
Find a storage volume by its name :param name: The name of the volume :type name: str
def install(replace_existing=False):
    """Install the dapa programmer definition."""
    bunch = AutoBunch()
    bunch.name = 'DAPA'
    bunch.protocol = 'dapa'
    bunch.force = 'true'
    # bunch.delay=200
    install_programmer('dapa', bunch, replace_existing=replace_existing)
install dapa programmer.
def get_terminal_converted(self, attr):
    """Return the value of `attr` converted to its representation value.

    :param attr: Attribute to retrieve
        (:class:`everest.representers.attributes.MappedAttribute`).
    :returns: Representation string.
    """
    raw_value = self.data.get(attr.repr_name)
    return self.converter_registry.convert_to_representation(
        raw_value, attr.value_type)
Returns the value of the specified attribute converted to a representation value. :param attr: Attribute to retrieve. :type attr: :class:`everest.representers.attributes.MappedAttribute` :returns: Representation string.
def set_terminal_converted(self, attr, repr_value):
    """Convert `repr_value` and store it as the value of `attr`.

    :param attr: Attribute to set.
    :param str repr_value: String value of the attribute to set.
    """
    converted = self.converter_registry.convert_from_representation(
        repr_value, attr.value_type)
    self.data[attr.repr_name] = converted
Converts the given representation value and sets the specified attribute value to the converted value. :param attr: Attribute to set. :param str repr_value: String value of the attribute to set.
def load(self, filename, offset):
    """Will eventually load information for an Apple_Boot volume.

    Not yet implemented; currently only records the offset.
    """
    try:
        self.offset = offset
        # self.fd = open(filename, 'rb')
        # self.fd.close()
    except IOError:
        self.logger.error('Unable to load EfiSystem volume')
Will eventually load information for Apple_Boot volume. \ Not yet implemented
def send(self, filenames=None):
    """Send each file to the remote host and archive the sent file locally.

    :raises TransactionFileSenderError: On SSH or SFTP failures.
    """
    try:
        with self.ssh_client.connect() as ssh_conn:
            with self.sftp_client.connect(ssh_conn) as sftp_conn:
                for filename in filenames:
                    sftp_conn.copy(filename=filename)
                    self.archive(filename=filename)
                    if self.update_history_model:
                        self.update_history(filename=filename)
    except SSHClientError as e:
        raise TransactionFileSenderError(e) from e
    except SFTPClientError as e:
        raise TransactionFileSenderError(e) from e
    return filenames
Sends the file to the remote host and archives the sent file locally.
def compile(self, name, folder=None, data=None):
    """Render ``folder/name`` with Jinja using `data`; cache the result
    in ``self.templates``."""
    template_name = name.replace(os.sep, "")
    if folder is None:
        folder = ""
    full_name = os.path.join(folder.strip(os.sep), template_name)
    if data is None:
        data = {}
    try:
        self.templates[template_name] = \
            self.jinja.get_template(full_name).render(data)
    except TemplateNotFound as template_error:
        # Missing templates are ignored in production but surfaced
        # when the app runs in DEBUG mode.
        if current_app.config['DEBUG']:
            raise template_error
renders template_name + self.extension file with data using jinja
def create_token(self, data, token_valid_for=180) -> str:
    """Create an encrypted JWT embedding `data`, valid for
    `token_valid_for` seconds."""
    payload = {
        'data': data,
        'exp': datetime.utcnow() + timedelta(seconds=token_valid_for),
    }
    return Security.encrypt(jwt.encode(payload, self.app_secret))
Create encrypted JWT
def verify_token(self, token) -> bool:
    """Verify an encrypted JWT; on success store its payload in self.data.

    :returns: True when the token decrypts and decodes; False otherwise
        (the error is appended to self.errors).
    """
    # BUG FIX: removed an unreachable trailing `return False` — both the
    # try branch and the except branch already return.
    try:
        self.data = jwt.decode(Security.decrypt(token), self.app_secret)
        return True
    except (Exception, BaseException) as error:
        self.errors.append(error)
        return False
Verify encrypted JWT
def verify_http_auth_token(self) -> bool:
    """Validate the JWT carried by the current HTTP request.

    On success, unwraps self.data to its 'data' payload.
    """
    token = self.get_http_token()
    if token is None:
        return False
    if not self.verify_token(token):
        return False
    if self.data is None:
        return False
    self.data = self.data['data']
    return True
Use request information to validate JWT
def create_token_with_refresh_token(self, data, token_valid_for=180,
                                    refresh_token_valid_for=86400):
    """Create an encrypted JWT that carries an embedded refresh token.

    The refresh token is itself a JWT whose only claim is its expiry.
    """
    refresh_token = jwt.encode(
        {'exp': datetime.utcnow()
                + timedelta(seconds=refresh_token_valid_for)},
        self.app_secret).decode("utf-8")
    jwt_token = jwt.encode(
        {'data': data,
         'refresh_token': refresh_token,
         'exp': datetime.utcnow() + timedelta(seconds=token_valid_for)},
        self.app_secret)
    return Security.encrypt(jwt_token)
Create an encrypted JWT with a refresh_token
def verify_refresh_token(self, expired_token) -> bool:
    """Validate the refresh token embedded in an (expired) JWT.

    The outer token is decoded with expiry checking disabled; the inner
    refresh token must still be valid. On success the decoded outer
    token is stored in self.data.
    """
    try:
        decoded_token = jwt.decode(
            Security.decrypt(expired_token),
            self.app_secret,
            options={'verify_exp': False})
        if 'refresh_token' in decoded_token and \
                decoded_token['refresh_token'] is not None:
            try:
                jwt.decode(decoded_token['refresh_token'], self.app_secret)
                self.data = decoded_token
                return True
            except (Exception, BaseException) as error:
                self.errors.append(error)
                return False
    except (Exception, BaseException) as error:
        self.errors.append(error)
        return False
    return False
Use request information to validate refresh JWT
def verify_http_auth_refresh_token(self) -> bool:
    """Validate the refresh token carried by the current HTTP request.

    On success, unwraps self.data to its 'data' payload.
    """
    token = self.get_http_token()
    if token is None:
        return False
    if not self.verify_refresh_token(token):
        return False
    if self.data is None:
        return False
    self.data = self.data['data']
    return True
Use expired token to check refresh token information
def autocomplete(input_list):
    """Return all valid completions for the tokenized command line input.

    Only the module-taking commands (modulehelp/enable/disable) complete
    to module names; everything else yields no completions.
    """
    command = input_list[0]
    if command not in ('modulehelp', 'enable', 'disable'):
        return []
    return [command + ' ' + modulename
            for modulename in seash_modules.module_data.keys()]
<Purpose> Returns all valid input completions for the specified command line input. <Arguments> input_list: A list of tokens. <Side Effects> None <Exceptions> None <Returns> A list of strings representing valid completions.
def status(self, status_code=None):
    """Set (when given) and get the response status.

    Always returns the status as a string for response support.
    """
    if status_code is not None:
        self.response_model.status = status_code
    return str(self.response_model.status)
Set status or Get Status
def message(self, message=None):
    """Set (when given) and get the response message."""
    if message is not None:
        self.response_model.message = message
    return self.response_model.message
Set response message
def data(self, data=None):
    """Set (when given) and get the response data."""
    if data is not None:
        self.response_model.data = data
    return self.response_model.data
Set response data
def quick_response(self, status_code):
    """Quickly populate status and translated message for a known code.

    Supports 200, 400, 401, and 404; other codes are ignored.
    """
    translator = Translator(environ=self.environ)
    if status_code in (200, 400, 401, 404):
        self.status(status_code)
        self.message(translator.trans(
            'http_messages.{0:d}'.format(status_code)))
Quickly construct response using a status code
def memoize(func):
    """Cache the result of a zero-argument callable forever."""
    cache = {}

    def memoizer():
        # First call populates the single cache slot; later calls reuse it.
        if 0 not in cache:
            cache[0] = func()
        return cache[0]

    return functools.wraps(func)(memoizer)
Cache forever.
def getpreferredencoding():
    """Return the preferred encoding for text I/O."""
    encoding = locale.getpreferredencoding(False)
    # Upgrade ancient MacOS encodings in Python < 2.7
    if sys.platform == 'darwin' and encoding.startswith('mac-'):
        encoding = 'utf-8'
    return encoding
Return preferred encoding for text I/O.
def getinputencoding(stream=None):
    """Return the preferred encoding for reading from ``stream``.

    ``stream`` defaults to sys.stdin; falls back to the locale's
    preferred encoding when the stream reports none.
    """
    if stream is None:
        stream = sys.stdin
    return stream.encoding or getpreferredencoding()
Return preferred encoding for reading from ``stream``. ``stream`` defaults to sys.stdin.
def getoutputencoding(stream=None):
    """Return the preferred encoding for writing to ``stream``.

    ``stream`` defaults to sys.stdout; falls back to the locale's
    preferred encoding when the stream reports none.
    """
    if stream is None:
        stream = sys.stdout
    return stream.encoding or getpreferredencoding()
Return preferred encoding for writing to ``stream``. ``stream`` defaults to sys.stdout.
def decode(string, encoding=None, errors=None):
    """Decode `string` from the given encoding.

    ``encoding`` defaults to the preferred encoding; ``errors`` defaults
    to the preferred error handler.
    """
    if encoding is None:
        encoding = getpreferredencoding()
    if errors is None:
        errors = getpreferrederrors()
    return string.decode(encoding, errors)
Decode from specified encoding. ``encoding`` defaults to the preferred encoding. ``errors`` defaults to the preferred error handler.
def encode(string, encoding=None, errors=None):
    """Encode `string` to the given encoding.

    ``encoding`` defaults to the preferred encoding; ``errors`` defaults
    to the preferred error handler.
    """
    if encoding is None:
        encoding = getpreferredencoding()
    if errors is None:
        errors = getpreferrederrors()
    return string.encode(encoding, errors)
Encode to specified encoding. ``encoding`` defaults to the preferred encoding. ``errors`` defaults to the preferred error handler.
def _get_response_mime_type(self):
    """Return the response MIME type for this view.

    :raises: :class:`pyramid.httpexceptions.HTTPNotAcceptable` if none of
        the MIME content type(s) the client specified can be handled; the
        response body then lists the supported MIME strings.
    """
    view_name = self.request.view_name
    if view_name != '':
        mime_type = get_registered_mime_type_for_name(view_name)
    else:
        mime_type = None
        acc = None
        for acc in self.request.accept:
            if acc == '*/*':
                # The client does not care; use the default.
                mime_type = self.__get_default_response_mime_type()
                break
            try:
                mime_type = \
                    get_registered_mime_type_for_string(acc.lower())
            except KeyError:
                pass
            else:
                break
        if mime_type is None:
            if not acc is None:
                # The client specified a MIME type we can not handle; this
                # is a 406 exception. We supply allowed MIME content
                # types in the body of the response.
                headers = \
                    [('Location', self.request.path_url),
                     ('Content-Type', TextPlainMime.mime_type_string),
                     ]
                mime_strings = get_registered_mime_strings()
                exc = HTTPNotAcceptable('Requested MIME content type(s) '
                                        'not acceptable.',
                                        body=','.join(mime_strings),
                                        headers=headers)
                raise exc
            mime_type = self.__get_default_response_mime_type()
    return mime_type
Returns the response MIME type for this view. :raises: :class:`pyramid.httpexceptions.HTTPNotAcceptable` if the MIME content type(s) the client specified can not be handled by the view.
def _get_response_body_mime_type(self):
    """Return the response body MIME type.

    This may differ from the overall response MIME type, e.g. ATOM
    responses carry an XML body.
    """
    mime_type = self._get_response_mime_type()
    if mime_type is AtomMime:
        # FIXME: This cements using XML as the representation to use in
        # ATOM bodies (which is perhaps not too worrisome).
        mime_type = XmlMime
    return mime_type
Returns the response body MIME type. This might differ from the overall response mime type e.g. in ATOM responses where the body MIME type is XML.
def _get_result(self, resource):
    """Convert `resource` to a result to be returned from the view.

    :returns: The updated :class:`pyramid.response.Response` when this
        view converts responses itself, otherwise a ``{'context': resource}``
        dict for a custom renderer.
    """
    if self._convert_response:
        self._update_response_body(resource)
        return self.request.response
    return dict(context=resource)
Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`everest.interfaces.IResource`. :returns: :class:`pyramid.response.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer).
def _update_response_body(self, resource):
    """Create a representer and set the response content type and body
    to the byte representation of `resource`."""
    rpr = self._get_response_representer(resource)
    # Set content type and body of the response.
    self.request.response.content_type = \
        rpr.content_type.mime_type_string
    self.request.response.body = rpr.to_bytes(resource)
Creates a representer and updates the response body with the byte representation created for the given resource.
def _update_response_location_header(self, resource):
    """Add (or replace) a Location response header pointing to the URL
    of `resource`."""
    location = resource_to_url(resource, request=self.request)
    loc_hdr = ('Location', location)
    headerlist = self.request.response.headerlist
    hdr_names = [hdr[0].upper() for hdr in headerlist]
    try:
        idx = hdr_names.index('LOCATION')
    except ValueError:
        headerlist.append(loc_hdr)
    else:
        # Replace existing location header.
        # FIXME: It is not clear under which conditions this happens, so
        # we do not have a test for it yet.
        headerlist[idx] = loc_hdr
Adds a new or replaces an existing Location header to the response headers pointing to the URL of the given resource.
def _get_request_representer(self):
    """Return a representer for the request's content type.

    :raises HTTPUnsupportedMediaType: If the content type is not
        supported (415).
    """
    try:
        mime_type = get_registered_mime_type_for_string(
            self.request.content_type)
    except KeyError:
        # The client sent a content type we do not support (415).
        raise HTTPUnsupportedMediaType()
    return as_representer(self.context, mime_type)
Returns a representer for the content type specified in the request. :raises HTTPUnsupportedMediaType: If the specified content type is not supported.
def _extract_request_data(self):
    """Extract and return the data from the request body.

    Uses a representer for the request's content type; the result
    implements
    :class:`everest.representers.interfaces.IResourceDataElement`.

    :raises HTTPError: On request data extraction problems.
    """
    rpr = self._get_request_representer()
    return rpr.data_from_bytes(self.request.body)
Extracts the data from the representation submitted in the request body and returns it. This default implementation uses a representer for the content type specified by the request to perform the extraction and returns an object implementing the :class:`everest.representers.interfaces.IResourceDataElement` interface. :raises HTTPError: To indicate problems with the request data extraction in terms of HTTP codes.
def _handle_conflict(self, name):
    """Respond with 409 "Conflict" for a duplicate member `name`."""
    err = HTTPConflict('Member "%s" already exists!' % name).exception
    return self.request.get_response(err)
Handles requests that triggered a conflict. Respond with a 409 "Conflict"
def check(self):
    """Check whether processing may continue despite a user message.

    Returns True when the request carries an 'ignore-message' GUID that
    resolves to a stored message with text identical to this one.
    """
    request = get_current_request()
    ignore_guid = request.params.get('ignore-message')
    coll = request.root['_messages']
    vote = False
    if ignore_guid:
        ignore_mb = coll.get(ignore_guid)
        if ignore_mb is not None and ignore_mb.text == self.message.text:
            vote = True
    return vote
Implements user message checking for views. Checks if the current request has an explicit "ignore-message" parameter (a GUID) pointing to a message with identical text from a previous request, in which case further processing is allowed.
def create_307_response(self):
    """Create a 307 "Temporary Redirect" response carrying the user
    message in a code-299 HTTP Warning header."""
    request = get_current_request()
    msg_mb = UserMessageMember(self.message)
    coll = request.root['_messages']
    coll.add(msg_mb)
    # Figure out the new location URL.
    qs = self.__get_new_query_string(request.query_string,
                                     self.message.slug)
    resubmit_url = "%s?%s" % (request.path_url, qs)
    headers = [('Warning', '299 %s' % self.message.text),
               #               ('Content-Type', cnt_type),
               ]
    http_exc = HttpWarningResubmit(location=resubmit_url,
                                   detail=self.message.text,
                                   headers=headers)
    return request.get_response(http_exc)
Creates a 307 "Temporary Redirect" response including a HTTP Warning header with code 299 that contains the user message received during processing the request.
def mkdir(*args):
    """Create a directory specified by a sequence of subdirectories.

    Each intermediate directory is created when missing; the full joined
    path is returned.

    >>> mkdir("/tmp", "foo", "bar", "baz")
    '/tmp/foo/bar/baz'
    """
    path = ''
    for segment in args:
        path = os.path.join(path, segment)
        if not os.path.isdir(path):
            os.mkdir(path)
    return path
Create a directory specified by a sequence of subdirectories >>> mkdir("/tmp", "foo", "bar", "baz") '/tmp/foo/bar/baz' >>> os.path.isdir('/tmp/foo/bar/baz') True
def shell(cmd, *args, **kwargs):
    # type: (str, *str, **Any) -> Tuple[int, str]
    """Execute a shell command and return its exit status and output.

    Args:
        cmd (str): the command itself, i.e. part until the first space
        *args: positional arguments, i.e. other space-separated parts
        rel_path (str): prefix `cmd` with this path unless absolute
        raise_on_status (bool): raise when the command exits non-zero or
            cannot be found (default: True)
        stderr (file-like): object to collect stderr output, None default

    Returns:
        Tuple[int, str]: status, shell output
    """
    if kwargs.get('rel_path') and not cmd.startswith("/"):
        cmd = os.path.join(kwargs['rel_path'], cmd)
    status = 0
    try:
        output = subprocess.check_output(
            (cmd,) + args, stderr=kwargs.get('stderr'))
    except subprocess.CalledProcessError as e:
        if kwargs.get('raise_on_status', True):
            raise
        output = e.output
        status = e.returncode
    except OSError as e:  # command not found
        if kwargs.get('raise_on_status', True):
            raise
        if 'stderr' in kwargs:
            # BUG FIX: OSError has no `.message` attribute on Python 3;
            # str(e) is portable.
            kwargs['stderr'].write(str(e))
        return -1, ""
    # check_output returns bytes on Python 3 (and str-bytes on Python 2);
    # decode only when it is not already text, removing the `six` check.
    if not isinstance(output, str):
        output = output.decode('utf8')
    return status, output
Execute shell command and return output Args: cmd (str): the command itself, i.e. part until the first space *args: positional arguments, i.e. other space-separated parts rel_path (bool): execute relative to the path (default: `False`) raise_on_status(bool): bool, raise exception if command exited with non-zero status (default: `True`) stderr (file-like): file-like object to collect stderr output, None by default Returns: Tuple[int, str]: status, shell output
def raw_filesize(path):
    # type: (str) -> Optional[int]
    """Get the size of a file/directory in bytes via ``du -bs``.

    Returns None when the path does not exist or cannot be accessed.
    """
    with open('/dev/null', 'w') as devnull:
        status, output = shell("du", "-bs", path,
                               raise_on_status=False, stderr=devnull)
    if status != 0:
        return None
    # output is: <size>\t<path>\n
    size_field = output.split("\t", 1)[0]
    return int(size_field)
Get size of a file/directory in bytes. Will return None if path does not exist or cannot be accessed.
def listen_for_events():
    """Pubsub event listener.

    Subscribes to the "eventlib" channel and dispatches each named
    payload to `process_external`.
    """
    import_event_modules()
    conn = redis_connection.get_connection()
    pubsub = conn.pubsub()
    pubsub.subscribe("eventlib")
    for message in pubsub.listen():
        if message['type'] != 'message':
            continue
        data = loads(message["data"])
        if 'name' in data:
            event_name = data.pop('name')
            process_external(event_name, data)
Pubsub event listener Listen for events in the pubsub bus and calls the process function when somebody comes to play.
def sendrequest(self, request):
    """Receive a request xml as a string and POST it to the health
    service url specified in the settings.py.

    :returns: The parsed XML response element.
    """
    # BUG FIX: the original source contained a garbled duplicate of this
    # entire function body; reconstructed once. The Python-2-only
    # `except socket.error, v` syntax is replaced with the `as` form,
    # and `v[0]` with `.errno`, both of which also work on Python 2.6+.
    url = urlparse.urlparse(self.connection.healthserviceurl)
    if url.scheme == 'https':
        conn = httplib.HTTPSConnection(url.netloc)
    else:
        conn = httplib.HTTPConnection(url.netloc)
    conn.putrequest('POST', url.path)
    conn.putheader('Content-Type', 'text/xml')
    conn.putheader('Content-Length', '%d' % len(request))
    conn.endheaders()
    try:
        conn.send(request)
    except socket.error as v:
        if v.errno == 32:  # Broken pipe
            conn.close()
        raise
    response = conn.getresponse().read()
    return etree.fromstring(response)
Receives a request xml as a string and posts it to the health service url specified in the settings.py
def commit(self, unit_of_work):
    """Commit the unit of work, then dump every entity class that the
    session modified back into the repository."""
    MemoryRepository.commit(self, unit_of_work)
    if self.is_initialized:
        entity_classes_to_dump = {type(state.entity)
                                  for state in unit_of_work.iterator()}
        for entity_cls in entity_classes_to_dump:
            self.__dump_entities(entity_cls)
Dump all resources that were modified by the given session back into the repository.
def _validate_pdf_file(self): if self['pdf_path'] is None: self._logger.error('--pdf argument must be set') sys.exit(1) if not os.path.exists(self['pdf_path']): self._logger.error('Cannot find PDF ' + self['pdf_path']) sys.exit(1)
Validate that the pdf_path configuration is set and the referenced file exists. Exits the program with status 1 if validation fails.
def _get_docushare_url(handle, validate=True):
    """Get a DocuShare URL given a document's handle.

    Parameters
    ----------
    handle : `str`
        Handle name, such as ``'LDM-151'``.
    validate : `bool`, optional
        When `True` (default), verify over the network that the short
        link resolves to DocuShare via a HEAD request.

    Returns
    -------
    docushare_url : `str`
        Shortened DocuShare URL for the document.

    Raises
    ------
    lander.exceptions.DocuShareError
        Raised for any error related to validating the DocuShare URL.
    """
    logger = structlog.get_logger(__name__)
    logger.debug('Using Configuration._get_docushare_url')
    # Make a short link to the DocuShare version page since
    # a) It doesn't immediately trigger a PDF download,
    # b) It gives the user extra information about the document before
    #    downloading it.
    url = 'https://ls.st/{handle}*'.format(handle=handle.lower())
    if validate:
        # Test that the short link successfully resolves to DocuShare
        logger.debug('Validating {0}'.format(url))
        try:
            response = requests.head(url, allow_redirects=True, timeout=30)
        except requests.exceptions.RequestException as e:
            raise DocuShareError(str(e))
        error_message = 'URL {0} does not resolve to DocuShare'.format(url)
        if response.status_code != 200:
            logger.warning('HEAD {0} status: {1:d}'.format(
                url, response.status_code))
            raise DocuShareError(error_message)
        redirect_url_parts = urllib.parse.urlsplit(response.url)
        if redirect_url_parts.netloc != 'docushare.lsst.org':
            logger.warning('{0} resolved to {1}'.format(url, response.url))
            raise DocuShareError(error_message)
    return url
Get a docushare URL given document's handle. Parameters ---------- handle : `str` Handle name, such as ``'LDM-151'``. validate : `bool`, optional Set to `True` to request that the link resolves by performing a HEAD request over the network. `False` disables this testing. Default is `True`. Returns ------- docushare_url : `str` Shortened DocuShare URL for the document corresponding to the handle. Raises ------ lander.exceptions.DocuShareError Raised for any error related to validating the DocuShare URL.
def _init_defaults(self): defaults = { 'build_dir': None, 'build_datetime': datetime.datetime.now(dateutil.tz.tzutc()), 'pdf_path': None, 'extra_downloads': list(), 'environment': None, 'lsstdoc_tex_path': None, 'title': None, 'title_plain': "", 'authors': None, 'authors_json': list(), 'doc_handle': None, 'series': None, 'series_name': None, 'abstract': None, 'abstract_plain': "", 'ltd_product': None, 'docushare_url': None, 'github_slug': None, 'git_branch': 'master', # so we default to the main LTD edition 'git_commit': None, 'git_tag': None, 'travis_job_number': None, 'is_travis_pull_request': False, # If not on Travis, not a PR 'is_draft_branch': True, 'aws_id': None, 'aws_secret': None, 'keeper_url': 'https://keeper.lsst.codes', 'keeper_user': None, 'keeper_password': None, 'upload': False } return defaults
Create a `dict` of default configurations.
def create_permissions_from_tuples(model, codename_tpls):
    """Create custom permissions on model `model` from codename tuples.

    Existing permissions (same codename and content type) are left
    untouched; missing ones are created and then verified.
    """
    if codename_tpls:
        model_cls = django_apps.get_model(model)
        content_type = ContentType.objects.get_for_model(model_cls)
        for codename_tpl in codename_tpls:
            app_label, codename, name = get_from_codename_tuple(
                codename_tpl, model_cls._meta.app_label)
            try:
                Permission.objects.get(codename=codename,
                                       content_type=content_type)
            except ObjectDoesNotExist:
                Permission.objects.create(name=name,
                                          codename=codename,
                                          content_type=content_type)
            verify_codename_exists(f"{app_label}.{codename}")
Creates custom permissions on model "model".
def remove_historical_group_permissions(group=None, allowed_permissions=None):
    """Remove group permissions for historical models except those whose
    action prefix is allowed (default: keep only `view`)."""
    allowed_permissions = allowed_permissions or ["view"]
    for action in allowed_permissions:
        historical = group.permissions.filter(
            codename__contains="historical")
        for permission in historical.exclude(codename__startswith=action):
            group.permissions.remove(permission)
Removes group permissions for historical models except those whose prefix is in `allowed_historical_permissions`. Default removes all except `view`.
def traversal(root):
    """Tree traversal function that generates nodes.

    For each subtree, the deepest node is evaluated first. Then, the
    next-deepest nodes are evaluated until all the nodes in the subtree
    are generated. Yields ``(node, stack)`` pairs.

    BUG FIX: the original source contained a garbled duplicate of this
    function body; reconstructed once. The bare ``except:`` is narrowed
    to the exceptions this spot can actually raise.
    """
    stack = [root]
    while len(stack) > 0:
        node = stack.pop()
        if hasattr(node, 'children'):
            if node.children == set():
                # Fully expanded: detach from the parent (top of stack)
                # and emit.
                try:
                    stack[-1].children.remove(node)
                except (IndexError, KeyError, AttributeError):
                    # Root node has no parent left on the stack.
                    pass
                yield (node, stack)
            else:
                # Descend into the next unvisited child.
                childnode = node.children.pop()
                stack += [node, childnode]
        else:
            # First visit: record the node's AST children, then revisit.
            children = [x for x in ast.iter_child_nodes(node)]
            node.children = set(children)
            stack.append(node)
Tree traversal function that generates nodes. For each subtree, the deepest node is evaluated first. Then, the next-deepest nodes are evaluated until all the nodes in the subtree are generated.
def formatBodyNode(root, path):
    """Format the root node for use as the body node.

    BUG FIX: the original source contained a garbled duplicate of this
    function body; reconstructed once.
    """
    body = root
    body.name = "body"
    body.weight = calcFnWeight(body)
    body.path = path
    body.pclass = None
    return body
Format the root node for use as the body node.
def formatFunctionNode(node, path, stack):
    """Annotate a FunctionDef node with helpful attributes.

    ``node.name`` is already defined by the ast module.

    :param node: FunctionDef AST node, annotated in place
    :param path: source file path recorded on the node
    :param stack: traversal stack used to find the enclosing class
    :return: the same node
    """
    node.weight = calcFnWeight(node)
    node.path = path
    node.pclass = getCurrentClass(stack)  # enclosing class, if any
    return node
Add some helpful attributes to node.
def calcFnWeight(node):
    """Calculate the weight of a function definition by counting all of
    its descendant nodes in the AST.

    Note that the overall tree traversal becomes O(n^2) instead of O(n)
    if this feature is enabled, since each definition re-walks its whole
    subtree.
    """
    pending = [node]
    count = 0
    while pending:
        current = pending.pop()
        children = list(ast.iter_child_nodes(current))
        count += len(children)
        # extend() mutates in place; the old `stack = stack + children`
        # rebuilt the list on every iteration (quadratic).
        pending.extend(children)
    return count
Calculates the weight of a function definition by recursively counting its child nodes in the AST. Note that the tree traversal will become O(n^2) instead of O(n) if this feature is enabled.
def getSourceFnDef(stack, fdefs, path):
    """Find the recorded function definition matching the first
    FunctionDef on ``stack``; fall back to the synthetic 'body' node.

    VERY VERY SLOW: candidates are compared via ast.dump() string
    equality.

    :raises LookupError: when no matching definition or body node exists.
        (The previous bare ``raise`` outside an except block raised
        ``RuntimeError: No active exception to re-raise``.)
    """
    for frame in stack:
        if isinstance(frame, ast.FunctionDef):
            dumped = ast.dump(frame)  # hoisted; probably causing the slowness
            for candidate in fdefs[path]:
                if dumped == ast.dump(candidate):
                    return candidate
            raise LookupError('No matching function definition in %s' % path)
    for candidate in fdefs[path]:
        if candidate.name == 'body':
            return candidate
    raise LookupError('No body node recorded for %s' % path)
VERY VERY SLOW
def delete_database(mongo_uri, database_name):
    """Drop an entire mongo database using pymongo.

    A mongo daemon is assumed to be running.

    :param mongo_uri: a MongoDB URI string
    :param database_name: name of the database to drop
    """
    pymongo.MongoClient(mongo_uri).drop_database(database_name)
Delete a mongo database using pymongo. Mongo daemon assumed to be running. Inputs: - mongo_uri: A MongoDB URI. - database_name: The mongo database name as a python string.
def delete_collection(mongo_uri, database_name, collection_name):
    """Drop a single document collection from a mongo database.

    A mongo daemon is assumed to be running.

    :param mongo_uri: a MongoDB URI string
    :param database_name: name of the database holding the collection
    :param collection_name: name of the collection to drop
    """
    connection = pymongo.MongoClient(mongo_uri)
    connection[database_name].drop_collection(collection_name)
Delete a mongo document collection using pymongo. Mongo daemon assumed to be running. Inputs: - mongo_uri: A MongoDB URI. - database_name: The mongo database name as a python string. - collection_name: The mongo collection as a python string.
def parse_xml(self, xml):
    """Populate this VocabularyItem from its XML representation.

    :param xml: lxml.etree.Element representing a single VocabularyItem
    """
    xmlutils = XmlUtils(xml)
    self.code_value = xmlutils.get_string_by_xpath('code-value')
    self.display_text = xmlutils.get_string_by_xpath('display-text')
    self.abbreviation_text = xmlutils.get_string_by_xpath('abbreviation-text')
    info_xml = xml.xpath('info-xml')
    if info_xml:  # optional element; keep the first match when present
        self.info_xml = info_xml[0]
:param xml: lxml.etree.Element representing a single VocabularyItem
async def main():
    """The main part of the example script: fetch readings from a
    Volkszaehler instance and print them."""
    async with aiohttp.ClientSession() as session:
        zaehler = Volkszaehler(loop, session, UUID, host=HOST)

        # Retrieve the data before reading any attribute.
        await zaehler.get_data()

        for label, reading in (
                ("Average", zaehler.average),
                ("Max", zaehler.max),
                ("Min", zaehler.min),
                ("Consumption", zaehler.consumption),
                ("Data tuples", zaehler.tuples)):
            print("{}:".format(label), reading)
The main part of the example script.
def create_staging_collection(resource):
    """Helper to create a staging collection for the given registered
    resource.

    :param resource: registered resource
    :type resource: class implementing or instance providing or subclass
        of a registered resource interface.
    """
    aggregate = StagingAggregate(get_entity_class(resource))
    collection_class = get_collection_class(resource)
    return collection_class.create_from_aggregate(aggregate)
Helper function to create a staging collection for the given registered resource. :param resource: registered resource :type resource: class implementing or instance providing or subclass of a registered resource interface.
def parse(self):
    """Run the parser over the entire source string and return the
    results; on a parse failure, flag the error and print a pretty
    report instead."""
    try:
        return self.parse_top_level()
    except PartpyError as err:
        self.error = True
        print(err.pretty_print())
Run the parser over the entire source string and return the results.
def parse_top_level(self):
    """Top level parser: repeatedly parse a single contact, skipping the
    whitespace between contacts, until the input is exhausted or no
    further contact is found. Returns a dict mapping names to emails."""
    output = {}
    while not self.eos:
        contact = self.parse_contact()  # match one contact expression
        if not contact:
            break  # no contact found, so end of file
        # This would be a nice place to handle other expressions.
        name, email = contact
        output[name] = email
        # Skip all whitespace up to the next non-whitespace character,
        # i.e. until something interesting.
        self.parse_whitespace()
    return output
The top level parser will do a loop where it looks for a single contact parse and then eats all whitespace until there is no more input left or another contact is found to be parsed and stores them.
def parse_contact(self):
    """Parse one top level contact expression: a name expression, a
    ':' or '-' delimiter, then an email expression.

    :return: tuple of the (name, email) strings found
    :raises PartpyError: when any of the three parts is missing
    """
    self.parse_whitespace()
    name = self.parse_name()  # the name expression string
    if not name:
        raise PartpyError(self, 'Expecting a name')

    self.parse_whitespace()
    # The name and email may be delimited by either ':' or '-'.
    if not self.match_any_char(':-'):
        raise PartpyError(self, 'Expecting : or -')
    self.eat_length(1)

    self.parse_whitespace()
    email = self.parse_email()  # the email expression string
    if not email:
        raise PartpyError(self, 'Expecting an email address')

    return (name, email)
Parse a top level contact expression, these consist of a name expression a special char and an email expression. The characters found in a name and email expression are returned.
def parse_name(self):
    """Match a title cased name using string patterns, looping so that
    surnames etc. are also captured; parts are joined by single spaces.

    :raises PartpyError: when no title cased part can be matched
    """
    parts = []
    while True:
        # Each part is one uppercase alpha followed by lowercase alphas.
        matched = self.match_string_pattern(spat.alphau, spat.alphal)
        if matched == '':
            break  # nothing further matches
        self.eat_string(matched)
        parts.append(matched)
        # Allow exactly one space between name parts.
        if self.get_char() == ' ':
            self.eat_length(1)
    if not parts:
        raise PartpyError(self, 'Expecting a title cased name')
    return ' '.join(parts)
This function uses string patterns to match a title cased name. This is done in a loop until there are no more names to match so as to be able to include surnames etc. in the output.
def parse_email(self):
    """Parse an email address in stages: the user name, the '@'
    delimiter, then the site. The three parts are concatenated and
    returned.

    :raises PartpyError: when any stage fails to match
    """
    # Match from the current char until a non lowercase alpha.
    user = self.match_string_pattern(spat.alphal)
    if not user:
        raise PartpyError(self, 'Expected a valid name')
    self.eat_string(user)

    at_sign = self.get_char()
    if at_sign != '@':
        raise PartpyError(self, 'Expecting @, found: ' + at_sign)
    self.eat_length(1)  # consume the '@' symbol

    # The site is lowercase alphas and '.' characters.
    site = self.match_string_pattern(spat.alphal + '.')
    if not site:
        raise PartpyError(self, 'Expecting a site, found: ' + site)
    self.eat_string(site)

    return user + at_sign + site
Email address parsing is done in several stages. First the name of the email use is determined. Then it looks for a '@' as a delimiter between the name and the site. Lastly the email site is matched. Each part's string is stored, combined and returned.
def require_valid_type(value, *classes):
    """Check that ``value`` is an instance of one of ``classes`` and
    raise a :py:class:`TypeError` if it is not. ``None`` always passes.

    :param value: The object.
    :type value: object
    :param classes: The accepted classes.
    :type classes: list(class)
    :raises TypeError: when ``value`` is not None and matches no class
    """
    # isinstance accepts a tuple of classes, replacing the manual loop;
    # the raised error now carries a diagnostic message.
    if value is not None and not isinstance(value, classes):
        raise TypeError(
            'Expected an instance of {0}, got {1}'.format(
                ' or '.join(cls.__name__ for cls in classes),
                type(value).__name__
            )
        )
Checks that the specified object reference is instance of classes and throws a :py:class:`TypeError` if it is not. :param value: The object. :type value: object :param classes: The classes. :type classes: list(class)
def get_development_container_name(self):
    """Returns the development container name
    (``repository:tag-dev``, where the tag includes the prefix when
    one is configured)."""
    if self.__prefix:
        tag = "{0}-{1}".format(self.__prefix, self.__branch)
    else:
        tag = self.__branch
    return "{0}:{1}-dev".format(self.__repository, tag)
Returns the development container name
def get_build_container_tag(self):
    """Return the build container tag (``prefix-branch-version`` when a
    prefix is configured, otherwise ``branch-version``)."""
    base = "{0}-{1}".format(self.__branch, self.__version)
    if not self.__prefix:
        return base
    return "{0}-{1}".format(self.__prefix, base)
Return the build container tag
def get_branch_container_tag(self):
    """Returns the branch container tag (``prefix-branch`` when a prefix
    is configured, otherwise just the branch)."""
    if not self.__prefix:
        return "{0}".format(self.__branch)
    return "{0}-{1}".format(self.__prefix, self.__branch)
Returns the branch container tag
def custom_server_error(request, template_name='500.html', admin_template_name='500A.html'):
    """500 error handler.

    Displays a full traceback for superusers and only the final line of
    the traceback for other staff members.

    Templates: ``500.html`` or ``500A.html`` (admin).
    Context: ``trace`` holds the traceback information for debugging.
    """
    trace = None
    user = request.user
    # NOTE(review): is_authenticated is invoked as a method here
    # (pre-Django-1.10 style) — keep in sync with the project's version.
    if user.is_authenticated() and (user.is_staff or user.is_superuser):
        try:
            import traceback, sys
            trace = traceback.format_exception(*(sys.exc_info()))
            if not user.is_superuser and trace:
                trace = trace[-1:]  # staff only see the final line
            trace = '\n'.join(trace)
        except:  # best-effort: never let trace rendering mask the 500
            pass
    # If the URL is part of the admin site, use the admin template.
    if request.path.startswith('/%s' % admin.site.name):
        template_name = admin_template_name
    # You need to create 500.html and 500A.html templates.
    rendered = loader.get_template(template_name).render(
        Context({'trace': trace}))
    return http.HttpResponseServerError(rendered)
500 error handler. Displays a full traceback for superusers and only the last line of the traceback for staff members. Templates: `500.html` or `500A.html` (admin) Context: trace Holds the traceback information for debugging.
def parse_n_jobs(s):
    """Parse a "math"-like string as a function of CPU count.

    Useful for specifying the number of jobs. For example, on an 8-core
    machine::

        assert parse_n_jobs('0.5 * n') == 4
        assert parse_n_jobs('2n') == 16
        assert parse_n_jobs('n') == 8
        assert parse_n_jobs('4') == 4

    :param str s: string to parse for number of CPUs
    :raises ValueError: when a string cannot be parsed
    :raises TypeError: when ``s`` is not a str, int, or float
    """
    cpu_total = cpu_count()

    if isinstance(s, int):
        jobs = s
    elif isinstance(s, float):
        jobs = int(s)
    elif isinstance(s, str):
        matched = re.match(r'(\d*(?:\.\d*)?)?(\s*\*?\s*n)?$', s.strip())
        if matched is None:
            raise ValueError('Unable to parse n_jobs="{}"'.format(s))
        factor = float(matched.group(1)) if matched.group(1) else 1
        # An explicit 'n' suffix, or a bare fraction below 1, is taken
        # as a multiplier of the CPU count.
        if matched.group(2) or factor < 1:
            jobs = factor * cpu_total
        else:
            jobs = int(factor)
    else:
        raise TypeError('n_jobs argument must be of type str, int, or float.')

    jobs = int(jobs)
    if jobs <= 0:
        warnings.warn('n_jobs={} is invalid. Setting n_jobs=1.'.format(jobs))
        jobs = 1
    return int(jobs)
This function parses a "math"-like string as a function of CPU count. It is useful for specifying the number of jobs. For example, on an 8-core machine:: assert parse_n_jobs('0.5 * n') == 4 assert parse_n_jobs('2n') == 16 assert parse_n_jobs('n') == 8 assert parse_n_jobs('4') == 4 :param str s: string to parse for number of CPUs
def getbool(self, key, **kwargs):
    """Gets the setting value as a :func:`bool` by cleverly recognizing
    true values.

    :rtype: bool
    """
    def _string_to_bool(s):
        if isinstance(s, str):
            if s.strip().lower() in ('true', 't', '1'):
                return True
            # BUG FIX: the value is lowercased before comparison, so the
            # old tuple entry 'None' (capital N) could never match; use
            # 'none' so the string "None" is recognized as falsy.
            elif s.strip().lower() in ('false', 'f', '0', 'none', 'null', ''):
                return False
            raise ValueError('Unable to get boolean value of "{}".'.format(s))
        #end if
        return bool(s)
    #end def

    return self.get(key, cast_func=_string_to_bool, **kwargs)
Gets the setting value as a :func:`bool` by cleverly recognizing true values. :rtype: bool
def getint(self, key, **kwargs):
    """Gets the setting value as an :obj:`int`.

    :param key: setting name to look up
    :rtype: int
    """
    return self.get(key, cast_func=int, **kwargs)
Gets the setting value as a :obj:`int`. :rtype: int
def getfloat(self, key, **kwargs):
    """Gets the setting value as a :obj:`float`.

    :param key: setting name to look up
    :rtype: float
    """
    return self.get(key, cast_func=float, **kwargs)
Gets the setting value as a :obj:`float`. :rtype: float
def getserialized(self, key, decoder_func=None, **kwargs):
    """Gets the setting value as a :obj:`dict` or :obj:`list`, trying
    :meth:`json.loads` followed by :meth:`yaml.load`.

    :param decoder_func: optional custom decoder applied to string values
    :rtype: dict, list
    :raises ValueError: when neither JSON nor YAML can parse the value
    """
    value = self.get(key, cast_func=None, **kwargs)
    if isinstance(value, (dict, list, tuple)) or value is None:
        return value  # already structured; nothing to parse

    if decoder_func:
        return decoder_func(value)

    try:
        return json.loads(value)
    except json.decoder.JSONDecodeError:
        pass

    try:
        # SECURITY NOTE: yaml.load without SafeLoader can construct
        # arbitrary Python objects; consider yaml.safe_load if setting
        # values can come from untrusted input.
        return yaml.load(value)
    except yaml.error.YAMLError:
        # YAMLError is the common base covering ParserError as well as
        # ScannerError; the previous except (yaml.parser.ParserError)
        # let scanner failures escape instead of raising ValueError.
        pass

    raise ValueError('Unable to parse {} setting using JSON or YAML.'.format(key))
Gets the setting value as a :obj:`dict` or :obj:`list` trying :meth:`json.loads`, followed by :meth:`yaml.load`. :rtype: dict, list
def geturi(self, key, **kwargs):
    """Gets the setting value as a :class:`urllib.parse.ParseResult`.

    :param key: setting name to look up
    :rtype: urllib.parse.ParseResult
    """
    return self.get(key, cast_func=urlparse, **kwargs)
Gets the setting value as a :class:`urllib.parse.ParseResult`. :rtype: urllib.parse.ParseResult
def getlist(self, key, delimiter=',', **kwargs):
    """Gets the setting value as a :class:`list` by splitting the string
    on ``delimiter``; bracketed strings are parsed as serialized lists.

    :param str delimiter: split the value using this delimiter
    :rtype: list
    """
    value = self.get(key, **kwargs)
    if value is None:
        return None
    if not isinstance(value, str):
        return list(value)

    value = value.strip()
    # Looks like JSON/YAML list syntax: hand off to the full parser.
    if value.startswith('[') and value.endswith(']'):
        return self.getserialized(key)
    return [part.strip(' ') for part in value.split(delimiter)]
Gets the setting value as a :class:`list`; it splits the string using ``delimiter``. :param str delimiter: split the value using this delimiter :rtype: list
def getnjobs(self, key, **kwargs):
    """Gets the setting value as an integer relative to the number of
    CPUs. See :func:`ycsettings.settings.parse_n_jobs` for parsing rules.

    :param key: setting name to look up
    :rtype: int
    """
    return self.get(key, cast_func=parse_n_jobs, **kwargs)
Gets the setting value as an integer relative to the number of CPU. See :func:`ycsettings.settings.parse_n_jobs` for parsing rules. :rtype: int
def _in_list(self, original_list, item):  # pylint: disable=no-self-use
    """Check whether an item is contained in a list, comparing by
    identity (``is``) rather than equality.

    :param original_list: The list.
    :type original_list: list(object)
    :param item: The item.
    :type item: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: True if the item is contained in the list, False otherwise.
    :rtype: bool
    """
    return any(candidate is item for candidate in original_list)
Check that an item as contained in a list. :param original_list: The list. :type original_list: list(object) :param item: The item. :type item: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the item contained in the list or False if not. :rtype: bool
def _sort_results(self, results):
    """Order the results: group them by parent (first-seen parent order
    preserved), then sort each group by the element's position within
    its parent's contents.

    :param results: the unordered results
    :type results: array.bs4.element.Tag
    :return: the ordered results
    :rtype: array.bs4.element.Tag
    """
    # Group by parent identity via a dict keyed on id(parent) — O(n)
    # instead of the previous linear identity scan per result (O(n^2)).
    # The parents stay alive through the results, so ids are stable.
    groups = {}
    parent_order = []
    for result in results:
        key = id(result.parent)
        if key not in groups:
            groups[key] = []
            parent_order.append(key)
        groups[key].append(result)

    ordered = []
    for key in parent_order:
        ordered += sorted(
            groups[key],
            key=lambda element: element.parent.contents.index(element)
        )
    return ordered
Order the results. :param results: The unordered results. :type results: array.bs4.element.Tag :return: The ordered results. :rtype: array.bs4.element.Tag
def _fix_data_select(self):
    """Replace the hyphen of 'data-' attribute prefixes with 'aaaaa'
    (so ``data-`` becomes ``dataaaaaa``), to avoid errors in search.
    """
    for element in self.document.select('*'):
        # Collect every data-* attribute with its rewritten name.
        renames = [
            {
                'original': attribute,
                'modified': attribute.replace('data-', 'dataaaaaa'),
                'value': element[attribute]
            }
            for attribute in element.attrs.keys()
            if attribute.startswith('data-')
        ]
        if renames:
            wrapper = BeautifulSoupHTMLDOMElement(element)
            for rename in renames:
                wrapper.remove_attribute(rename['original'])
                wrapper.set_attribute(rename['modified'], rename['value'])
Replace all hyphens of data attributes for 'aaaaa', to avoid error in search.
def join_and(value):
    """Given a list of items, join them with commas and spaces, with
    'and' before the final item.

    >>> join_and(['apples', 'oranges', 'pears'])
    'apples, oranges and pears'

    Non-string items are converted with ``str()``. An empty list yields
    '' (previously this raised IndexError).

    There is surely a better home for this.
    """
    # Convert numbers (or anything else) to strings.
    words = [str(item) for item in value]
    if not words:
        return ''
    if len(words) == 1:
        return words[0]
    # Join all but the last element, then append the final one with 'and'.
    return "%s and %s" % (", ".join(words[:-1]), words[-1])
Given a list of strings, format them with commas and spaces, with 'and' before the final item. >>> join_and(['apples', 'oranges', 'pears']) "apples, oranges and pears" There is surely a better home for this
def render_activity(activity, grouped_activity=None, *args, **kwargs):
    """Attempt to render the matching template snippet for an activity's
    content object; returns None when no such template exists.

    Also takes an optional ``grouped_activity`` argument matching what
    ``utils.group_activity`` produces.
    """
    template_name = (
        'activity_monitor/includes/models/'
        '{0.app_label}_{0.model}.html'.format(activity.content_type)
    )
    try:
        tmpl = loader.get_template(template_name)
    except template.TemplateDoesNotExist:
        return None

    # We know we have a template, so render it.
    context = Context({
        'activity': activity,
        'obj': activity.content_object,
        'grouped_activity': grouped_activity,
    })
    return tmpl.render(context)
Given an activity, will attempt to render the matching template snippet for that activity's content object or will return a simple representation of the activity. Also takes an optional 'grouped_activity' argument that would match up with what is produced by utils.group_activity
def show_activity_count(date=None):
    """Simple filter to get the activity count since a given moment.
    Defaults to the last 24 hours when no date is given."""
    if not date:
        date = datetime.datetime.now() - datetime.timedelta(hours=24)
    return Activity.objects.filter(timestamp__gte=date).count()
Simple filter to get activity count for a given day. Defaults to today.
def show_new_activity(last_seen=None, cap=1000, template='grouped', include=None, exclude=None):
    """Inclusion tag to show new activity, either since the user was
    last seen or today (if no ``last_seen``). Passing in ``last_seen``
    is up to the caller.

    Usage: ``{% show_new_activity %}`` or ``{% show_new_activity
    last_seen 50 'plain' exclude="comment,post" %}``.

    :param last_seen: cutoff datetime/date; falsy values default to today
    :param cap: maximum number of items returned (default 1000)
    :param template: level of detail: 'plain' (simple list), 'grouped'
        (items grouped by content type, the default), or 'detailed'
        (grouped, with custom template snippets)
    :param include: comma-separated content type names to *only* return
    :param exclude: comma-separated content type names to omit
        (include is evaluated before exclude)
    """
    # `not last_seen` also covers the empty string; the previous
    # `last_seen is ''` compared identity against a literal, which is
    # unreliable and a SyntaxWarning on modern Python.
    if not last_seen:
        last_seen = datetime.date.today()

    actions = Activity.objects.filter(timestamp__gte=last_seen)

    if include:
        actions = actions.filter(content_type__model__in=include.split(','))
    if exclude:
        actions = actions.exclude(content_type__model__in=exclude.split(','))

    # Now apply the cap.
    actions = actions[:cap]

    if template == 'detailed':
        selected_template = 'activity_monitor/includes/detailed.html'
        actions = group_activities(actions)
    elif template == 'grouped':
        selected_template = 'activity_monitor/includes/grouped_list.html'
        actions = group_activities(actions)
    else:
        selected_template = 'activity_monitor/includes/activity_list.html'

    return {'actions': actions, 'selected_template': selected_template}
Inclusion tag to show new activity, either since user was last seen or today (if not last_seen). Note that passing in last_seen is up to you. Usage: {% show_new_activity %} Or, to show since last seen: {% show_new_activity last_seen %} Can also cap the number of items returned. Default is 1000. Usage: {% show_new_activity last_seen 50 %} Allows passing template, controlling level of detail. Template choices are: * 'plain': simple list * 'grouped': items are grouped by content type * 'detailed': items are grouped and can use custom template snippets Usage: {% show_new_activity last_seen 50 'plain' %} If no template choice argument is passed, 'grouped' will be used. Also accepts "include" and "exclude" options to control which activities are returned. Content types should be passed in by name. * 'include' will **only** return passed content types * 'exclude' will **not** return passed content types Include is evaluated before exclude. Usage: {% show_new_activity last_seen 50 'plain' exclude="comment,post" %}
def paginate_activity(visible_date=None):
    """Create "get previous day" / "get next day" pagination for
    activities.

    ``visible_date`` is the date of the activities currently shown, as a
    date object; it defaults to today, in which case there is no next
    day.
    """
    if not visible_date:
        visible_date = datetime.date.today()

    one_day = datetime.timedelta(days=1)
    # No forward pagination past today.
    if visible_date == datetime.date.today():
        next_day = None
    else:
        next_day = visible_date + one_day

    return {'previous_day': visible_date - one_day, 'next_day': next_day}
Creates "get previous day" / "get next day" pagination for activities. Visible date is the date of the activities currently being shown, represented by a date object. If not provided, it will default to today. #Expects date as default "Aug. 25, 2014" format.
def find_files(self):
    """Discover language resource files for the root app and each module.

    Populates ``self.module_packs`` with one pack per module containing:
    the module name, the languages found (subdirectory names under the
    ``resources/lang`` folder), and de-duplicated file packs (absolute
    file path, lowercased base name, and language code derived from the
    path below ``lang``).
    """
    modules = self.evernode_app.get_modules()
    # Fall back to the interpreter's startup dir when no explicit root.
    root_path = sys.path[0] if self.evernode_app.root_path is None \
        else self.evernode_app.root_path
    # Always scan the root app's own resources/lang folder.
    dirs = [dict(
        dir=os.path.join(root_path, 'resources', 'lang'),
        module="root")]
    for module_name in modules:
        # 'modules{sep}%s' -> e.g. 'modules/<name>' once %-formatted.
        modules_folder = 'modules{}%s'.format(os.sep)
        if module_name is not None:
            modules_folder = modules_folder % (module_name.strip(os.sep))
        else:
            continue
        path = os.path.join(
            root_path, modules_folder, 'resources', 'lang')
        if os.path.isdir(path):
            dirs.append(dict(dir=path, module=module_name))
    for dir in dirs:
        module_pack = {
            'name': dir['module'],
            'languages': [],
            'file_packs': []
        }
        for path, subdirs, files in os.walk(dir['dir']):
            # Each immediate subdirectory name is treated as a language.
            for subdir in subdirs:
                module_pack['languages'].append(subdir)
            for name in files:
                module_pack['file_packs'].append(dict(
                    file=os.path.join(path, name),
                    name=name.rsplit('.', 1)[0].lower(),
                    # Language code = path fragment after 'lang<sep>'.
                    language=path.split("lang%s" % (os.sep), 1)[1].strip()
                ))
        self.module_packs.append(module_pack)
    # De-duplicate file packs by file path (last one wins).
    for module_pack in self.module_packs:
        module_pack['file_packs'] = \
            list({v['file']: v for v in module_pack['file_packs']}
                 .values())
    if self.app.config['DEBUG']:
        print('--- Loaded Language Files ---')
        print("Loaded Dirs: " + str(dirs))
        print("Loaded Language Packs: " + str(self.module_packs))
Discovers language resource files for the root app and each module and collects them into module packs.
def __root_path(self):
    """Just checks the root path if set: a valid directory is appended
    to ``sys.path``; anything else raises.

    :raises RuntimeError: when the configured root path is not a directory
    """
    if self.root_path is None:
        return  # nothing configured; nothing to validate
    if not os.path.isdir(self.root_path):
        raise RuntimeError('EverNode requires a valid root path.'
                           ' Directory: %s does not exist'
                           % (self.root_path))
    sys.path.append(self.root_path)
Just checks the root path if set
def write_metadata(self, output_path):
    """Build and write a JSON-LD dataset for LSST Projectmeta.

    Parameters
    ----------
    output_path : `str`
        File path where the ``metadata.jsonld`` should be written for
        the build.
    """
    if self._config.lsstdoc is None:
        self._logger.info('No known LSST LaTeX source (--tex argument). '
                          'Not writing a metadata.jsonld file.')
        return

    # Assemble the JSON-LD dataset for the report + source repository.
    product_data = ltdclient.get_product(self._config)
    metadata = self._config.lsstdoc.build_jsonld(
        url=product_data['published_url'],
        code_url=product_data['doc_repo'],
        ci_url='https://travis-ci.org/' + self._config['github_slug'],
        readme_url=None,
        license_id=None)

    json_text = encode_jsonld(
        metadata,
        separators=(',', ':'),  # most compact representation
        ensure_ascii=False)  # pass unicode through
    with open(output_path, 'w') as fh:
        fh.write(json_text)
Build a JSON-LD dataset for LSST Projectmeta. Parameters ---------- output_path : `str` File path where the ``metadata.jsonld`` should be written for the build.