Convert OAI to MARC XML. Args: marc_oai (str): String with either OAI or MARC XML. Returns: str: String with MARC XML.
def _oai_to_xml(marc_oai):  # TODO: move this to MARC XML parser?
    record = MARCXMLRecord(marc_oai)
    record.oai_marc = False
    return record.to_XML()
1,075,467
Add proper XML namespace to the `marc_xml` record. Args: marc_xml (str): String representation of the XML record. Returns: str: XML with namespace.
def _add_namespace(marc_xml):
    dom = marc_xml
    if isinstance(dom, basestring):
        dom = dhtmlparser.parseString(marc_xml)

    root = dom.find("root")
    if root:
        root[0].params = {}

    for record in dom.find("record"):
        record.params = {}

    collections = dom.find("collection")
    if not collections:
        record = dom.find("record")[0]
        return XML_TEMPLATE.replace("$CONTENT", str(record))

    for col in collections:
        col.params["xmlns"] = "http://www.loc.gov/MARC21/slim"
        col.params["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        col.params["xsi:schemaLocation"] = "http://www.loc.gov/MARC21/slim " + \
            "http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"

    return str(dom)
1,075,468
If `content_or_path` contains ``\\n``, return it. Otherwise assume it is a path and read the file at that path. Args: content_or_path (str): Content or path to the file. Returns: str: Content. Raises: IOError: when the file is not found.
def _read_content_or_path(content_or_path):
    if "\n" in content_or_path.strip():
        return content_or_path

    if not os.path.exists(content_or_path):
        raise IOError("File '%s' doesn't exist!" % content_or_path)

    with open(content_or_path) as f:
        return f.read()
1,075,469
Read a MARC XML or OAI file, convert it, add namespaces, and return the XML in the required format with all necessities. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: obj: Required XML parsed with ``lxml.etree``.
def _read_marcxml(xml):
    # read the file, if `xml` is a valid file path
    marc_xml = _read_content_or_path(xml)

    # process the input - convert it from possible OAI to MARC XML and add
    # required XML namespaces
    marc_xml = _oai_to_xml(marc_xml)
    marc_xml = _add_namespace(marc_xml)

    file_obj = StringIO.StringIO(marc_xml)
    return ET.parse(file_obj)
1,075,470
Read XSLT template. Args: template (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: obj: Required XML parsed with ``lxml.etree``.
def _read_template(template):
    template = _read_content_or_path(template)
    file_obj = StringIO.StringIO(template)
    return ET.parse(file_obj)
1,075,471
Transform `xml` using XSLT `template`. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. template (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: str: Transformed `xml` as string.
def xslt_transformation(xml, template):
    transformer = ET.XSLT(
        _read_template(template)
    )
    newdom = transformer(
        _read_marcxml(xml)
    )
    return ET.tostring(newdom, pretty_print=True, encoding="utf-8")
1,075,472
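For illustration, a minimal self-contained sketch of the same lxml XSLT flow that `xslt_transformation` wraps; the stylesheet and document below are invented for the demo:

from io import BytesIO
from lxml import etree as ET

# A tiny stylesheet and document, invented for this demo.
xslt_doc = ET.parse(BytesIO(b"""<xsl:stylesheet version="1.0"
    xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:template match="/greeting">
    <shout><xsl:value-of select="."/></shout>
  </xsl:template>
</xsl:stylesheet>"""))
doc = ET.parse(BytesIO(b"<greeting>hello</greeting>"))

transformer = ET.XSLT(xslt_doc)
newdom = transformer(doc)
print(ET.tostring(newdom, pretty_print=True).decode())  # <shout>hello</shout>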
Loads a single fixture. Args: * fixture_id (str): the id of the fixture * head2head (int, optional): load the previous n fixtures of the two teams Returns: * :obj: json: the fixture-json
def get_fixture(self, fixture_id, head2head=None):
    filters = []
    if head2head is not None and int(head2head) > 0:
        self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')
        filters.append(self.__createFilter('head2head', head2head))
    else:
        self.logger.debug(f'Getting fixture {fixture_id}.')
    return self._request('fixtures', fixture_id, filters=filters)
1,075,850
Loads the players of a team. Args: * team (:obj: json): a team in json format obtained from the service. Returns: * :obj: json: the players of the team
def get_players(self, team):
    team_id = self.__get_team_id(team)
    self.logger.debug(f'Getting players of team {team_id}.')
    return self._request('teams', team_id, 'players')
1,075,851
Return a list of start indexes of substr in string. If substr is not found, the list is empty. Note that substr is passed to ``re.finditer``, so regex metacharacters are interpreted unless escaped. Arguments: substr (str): Substring (or regex pattern) to match. string (str): String to match in. Returns: list of int: Start indices of substr.
def get_substring_idxs(substr, string): return [match.start() for match in re.finditer(substr, string)]
1,076,282
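A short usage sketch; since the helper feeds `substr` straight into `re.finditer`, literal strings containing metacharacters need `re.escape`:

import re

def get_substring_idxs(substr, string):
    return [match.start() for match in re.finditer(substr, string)]

print(get_substring_idxs("na", "banana"))             # [2, 4]
print(get_substring_idxs(re.escape("1+1"), "1+1=2"))  # [0]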
Truncate a string to a maximum number of characters. If the string is longer than maxchar, remove the excess characters and append an ellipsis. Arguments: string (str): String to truncate. maxchar (int): Maximum length of string in characters. Must be >= 4. Returns: str: Of length <= maxchar. Raises: TruncateError: In case of an error.
def truncate(string, maxchar):
    if maxchar < 4:
        raise TruncateError("Maxchar must be >= 4")

    if len(string) <= maxchar:
        return string
    else:
        return string[:maxchar - 3] + "..."
1,076,283
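A usage sketch; `TruncateError` is defined elsewhere in the module, so a stand-in is used here:

class TruncateError(Exception):  # stand-in for the module's own exception
    pass

def truncate(string, maxchar):
    if maxchar < 4:
        raise TruncateError("Maxchar must be >= 4")
    if len(string) <= maxchar:
        return string
    return string[:maxchar - 3] + "..."

print(truncate("hello world", 8))  # 'hello...'
print(truncate("hi", 8))           # 'hi'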
Initialise the class. Args: client (:class:`consulate.Consul`): A :class:`consulate.Consul` instance. base_path (str): the base path to use in Consul.
def __init__(self, client, base_path):
    self._client = client
    self._base_path = base_path
    self._queue_path = posixpath.join(self._base_path, 'queue', '')
    self._counter_path = posixpath.join(self._queue_path, 'counter')
    self._ensure_counter()
    self._ensure_queue()
1,076,360
Put a task into the queue. Args: value (str): Task data. priority (int): An optional priority as an integer with at most 3 digits. Lower values signify higher priority.
def put(self, value, priority=100):
    task_name = '{}{:03d}_{}'.format(self.TASK_PREFIX, priority, self._counter)
    path = posixpath.join(self._queue_path, task_name)
    self._client.kv[path] = value
1,076,364
Create a new table. If the table already exists, nothing happens. Example: >>> db.create_table("foo", (("id", "integer primary key"), ("value", "text"))) Arguments: name (str): The name of the table to create. schema (sequence of tuples): A list of (name, type) tuples representing each of the columns.
def create_table(self, name, schema):
    columns = [" ".join(column) for column in schema]
    self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
                 .format(name=name, columns=",".join(columns)))
1,076,383
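A sketch of the SQL this helper builds, runnable against plain sqlite3 with the docstring's own example schema:

import sqlite3

schema = (("id", "integer primary key"), ("value", "text"))
columns = ",".join(" ".join(column) for column in schema)
sql = "CREATE TABLE IF NOT EXISTS foo ({})".format(columns)

db = sqlite3.connect(":memory:")
db.execute(sql)  # CREATE TABLE IF NOT EXISTS foo (id integer primary key,value text)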
Create a new table with same schema as the source. If the named table already exists, nothing happens. Arguments: name (str): The name of the table to create. src (str): The name of the source table to duplicate. Raises: sql.OperationalError: If source table does not exist.
def create_table_from(self, name, src):
    # Look up the command which was used to create the "src" table.
    query = self.execute("SELECT sql FROM sqlite_master WHERE "
                         "type='table' and name=?", (src,))
    try:
        cmd = query.fetchone()[0]
    except TypeError:
        raise sql.OperationalError("Cannot copy non-existent table '{0}'"
                                   .format(src))

    # Modify the original command to replace the old table name with the
    # new one. Note: re.IGNORECASE must be passed via the `flags` keyword;
    # the fourth positional argument of re.sub() is `count`.
    new_cmd = re.sub(r"(CREATE TABLE) \w+", "\\1 " + name, cmd,
                     flags=re.IGNORECASE)

    # Execute this new command.
    self.execute(new_cmd)
1,076,384
Create a carbon copy of the source table. Arguments: src (str): The name of the table to copy. dst (str): The name of the target duplicate table. Raises: sql.OperationalError: If source table does not exist.
def copy_table(self, src, dst):
    # Create the destination table.
    self.create_table_from(dst, src)
    # Copy contents of src to dst.
    self.execute("INSERT INTO {dst} SELECT * FROM {src}"
                 .format(dst=dst, src=src))
    # Commit changes.
    self.commit()
1,076,385
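The same copy idea as `create_table_from` plus `copy_table`, sketched against plain sqlite3 (the table names are invented):

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE src (id integer primary key, value text)")
db.execute("INSERT INTO src VALUES (1, 'a')")

# duplicate the schema recorded in sqlite_master, then bulk-copy the rows
cmd = db.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name=?",
                 ("src",)).fetchone()[0]
db.execute(cmd.replace("src", "dst", 1))
db.execute("INSERT INTO dst SELECT * FROM src")
print(db.execute("SELECT * FROM dst").fetchall())  # [(1, 'a')]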
Pick frame info from the current caller's `frame`. Args: * frame: :type:`frame` instance, use :func:`inspect.currentframe`. * parent: whether to get the outer frame's (caller's) traceback info, :data:`False` by default. Returns: :class:`inspect.Traceback` instance from :data:`frame` or its parent frame.
def traceback(frame, parent=False):
    # Traceback(filename='<stdin>', lineno=1, function='<module>',
    #           code_context=None, index=None)
    if parent is True:
        # frame itself will always be placed @ the first index of its
        # outer frames.
        outers = inspect.getouterframes(frame)
        # NOTE: the original `cond and None or ...` idiom evaluated the
        # or-branch when there was no caller frame, raising IndexError;
        # an explicit conditional is safe.
        traceback = None if len(outers) == 1 else inspect.getframeinfo(outers[1][0])
    else:
        traceback = inspect.getframeinfo(frame)
    return traceback
1,076,393
Create the real absolute path for the given path. Adds support for the user dir (``~``) and the filesystem root (``/``). Args: * path: pathname to use for realpath. Returns: Platform independent real absolute path.
def realpath(path):
    if path == '~':
        return userdir
    if path == '/':
        return sysroot
    if path.startswith('/'):
        return os.path.abspath(path)
    if path.startswith('~/'):
        return os.path.expanduser(path)
    if path.startswith('./'):
        return os.path.abspath(os.path.join(os.path.curdir, path[2:]))
    return os.path.abspath(path)
1,076,428
Find absolute file/folder paths with the given ``re`` pattern. Args: * pattern: search pattern; supports both a plain string (exact match) and a compiled `re` pattern. * path: root path to start searching; default is the current working directory. * recursive: whether to recursively find the matched items under `path`; False by default. Returns: Generator of the matched Files/Folders.
def find(pattern, path=os.path.curdir, recursive=False):
    root = realpath(path)
    Finder = lambda item: regex.is_regex(pattern) \
        and pattern.match(item) or (pattern == item)

    if recursive:
        for base, dirs, files in os.walk(root, topdown=True):
            for segment in itertools.chain(filter(Finder, files),
                                           filter(Finder, dirs)):
                yield FS(os.path.join(base, segment))
    else:
        for segment in filter(Finder, os.listdir(root)):
            yield os.path.join(root, segment)
1,076,429
Copy item to the given `dest` path. Args: * dest: destination path to copy.
def copy(self, dest):
    if os.path.isfile(self.path):
        shutil.copy2(self.path, dest)
    else:
        shutil.copytree(self.path, dest, symlinks=False, ignore=None)
1,076,430
Release lock. To release a lock, we must already own the lock. Arguments: force (bool, optional): If true, ignore any existing lock owner. Raises: UnableToReleaseLockError: If the lock is claimed by another process (not raised if force option is used).
def release(self, force=False):
    # There's no lock, so do nothing.
    if not self.islocked:
        return

    if self.owned_by_self or force:
        os.remove(self.path)
    else:
        raise UnableToReleaseLockError(self)
1,076,491
Read the contents of a LockFile. Arguments: path (str): Path to lockfile. Returns: Tuple(int, datetime): The integer PID of the lock owner, and the date the lock was acquired. If the lock is not claimed, both values are None.
def read(path):
    if fs.exists(path):
        with open(path) as infile:
            components = infile.read().split()
            pid = int(components[0])
            date = datetime.date.fromtimestamp(float(components[1]))
        return pid, date
    else:
        return None, None
1,076,492
Write the contents of a LockFile. Arguments: path (str): Path to lockfile. pid (int): The integer process ID. timestamp (datetime): The time the lock was acquired.
def write(path, pid, timestamp):
    with open(path, "w") as lockfile:
        print(pid, timestamp, file=lockfile)
1,076,493
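A round-trip sketch of the lockfile format. Note that `read` parses the second field with `float()`, so the pair only round-trips when `write` is given a numeric epoch timestamp such as `time.time()`, not a `datetime` object (whose printed form contains a space):

import datetime
import os
import tempfile
import time

path = os.path.join(tempfile.mkdtemp(), "LOCK")
with open(path, "w") as lockfile:
    print(os.getpid(), time.time(), file=lockfile)  # "<pid> <epoch>"

with open(path) as infile:
    components = infile.read().split()
pid = int(components[0])
date = datetime.date.fromtimestamp(float(components[1]))
print(pid, date)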
Create an image from a terminated host (with auto_delete_boot_disk=False) Args: name: The name of the image
def terminate_and_create_image(name):
    node = _host_node()
    operation = _gcp().instances().delete(
        project=DEFAULT_PROJECT, zone=DEFAULT_ZONE,
        instance=node['real_name']).execute()
    while True:
        status = get_zone_operation_status(operation=operation)
        if status == 'DONE':
            break
        print 'Terminating instance [OPERATION %s]' % status
        time.sleep(5)

    body = {
        'name': name,
        'sourceDisk': node['source_disk'],
    }
    operation = _gcp().images().insert(
        project=DEFAULT_PROJECT, body=body).execute()
    while True:
        status = get_global_operation_status(operation=operation)
        if status == 'DONE':
            break
        print 'Creating image [OPERATION %s]' % status
        time.sleep(5)

    print 'Created image: %s' % operation['targetLink']
1,076,702
Load a png or jpeg image into a bitmap buffer. Args: buf (Buffer): Buffer to load request_components (int): If you want to force number of components Returns: A tuple containing: - Bitmap buffer - width of bitmap - height of bitmap - number of components
def load_image(buf, request_components=0):
    x = ffi.new('int*')
    y = ffi.new('int*')
    n = ffi.new('int*')

    cbuf = ffi.from_buffer(buf)
    bitmap = lib.stbi_load_from_memory(
        ffi.cast('unsigned char*', cbuf),
        len(buf), x, y, n, request_components
    )
    pybuffer = ffi.buffer(bitmap, x[0] * y[0] * n[0])
    return pybuffer, x[0], y[0], n[0]
1,076,803
Resize an image Args: buf (Buffer): Buffer coming from `load_image` width (int): Width of `buf` height (int): Height of `buf` num_channels (int): Number of channels in `buf` (RGBA=4) new_width (int): Desired width new_height (int): Desired height Returns: Buffer: Resized image Raises: ResizeError: If an error occurs during resize
def resize_image(buf, width, height, num_channels, new_width, new_height):
    new_size = new_width * new_height * num_channels
    input_pixels = ffi.from_buffer(buf)
    output_pixels = ffi.new('unsigned char[]', new_size)

    result = lib.stbir_resize_uint8(
        ffi.cast('unsigned char*', input_pixels), width, height, 0,
        output_pixels, new_width, new_height, 0,
        num_channels
    )
    if not result:
        raise ResizeError()

    return ffi.buffer(output_pixels, new_size)
1,076,804
Finalise a plot. Display or save the plot, then close it. Arguments: output (str, optional): Path to save figure to. If not given, show plot. figsize ((float, float), optional): Figure size in inches. tight (bool, optional): Whether to apply a tight layout (default True). **kwargs: Any additional arguments to pass to plt.savefig(). Only required if output is not None.
def finalise(output=None, figsize=None, tight=True, **kwargs):
    import matplotlib.pyplot as plt

    # Set figure size.
    if figsize is not None:
        plt.gcf().set_size_inches(*figsize)

    # Set plot layout.
    if tight:
        plt.tight_layout()

    if output is None:
        plt.show()
    else:
        plt.savefig(output, **kwargs)
        io.info("Wrote", output)
    plt.close()
1,076,913
Create one or more cloud servers. Args: * provider (str): Cloud provider, e.g. ec2, digitalocean * count (int) =1: Number of instances * name (str) =None: Name of server(s) * \**kwargs: Provider-specific flags
def create(provider, count=1, name=None, **kwargs):
    count = int(count)
    provider = provider_by_name(provider)
    options = provider.create_server_defaults
    options.update(kwargs)
    names = [name] * count
    provider.validate_create_options(**options)
    return provider.create_servers(count, names, **options)
1,077,421
SSH into the server(s) (sequentially if more than one) Args: cmd (str) ='': Command to run on the server
def ssh(cmd=''):
    with settings(warn_only=True):
        local('ssh -A -o StrictHostKeyChecking=no -i "%s" %s@%s "%s"' % (
            env.key_filename, env.user, env.host, cmd))
1,077,422
Connection object builder. Args: virtualhost (str): selected virtualhost in rabbitmq Returns: pika.ConnectionParameters: object filled by `constants` from :class:`edeposit.amqp.settings`.
def getConParams(virtualhost):
    return pika.ConnectionParameters(
        host=settings.RABBITMQ_HOST,
        port=int(settings.RABBITMQ_PORT),
        virtual_host=virtualhost,
        credentials=pika.PlainCredentials(
            settings.RABBITMQ_USER_NAME,
            settings.RABBITMQ_USER_PASSWORD
        )
    )
1,077,435
Return function for sending progress messages back to the original caller. Args: uuid (str): UUID of the received message. key (str): Routing key. Returns: fn reference: Reference to function which takes only one data argument.
def get_sendback(self, uuid, key):
    def send_back_callback(data):
        self.sendResponse(
            serializers.serialize(data),
            uuid,
            key
        )

    return send_back_callback
1,077,439
Callback called when an exception was raised. This method serializes the exception and sends it over AMQP back to the caller. Args: e (obj): Instance of the exception. uuid (str): UUID of the message that caused the exception to raise. routing_key (str): Which routing key was used. body (str): Body of the exception - the longer text. tb (str, default None): Traceback (stacktrace) of the exception.
def process_exception(self, e, uuid, routing_key, body, tb=None):
    # get information about the exception
    msg = e.message if hasattr(e, "message") else str(e)
    exception_type = str(e.__class__)
    exception_name = str(e.__class__.__name__)

    print "Sending exception %s: %s for UUID %s." % (
        exception_name, msg, uuid
    )

    self.sendMessage(
        self.output_exchange,
        routing_key,
        str(body),
        properties=pika.BasicProperties(
            content_type="application/text",
            delivery_mode=2,
            headers={
                "exception": msg,
                "exception_type": exception_type,
                "exception_name": exception_name,
                "traceback": tb,
                "UUID": uuid
            }
        )
    )
1,077,440
SSH into a running container, using the host as a jump host. This requires the container to have a running sshd process. Args: * container: Container name or ID * cmd='': Command to run in the container * user='root': SSH username * password='root': SSH password
def ssh(container, cmd='', user='root', password='root'):
    ip = get_ip(container)
    ssh_cmd = 'sshpass -p \'%s\' ssh -A -t -o StrictHostKeyChecking=no \'%s\'@%s' % (
        password, user, ip)
    local('ssh -A -t -o StrictHostKeyChecking=no -i "%s" %s@%s %s %s' % (
        env.key_filename, env.user, env.host, ssh_cmd, cmd))
1,078,563
Prepare a vanilla server by installing docker, curl, and sshpass. If a file called ``dot_dockercfg`` exists in the current working directory, it is uploaded as ``~/.dockercfg``. Args: * docker_mount=None: Partition that will be mounted as /var/lib/docker
def setup(docker_mount=None, force=False):
    if not is_ubuntu() and not is_boot2docker():
        raise Exception('Head In The Clouds Docker is only supported on Ubuntu')

    # a bit hacky
    if os.path.exists('dot_dockercfg') and not fabric.contrib.files.exists('~/.dockercfg'):
        put('dot_dockercfg', '~/.dockercfg')

    if not fabric.contrib.files.exists('~/.ssh/id_rsa'):
        fab.run('ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')

    if docker_is_installed() and not force:
        return

    for attempt in range(3):
        sudo('wget -qO- https://get.docker.io/gpg | apt-key add -')
        sudo('sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"')
        with settings(warn_only=True):
            sudo('apt-get update')
            failed = sudo('apt-get install -y lxc-docker sshpass curl').failed
        if not failed:
            break

    if docker_mount:
        create_docker_mount(docker_mount)
1,078,566
Kill a container Args: * container: Container name or ID * rm=True: Remove the container or not
def kill(container, rm=True):
    container = get_container(container)
    if not container:
        raise Exception('No such container: %s' % container)

    unbind_all(container['ip'])  # legacy, only here for backwards compatibility
    sudo('docker kill %s' % container['name'])
    if rm:
        sudo('docker rm %s' % container['name'])
1,078,569
Set up an SSH tunnel into the container, using the host as a gateway host. Args: * container: Container name or ID * local_port: Local port * remote_port=None: Port on the Docker container (defaults to local_port) * gateway_port=None: Port on the gateway host (defaults to remote_port)
def tunnel(container, local_port, remote_port=None, gateway_port=None):
    if remote_port is None:
        remote_port = local_port
    if gateway_port is None:
        gateway_port = remote_port
    remote_host = get_ip(container)
    # NOTE: the original multi-line command template was lost in extraction;
    # what follows is a plausible reconstruction of an SSH tunnel chained
    # through the gateway host, matching the placeholder names below.
    command = '''
ssh -A -o StrictHostKeyChecking=no -i "%(key_filename)s"
 -L %(local_port)s:localhost:%(gateway_port)s %(gateway_user)s@%(gateway_host)s
 ssh -L %(gateway_port)s:localhost:%(remote_port)s root@%(remote_host)s
''' % {
        'key_filename': env.key_filename,
        'local_port': local_port,
        'gateway_port': gateway_port,
        'gateway_user': env.user,
        'gateway_host': env.host,
        'remote_port': remote_port,
        'remote_host': remote_host,
    }
    command = command.replace('\n', '')
    local(command)
1,078,570
Insert `tag` before `before` tag if present. If not, insert it into `root`. Args: tag (obj): HTMLElement instance. before (obj): HTMLElement instance. root (obj): HTMLElement instance.
def insert_tag(tag, before, root):
    if not before:
        root.childs.append(tag)
        tag.parent = root
        return

    if type(before) in [tuple, list]:
        before = first(before)

    # check that `before` is double linked
    if not hasattr(before, "parent"):
        raise ValueError("Input must be double-linked!")

    # put it before the first existing identifier
    parent = before.parent
    parent.childs.insert(
        parent.childs.index(before),
        tag
    )
    tag.parent = parent
1,078,959
Transform content in all `tags` using result of `content_transformer(tag)` call. Args: tags (obj/list): HTMLElement instance, or list of HTMLElement instances. content_transformer (function): Function which is called as ``content_transformer(tag)``.
def transform_content(tags, content_transformer):
    if type(tags) not in [tuple, list]:
        tags = [tags]

    for tag in tags:
        new_child = dhtmlparser.HTMLElement(content_transformer(tag))

        # don't forget to add the parent if the list is double-linked
        if hasattr(tag, "parent"):
            new_child.parent = tag

        tag.childs = [new_child]
1,078,960
Create double linked DOM from input. In case of string, parse it, make it double-linked. In case of DOM, just make it double-linked. Args: str_or_dom (str/HTMLelement): String or HTMLelement instance. Returns: obj: HTMLelement with parsed, double-linked content from `str_or_dom`.
def double_linked_dom(str_or_dom):
    dom = str_or_dom
    if not isinstance(str_or_dom, dhtmlparser.HTMLElement):
        dom = dhtmlparser.parseString(str_or_dom)

    dhtmlparser.makeDoubleLinked(dom)
    return dom
1,078,961
Set global variables to values defined in `config_dict`. Args: config_dict (dict): Dictionary with data which are used to set `globals`. Note: `config_dict` has to be a dictionary, or it is ignored. Also, all variables that are not already in globals, are not of a type defined in :attr:`_ALLOWED` (str, int, float), or start with ``_`` are silently ignored.
def substitute_globals(config_dict):
    constants = get_all_constants()

    if type(config_dict) != dict:
        return

    for key in config_dict.keys():
        if key in constants and type(config_dict[key]) in _ALLOWED:
            globals()[key] = config_dict[key]
1,079,258
Instantiate a directory checksum cache. Arguments: path (str): Path to persistent cache store. hash (str, optional): Hash algorithm to use, e.g. 'md5', 'sha1'.
def __init__(self, path, hash='sha1'):
    self.path = fs.path(path)
    self.hash = hash

    db = sqlite3.connect(self.path)
    c = db.cursor()
    # NOTE: the original SQL string was lost in extraction; this statement is
    # reconstructed from how dirhash() reads and writes the table.
    c.execute("CREATE TABLE IF NOT EXISTS dirhashcache "
              "(path TEXT PRIMARY KEY, date TEXT, hash TEXT)")
    db.commit()
    db.close()
1,079,275
Compute the hash of a directory. Arguments: path: Directory. **dirhash_opts: Additional options to checksumdir.dirhash(). Returns: str: Checksum of directory.
def dirhash(self, path, **dirhash_opts):
    path = fs.path(path)
    last_modified = time.ctime(max(
        max(os.path.getmtime(os.path.join(root, file)) for file in files)
        for root, _, files in os.walk(path)))

    db = sqlite3.connect(self.path)
    c = db.cursor()
    c.execute("SELECT date, hash FROM dirhashcache WHERE path=?", (path,))
    cached = c.fetchone()

    if cached:
        cached_date, cached_hash = cached
        if cached_date == last_modified:
            # cache hit
            dirhash = cached_hash
        else:
            # out of date cache
            dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts)
            c.execute("UPDATE dirhashcache SET date=?, hash=? WHERE path=?",
                      (last_modified, dirhash, path))
            db.commit()
    else:
        # new entry
        dirhash = checksumdir.dirhash(path, self.hash, **dirhash_opts)
        c.execute("INSERT INTO dirhashcache VALUES (?,?,?)",
                  (path, last_modified, dirhash))
        db.commit()

    db.close()
    return dirhash
1,079,277
Creates the data for an SSDP request. Args: request_line (string): The request line for the request (e.g. ``"M-SEARCH * HTTP/1.1"``). headers (dict of string -> string): Dictionary of header name - header value pairs to include in the request. Returns: bytes: The encoded request.
def encode_request(request_line, **headers):
    lines = [request_line]
    lines.extend(['%s: %s' % kv for kv in headers.items()])
    return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')
1,079,779
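A usage sketch building the standard SSDP M-SEARCH request (239.255.255.250:1900 is the well-known SSDP multicast address):

def encode_request(request_line, **headers):
    lines = [request_line]
    lines.extend(['%s: %s' % kv for kv in headers.items()])
    return ('\r\n'.join(lines) + '\r\n\r\n').encode('utf-8')

msg = encode_request('M-SEARCH * HTTP/1.1',
                     HOST='239.255.255.250:1900',
                     MAN='"ssdp:discover"',
                     MX='3',
                     ST='ssdp:all')
print(msg.decode('utf-8'))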
Decodes the data from a SSDP response. Args: data (bytes): The encoded response. Returns: dict of string -> string: Case-insensitive dictionary of header name to header value pairs extracted from the response.
def decode_response(data):
    res = CaseInsensitiveDict()
    for dataline in data.decode('utf-8').splitlines()[1:]:
        dataline = dataline.strip()
        if not dataline:
            continue
        line_parts = dataline.split(':', 1)
        # This is to deal with headers with no value.
        if len(line_parts) < 2:
            line_parts = (line_parts[0], '')
        res[line_parts[0].strip()] = line_parts[1].strip()
    return res
1,079,780
Send an SSDP search request via the provided socket. Args: sock: A socket suitable for use to send a broadcast message - preferably one created by :py:func:`make_socket`. search_target (string): A :term:`resource type` target to search for.
def request_via_socket(sock, search_target):
    msgparts = dict(HOST=MCAST_IP_PORT, MAN='"ssdp:discover"', MX='3',
                    ST=search_target)
    msg = encode_request('M-SEARCH * HTTP/1.1', **msgparts)
    sock.sendto(msg, (MCAST_IP, MCAST_PORT))
1,079,781
Yield SSDP search responses and advertisements from the provided socket. Args: sock: A socket suitable for use to send a broadcast message - preferably one created by :py:func:`make_socket`. timeout (int / float): Overall time in seconds to wait before no longer listening for responses. Yields: dict of string -> string: Case-insensitive dictionary of header name to header value pairs extracted from the response.
def responses_from_socket(sock, timeout=10):
    now = time.time()
    give_up_by = now + timeout

    while now < give_up_by:
        try:
            data = sock.recv(1024)
        except socket.timeout:
            now = time.time()
            continue

        # We handle either search responses or announcements.
        for data_prefix in [
            b'HTTP/1.1 200 OK',
            b'NOTIFY * HTTP/1.1',
        ]:
            if data[:len(data_prefix)] == data_prefix:
                break
        else:
            now = time.time()
            continue

        yield decode_response(data)
        now = time.time()
1,079,782
Add `xmlns` and `ID` attributes to ``<mods:mods>`` tag. Args: dom (HTMLElement): DOM containing whole document. volume_counter (int, default 0): ID of volume.
def add_missing_xml_attributes(dom, volume_counter=0):
    mods_tag = get_mods_tag(dom)

    if mods_tag:
        params = mods_tag.params

        # add missing attributes
        params["ID"] = "MODS_VOLUME_%04d" % (volume_counter + 1)
        params["xmlns:mods"] = "http://www.loc.gov/mods/v3"
        params["xmlns:xlink"] = "http://www.w3.org/1999/xlink"
        params["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        params["xsi:schemaLocation"] = " ".join((
            "http://www.w3.org/2001/XMLSchema-instance",
            "http://www.w3.org/2001/XMLSchema.xsd",
            "http://www.loc.gov/mods/v3",
            "http://www.loc.gov/standards/mods/v3/mods-3-4.xsd",
            "http://www.w3.org/1999/xlink http://www.w3.org/1999/xlink.xsd",
        ))
1,079,833
Fix bugs in `mods` produced by the XSLT template. Args: marc_xml (str): Original Aleph record. mods (str): XML string generated by the XSLT template. uuid (str): UUID of the package. counter (int): Number of the record; added to the XML headers. url (str): URL of the publication (public or not). Returns: str: Updated XML.
def postprocess_monograph(marc_xml, mods, uuid, counter, url):
    dom = double_linked_dom(mods)

    if not isinstance(marc_xml, MARCXMLRecord):
        marc_xml = MARCXMLRecord(marc_xml)

    add_missing_xml_attributes(dom, counter)
    fix_invalid_type_parameter(dom)

    if uuid:
        add_uuid(dom, uuid)

    add_marccountry_tag(dom)

    # add <genre> tag if not found
    add_genre(dom)

    # remove hairs from some tags
    remove_hairs_from_tags(dom)

    fix_issuance(dom)
    fix_location_tag(dom)
    fix_related_item_tag(dom)
    fix_missing_electronic_locator_tag(dom, url)
    fix_missing_lang_tags(marc_xml, dom)

    return dom.prettify()
1,079,842
Split a string by ``sep`` and yield chunks Args: s (str-type): string to split sep (str-type): delimiter to split by Yields: generator of strings: chunks of string s
def itersplit(s, sep=None):
    if not s:
        yield s
        return

    exp = re.compile(r'\s+' if sep is None else re.escape(sep))
    pos = 0
    while True:
        m = exp.search(s, pos)
        if not m:
            if pos < len(s) or sep is not None:
                yield s[pos:]
            break
        if pos < m.start() or sep is not None:
            yield s[pos:m.start()]
        pos = m.end()
1,079,863
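A usage sketch; with `sep=None` the generator behaves like `str.split()`, and with an explicit separator it keeps empty chunks like `str.split(sep)`:

import re

def itersplit(s, sep=None):
    if not s:
        yield s
        return
    exp = re.compile(r'\s+' if sep is None else re.escape(sep))
    pos = 0
    while True:
        m = exp.search(s, pos)
        if not m:
            if pos < len(s) or sep is not None:
                yield s[pos:]
            break
        if pos < m.start() or sep is not None:
            yield s[pos:m.start()]
        pos = m.end()

print(list(itersplit('a b  c')))     # ['a', 'b', 'c']
print(list(itersplit('a,,b', ','))) # ['a', '', 'b']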
Itersplit a string into a (named, if specified) tuple. Args: str_ (str): string to split fsep (str): field separator (delimiter to split by) revtuple (object): namedtuple (or class with a ``._fields`` attr) (optional) fields (list of str): field names (if revtuple is not specified) preparse (callable): function to parse str with before itersplitting Returns: tuple or revtuple: fields as a tuple or revtuple, if specified
def itersplit_to_fields(str_, fsep=DEFAULT_FSEP, revtuple=None, fields=[],
                        preparse=None):
    if preparse:
        str_ = preparse(str_)

    _fields = itersplit(str_, fsep)
    if revtuple is not None:
        try:
            values = (t[1] for t in izip_longest(revtuple._fields, _fields))
            return revtuple(*values)
        except Exception as e:
            log.error(revtuple)
            log.error(_fields)
            log.exception(e)
            raise

    return tuple(izip_longest(fields, _fields, fillvalue=None))
1,079,864
Search for repositories with a stack and ``os.listdir`` Args: where (str): path to search from Yields: Repository subclass instance
def listdir_find_repos(where):
    stack = deque([(convert_path(where), '')])
    while stack:
        where, prefix = stack.pop()
        try:
            for name in sorted(os.listdir(where), reverse=True):
                fn = os.path.join(where, name)
                if name in REPO_PREFIXES:
                    # os.path.exists(fn):
                    # yield name[1:], fn.rstrip(name)[:-1]  # abspath
                    repo = REPO_PREFIXES[name](fn.rstrip(name)[:-1])
                    yield repo
                stack.append((fn, prefix + name + '/'))
        except OSError as e:
            if e.errno == errno.EACCES:
                log.error("Skipping: %s", e)
            else:
                raise
1,079,866
Search for repositories with GNU find Args: where (str): path to search from ignore_error (bool): if False, raise Exception when the returncode is not zero. Yields: Repository subclass instance
def find_find_repos(where, ignore_error=True):
    log.debug(('REPO_REGEX', REPO_REGEX))
    FIND_REPO_REGEXCMD = ("-regex", '.*(%s)$' % REPO_REGEX)
    if os.uname()[0] == 'Darwin':
        cmd = ("find", '-E',
               '-L',  # dereference symlinks
               where,
               FIND_REPO_REGEXCMD[0], FIND_REPO_REGEXCMD[1])
    else:
        cmd = ("find", '-O3',
               '-L',  # dereference symlinks
               where,
               "-regextype", "posix-egrep",
               FIND_REPO_REGEXCMD[0], FIND_REPO_REGEXCMD[1])

    _cmd = ' '.join(cmd)
    log.debug("find_find_repos(%r) = %s" % (where, _cmd))
    kwargs = {
        'cwd': where,
        'stderr': sys.stderr,
        'stdout': subprocess.PIPE,
    }
    p = subprocess.Popen(cmd, universal_newlines=True, **kwargs)
    # NOTE: p.returncode is None until the process has been waited on, so
    # this check only fires if the process has already exited.
    if p.returncode and not ignore_error:
        p_stdout = p.communicate()[0]
        raise Exception("Subprocess return code: %d\n%r\n%r" % (
            p.returncode, cmd, p_stdout))

    for l in iter(p.stdout.readline, ''):
        path = l.rstrip()
        _path, _prefix = os.path.dirname(path), os.path.basename(path)
        repo = REPO_PREFIXES.get(_prefix)
        if repo is None:
            log.error("repo for path %r and prefix %r is None"
                      % (path, _prefix))
        if repo:
            yield repo(_path)
1,079,867
Search for repositories and deduplicate based on ``repo.fpath`` Args: where (str): path to search from Yields: Repository subclass
def find_unique_repos(where):
    repos = Dict()
    path_uuids = Dict()
    log.debug("find_unique_repos(%r)" % where)
    for repo in find_find_repos(where):
        # log.debug(repo)
        repo2 = (hasattr(repo, 'search_upwards')
                 and repo.search_upwards(upwards=path_uuids))
        if repo2:
            if repo2 == repo:
                continue
            else:
                repo = repo2

        if repo.fpath not in repos:
            log.debug("%s | %s | %s"
                      % (repo.prefix, repo.fpath, repo.unique_id))
            repos[repo.fpath] = repo
            yield repo
1,079,868
Do a repository report: call the report function for each Repository Args: repos (iterable): iterable of Repository instances report (string): report name output (writeable): output stream to print to Yields: Repository subclass
def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs):
    for i, repo in enumerate(repos):
        log.debug(str((i, next(repo.origin_report()))))
        try:
            if repo is not None:
                reportfunc = REPORT_TYPES.get(report)
                if reportfunc is None:
                    raise Exception("Unrecognized report type: %r (%s)"
                                    % (report, ', '.join(REPORT_TYPES.keys())))
                for l in reportfunc(repo, *args, **kwargs):
                    print(l, file=output)
        except Exception as e:
            log.error(repo)
            log.error(report)
            log.error(e)
            raise

        yield repo
1,079,869
Generate a thg-reporegistry.xml file from a list of repos and print to output Args: repos (iterable): iterable of Repository subclass instances output (writeable): output stream to which THG XML will be printed
def do_tortoisehg_report(repos, output):
    import operator
    import xml.etree.ElementTree as ET

    root = ET.Element('reporegistry')
    item = ET.SubElement(root, 'treeitem')
    group = ET.SubElement(item, 'group', attrib=Dict(name='groupname'))

    def fullname_to_shortname(fullname):
        shortname = fullname.replace(os.environ['HOME'], '~')
        shortname = shortname.lstrip('./')
        return shortname

    for repo in sorted(repos, key=operator.attrgetter('fpath')):
        fullname = os.path.join(
            os.path.dirname(repo.fpath),
            os.path.basename(repo.fpath))
        shortname = fullname_to_shortname(fullname)
        if repo.prefix != '.hg':
            shortname = "%s%s" % (shortname, repo.prefix)
        _ = ET.SubElement(group, 'repo', attrib=Dict(
            root=repo.fpath,
            shortname=shortname,
            basenode='0' * 40))

    print('<?xml version="1.0" encoding="UTF-8"?>', file=output)
    print("<!-- autogenerated: %s -->" % "TODO", file=output)
    print(ET.dump(root), file=output)
1,079,870
Create a new Repository instance Args: fpath (str): path (relative or absolute) to repository
def __init__(self, fpath):
    self.fpath = os.path.abspath(fpath)
    self.symlinks = []
1,079,873
Run a command with the current working directory set to self.fpath Args: cmd (str or tuple): cmdstring or listlike Keyword Arguments: ignore_error (bool): if False, raise an Exception if p.returncode is not 0 cwd (str): current working dir to run cmd with shell (bool): subprocess.Popen ``shell`` kwarg Returns: str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``)
def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
    kwargs.update({
        'shell': shell,
        'cwd': cwd or self.fpath,
        'stderr': subprocess.STDOUT,
        'stdout': subprocess.PIPE,
        'ignore_error': ignore_error})
    log.debug((('cmd', cmd), ('kwargs', kwargs)))
    return sh(cmd, **kwargs)
1,079,887
Convert a URL to local mercurial URL schemes. Args: url (str): URL to map to local mercurial URL schemes. Example:: # schemes.gh = git://github.com/ >>> remote_url = 'git://github.com/westurner/dotfiles' >>> to_hg_scheme_url(remote_url) 'gh://westurner/dotfiles'
def to_hg_scheme_url(cls, url):
    regexes = cls._get_url_scheme_regexes()
    for scheme_key, pattern, regex in regexes:
        match = regex.match(url)
        if match is not None:
            groups = match.groups()
            # NOTE: str.join takes a single iterable, so the original
            # multi-argument join() calls would have raised a TypeError.
            if len(groups) == 2:
                return u''.join((
                    scheme_key, '://',
                    pattern.replace('{1}', groups[0]),
                    groups[1]))
            elif len(groups) == 1:
                return u''.join((scheme_key, '://', pattern, groups[0]))
1,079,894
Preprocess/cleanup a bzr log message before parsing. Args: s (str): log message string. by (int): number of leading characters to strip from the message (default 2). Returns: str: preprocessed log message string.
def _logmessage_transform(cls, s, by=2):
    if len(s) >= by:
        return s[by:].strip('\n')
    return s.strip('\n')
1,079,903
Parse bazaar log file format Args: r (str): bzr revision identifier Yields: dict: dict of (attr, value) pairs :: $ bzr log -l1 ------------------------------------------------------------ revno: 1 committer: ubuntu <ubuntu@ubuntu-desktop> branch nick: ubuntu-desktop /etc repository timestamp: Wed 2011-10-12 01:16:55 -0500 message: Initial commit
def _parselog(self, r):
    def __parselog(entry):
        bufname = None
        buf = deque()
        print(entry)  # debugging leftover in the original
        if entry == ['']:
            return
        for l in itersplit(entry, '\n'):
            if not l:
                continue
            mobj = self.logrgx.match(l)
            if not mobj:
                # " - Log message"
                buf.append(self._logmessage_transform(l))
            if mobj:
                mobjlen = len(mobj.groups())
                if mobjlen == 2:
                    # "attr: value"
                    attr, value = mobj.groups()
                    if attr == 'message':
                        bufname = 'desc'
                    else:
                        attr = self.field_trans.get(attr, attr)
                        yield (self.field_trans.get(attr, attr), value)
                else:
                    raise Exception()
        if bufname is not None:
            if len(buf):
                buf.pop()
                len(buf) > 1 and buf.popleft()
                yield (bufname, '\n'.join(buf))
        return

    kwargs = dict(__parselog(r))  # FIXME
    if kwargs:
        if 'tags' not in kwargs:
            kwargs['tags'] = tuple()
        else:
            kwargs['tags'].split(' ')  # TODO
        if 'branchnick' not in kwargs:
            kwargs['branchnick'] = None
        try:
            yield kwargs
            # TODO
            # return self._tuple(**kwargs)
        except:
            log.error(r)
            log.error(kwargs)
            raise
    else:
        log.error("failed to parse: %r" % r)
1,079,904
Deeply updates a dictionary. List values are concatenated. Args: d (dict): First dictionary, which will be updated. u (dict): Second dictionary used to extend the first one. Returns: dict: The merged dictionary.
def deep_update(d, u):
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = deep_update(d.get(k, {}), v)
        elif isinstance(v, list):
            existing_elements = d.get(k, [])
            d[k] = existing_elements + [ele for ele in v
                                        if ele not in existing_elements]
        else:
            d[k] = v
    return d
1,079,932
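A usage sketch showing the two merge behaviours (nested dicts are merged, lists are concatenated without duplicates):

from collections.abc import Mapping

def deep_update(d, u):
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = deep_update(d.get(k, {}), v)
        elif isinstance(v, list):
            existing = d.get(k, [])
            d[k] = existing + [ele for ele in v if ele not in existing]
        else:
            d[k] = v
    return d

base = {'a': {'x': 1}, 'tags': ['red']}
print(deep_update(base, {'a': {'y': 2}, 'tags': ['red', 'blue']}))
# {'a': {'x': 1, 'y': 2}, 'tags': ['red', 'blue']}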
Recursively update values in dst from src. Unlike the builtin dict.update() function, this method will descend into nested dicts, updating all nested values. Arguments: dst (dict): Destination dict. src (dict): Source dict. Returns: dict: dst updated with entries from src.
def update(dst, src):
    for k, v in src.items():
        if isinstance(v, Mapping):
            r = update(dst.get(k, {}), v)
            dst[k] = r
        else:
            dst[k] = src[k]
    return dst
1,080,084
Recursively get values in dict. Unlike the builtin dict.values() function, this method will descend into nested dicts, returning all nested values. Arguments: src (dict): Source dict. Returns: list: List of values.
def dict_values(src):
    for v in src.values():
        if isinstance(v, dict):
            for v in dict_values(v):
                yield v
        else:
            yield v
1,080,085
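A usage sketch of the recursive value generator:

def dict_values(src):
    for v in src.values():
        if isinstance(v, dict):
            for nested in dict_values(v):
                yield nested
        else:
            yield v

print(list(dict_values({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}})))  # [1, 2, 3]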
Create servers and containers as required to meet the configuration specified in _name_. Args: * name: The name of the yaml config file (you can omit the .yml extension for convenience) Example: fab ensemble.up:wordpress
def up(name, debug=False):
    if debug:
        env.ensemble_debug = True

    filenames_to_try = [
        name,
        '%s.yml' % name,
        '%s.yaml' % name,
    ]
    for filename in filenames_to_try:
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                config = yaml.load(f)
            break
    else:
        abort('Ensemble manifest not found: %s' % name)

    uncache()
    try:
        do_up(config)
    except exceptions.ConfigException as e:
        abort('Config error: ' + str(e))
1,080,263
Stop a profiling timer. Arguments: name (str): The name of the timer to stop. If no name is given, stop the global anonymous timer. file: Stream to print the timing message to (default: sys.stderr). Returns: bool: Whether or not profiling is enabled. Raises: KeyError: If the named timer does not exist.
def stop(name, file=sys.stderr):
    if is_enabled():
        elapsed = (time() - __TIMERS[name])
        if elapsed > 60:
            elapsed_str = '{:.1f} m'.format(elapsed / 60)
        elif elapsed > 1:
            elapsed_str = '{:.1f} s'.format(elapsed)
        else:
            elapsed_str = '{:.1f} ms'.format(elapsed * 1000)
        del __TIMERS[name]
        print("[prof]", name, elapsed_str, file=file)

    return is_enabled()
1,080,265
Run the system command with optional options. Args: * command: system command. * cwd: current working directory. * options: options passed directly to :func:`subprocess.Popen`. Returns: Opened process, standard output & error.
def execute(command, cwd=os.path.curdir, **options):
    process = subprocess.Popen(shlex.split(command), cwd=cwd, **options)
    stdout, stderr = process.communicate()
    return process, stdout, stderr
1,080,323
Higher order function creating a compare function for objectives. Args: objective (cipy.algorithms.core.Objective): The objective to create a compare for. Returns: callable: Function accepting two objectives to compare. Examples: >>> a = Minimum(0.1) >>> b = Minimum(0.2) >>> compare = comparator(a) >>> comparison = compare(a, b) # False
def comparator(objective):
    if isinstance(objective, Minimum):
        return lambda l, r: l < r
    else:
        return lambda l, r: l > r
1,080,796
Parse the source into an AST node with PyPosAST. Enhance nodes with positions Arguments: code -- code text Keyword Arguments: filename -- code path mode -- execution mode (exec, eval, single) tree -- current tree, if it was optimized
def parse(code, filename='<unknown>', mode='exec', tree=None):
    visitor = Visitor(code, filename, mode, tree=tree)
    return visitor.tree
1,080,912
Find all nodes of a given type Arguments: code -- code text desired_type -- ast Node or tuple Keyword Arguments: path -- code path mode -- execution mode (exec, eval, single) tree -- current tree, if it was optimized
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None): return _GetVisitor(parse(code, path, mode, tree), desired_type).result
1,080,913
Constructor. Also see Entry.__init__. Args: allow_comments (bool): Whether to allow comments. Default False. directory (str): Optional. If the page should live in a subdirectory instead of at the web root, specify it here instead of making it part of the slug.
def __init__(self, *args, allow_comments=False, directory=None, **kwargs):
    super().__init__(*args, **kwargs)
    self.allow_comments = allow_comments
    self.dir = directory
1,081,055
Constructor. Also see Entry.__init__. Args: pubdate (datetime): When the post was published. excerpt (str): An excerpt of the post body. tags (list): A list of Tag objects associated with the post. allow_comments (bool): Whether to allow comments. Default False.
def __init__(self, *args, pubdate=None, excerpt=None, tags=None,
             allow_comments=True, **kwargs):
    super().__init__(*args, **kwargs)
    self.excerpt = excerpt or _get_excerpt(self.body)
    self.pubdate = pubdate
    self.tags = tags or []
    self.allow_comments = allow_comments
1,081,056
Get corresponding text in the code. Arguments: lines -- code split by linebreak node -- PyPosAST enhanced node Keyword Arguments: lstrip -- During extraction, strip lines with this arg (default="") ljoin -- During extraction, join lines with this arg (default="\n") strip -- After extraction, strip all code with this arg (default="")
def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
    first_line, first_col = node.first_line - 1, node.first_col
    last_line, last_col = node.last_line - 1, node.last_col
    if first_line == last_line:
        return lines[first_line][first_col:last_col].strip(strip)

    result = []
    # Add first line
    result.append(lines[first_line][first_col:].strip(lstrip))
    # Add middle lines
    if first_line + 1 != last_line:
        for line in range(first_line + 1, last_line):
            result.append(lines[line].strip(lstrip))
    # Add last line
    result.append(lines[last_line][:last_col].strip(lstrip))
    return ljoin.join(result).strip(strip)
1,081,125
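A usage sketch with a stand-in node carrying the 1-based position attributes `extract_code` expects from PyPosAST:

def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
    first_line, first_col = node.first_line - 1, node.first_col
    last_line, last_col = node.last_line - 1, node.last_col
    if first_line == last_line:
        return lines[first_line][first_col:last_col].strip(strip)
    result = [lines[first_line][first_col:].strip(lstrip)]
    for line in range(first_line + 1, last_line):
        result.append(lines[line].strip(lstrip))
    result.append(lines[last_line][:last_col].strip(lstrip))
    return ljoin.join(result).strip(strip)

class Node:  # stand-in for a PyPosAST-enhanced node
    first_line, first_col, last_line, last_col = 1, 4, 1, 9

print(extract_code("x = 1 + 2".split("\n"), Node))  # '1 + 2'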
Parse HTML from text into an array filled with tags and text. The source code is a little unintuitive, because it is a state machine parser. For better understanding, look at http://bit.ly/1rXRcJj Example:: >>> dhtmlparser._raw_split('<html><tag params="true"></html>') ['<html>', '<tag params="true">', '</html>'] Args: itxt (str): Input HTML text, which will be parsed. Returns: list: List of strings (the input split into tags and text).
def _raw_split(itxt):
    echr = ""
    buff = ["", "", "", ""]
    content = ""
    array = []
    next_state = 0
    inside_tag = False
    escaped = False

    COMMENT_START = ["-", "!", "<"]
    COMMENT_END = ["-", "-"]

    gc.disable()
    for c in itxt:
        # content
        if next_state == StateEnum.content:
            if c == "<":
                if content:
                    array.append(content)
                content = c
                next_state = StateEnum.tag
                inside_tag = False
            else:
                content += c

        # html tag
        elif next_state == StateEnum.tag:
            if c == ">":
                array.append(content + c)
                content = ""
                next_state = StateEnum.content
            elif c == "'" or c == '"':
                echr = c
                content += c
                next_state = StateEnum.parameter
            elif c == "-" and buff[:3] == COMMENT_START:
                if content[:-3]:
                    array.append(content[:-3])
                content = content[-3:] + c
                next_state = StateEnum.comment
            else:
                if c == "<":  # jump back into tag instead of content
                    array.append(content)
                    inside_tag = True
                    content = ""
                content += c

        # quotes "" / ''
        elif next_state == StateEnum.parameter:
            if c == echr and not escaped:  # end of quotes
                next_state = StateEnum.tag

            # unescaped end of line - this is good for invalid HTML like
            # <a href=something">..., because it allows recovery
            if c == "\n" and not escaped and buff[0] == ">":
                next_state = StateEnum.content
                inside_tag = False

            content += c
            escaped = not escaped if c == "\\" else False

        # html comments
        elif next_state == StateEnum.comment:
            if c == ">" and buff[:2] == COMMENT_END:
                next_state = StateEnum.tag if inside_tag else StateEnum.content
                inside_tag = False
                array.append(content + c)
                content = ""
            else:
                content += c

        # rotate buffer
        buff = _rotate_buff(buff)
        buff[0] = c

    gc.enable()

    if content:
        array.append(content)

    return array
1,081,208
Go through `istack` and search for the endtag. The element at the first index is considered to be the opening tag. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: int: Index of the end tag, or 0 if not found.
def _indexOfEndTag(istack):
    if len(istack) <= 0:
        return 0

    if not istack[0].isOpeningTag():
        return 0

    cnt = 0
    opener = istack[0]
    for index, el in enumerate(istack[1:]):
        if el.isOpeningTag() and \
           el.getTagName().lower() == opener.getTagName().lower():
            cnt += 1
        elif el.isEndTagTo(opener):
            if cnt == 0:
                return index + 1
            cnt -= 1

    return 0
1,081,209
Recursively go through element array and create DOM. Args: istack (list): List of :class:`.HTMLElement` objects. Returns: list: DOM tree as list.
def _parseDOM(istack):
    ostack = []
    end_tag_index = 0

    def neither_nonpair_or_end_or_comment(el):
        return not (el.isNonPairTag() or el.isEndTag() or el.isComment())

    index = 0
    while index < len(istack):
        el = istack[index]

        # check if this is a pair tag
        end_tag_index = _indexOfEndTag(istack[index:])

        if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
            el.isNonPairTag(True)

        if end_tag_index == 0:
            if not el.isEndTag():
                ostack.append(el)
        else:
            el.childs = _parseDOM(istack[index + 1: end_tag_index + index])
            el.endtag = istack[end_tag_index + index]  # reference to endtag
            el.endtag.openertag = el

            ostack.append(el)
            ostack.append(el.endtag)

            index = end_tag_index + index

        index += 1

    return ostack
1,081,210
The standard output from `dhtmlparser` is a single-linked tree. This will make it double-linked. Args: dom (obj): :class:`.HTMLElement` instance. parent (obj, default None): Don't use this; it is used in the recursive call.
def makeDoubleLinked(dom, parent=None):
    dom.parent = parent

    for child in dom.childs:
        child.parent = dom
        makeDoubleLinked(child, dom)
1,081,212
Remove all tags from `dom` and obtain plaintext representation. Args: dom (str, obj, array): str, HTMLElement instance or array of elements. Returns: str: Plain string without tags.
def removeTags(dom):
    # python 2 / 3 shim
    try:
        string_type = basestring
    except NameError:
        string_type = str

    # initialize the stack with a proper value (based on the dom parameter)
    element_stack = None
    if type(dom) in [list, tuple]:
        element_stack = dom
    elif isinstance(dom, HTMLElement):
        element_stack = dom.childs if dom.isTag() else [dom]
    elif isinstance(dom, string_type):
        element_stack = parseString(dom).childs
    else:
        element_stack = dom

    # remove all tags
    output = ""
    while element_stack:
        el = element_stack.pop(0)

        if not (el.isTag() or el.isComment() or not el.getTagName()):
            output += el.__str__()

        if el.childs:
            element_stack = el.childs + element_stack

    return output
1,081,213
Register the extension with the application. Args: app (flask.Flask): The application to register with.
def init_app(self, app):
    app.url_rule_class = partial(NavigationRule, copilot=self)
    app.context_processor(self.inject_context)
1,081,233
Register a navbar entry with the copilot. Args: navbar_kwargs (dict): Arguments passed to the :class:`NavbarEntry` instance.
def register_entry(self, navbar_kwargs):
    # Add a new rule for each level in the path.
    path = navbar_kwargs.pop('path')

    # If a single object is used rather than an iterable (including
    # a single string), wrap it before using.
    if not hasattr(path, '__iter__') or isinstance(path, basestring):
        path = [path]

    entry_group = self.navbar_entries

    # HACK: I'd like to intelligently replace the URL rule in the
    # case where the intended rule is provided, but the function has
    # already created a blank "placeholder" rule for it. There are
    # probably nicer ways to approach this, but it works.
    for name, is_last in iter_islast(path):
        kwargs = deepcopy(navbar_kwargs)
        kwargs['name'] = name

        for existing_entry in entry_group:
            # If there's an existing entry for this "link", use it
            # instead of creating a new one. If this existing entry
            # has no rule and this is the last item in ``path``, the
            # rule was intended to be assigned to this entry, so
            # overwrite the blank rule with the one provided via
            # ``navbar_kwargs``.
            if existing_entry.name == name:
                entry = existing_entry
                if is_last:
                    entry.endpoint = kwargs['endpoint']
                break
        else:
            # If we can't find an existing entry, create one with a
            # blank endpoint. If this rule is not the final one in
            # the list, the endpoint was not intended for this, so
            # don't assign it.
            if not is_last:
                kwargs['endpoint'] = None
            entry = NavbarEntry(**kwargs)
            entry_group.add(entry)

        entry_group = entry.children
1,081,235
Use matplotlib styles. Parameters: *args; context (str or None); style (str or None); palette (str or None); **kwargs, including ``reset`` (bool): start from the ``default`` style. Raises: ValueError.
def use(*args, context=None, style=None, palette=None, **kwargs):
    if kwargs.get('reset', False):
        styles = ['default', ]
    else:
        styles = []
    styles.extend(list(args))
    styles.append(collect(context=context, style=style, palette=palette,
                          **kwargs))
    # apply mpl styles
    return mpl.style.use(styles)
1,081,294
Temporarily use matplotlib styles, as a context manager. Parameters: *args; context (str or None); style (str or None); palette (str or None); **kwargs, including ``reset`` (bool). Raises: ValueError.
def temp(*args, context=None, style=None, palette=None, **kwargs):
    # apply the specified matplotlib styles, and reset first if specified
    styles = list(args)
    styles.append(collect(context=context, style=style, palette=palette,
                          **kwargs))
    return mpl.style.context(styles, after_reset=kwargs.get('reset', False))
1,081,295
Make a HTML link out of an URL. Args: title (str): Text to show for the link. url (str): URL the link will point to. blank (bool): If True, appends target=_blank, noopener and noreferrer to the <a> element. Defaults to False.
def make_link(title, url, blank=False):
    attrs = 'href="%s"' % url
    if blank:
        attrs += ' target="_blank" rel="noopener noreferrer"'
    return '<a %s>%s</a>' % (attrs, title)
1,081,301
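A quick usage check of the link helper:

def make_link(title, url, blank=False):
    attrs = 'href="%s"' % url
    if blank:
        attrs += ' target="_blank" rel="noopener noreferrer"'
    return '<a %s>%s</a>' % (attrs, title)

print(make_link('Example', 'https://example.com', blank=True))
# <a href="https://example.com" target="_blank" rel="noopener noreferrer">Example</a>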
Constructor. Args: root_path (str): Full path to the directory which contains the posts, pages, templates etc. directories. root_url (str): The root URL of your website. site_title (str): The title of your website. site_desc (str): A subtitle or description of your website.
def __init__(self, root_path, root_url, site_title, site_desc=None):
    self.root_path = root_path
    self.root_url = root_url
    self.site_title = site_title
    self.site_desc = site_desc

    self.cm = russell.content.ContentManager(root_url)  # pylint: disable=invalid-name
    self.pages = self.cm.pages
    self.posts = self.cm.posts
    self.tags = self.cm.tags

    self.asset_hash = {}

    self.jinja = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.join(root_path, 'templates')),
    )
    self.jinja.globals.update({
        'a': make_link,
        'asset_hash': self.asset_hash,
        'asset_url': self.get_asset_url,
        'now': datetime.now(),
        'root_url': self.root_url,
        'site_description': self.site_desc,
        'site_title': self.site_title,
        'tags': self.tags,
    })
1,081,302
Get the URL of an asset. If asset hashes are added and one exists for the path, it will be appended as a query string. Args: path (str): Path to the file, relative to your "assets" directory.
def get_asset_url(self, path):
    url = self.root_url + '/assets/' + path
    if path in self.asset_hash:
        url += '?' + self.asset_hash[path]
    return url
1,081,303
Get all the posts added to the blog. Args: num (int): Optional. If provided, only return N posts (sorted by date, most recent first). tag (Tag): Optional. If provided, only return posts that have a specific tag. private (bool): By default (if False), private posts are not included. If set to True, private posts will also be included.
def get_posts(self, num=None, tag=None, private=False):
    posts = self.posts
    if not private:
        posts = [post for post in posts if post.public]
    if tag:
        posts = [post for post in posts if tag in post.tags]
    if num:
        return posts[:num]
    return posts
1,081,308
Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP.
def generate_sitemap(self, path='sitemap.xml', https=False):
    sitemap = russell.sitemap.generate_sitemap(self, https=https)
    self.write_file(path, sitemap)
1,081,317
Write a file of any type to the destination path. Useful for files like robots.txt, manifest.json, and so on. Args: path (str): The name of the file to write to. contents (str or bytes): The contents to write.
def write_file(self, path, contents):
    path = self._get_dist_path(path)
    if not os.path.isdir(os.path.dirname(path)):
        os.makedirs(os.path.dirname(path))
    if isinstance(contents, bytes):
        mode = 'wb+'
    else:
        mode = 'w'
    with open(path, mode) as file:
        file.write(contents)
1,081,318
Return a formatted string about the response status; useful for logging. Args: r (requests.models.Response): the response to describe. detailed (bool): include the JSON body for successful responses.
def request_status(r, detailed=False):
    base_string = "HTTP {r.request.method} {r.request.url}: {r.status_code}"
    # NOTE: the original tested `r.status_code in range(200, 99)`, which is
    # an empty range; the 2xx success codes are what was clearly intended.
    if r.status_code in range(200, 300):
        string = base_string.format(r=r)
        if detailed is True:
            # str.format() cannot call methods, so the original "{r.json()}"
            # placeholder is appended explicitly instead.
            string += " - %s" % r.json()
        else:
            string += " - 👍"
        return string
    else:
        return base_string.format(r=r)
1,081,371
Adding retries to requests.get with exponential backoff. Args: url (str): The URL to fetch. max_backoff (int): The maximum number of seconds to sleep between attempts. verbose (bool): Whether to print exceptions. Returns: Response: For successful requests, requests' response. ``None`` otherwise.
def get(url, max_backoff=32, verbose=False, **kwargs):
    sleep_seconds = 1
    while sleep_seconds <= max_backoff:
        try:
            # you may overwrite `timeout` via `kwargs`
            response = requests.get(url, **{**{'timeout': 30}, **kwargs})
            # for 4xx, return instantly, no hope of success
            if 400 <= response.status_code < 500:
                return None
            # successfully return 2xx and 3xx
            if 200 <= response.status_code < 400:
                return response
            # for 1xx and 5xx, retry
        except RequestException as e:
            if verbose:
                print(str(e))
        time.sleep(sleep_seconds)
        sleep_seconds *= 2
    return None
1,081,544
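A usage sketch; the 503 endpoint is just a convenient way to watch the 1 s, 2 s, 4 s backoff before the function gives up and returns None:

import time
import requests
from requests.exceptions import RequestException

def get(url, max_backoff=32, verbose=False, **kwargs):
    sleep_seconds = 1
    while sleep_seconds <= max_backoff:
        try:
            response = requests.get(url, **{**{'timeout': 30}, **kwargs})
            if 400 <= response.status_code < 500:
                return None          # 4xx: no hope of success
            if 200 <= response.status_code < 400:
                return response      # 2xx/3xx: done
        except RequestException as e:
            if verbose:
                print(str(e))
        time.sleep(sleep_seconds)    # 1xx/5xx or network error: retry
        sleep_seconds *= 2
    return None

print(get('https://httpbin.org/status/503', max_backoff=4, verbose=True))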
Unescape `quote` in string `inp`. Example usage:: >>> unescape('hello \\"') 'hello "' Args: inp (str): String in which `quote` will be unescaped. quote (char, default "): Specify which character will be unescaped. Returns: str: Unescaped string.
def unescape(inp, quote='"'):
    if len(inp) < 2:
        return inp

    output = ""
    unesc = False
    for act in inp:
        if act == quote and unesc:
            output = output[:-1]
        output += act

        if act == "\\":
            unesc = not unesc
        else:
            unesc = False

    return output
1,082,169
Escape `quote` in string `inp`. Example usage:: >>> escape('hello "') 'hello \\"' >>> escape('hello \\"') 'hello \\\\"' Args: inp (str): String in which `quote` will be escaped. quote (char, default "): Specify which character will be escaped. Returns: str: Escaped string.
def escape(inp, quote='"'):
    output = ""
    for c in inp:
        if c == quote:
            output += '\\'
        output += c
    return output
1,082,170
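A round-trip sketch of the two quote helpers:

def escape(inp, quote='"'):
    output = ""
    for c in inp:
        if c == quote:
            output += '\\'
        output += c
    return output

def unescape(inp, quote='"'):
    if len(inp) < 2:
        return inp
    output = ""
    unesc = False
    for act in inp:
        if act == quote and unesc:
            output = output[:-1]
        output += act
        unesc = not unesc if act == "\\" else False
    return output

print(escape('say "hi"'))            # say \"hi\"
print(unescape(escape('say "hi"')))  # say "hi"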
Re-orders a miz file into a folder (flattened) Args: miz_file_path: source miz file target_dir: folder to flatten the content into skip_options_file: do not re-order option file
def reorder(miz_file_path: typing.Union[str, Path],
            target_dir: typing.Union[str, Path],
            skip_options_file: bool,
            ):
    miz_file_path = Path(miz_file_path).absolute()
    if not miz_file_path.exists():
        raise FileNotFoundError(miz_file_path)
    if not miz_file_path.is_file():
        raise ValueError(f'not a file: {miz_file_path}')

    target_dir_path = Path(target_dir).absolute()
    if not target_dir_path.exists():
        target_dir_path.mkdir(parents=True)
    else:
        if not target_dir_path.is_dir():
            raise ValueError(f'not a directory: {target_dir_path}')

    LOGGER.debug('re-ordering miz file: %s', miz_file_path)
    LOGGER.debug('destination folder: %s', target_dir)
    LOGGER.debug('%s option file', "skipping" if skip_options_file else "including")

    if not target_dir_path.exists():
        LOGGER.debug('creating directory %s', target_dir_path)
        target_dir_path.mkdir(exist_ok=True)

    Miz._do_reorder(miz_file_path, skip_options_file, target_dir_path)
1,082,342
Flattens a MIZ file into the temp dir. Args: overwrite: allow overwriting existing files
def unzip(self, overwrite: bool = False):
    if self.zip_content and not overwrite:
        raise FileExistsError(str(self.temp_dir))

    LOGGER.debug('unzipping miz to temp dir')
    try:
        with ZipFile(str(self.miz_path)) as zip_file:
            LOGGER.debug('reading infolist')
            self.zip_content = [f.filename for f in zip_file.infolist()]
            self._extract_files_from_zip(zip_file)
    except BadZipFile:
        raise BadZipFile(str(self.miz_path))
    except:  # noqa: E722
        LOGGER.exception('error while unzipping miz file: %s', self.miz_path)
        raise

    LOGGER.debug('checking miz content')
    # noinspection PyTypeChecker
    for miz_item in ['mission', 'options', 'warehouses',
                     'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource']:
        if not Path(self.temp_dir.joinpath(miz_item)).exists():
            LOGGER.error('missing file in miz: %s', miz_item)
            raise FileNotFoundError(miz_item)

    self._check_extracted_content()
    LOGGER.debug('all files have been found, miz successfully unzipped')
1,082,347
Return a JSON serializable type for ``obj``. Args: obj (:py:class:`object`): the object to be serialized. Raises: :py:class:`AttributeError`: when ``obj`` is not a Python object. Returns: (dict): JSON serializable type for the given object.
def as_object(obj):
    LOGGER.debug('as_object(%s)', obj)
    if isinstance(obj, datetime.date):
        return as_date(obj)
    elif hasattr(obj, '__dict__'):
        # populate dict with visible attributes
        out = {k: obj.__dict__[k] for k in obj.__dict__
               if not k.startswith('_')}
        # populate dict with property names and values
        for k, v in ((p, getattr(obj, p)) for p, _ in inspect.getmembers(
                obj.__class__, lambda x: isinstance(x, property))):
            out[k] = v
        return out
1,082,497
Return the RFC3339 UTC string representation of the given date and time. Args: dat (:py:class:`datetime.date`): the object/type to be serialized. Raises: TypeError: when ``dat`` is not an instance of ``datetime.date``. Returns: (str) JSON serializable type for the given object.
def as_date(dat):
    LOGGER.debug('as_date(%s)', dat)
    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(
        calendar.timegm(dat.timetuple()))
1,082,498
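A quick check of the date serialiser's building blocks (`strict_rfc3339` is the third-party package the function above already uses):

import calendar
import datetime
import strict_rfc3339  # pip install strict-rfc3339

d = datetime.date(2020, 1, 2)
print(strict_rfc3339.timestamp_to_rfc3339_utcoffset(
    calendar.timegm(d.timetuple())))  # e.g. 2020-01-02T00:00:00+00:00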
Create `endtags` for elements which look like openers, but don't have a proper :attr:`HTMLElement.endtag`. Args: childs (list): List of childs (:class:`HTMLElement` obj) - typically from the :attr:`HTMLElement.childs` property. Returns: list: List of closed elements.
def _closeElements(childs, HTMLElement):
    out = []

    # close all unclosed pair tags
    for e in childs:
        if not e.isTag():
            out.append(e)
            continue

        if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \
           and e.endtag is None:
            e.childs = _closeElements(e.childs, HTMLElement)

            out.append(e)
            out.append(HTMLElement("</" + e.getTagName() + ">"))

            # join opener and endtag
            e.endtag = out[-1]
            out[-1].openertag = e
        else:
            out.append(e)

    return out
1,082,910
Read password from an external file and return it as a string. The file should contain just a single line. This prevents hard-coding the password anywhere in this script. IMPORTANT: the password is stored as plain text! Do NOT use this with your personal account! Args: pass_file (str): /path/to/pass_file
def read_passwd_file(pass_file):
    with open(pass_file) as fin:
        passwd = fin.read().strip()
    return passwd
1,083,069
A Puush Account can be instantiated either with API key or e-mail and password. Parameters: * api_key_or_email: API key if it's the only argument, e-mail if password parameter is present. * password (optional): The password for the Puush account if api_key_or_email is an e-mail address.
def __init__(self, api_key_or_email, password=None):
    # E-mail and password authentication
    if password is not None:
        email = api_key_or_email
        self.is_premium, self._api_key, _, _ = auth(email, password)
    # Direct API key authentication
    else:
        api_key = api_key_or_email
        self.is_premium, self._api_key, _, _ = auth(api_key)
1,083,105
Upload a file to the Puush account. Parameters: * f: The file. Either a path to a file or a file-like object.
def upload(self, f):
    if hasattr(f, 'read'):
        needs_closing = False
    else:
        f = open(f, 'rb')
        needs_closing = True

    # The Puush server can't handle non-ASCII filenames.
    # The official Puush desktop app actually substitutes ? for
    # non-ISO-8859-1 characters, which helps some Unicode filenames,
    # but some are still let through and encounter server errors.
    # Try uploading a file named åäö.txt through the desktop app -
    # it won't work. It's better to let this Python API do that,
    # however, with the behavior probably intended in the desktop app.
    filename = os.path.basename(f.name).encode('ascii', 'replace')
    filename = filename.decode('ascii')  # Requests doesn't like bytes

    md5 = md5_file(f)

    data = {
        'z': 'meaningless',
        'c': md5
    }
    files = {'f': (filename, f)}
    res = self._api_request('up', data=data, files=files)[0]

    if res[0] == '-1':
        raise PuushError("File upload failed.")
    elif res[0] == '-3':
        raise PuushError("File upload failed: hash didn't match with "
                         "the file the server received.")

    if needs_closing:
        f.close()

    _, url, id, size = res
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return self._File(id, url, filename, now, 0)
1,083,108
Delete a file. Parameters: * id: The Puush ID of the file to delete.
def delete(self, id):
    res = self._api_request('del', data={'i': id})[0]
    if res[0] == '-1':
        raise PuushError("File deletion failed.")
1,083,109
Get the 100x100 thumbnail of a file. Return the raw PNG data. Parameters: * id: The Puush ID of the file to get the thumbnail of.
def thumbnail(self, id):
    res = self._raw_api_request('thumb', data={'i': id})
    if not res:
        raise PuushError("Getting thumbnail failed.")
    return res
1,083,110