def to_gzipped_file(data, out=None):
    """
    Pack `data` to GZIP and write it to `out`. If `out` is not defined,
    :mod:`StringIO` is used.

    Args:
        data (obj): Any packable data (str / unicode / whatever).
        out (file, default None): Optional opened file handler.

    Returns:
        obj: File handler with packed data, seeked to the beginning.
    """
    if not out:
        out = StringIO.StringIO()

    with gzip.GzipFile(fileobj=out, mode="w") as f:
        f.write(data)

    out.seek(0)
    return out

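# Usage sketch (not from the original module); assumes the same gzip and
# StringIO imports the function itself relies on:
packed = to_gzipped_file("hello world")          # ready-to-read StringIO
restored = gzip.GzipFile(fileobj=packed).read()  # -> "hello world"
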
def in_template_path(fn):
    """
    Return `fn` in template context; in other words, prepend the template
    path to `fn`, so you don't need to write the absolute path of `fn` in
    the template directory manually.

    Args:
        fn (str): Name of the file in the template dir.

    Returns:
        str: Absolute path to the file.
    """
    return os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        "../templates",
        fn,
    )

def _download_items(db, last_id):
    """
    Download items from Aleph and store them in `db`. Start from `last_id`
    if specified.

    Args:
        db (obj): Dictionary-like object used as DB.
        last_id (int): Start from this id.
    """
    MAX_RETRY = 20         # how many times to try before deciding this is the end
    MAX_DOC_ID = 10000000  # upper bound for the download iterator
    not_found_cnt = 0      # circuit breaker

    for doc_id in xrange(last_id, MAX_DOC_ID):
        doc_id += 1

        print "Downloading %d.." % (doc_id)

        if not_found_cnt >= MAX_RETRY:
            print "It looks like this is an end:", doc_id - MAX_RETRY
            break

        try:
            record = _download(doc_id)
        except (DocumentNotFoundException, InvalidAlephBaseException):
            print "\tnot found, skipping"
            not_found_cnt += 1
            continue

        not_found_cnt = 0
        db["item_%d" % doc_id] = record
        db["last_id"] = doc_id - MAX_RETRY if doc_id > MAX_RETRY else 1

        if doc_id % 100 == 0:
            db.commit()

def download_items(cache_fn, start=None):
    """
    Open `cache_fn` as a database and download all not-yet-downloaded items.

    Args:
        cache_fn (str): Path to the sqlite database. If it doesn't exist,
            it will be created.
        start (int, default None): If set, start from this sysno.
    """
    with SqliteDict(cache_fn) as db:
        last_id = db.get("last_id", 0) if not start else start
        _download_items(db, last_id)
        db.commit()

def _pick_keywords(db):
    """
    Go through the downloaded data stored in `db` and filter keywords,
    which are parsed and then yielded. Shows a nice progress bar.

    Args:
        db (obj): Opened database connection.

    Yields:
        obj: :class:`KeywordInfo` instance for each keyword.
    """
    for key, val in tqdm(db.iteritems(), total=len(db)):
        # skip counter of the last downloaded document
        if key == "last_id":
            continue

        # this is an optimization to speed up skipping of the unwanted
        # elements by a factor of ~20
        piece = val[:500] if len(val) > 500 else val
        if '<fixfield id="001">ph' not in piece.lower():
            continue

        parsed = MARCXMLRecord(val)
        code = parsed.get("001")
        if not code:
            continue

        # record was disabled
        if parsed["682i"]:
            continue

        if code.lower().startswith("ph"):
            yield KeywordInfo.from_marc(
                sysno=int(key.split("_")[-1]),  # item_xxx -> int(xxx)
                marc=parsed,
            )

def generate(cache_fn):
    """
    Go through `cache_fn` and filter keywords.

    Args:
        cache_fn (str): Path to the file with the cache.

    Yields:
        obj: :class:`KeywordInfo` instances.
    """
    if not os.path.exists(cache_fn):
        print >> sys.stderr, "Can't access `%s`!" % cache_fn
        sys.exit(1)

    with SqliteDict(cache_fn) as db:
        for item in _pick_keywords(db):
            yield item

def compile_keywords(keywords):
    """
    Translate `keywords` to the full keyword records used in Aleph.

    Returns a tuple with three lists, each of which is later used in a
    different part of the MRC/MARC record.

    Args:
        keywords (list): List of keyword strings.

    Returns:
        tuple: (mdt_list, cz_keyword_list, en_keyword_list)
    """
    mdt = []
    cz_keywords = []
    en_keywords = []

    for keyword in keywords:
        keyword = keyword_to_info(keyword.encode("utf-8"))
        if not keyword:
            continue

        cz_keywords.append({
            "uid": keyword["uid"],
            "zahlavi": keyword["zahlavi"],
            "zdroj": "czenas",
        })

        if keyword.get("mdt"):
            mdt.append({
                "mdt": keyword["mdt"],
                "mrf": keyword["mrf"],
            })

        angl_ekvivalent = keyword.get("angl_ekvivalent")
        if angl_ekvivalent:
            en_keywords.append({
                "zahlavi": angl_ekvivalent,
                "zdroj": keyword.get("zdroj_angl_ekvivalentu") or "eczenas",
            })

    return mdt, cz_keywords, en_keywords

def url_to_fn(url):
    """
    Convert `url` to the filename used to download the datasets.
    ``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.

    Args:
        url (str): URL of the resource.

    Returns:
        str: Filename created from the URL.
    """
    url = url.replace("http://", "").replace("https://", "")
    url = url.split("?")[0]

    return url.replace("%", "_").replace("/", "_")

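# The transformation, traced through the three steps above:
url_to_fn("http://kitakitsune.org/xe")    # -> "kitakitsune.org_xe"
url_to_fn("https://example.com/a/b?q=1")  # -> "example.com_a_b"
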
def parse_date_range(date, alt_end_date=None):
    """
    Parse the input `date` string in free-text format, looking for
    four-digit groups.

    Args:
        date (str): Input containing years.
        alt_end_date (str, default None): If set, used instead of "9999"
            as the open-ended marker.

    Returns:
        tuple: ``(from, to)`` as four-digit strings.
    """
    NOT_ENDED = "9999"

    all_years = re.findall(r"\d{4}", date)

    if alt_end_date:
        NOT_ENDED = alt_end_date

    if not all_years:
        return "****", NOT_ENDED
    elif len(all_years) == 1:
        return all_years[0], NOT_ENDED

    return all_years[0], all_years[1]

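# Behavior implied by the regex and branches above:
parse_date_range("1998-2005")          # -> ("1998", "2005")
parse_date_range("published in 1998")  # -> ("1998", "9999")
parse_date_range("no year at all")     # -> ("****", "9999")
parse_date_range("since 1998", alt_end_date="2016")  # -> ("1998", "2016")
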
def __call__(self, old, new):
    """
    Validate the `new` translation against the `old` one. No checks are
    needed for deleted translations.

    Args:
        old: The old translation.
        new: The new translation.

    Raises:
        A ValidationError with an appropriate message.
    """
    if not new or not self.precondition():
        return

    self.validate(old, new)

def parse_radl(data):
    """
    Parse a RADL document.

    Args:
        data (str): Filepath to RADL content, or a string with the content
            itself.

    Returns:
        RADL object.
    """
    if data is None:
        return None
    elif os.path.isfile(data):
        f = open(data)
        data = "".join(f.readlines())
        f.close()
    elif data.strip() == "":
        return RADL()

    data = data + "\n"
    parser = RADLParser(lextab='radl')
    return parser.parse(data)

def _send_request(url_id, data=None, json=None, req_type=None):
    """
    Send a request to Seeder's API.

    Args:
        url_id (str): ID used as identification in Seeder.
        data (obj, default None): Optional parameter for data.
        json (obj, default None): Optional parameter for the JSON body.
        req_type (fn, default None): Request method used to send/download
            the data. If None, `requests.get` is used.

    Returns:
        dict: Data from Seeder.
    """
    url = settings.SEEDER_INFO_URL % url_id

    if not req_type:
        req_type = requests.get

    resp = req_type(
        url,
        data=data,
        json=json,
        timeout=settings.SEEDER_TIMEOUT,
        headers={
            "User-Agent": settings.USER_AGENT,
            "Authorization": settings.SEEDER_TOKEN,
        }
    )
    resp.raise_for_status()

    data = resp.json()
    return data

def get_remote_info(url_id):
    """
    Download data and convert it to the dict used in the frontend.

    Args:
        url_id (str): ID used as identification in Seeder.

    Returns:
        dict: Dict with data for the frontend, or None in case of error.
    """
    try:
        data = _send_request(url_id)
    except Exception as e:
        sys.stderr.write("Seeder GET error: ")  # TODO: better!
        sys.stderr.write(str(e.message))
        return None

    return _convert_to_wakat_format(data)

def _convert_to_seeder_format(dataset):
    """
    The WA-KAT dataset has a different structure from Seeder's. This
    convertor translates the WA-KAT format to the Seeder data format.

    Args:
        dataset (dict): WA-KAT dataset sent from the frontend.

    Returns:
        dict: Dict with converted data.
    """
    data = {}
    seed = {}

    _add_if_set(data, "name", dataset.get("title"))
    _add_if_set(data, "issn", dataset.get("issn"))
    _add_if_set(data, "annotation", dataset.get("annotation"))

    rules = dataset.get("rules", {})
    if rules:
        _add_if_set(data, "frequency", rules.get("frequency"))

        # set seed info
        _add_if_set(seed, "budget", rules.get("budget"))
        _add_if_set(seed, "calendars", rules.get("calendars"))
        _add_if_set(seed, "global_reject", rules.get("global_reject"))
        _add_if_set(seed, "gentle_fetch", rules.get("gentle_fetch"))
        _add_if_set(seed, "javascript", rules.get("javascript"))
        _add_if_set(seed, "local_traps", rules.get("local_traps"))
        _add_if_set(seed, "youtube", rules.get("youtube"))

    _add_if_set(seed, "url", dataset.get("url"))

    if seed:
        data["seed"] = seed

    return data

def send_update(url_id, dataset):
    """
    Send a request to Seeder's API with data changed by the user.

    Args:
        url_id (str): ID used as identification in Seeder.
        dataset (dict): WA-KAT dataset sent from the frontend.
    """
    data = _convert_to_seeder_format(dataset)

    if not data:
        return

    try:
        _send_request(url_id, json=data, req_type=requests.patch)
    except Exception as e:
        sys.stderr.write("Seeder PATCH error: ")  # TODO: better!
        sys.stderr.write(str(e.message))
        return None

def interpret_obj(
    self,
    obj,
    v_level_indexes,
    h_level_indexes,
    v_level_visibility,
    h_level_visibility,
    v_level_sort_keys,
    h_level_sort_keys,
    v_level_titles,
    h_level_titles,
):
    """
    Interpret the given Python object as a table.

    Args:
        obj: A sequence (later a mapping, too).

    Returns:
        tuple: A rectangular list of lists representing rows of cells, and
        the number of columns.

    Raises:
        TypeError: If the type couldn't be interpreted as a table.
    """
    if not isinstance(obj, NonStringIterable):
        raise self.error("Cannot make a table from object {!r}".format(obj))

    rectangular_rows = tabulate(
        obj,
        v_level_indexes=v_level_indexes,
        h_level_indexes=h_level_indexes,
        v_level_visibility=v_level_visibility,
        h_level_visibility=h_level_visibility,
        v_level_sort_keys=v_level_sort_keys,
        h_level_sort_keys=h_level_sort_keys,
        v_level_titles=v_level_titles,
        h_level_titles=h_level_titles,
    )
    assert is_rectangular(rectangular_rows)
    num_rows, num_cols = size(rectangular_rows)
    return rectangular_rows, num_cols

def _save_to_database(url, property_name, data):
    """
    Store `data` under `property_name` in the `url` key in the REST API DB.

    Args:
        url (obj): URL of the resource to which `property_name` will be
            stored.
        property_name (str): Name of the property under which the `data`
            will be stored.
        data (obj): Any object.
    """
    data = json.dumps([
        d.to_dict() if hasattr(d, "to_dict") else d
        for d in data
    ])

    logger.debug("_save_to_database() data: %s" % repr(data))

    requests.post(
        _WEB_URL + _REQUEST_DB_SAVE,
        timeout=REQUEST_TIMEOUT,
        allow_redirects=True,
        verify=False,
        data={
            "url": url,
            "value": data,
            "property_name": property_name,
        }
    )

    logger.info(
        "`%s` for `%s` sent to REST DB." % (
            property_name,
            url,
        )
    )

def get_ip_address(domain):
    """
    Get the IP address for the given `domain`. Tries to do smart parsing.

    Args:
        domain (str): Domain or URL.

    Returns:
        str: IP address.

    Raises:
        ValueError: If the domain can't be parsed.
    """
    if "://" not in domain:
        domain = "http://" + domain

    hostname = urlparse(domain).netloc

    if not hostname:
        raise ValueError("Can't parse hostname!")

    return socket.gethostbyname(hostname)

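# Both forms resolve, because a missing scheme is prepended first:
get_ip_address("kitakitsune.org")          # bare domain
get_ip_address("http://example.com/page")  # full URL; netloc is extracted
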
def get_whois_tags(ip_address):
    """
    Get a list of tags with `address` for the given `ip_address`.

    Args:
        ip_address (str): IP address of the server.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    whois = IPWhois(ip_address).lookup_whois()
    nets = whois.get("nets", None)

    if not nets:
        return []

    # parse cities
    cities = [
        net["city"]
        for net in nets
        if net.get("city", None)
    ]

    # parse address tags
    address_list = []
    for net in nets:
        address = net.get("address", None)
        if not address:
            continue

        # filter company name
        if "description" in net and net["description"]:
            address = address.replace(net["description"], "").strip()

        if "\n" in address:
            address = ", ".join(address.splitlines())

        address_list.append(address)

    return [
        SourceString(val, source="Whois")
        for val in set(cities + address_list)
    ]

def get_place_tags(index_page, domain):  #: TODO: geoip to docstring
    """
    Return a list of `place` tags parsed from `meta` and `whois`.

    Args:
        index_page (str): HTML content of the page you wish to analyze.
        domain (str): Domain of the web, without ``http://`` or other parts.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    ip_address = get_ip_address(domain)
    dom = dhtmlparser.parseString(index_page)

    place_tags = [
        get_html_geo_place_tags(dom),
        get_whois_tags(ip_address),
        # [_get_geo_place_tag(ip_address)],  # TODO: implement geoip
    ]

    return sum(place_tags, [])

def multisorted(items, *keys):
    """
    Sort by multiple attributes.

    Args:
        items: An iterable series to be sorted.
        *keys: Key objects which extract key values from the items. The
            first key will be the most significant, and the last key the
            least significant. If no key functions are provided, the items
            will be sorted in ascending natural order.

    Returns:
        A list of items sorted according to keys.
    """
    if len(keys) == 0:
        keys = [asc()]

    for key in reversed(keys):
        items = sorted(items, key=key.func, reverse=key.reverse)

    return items

def tuplesorted(items, *keys):
    """
    Sort by tuples with a different key for each item.

    Args:
        items: An iterable series of sequences (typically tuples).
        *keys: Key objects which transform individual elements of each
            tuple into sort keys. The zeroth object transforms the zeroth
            element of each tuple, the first key object transforms the
            first element of each tuple, and so on.

    Returns:
        A list of items sorted according to keys.
    """
    # Transform the keys so each works on one item of the tuple
    tuple_keys = [
        Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)
        for index, key in enumerate(keys)
    ]
    return multisorted(items, *tuple_keys)

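# Minimal sketch of the Key shape assumed above (.func / .reverse); the
# real asc()/desc() helpers live elsewhere in this module:
from collections import namedtuple

Key = namedtuple("Key", ["func", "reverse"])

def asc(func=lambda x: x):
    return Key(func=func, reverse=False)

def desc(func=lambda x: x):
    return Key(func=func, reverse=True)

# Sort by name ascending, then age descending; the stability of sorted()
# is what makes the reversed-keys loop in multisorted() correct:
people = [("bob", 30), ("alice", 25), ("bob", 40)]
tuplesorted(people, asc(), desc())
# -> [("alice", 25), ("bob", 40), ("bob", 30)]
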
def set_select(cls, authors):
    """
    Put data into the ``<select>`` element.

    Args:
        authors (dict): Dictionary with author information returned from
            the Aleph REST API. Format:
            ``{"name": .., "code": .., "linked_forms": ["..",]}``.
    """
    cls.select_el.html = ""

    if not authors:
        cls.select_el.disabled = True
        cls.select_el <= html.OPTION("Nic nenalezeno!")  # "Nothing found!"
        return

    cls.select_el.disabled = False

    for author_dict in authors:
        name = author_dict.get("name")
        code = author_dict.get("code")
        alt_name = author_dict.get("alt_name", name)

        if not (name and code):
            continue

        cls.code_to_data[code] = author_dict
        cls.select_el <= html.OPTION(alt_name, value=code)

def _set_input(el, value):
    """
    Set the content of the given `el` to `value`.

    Args:
        el (obj): Reference to the input you wish to set.
        value (obj/list): Value to which the `el` will be set.
    """
    if isinstance(value, dict):
        el.value = value["val"]
    elif type(value) in [list, tuple]:
        el.value = ", ".join(item["val"] for item in value)
    else:
        el.value = value

def _set_textarea(el, value):
    """
    Set the content of the given textarea element `el` to `value`.

    Args:
        el (obj): Reference to the textarea element you wish to set.
        value (obj/list): Value to which the `el` will be set.
    """
    if isinstance(value, dict):
        el.text = value["val"]
    elif type(value) in [list, tuple]:
        el.text = "\n\n".join(
            "-- %s --\n%s" % (item["source"], item["val"])
            for item in value
        )
    else:
        el.text = value

def _set_typeahead(cls, el, value):
    """
    Convert the given `el` to a typeahead input and set it to `value`.

    This method also sets the dropdown icons and descriptors.

    Args:
        el (obj): Element reference to the input you want to convert to
            typeahead.
        value (list): List of dicts with two keys: ``source`` and ``val``.
    """
    PlaceholderHandler.reset_placeholder_dropdown(el)

    # if there are no elements, show alert icon in glyph
    if not value and not el.value:
        DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-alert")
        return

    # if there is only one element, don't use typeahead, just put the
    # information to the input, set a different dropdown glyph and put
    # the source to the dropdown
    if len(value) == 1:
        source = value[0]["source"].strip()
        dropdown_el = DropdownHandler.set_dropdown_glyph(
            el.id,
            "glyphicon-eye-open"
        )
        dropdown_content = "<span class='gray_text'>&nbsp;(%s)</span>"

        # save the source to the dropdown menu
        if source:
            dropdown_el.html = dropdown_content % source[::-1]

        el.value = value[0]["val"]
        return

    # get reference to parent element
    parent_id = el.parent.id
    if "typeahead" not in parent_id.lower():
        parent_id = el.parent.parent.id

    if parent_id in cls._set_by_typeahead:
        window.destroy_typeahead_tag("#" + parent_id)

    # if there are multiple elements, put them to the typeahead and show
    # dropdown glyph
    window.make_typeahead_tag("#" + parent_id, value)
    DropdownHandler.set_dropdown_glyph(el.id, "glyphicon-menu-down")
    PlaceholderHandler.set_placeholder_dropdown(el)
    cls._set_by_typeahead.add(parent_id)

def set_el(cls, el, value):
    """
    Set the given `el` tag element to `value`.

    Automatically chooses the proper setter based on the type of `el`.

    Args:
        el (obj): Element reference to the tag you want to set.
        value (list): List of dicts with two keys: ``source`` and ``val``.
    """
    if not el:
        return

    tag_name = el.elt.tagName.lower()

    if tag_name == "textarea":
        cls._set_textarea(el, value)
    elif tag_name == "input":
        if "typeahead" in el.class_name.lower():
            cls._set_typeahead(el, value)
        else:
            cls._set_input(el, value)
    elif tag_name == "select":
        el.value = value
    else:
        raise ValueError(
            "Setter for %s (%s) not implemented!" % (tag_name, el.id)
        )

def get_el(el):
    """
    Get the value of the given `el` tag element.

    Automatically chooses the proper getter based on the type of `el`.

    Args:
        el (obj): Element reference to the input you want to read.

    Returns:
        str: Value of the object.
    """
    tag_name = el.elt.tagName.lower()

    if tag_name in {"input", "textarea", "select"}:
        return el.value
    else:
        raise ValueError(
            "Getter for %s (%s) not implemented!" % (tag_name, el.id)
        )

def draw_circle(self, x, y, r, color):
    """
    Draw a circle.

    Args:
        x (int): The x coordinate of the center of the circle.
        y (int): The y coordinate of the center of the circle.
        r (int): The radius of the circle.
        color (Tuple[int, int, int, int]): The color of the circle.

    Raises:
        SDLError: If an error is encountered.
    """
    check_int_err(lib.circleRGBA(self._ptr, x, y, r,
                                 color[0], color[1], color[2], color[3]))

def draw_arc(self, x, y, r, start, end, color):
    """
    Draw an arc.

    Args:
        x (int): The x coordinate of the center of the arc.
        y (int): The y coordinate of the center of the arc.
        r (int): The radius of the arc.
        start (int): The start of the arc.
        end (int): The end of the arc.
        color (Tuple[int, int, int, int]): The color of the arc.

    Raises:
        SDLError: If an error is encountered.
    """
    check_int_err(lib.arcRGBA(self._ptr, x, y, r, start, end,
                              color[0], color[1], color[2], color[3]))

def draw_line(self, x1, y1, x2, y2, color):
    """
    Draw a line.

    Args:
        x1 (int): The x coordinate of the start of the line.
        y1 (int): The y coordinate of the start of the line.
        x2 (int): The x coordinate of the end of the line.
        y2 (int): The y coordinate of the end of the line.
        color (Tuple[int, int, int, int]): The color of the line.

    Raises:
        SDLError: If an error is encountered.
    """
    check_int_err(lib.lineRGBA(self._ptr, x1, y1, x2, y2,
                               color[0], color[1], color[2], color[3]))

def __init__(self, x=0, y=0):
    """
    Construct a new point.

    Args:
        x (int): The x position of the point.
        y (int): The y position of the point.
    """
    self._ptr = ffi.new('SDL_Point *', [x, y])

def enclose_points(points, clip_rect):
    """
    Return the minimal rectangle enclosing the given set of points.

    Args:
        points (List[Point]): The set of points that the new Rect must
            enclose.
        clip_rect (Rect): A clipping Rect.

    Returns:
        Rect: A new Rect enclosing the given points, or None if no points
        were enclosed.
    """
    point_array = ffi.new('SDL_Point[]', len(points))
    for i, p in enumerate(points):
        point_array[i] = p._ptr

    enclosing_rect = Rect()
    if lib.SDL_EnclosePoints(point_array, len(points), clip_rect._ptr,
                             enclosing_rect._ptr):
        return enclosing_rect
    else:
        return None

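# Usage sketch with the Point and Rect wrappers defined nearby:
points = [Point(1, 2), Point(5, 9), Point(3, 4)]
bounds = enclose_points(points, Rect(0, 0, 10, 10))
# bounds covers all three points; None would mean every point fell
# outside the clipping rectangle
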
def __init__(self, x=0, y=0, w=0, h=0):
    """
    Construct a new Rect with the given position and size.

    Args:
        x (int): The x position of the upper left corner of the rectangle.
        y (int): The y position of the upper left corner of the rectangle.
        w (int): The width of the rectangle.
        h (int): The height of the rectangle.
    """
    self._ptr = ffi.new('SDL_Rect *', [x, y, w, h])

def has_intersection(self, other):
    """
    Return whether this rectangle intersects with another rectangle.

    Args:
        other (Rect): The rectangle to test intersection with.

    Returns:
        bool: True if there is an intersection, False otherwise.
    """
    return bool(lib.SDL_HasIntersection(self._ptr, other._ptr))

def intersect(self, other):
    """
    Calculate the intersection of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The intersection of this rectangle and the given other
        rectangle, or None if there is no such intersection.
    """
    intersection = Rect()
    if lib.SDL_IntersectRect(self._ptr, other._ptr, intersection._ptr):
        return intersection
    else:
        return None

def union(self, other):
    """
    Calculate the union of this rectangle and another rectangle.

    Args:
        other (Rect): The other rectangle.

    Returns:
        Rect: The union of this rectangle and the given other rectangle.
    """
    union = Rect()
    lib.SDL_UnionRect(self._ptr, other._ptr, union._ptr)
    return union

def _process_exception(e, body, tb):
    """
    Process information about the exception and send it over AMQP.

    Args:
        e (obj): Exception instance.
        body (str): Text which will be sent over AMQP.
        tb (obj): Traceback object with information which will be put into
            the headers.
    """
    # get information about the exception
    msg = e.message if hasattr(e, "message") else str(e)
    exception_type = str(e.__class__)
    exception_name = str(e.__class__.__name__)

    properties = pika.BasicProperties(
        content_type="application/text",
        delivery_mode=2,
        headers={
            "exception": msg,
            "exception_type": exception_type,
            "exception_name": exception_name,
            "traceback": tb,
            "UUID": str(uuid.uuid4())
        }
    )

    send_message("harvester", body, properties=properties)

def postprocess_periodical(marc_xml, mods, uuid, counter, url):
    """
    Some basic postprocessing of the periodical publications.

    Args:
        marc_xml (str): Original Aleph record.
        mods (str): XML string generated by the XSLT template.
        uuid (str): UUID of the package.
        counter (int): Number of the record; added to the XML headers.
        url (str): URL of the publication (public or not).

    Returns:
        str: Updated XML.
    """
    dom = double_linked_dom(mods)

    # TODO: Add more postprocessing

    add_missing_xml_attributes(dom, counter)

    if uuid:
        add_uuid(dom, uuid)

    return dom.prettify()

def echo(*args, **kwargs):
    """
    Write a message to a file.

    Arguments:
        args: A list of arguments which make up the message. The last
            argument is the path to the file to write to. If the keyword
            argument `append` is True, append to the file instead of
            overwriting it.
    """
    msg = args[:-1]
    path = fs.path(args[-1])
    append = kwargs.pop("append", False)

    if append:
        with open(path, "a") as file:
            print(*msg, file=file, **kwargs)
    else:
        with open(fs.path(path), "w") as file:
            print(*msg, file=file, **kwargs)

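# Shell-style usage: everything up to the last argument is the message.
echo("Hello", "world", "/tmp/greeting.txt")          # writes "Hello world\n"
echo("next line", "/tmp/greeting.txt", append=True)  # appends instead
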
def isprocess(pid, error=False):
    """
    Check that a process is running.

    Arguments:
        pid (int): Process ID to check.

    Returns:
        True if the process is running, else False.
    """
    try:
        # Don't worry folks, no processes are harmed in the making of
        # this system call:
        os.kill(pid, 0)
        return True
    except OSError:
        return False

def __init__(self, stdout, stderr):
    """
    Construct an ScpError.

    Arguments:
        stdout (str): Captured stdout of scp subprocess.
        stderr (str): Captured stderr of scp subprocess.
    """
    self.out = stdout
    self.err = stderr

def run(self, timeout=-1):
    """
    Run the subprocess.

    Arguments:
        timeout (optional): If a positive real value, time out after the
            given number of seconds.

    Raises:
        SubprocessError: If the subprocess has not completed after
            `timeout` seconds.
    """
    def target():
        self.process = subprocess.Popen(self.cmd,
                                        stdout=self.stdout_dest,
                                        stderr=self.stderr_dest,
                                        shell=self.shell)
        stdout, stderr = self.process.communicate()

        # Decode output if the user wants, and if there is any.
        if self.decode_out:
            if stdout:
                self.stdout = stdout.decode("utf-8")
            if stderr:
                self.stderr = stderr.decode("utf-8")

    thread = threading.Thread(target=target)
    thread.start()

    if timeout > 0:
        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
            raise SubprocessError(("Reached timeout after {t} seconds"
                                   .format(t=timeout)))
    else:
        thread.join()

    return self.process.returncode, self.stdout, self.stderr

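# Hypothetical usage; the constructor is not shown here, so the class
# name and its arguments are assumptions:
proc = Subprocess(["sleep", "10"])
try:
    returncode, out, err = proc.run(timeout=2)
except SubprocessError:
    pass  # the child was terminated after 2 seconds
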
def must_exist(*components):
    """
    Ensure path exists.

    Arguments:
        *components (str[]): Path components.

    Returns:
        str: File path.

    Raises:
        File404: If path does not exist.
    """
    _path = path(*components)
    if not exists(_path):
        raise File404(_path)
    return _path

def isexe(*components):
    """
    Return whether a path is an executable file.

    Arguments:
        path (str): Path of the file to check.

    Examples:
        >>> fs.isexe("/bin/ls")
        True

        >>> fs.isexe("/home")
        False

        >>> fs.isexe("/not/a/real/path")
        False

    Returns:
        bool: True if file is executable, else False.
    """
    _path = path(*components)
    return isfile(_path) and os.access(_path, os.X_OK)

def lsdirs(root=".", **kwargs):
    """
    Return only subdirectories from a directory listing.

    Arguments:
        root (str): Path to directory. Can be relative or absolute.
        **kwargs: Any additional arguments to be passed to ls().

    Returns:
        list of str: A list of directory paths.

    Raises:
        OSError: If root directory does not exist.
    """
    paths = ls(root=root, **kwargs)
    if isfile(root):
        return []
    return [_path for _path in paths if isdir(path(root, _path))]

def lsfiles(root=".", **kwargs):
    """
    Return only files from a directory listing.

    Arguments:
        root (str): Path to directory. Can be relative or absolute.
        **kwargs: Any additional arguments to be passed to ls().

    Returns:
        list of str: A list of file paths.

    Raises:
        OSError: If root directory does not exist.
    """
    paths = ls(root=root, **kwargs)
    if isfile(root):
        return paths
    return [_path for _path in paths if isfile(path(root, _path))]

def mv(src, dst):
    """
    Move a file or directory.

    If the destination already exists, this will attempt to overwrite it.

    Arguments:
        src (str): Path to the source file or directory.
        dst (str): Path to the destination file or directory.

    Raises:
        File404: If the source does not exist.
        IOError: In case of error.
    """
    if not exists(src):
        raise File404(src)

    try:
        shutil.move(src, dst)
    except Exception as e:
        raise IOError(str(e))

def du(*components, **kwargs):
    """
    Get the size of a file in bytes or as a human-readable string.

    Arguments:
        *components (str[]): Path to file.
        **kwargs: If "human_readable" is True, return a formatted string,
            e.g. "976.6 KiB" (default: True).

    Returns:
        int or str: If the "human_readable" kwarg is True, return str,
        else int.

    Raises:
        Error: If the file does not exist.
    """
    human_readable = kwargs.get("human_readable", True)

    _path = path(*components)
    if not exists(_path):
        raise Error("file '{}' not found".format(_path))

    size = os.stat(_path).st_size

    if human_readable:
        return naturalsize(size)
    else:
        return size

def read_file(path):
    """
    Read file to string.

    Arguments:
        path (str): Source.
    """
    with open(must_exist(path)) as infile:
        r = infile.read()
    return r

def files_from_list(*paths):
    """
    Return a list of all file paths from a list of files or directories.

    For each path in the input: if it is a file, return it; if it is a
    directory, return a list of files in the directory.

    Arguments:
        paths (list of str): List of file and directory paths.

    Returns:
        list of str: Absolute file paths.

    Raises:
        File404: If any of the paths do not exist.
    """
    ret = []
    for path in paths:
        if isfile(path):
            ret.append(abspath(path))
        elif isdir(path):
            ret += [f for f in ls(path, abspaths=True, recursive=True)
                    if isfile(f)]
        else:
            raise File404(path)
    return ret

def _absolute_template_path(fn):
    """
    Return the absolute path for a filename from the local ``xslt/``
    directory.

    Args:
        fn (str): Filename; ``MARC21slim2MODS3-4-NDK.xsl`` for example.

    Returns:
        str: Absolute path to `fn` in the ``xslt`` directory.
    """
    return os.path.join(os.path.dirname(__file__), "xslt", fn)

def _apply_postprocessing(marc_xml, xml, func, uuid, url):
    """
    Apply `func` to all ``<mods:mods>`` tags from `xml`. Insert UUID.

    Args:
        marc_xml (str): Original Aleph record.
        xml (str): XML which will be postprocessed.
        func (fn): Function which will be used for postprocessing.
        uuid (str): UUID which will be inserted into `xml`.
        url (str): URL of the publication (public or not).

    Returns:
        list: List of strings with postprocessed XML.
    """
    dom = dhtmlparser.parseString(xml)

    return [
        func(marc_xml, mods_tag, uuid, cnt, url)
        for cnt, mods_tag in enumerate(dom.find("mods:mods"))
    ]

def transform_to_mods_mono(marc_xml, uuid, url):
    """
    Convert `marc_xml` to MODS data format.

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\n`` in case
            of filename.
        uuid (str): UUID string giving the package ID.
        url (str): URL of the publication (public or not).

    Returns:
        list: Collection of transformed XML strings.
    """
    marc_xml = _read_content_or_path(marc_xml)

    transformed = xslt_transformation(
        marc_xml,
        _absolute_template_path("MARC21slim2MODS3-4-NDK.xsl")
    )

    return _apply_postprocessing(
        marc_xml=marc_xml,
        xml=transformed,
        func=mods_postprocessor.postprocess_monograph,
        uuid=uuid,
        url=url,
    )

def transform_to_mods_multimono(marc_xml, uuid, url):
    """
    Convert `marc_xml` to multimonograph MODS data format.

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\n`` in case
            of filename.
        uuid (str): UUID string giving the package ID.
        url (str): URL of the publication (public or not).

    Returns:
        list: Collection of transformed XML strings.
    """
    marc_xml = _read_content_or_path(marc_xml)

    transformed = xslt_transformation(
        marc_xml,
        _absolute_template_path("MARC21toMultiMonographTitle.xsl")
    )

    return _apply_postprocessing(
        marc_xml=marc_xml,
        xml=transformed,
        func=mods_postprocessor.postprocess_multi_mono,
        uuid=uuid,
        url=url,
    )

def transform_to_mods_periodical(marc_xml, uuid, url):
    """
    Convert `marc_xml` to periodical MODS data format.

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\n`` in case
            of filename.
        uuid (str): UUID string giving the package ID.
        url (str): URL of the publication (public or not).

    Returns:
        list: Collection of transformed XML strings.
    """
    marc_xml = _read_content_or_path(marc_xml)

    transformed = xslt_transformation(
        marc_xml,
        _absolute_template_path("MARC21toPeriodicalTitle.xsl")
    )

    return _apply_postprocessing(
        marc_xml=marc_xml,
        xml=transformed,
        func=mods_postprocessor.postprocess_periodical,
        uuid=uuid,
        url=url,
    )

def marcxml2mods(marc_xml, uuid, url):
    """
    Convert `marc_xml` to MODS. Decide the type of the record and which
    template to use (monograph, multi-monograph, periodical).

    Args:
        marc_xml (str): Filename or XML string. Don't use ``\n`` in case
            of filename.
        uuid (str): UUID string giving the package ID.
        url (str): URL of the publication (public or not).

    Returns:
        list: Collection of transformed XML strings.
    """
    marc_xml = _read_content_or_path(marc_xml)

    return type_decisioner(
        marc_xml,
        lambda: transform_to_mods_mono(marc_xml, uuid, url),
        lambda: transform_to_mods_multimono(marc_xml, uuid, url),
        lambda: transform_to_mods_periodical(marc_xml, uuid, url),
    )

def parse(input_string, prefix=''):
    """
    Parse the given DSL string and return the parsed results.

    Args:
        input_string (str): DSL string.
        prefix (str): Optional prefix to add to every element name, useful
            to namespace things.

    Returns:
        dict: Parsed content.
    """
    tree = parser.parse(input_string)
    visitor = ChatlVisitor(prefix)
    visit_parse_tree(tree, visitor)

    return visitor.parsed

def send_msg(self, address, args=[]):
    """
    Send multiple args in a single message to a given address.

    Args:
        address (str): OSC address.
        args (list): Arguments to be parsed in VVVV.
    """
    if not address.startswith('/'):
        address = '/{}'.format(address)

    msg = osc_message_builder.OscMessageBuilder(address=address)
    for arg in args:
        msg.add_arg(arg)

    self.conn.send(msg.build())
    return

def pnl_upsert(self, asset_manager_id, pnls):
    """
    Upsert a list of pnls.

    Note: this performs a full update of existing records with matching
    keys, so the passed-in pnl objects should be complete.

    Args:
        asset_manager_id (int): The id of the asset manager owning the pnl.
        pnls (list): List of pnl objects to upsert.
    """
    self.logger.info('Upsert PnL for - Asset Manager: %s', asset_manager_id)
    pnls = [pnls] if not isinstance(pnls, list) else pnls
    json_pnls = [pnl.to_interface() for pnl in pnls]
    url = '%s/pnls/%s' % (self.endpoint, asset_manager_id)

    response = self.session.put(url, json=json_pnls)
    if response.ok:
        results = []
        for pnl_result in response.json():
            results.append(json_to_pnl(pnl_result))
        self.logger.info('Upserted %s PnL records', len(results))
        return results
    else:
        self.logger.error(response.text)
        response.raise_for_status()

def __init__(self, connection_param, queue, output_exchange, output_key):
    """
    Pika and Daemon wrapper for handling AMQP connections.

    Args:
        connection_param (pika.ConnectionParameters): Object defining the
            connection.
        queue (str): Name of the queue where the daemon should listen.
        output_exchange (str): Name of the exchange where the daemon
            should put responses.
        output_key (str): Routing key for the output exchange.
    """
    super(PikaDaemon, self).__init__(queue)

    self.connection_param = connection_param
    self.queue = queue
    self.output_exchange = output_exchange
    self.content_type = "application/json"
    self.output_key = output_key

def sendResponse(self, message, UUID, routing_key):
    """
    Send `message` to ``self.output_exchange`` with the given
    `routing_key`, ``self.content_type`` and ``delivery_mode=2``.

    Args:
        message (str): Message which will be sent.
        UUID: Unique identification of the message.
        routing_key (str): Routing key used to send the message back.
    """
    self.sendMessage(
        exchange=self.output_exchange,
        routing_key=routing_key,
        message=message,
        UUID=UUID
    )

def _md5sum(file_path):
    """
    Helper function that builds an md5sum from a file in chunks.

    Args:
        file_path: The path to the file you want an md5sum for.

    Returns:
        The raw md5 digest of the file (bytes, not a hex string).
    """
    md5 = hashlib.md5()
    with open(file_path, "rb") as md5_file:
        while True:
            data = md5_file.read(1024 * 1024 * 4)  # 4 MiB chunks
            if not data:
                break
            md5.update(data)
    return md5.digest()

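# A hexdigest variant (a sketch, not in the original module): the same
# 4 MiB chunking via the iter(callable, sentinel) idiom, but returning a
# printable string:
def _md5sum_hex(file_path):
    md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024 * 4), b""):
            md5.update(chunk)
    return md5.hexdigest()
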
def __init__(self, basecache=None):
    """
    Create a new transient cache.

    Optionally supports populating the cache with values of an existing
    cache.

    Arguments:
        basecache (TransientCache, optional): Cache to populate this new
            cache with.
    """
    self._data = {}

    if basecache is not None:
        for key, val in basecache.items():
            self._data[key] = val

def __init__(self, path, basecache=None):
    """
    Create a new JSON cache.

    Optionally supports populating the cache with values of an existing
    cache.

    Arguments:
        path (str): Path to the file backing the cache on disk.
        basecache (TransientCache, optional): Cache to populate this new
            cache with.
    """
    super(JsonCache, self).__init__()
    self.path = fs.abspath(path)

    if fs.exists(self.path):
        io.debug(("Loading cache '{0}'".format(self.path)))
        with open(self.path) as file:
            self._data = json.load(file)

    if basecache is not None:
        for key, val in basecache.items():
            self._data[key] = val

    # Register exit handler
    atexit.register(self.write)

def __init__(self, root, escape_key=hash_key):
    """
    Create filesystem cache.

    Arguments:
        root (str): Path to the cache root directory.
        escape_key (fn, optional): Function to convert keys to file names.
    """
    self.path = root
    self.escape_key = escape_key

    fs.mkdir(self.path)

def keypath(self, key):
    """
    Get the filesystem path for a key.

    Arguments:
        key: Key.

    Returns:
        str: Absolute path.
    """
    return fs.path(self.path, self.escape_key(key))

def __getitem__(self, key):
    """
    Get path to file in cache.

    Arguments:
        key: Key.

    Returns:
        str: Path to cache value.

    Raises:
        KeyError: If key not in cache.
    """
    path = self.keypath(key)
    if fs.exists(path):
        return path
    else:
        raise KeyError(key)

def __setitem__(self, key, value):
    """
    Emplace file in cache.

    Arguments:
        key: Key.
        value (str): Path of file to insert in cache.

    Raises:
        ValueError: If the file `value` does not exist.
    """
    if not fs.exists(value):
        raise ValueError(value)

    path = self.keypath(key)
    fs.mkdir(self.path)
    fs.mv(value, path)

def __contains__(self, key):
    """
    Check cache contents.

    Arguments:
        key: Key.

    Returns:
        bool: True if key in cache, else False.
    """
    path = self.keypath(key)
    return fs.exists(path)

def __delitem__(self, key):
    """
    Delete cached file.

    Arguments:
        key: Key.

    Raises:
        KeyError: If file not in cache.
    """
    path = self.keypath(key)
    if fs.exists(path):
        fs.rm(path)
    else:
        raise KeyError(key)

def unpack_archive(*components, **kwargs) -> str:
    """
    Unpack a compressed archive.

    Arguments:
        *components (str[]): Absolute path.
        **kwargs (dict, optional): Set "compression" to the compression
            type; default: bz2. Set "dir" to the destination directory;
            defaults to the directory of the archive.

    Returns:
        str: Path to the directory it was extracted into.
    """
    path = fs.abspath(*components)
    compression = kwargs.get("compression", "bz2")
    dir = kwargs.get("dir", fs.dirname(path))

    fs.cd(dir)
    tar = tarfile.open(path, "r:" + compression)
    tar.extractall()
    tar.close()
    fs.cdpop()

    return dir

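# Both call styles follow from the signature above:
unpack_archive("data", "corpus.tar.bz2")  # extracts into ./data/
unpack_archive("corpus.tar.gz", compression="gz", dir="/tmp/corpus")
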
def __init__(self, pid_filename):
    """
    Generic daemon class, which allows you to daemonize your script and
    react to events in simple callbacks.

    Args:
        pid_filename (str): Name of the daemon's PID file, which is
            stored in ``/tmp``. The class automatically adds a ``.pid``
            suffix.
    """
    self.stdin_path = '/dev/null'
    self.stdout_path = '/dev/null'
    self.stderr_path = '/dev/null'
    self.pidfile_path = '/tmp/' + pid_filename + '.pid'
    self.pidfile_timeout = 5

    self.daemon_runner = runner.DaemonRunner(self)

    # react to parameters and check if the daemon is not already running
    if self.isRunning() and "stop" not in sys.argv and \
       "restart" not in sys.argv:
        self.onIsRunning()

def get_controller(self, path):
    """
    Return the controller that handles the given path.

    Args:
        path: Requested path, e.g.: /blog/post_view/15
    """
    path_info = path.lstrip('/').split('/', 2)
    try:
        return self._routes.get(path_info[0] + '/' + path_info[1])
    except (IndexError, KeyError):
        return self._routes.get(path_info[0] or 'index')

def read_file(*components, **kwargs):
    """
    Load a JSON data blob.

    Arguments:
        path (str): Path to file.
        must_exist (bool, optional): If False, return an empty dict if the
            file does not exist.

    Returns:
        array or dict: JSON data.

    Raises:
        File404: If path does not exist, and must_exist is True.
        ValueError: If the JSON is malformed.
    """
    must_exist = kwargs.get("must_exist", True)

    if must_exist:
        path = fs.must_exist(*components)
    else:
        path = fs.path(*components)

    try:
        with open(path) as infile:
            return loads(infile.read())
    except ValueError as e:
        raise ValueError(
            "malformed JSON file '{path}'. Message from parser: {err}"
            .format(path=fs.basename(path), err=str(e)))
    except IOError:
        if not must_exist:
            return {}
        raise

def write_file(path, data, format=True):
    """
    Write JSON data to file.

    Arguments:
        path (str): Destination.
        data (dict or list): JSON serializable data.
        format (bool, optional): Pretty-print JSON data.
    """
    if format:
        fs.write_file(path, format_json(data))
    else:
        fs.write_file(path, json.dumps(data))

def create_blocking_connection(host):
    """
    Return a properly created blocking connection.

    Args:
        host (str): Host as it is defined in :func:`.get_amqp_settings`.

    Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
    """
    return pika.BlockingConnection(
        amqpdaemon.getConParams(
            settings.get_amqp_settings()[host.lower()]["vhost"]
        )
    )

def create_schema(host):
    """
    Create exchanges, queues and route them.

    Args:
        host (str): One of the possible hosts.
    """
    connection = create_blocking_connection(host)
    channel = connection.channel()

    exchange = settings.get_amqp_settings()[host]["exchange"]
    channel.exchange_declare(
        exchange=exchange,
        exchange_type="topic",
        durable=True
    )
    print "Created exchange '%s'." % exchange

    print "Creating queues:"
    queues = settings.get_amqp_settings()[host]["queues"]
    for queue in queues.keys():
        channel.queue_declare(
            queue=queue,
            durable=True,
            # arguments={'x-message-ttl': int(1000 * 60 * 60 * 24)}  # :S
        )
        print "\tCreated durable queue '%s'." % queue

    print
    print "Routing exchanges using routing key to queues:"
    for queue in queues.keys():
        channel.queue_bind(
            queue=queue,
            exchange=exchange,
            routing_key=queues[queue]
        )
        print "\tRouting exchange %s['%s'] -> '%s'." % (
            exchange,
            queues[queue],
            queue
        )

def _get_channel(host, timeout):
    """
    Create a communication channel for the given `host`.

    Args:
        host (str): Specified --host.
        timeout (int): Set `timeout` for the returned `channel`.

    Returns:
        obj: Pika channel object.
    """
    connection = create_blocking_connection(host)

    # register timeout
    if timeout >= 0:
        connection.add_timeout(
            timeout,
            lambda: sys.stderr.write("Timeouted!\n") or sys.exit(1)
        )

    return connection.channel()

def receive(host, timeout):
    """
    Print all messages in the queue.

    Args:
        host (str): Specified --host.
        timeout (int): How long the script should wait for a message.
    """
    parameters = settings.get_amqp_settings()[host]

    queues = parameters["queues"]
    queues = dict(map(lambda (x, y): (y, x), queues.items()))  # reverse items
    queue = queues[parameters["out_key"]]

    channel = _get_channel(host, timeout)

    for method_frame, properties, body in channel.consume(queue):
        print json.dumps({
            "method_frame": str(method_frame),
            "properties": str(properties),
            "body": body
        })
        print "-" * 79
        print
        channel.basic_ack(method_frame.delivery_tag)

def send_message(host, data, timeout=None, properties=None):
    """
    Send a message to the given `host`.

    Args:
        host (str): Specified host: aleph/ftp/whatever available host.
        data (str): JSON data.
        timeout (int, default None): How long to wait for the connection.
        properties (default None): Properties of the message; a sane
            default is created when not given.
    """
    channel = _get_channel(host, timeout)

    if not properties:
        properties = pika.BasicProperties(
            content_type="application/json",
            delivery_mode=2,
            headers={"UUID": str(uuid.uuid4())}
        )

    parameters = settings.get_amqp_settings()[host]

    channel.basic_publish(
        exchange=parameters["exchange"],
        routing_key=parameters["in_key"],
        properties=properties,
        body=data
    )

def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data',
                 fstype='ext4'):
    """
    Mount an EBS volume.

    Args:
        volume (str): EBS volume ID.
        device (str): Default /dev/xvdf.
        mountpoint (str): Default /mnt/data.
        fstype (str): Default ext4.
    """
    _ec2().attach_volume(volume, _host_node()['id'], device)
    time.sleep(1)
    sudo('mkdir -p "%s"' % mountpoint)
    sudo('mount -t "%s" "%s" "%s"' % (fstype, device, mountpoint))

def filter_iqr(array, lower, upper):
    """
    Return elements which fall within the specified interquartile range.

    Arguments:
        array (list): Sequence of numbers.
        lower (float): Lower bound for IQR, in range 0 <= lower <= 1.
        upper (float): Upper bound for IQR, in range 0 <= upper <= 1.

    Returns:
        list: Copy of original list, with elements outside of IQR removed.
    """
    upper, lower = iqr(array, upper, lower)

    new = list(array)
    for x in new[:]:
        if x < lower or x > upper:
            new.remove(x)

    return new

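# Keep only the middle 50% of the data; this assumes the companion iqr()
# helper (defined elsewhere in the module) returns the (upper, lower)
# bounds at those quantiles:
data = [1, 2, 2, 3, 3, 3, 4, 4, 5, 100]
filter_iqr(data, lower=0.25, upper=0.75)  # extreme values like 100 drop out
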
def get(cls, option, default_value=None):
    """
    Return the value of the given option. If the option isn't found,
    return `default_value` (None by default).

    Args:
        option: String path to the option, with `:` as separator.
        default_value: Value returned when the option is missing.
    """
    config = cls.__get_instance()
    for name in option.split(':'):
        if not name:
            raise Exception('Incorrect value in path '
                            '(maybe double `:` or empty path)')
        if name not in config:
            return default_value
        config = config[name]
    return deepcopy(config)

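# Assuming the enclosing class is a Config singleton holding, say,
# {"database": {"host": "localhost"}}:
Config.get('database:host')        # -> 'localhost'
Config.get('database:port', 5432)  # missing key -> the default, 5432
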
def fetch_gbwithparts(list_of_NC_accessions, email, folder):
    """
    Download genbank files from NCBI using Biopython Entrez efetch.

    Args:
        list_of_NC_accessions (list): A list of strings, e.g.
            ['NC_015758', 'NC_002695'].
        email (string): NCBI wants your email.
        folder (string): Where the gb files download to, generally
            './genomes/'.
    """
    from Bio import Entrez
    from time import sleep

    print 'downloading genomes... please wait'
    for item in list_of_NC_accessions:
        Entrez.email = email
        handle = Entrez.efetch(db="nuccore", id=item, retmode='full',
                               rettype='gbwithparts')
        data = handle.read()

        if not os.path.exists(folder):
            os.makedirs(folder)

        with open('%s/%s.gb' % (folder, item), 'w') as textfile:
            textfile.write(data)

        print 'done downloading %s' % item
        sleep(2)

def __init__(self, name=os.path.basename(sys.argv[0]), version=None,
             add_stdargs=True, catch_exceptions=True):
    """
    initialize the plugin object

    arguments:
        name: the name of the plugin, as used in the auto-generated help
        version: an optional version of your plugin
        add_stdargs: add hostname, timeout, verbose and version (default)
        catch_exceptions: gracefully catch exceptions
    """
    self._name = name
    self._args = None
    self._timeout = 10
    self._results = []
    self._perfdata = []
    self._extdata = []
    self._timeout_delay = None
    self._timeout_code = None

    if version is None:
        version = "undefined"

    if catch_exceptions is True:
        sys.excepthook = self._excepthook

    self._parser = ThrowingArgumentParser()

    if add_stdargs:
        self.parser.add_argument("-H", "--hostname", help="hostname",
                                 metavar="HOSTNAME")
        self.parser.add_argument("-t", "--timeout", help="timeout",
                                 metavar="TIMEOUT", default=30, type=int)
        self.parser.add_argument("-v", "--verbose",
                                 help="increase verbosity",
                                 action="count", default=0)
        self.parser.add_argument("-V", "--version", help="show version",
                                 action="version",
                                 version=name + " " + str(version))

def set_timeout(self, timeout=None, code=None):
    """
    set the timeout for plugin operations; when the timeout is reached,
    exit properly with nagios-compliant output

    arguments:
        timeout: timeout in seconds
        code: exit status code
    """
    if timeout is None:
        timeout = self.args.timeout if self.args.timeout else 10
    if code is None:
        code = UNKNOWN

    self._timeout_delay = timeout
    self._timeout_code = code
    signal.signal(signal.SIGALRM, self._timeout_handler)
    signal.alarm(timeout)

def exit(self, code=None, message=None, perfdata=None, extdata=None):
    """
    manual exit from the plugin

    arguments:
        code: exit status code
        message: a short, one-line message to display
        perfdata: perfdata, if any
        extdata: multi-line message to give more details
    """
    code = UNKNOWN if code is None else int(code)
    message = "" if message is None else str(message)
    perfdata = "" if perfdata is None else str(perfdata)
    extdata = "" if extdata is None else str(extdata)

    print("{0} {1} - {2} | {3}".format(self.name.upper(),
                                       _CODES_STR[code], message, perfdata))
    if extdata:
        print(extdata)
    sys.exit(code)

def finish(self, code=None, message=None, perfdata=None, extdata=None):
    """
    exit based on the results added via the internal functions

    automatically generates the output, but each parameter can be
    overridden; all parameters are optional

    arguments:
        code: exit status code
        message: a short, one-line message to display
        perfdata: perfdata, if any
        extdata: multi-line message to give more details
    """
    if code is None:
        code = self.get_code()
    if message is None:
        message = self.get_message(msglevels=[code])
    if perfdata is None:
        perfdata = self.get_perfdata()
    if extdata is None:
        extdata = self.get_extdata()

    self.exit(code=code, message=message, perfdata=perfdata,
              extdata=extdata)

def parse_args(self, arguments=None):
    """
    parses the arguments from the command-line

    arguments:
        arguments: optional argument list to parse

    returns:
        the parsed arguments as a namespace object
    """
    self._args = self.parser.parse_args(arguments)
    return self.args

def check_threshold(value, warning=None, critical=None):
    """
    checks a value against warning and critical thresholds

    threshold syntax: https://nagios-plugins.org/doc/guidelines.html

    arguments:
        value: the value to check
        warning: warning threshold
        critical: critical threshold

    returns:
        the result status of the check
    """
    if critical is not None:
        if not isinstance(critical, Threshold):
            critical = Threshold(critical)
        if not critical.check(value):
            return CRITICAL

    if warning is not None:
        if not isinstance(warning, Threshold):
            warning = Threshold(warning)
        if not warning.check(value):
            return WARNING

    return OK

def add_result(self, code, message=None):
    """
    add a result to the internal result list

    arguments:
        same arguments as for Result()
    """
    self._results.append(Result(code, message))

def get_code(self):
    """
    the final code for multi-checks

    returns:
        the worst-case code from all added results, or UNKNOWN if none
        were added
    """
    code = UNKNOWN
    for result in self._results:
        if code == UNKNOWN or (result.code < UNKNOWN and result.code > code):
            code = result.code
    return code

def get_message(self, msglevels=None, joiner=None):
    """
    the final message for multi-checks

    arguments:
        msglevels: an array of all desired levels (ex: [CRITICAL, WARNING])
        joiner: string used to join all messages (default: ', ')

    returns:
        a one-line message created from the stored results, or an empty
        string if there are none
    """
    messages = []

    if joiner is None:
        joiner = ', '
    if msglevels is None:
        msglevels = [OK, WARNING, CRITICAL]

    for result in self._results:
        if result.code in msglevels:
            messages.append(result.message)

    return joiner.join([msg for msg in messages if msg])

def add_perfdata(self, *args, **kwargs):
    """
    add a perfdata to the internal perfdata list

    arguments:
        the same arguments as for Perfdata()
    """
    self._perfdata.append(Perfdata(*args, **kwargs))

def __init__(self, code, message=None):
    """
    initialize a result object

    arguments:
        code: the status code
        message: the status message
    """
    self.code = code
    self.codestr = _CODES_STR[code]
    self.message = message

def __init__(self, threshold):
    """
    initializes a new Threshold object

    arguments:
        threshold: string describing the threshold (see
            https://nagios-plugins.org/doc/guidelines.html)
    """
    self._threshold = threshold
    self._min = 0
    self._max = 0
    self._inclusive = False

    self._parse(threshold)

def _parse(self, threshold):
    """
    internal threshold string parser

    arguments:
        threshold: string describing the threshold
    """
    match = re.search(r'^(@?)((~|\d*):)?(\d*)$', threshold)

    if not match:
        raise ValueError('Error parsing Threshold: {0}'.format(threshold))

    if match.group(1) == '@':
        self._inclusive = True

    if match.group(3) == '~':
        self._min = float('-inf')
    elif match.group(3):
        self._min = float(match.group(3))
    else:
        self._min = float(0)

    if match.group(4):
        self._max = float(match.group(4))
    else:
        self._max = float('inf')

    if self._max < self._min:
        raise ValueError('max must be superior to min')

def check(self, value):
    """
    check if a value is correct according to the threshold

    arguments:
        value: the value to check
    """
    if self._inclusive:
        return False if self._min <= value <= self._max else True
    else:
        return False if value > self._max or value < self._min else True

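# check() returning False is what makes check_threshold() above return
# WARNING/CRITICAL; a few cases of the Nagios range syntax:
Threshold("10").check(5)       # True  -> OK (range 0..10)
Threshold("10").check(11)      # False -> alert (outside 0..10)
Threshold("10:").check(5)      # False -> alert (below the minimum)
Threshold("~:10").check(-3)    # True  -> OK (no lower bound)
Threshold("@10:20").check(15)  # False -> alert (inclusive range "@")
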
def __init__(self, label, value, uom=None, warning=None, critical=None,
             minimum=None, maximum=None):
    """
    initialize the object

    most arguments refer to:
    https://nagios-plugins.org/doc/guidelines.html#AEN200

    arguments:
        label: name of the performance data element
        value: value of the element
        uom: unit of measurement
        warning: the warning threshold string
        critical: the critical threshold string
        minimum: minimum value (usually for graphs)
        maximum: maximum value (usually for graphs)
    """
    self.label = label
    self.value = value
    self.uom = uom
    self.warning = warning
    self.critical = critical
    self.minimum = minimum
    self.maximum = maximum