Dataset preview columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, 52.6k-1.16M)
Returns the same date 1 year ago. Args: date (datetime or datetime.date) Returns: (datetime or datetime.date) Raises: -
def last_year(date_):
    day = 28 if date_.day == 29 and date_.month == 2 else date_.day
    return datetime.date(date_.year - 1, date_.month, day)
1,126,087
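A minimal usage sketch for the entry above (hypothetical, assuming last_year and Python's datetime module are in scope); note how Feb 29 is clamped to Feb 28 in the non-leap target year:

import datetime

print(last_year(datetime.date(2017, 6, 15)))   # 2016-06-15
print(last_year(datetime.date(2016, 2, 29)))   # 2015-02-28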
Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a time Returns: datetime.time object Raises: ValueError if the input string does not have a valid format.
def timestr2time(time_str):
    if any(c not in '0123456789:' for c in time_str):
        raise ValueError('Illegal character in time string')
    if time_str.count(':') == 2:
        h, m, s = time_str.split(':')
    elif time_str.count(':') == 1:
        h, m = time_str.split(':')
        s = '00'
    elif len(time_str) == 6:
        h = time_str[:2]
        m = time_str[2:4]
        s = time_str[4:]
    else:
        raise ValueError('Time format not recognised. {}'.format(
            VALID_TIME_FORMATS_TEXT))
    if len(m) == 2 and len(s) == 2:
        mins = int(m)
        sec = int(s)
    else:
        raise ValueError('m and s must be 2 digits')
    try:
        return datetime.time(int(h), mins, sec)
    except ValueError:
        raise ValueError('Invalid time {}. {}'.format(time_str,
                                                      VALID_TIME_FORMATS_TEXT))
1,126,088
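A usage sketch for timestr2time above, assuming the function and its VALID_TIME_FORMATS_TEXT constant are importable; the three guessable formats are 'hh:mm:ss', 'hh:mm' and 'hhmmss':

print(timestr2time('09:30:15'))   # 09:30:15
print(timestr2time('09:30'))      # 09:30:00 (seconds default to 00)
print(timestr2time('093015'))     # 09:30:15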
Turns a datetime.time object into a string. The string must have one of the formats from VALID_TIME_FORMATS_TEXT to make it compatible with timestr2time. Args: time (datetime.time) the time to be translated fmt (str) a format string. Returns: (str) that represents a time. Raises: ValueError if the format is not valid.
def time2timestr(time, fmt='hhmmss'): if fmt.count(':') == 2: if not fmt.index('h') < fmt.index('m') < fmt.index('s'): raise ValueError('Invalid format string. {}'.format( VALID_TIME_FORMATS_TEXT)) h, m, s = fmt.split(':') elif fmt.count(':') == 1: if not fmt.index('h') < fmt.index('m'): raise ValueError('Invalid format string. {}'.format( VALID_TIME_FORMATS_TEXT)) h, m = fmt.split(':') s = None elif any(c not in 'hms' for c in fmt) or len(fmt) != 6: raise ValueError('Invalid character in format string. {}'.format( VALID_TIME_FORMATS_TEXT)) else: if not fmt.index('h') < fmt.index('m') < fmt.index('s'): raise ValueError('Invalid format string. {}'.format( VALID_TIME_FORMATS_TEXT)) h, m, s = fmt[:-4], fmt[-4:-2], fmt[-2:] for string, char in ((h, 'h'), (m, 'm'), (s, 's')): if string is not None and any(c != char for c in string): raise ValueError('Invalid date format: {} is not {}'.\ format(char, string)) if len(h) == 2: fmt = fmt.replace('hh', '%H', 1) elif len(h) == 1: fmt = fmt.replace('h', 'X%H', 1) else: raise ValueError('Invalid format string, hour must have 1 or 2 digits') if len(m) == 2: fmt = fmt.replace('mm', '%M', 1) else: raise ValueError('Invalid format string, minutes must have 2 digits') if s is not None and len(s) == 2: fmt = fmt. replace('ss', '%S', 1) elif s is not None: raise ValueError('Invalid format string, seconds must have 2 digits') return time.strftime(fmt).replace('X0','X').replace('X','')
1,126,089
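And the inverse direction with time2timestr, again assuming the function above is in scope:

import datetime

t = datetime.time(9, 30, 15)
print(time2timestr(t))               # '093015' (default 'hhmmss' format)
print(time2timestr(t, fmt='hh:mm'))  # '09:30'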
Find all PDF files in the specified directory. Args: source_directory (str): The source directory. Returns: list(str): Filepaths to all PDF files in the specified directory. Raises: ValueError
def _get_pdf_filenames_at(source_directory):
    if not os.path.isdir(source_directory):
        raise ValueError("%s is not a directory!" % source_directory)
    return [os.path.join(source_directory, filename)
            for filename in os.listdir(source_directory)
            if filename.endswith(PDF_EXTENSION)]
1,126,418
Compress a single PDF file. Args: filepath (str): Path to the PDF file. output_path (str): Output path. ghostscript_binary (str): Name/alias of the Ghostscript binary. Raises: ValueError FileNotFoundError
def compress_pdf(filepath, output_path, ghostscript_binary):
    if not filepath.endswith(PDF_EXTENSION):
        raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
    try:
        file_size = os.stat(filepath).st_size
        if file_size < FILE_SIZE_LOWER_LIMIT:
            LOGGER.info(NOT_COMPRESSING.format(filepath, file_size,
                                               FILE_SIZE_LOWER_LIMIT))
            process = subprocess.Popen(['cp', filepath, output_path])
        else:
            LOGGER.info(COMPRESSING.format(filepath))
            process = subprocess.Popen(
                [ghostscript_binary, "-sDEVICE=pdfwrite",
                 "-dCompatibilityLevel=1.4", "-dPDFSETTINGS=/ebook",
                 "-dNOPAUSE", "-dQUIET", "-dBATCH",
                 "-sOutputFile=%s" % output_path, filepath]
            )
    except FileNotFoundError:
        msg = GS_NOT_INSTALLED.format(ghostscript_binary)
        raise FileNotFoundError(msg)
    process.communicate()
    LOGGER.info(FILE_DONE.format(output_path))
1,126,419
Compress all PDF files in the given source directory and place the output in the given output directory. This is a generator function that first yields the number of files to be compressed, and then yields the output path of each file. Args: source_directory (str): Filepath to the source directory. output_directory (str): Filepath to the output directory. ghostscript_binary (str): Name of the Ghostscript binary. Returns: list(str): paths to outputs.
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
    source_paths = _get_pdf_filenames_at(source_directory)
    yield len(source_paths)
    for source_path in source_paths:
        output = os.path.join(output_directory, os.path.basename(source_path))
        compress_pdf(source_path, output, ghostscript_binary)
        yield output
1,126,420
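A sketch of how the generator above might be consumed, assuming a Ghostscript binary named 'gs' on the PATH and hypothetical ./pdfs and ./out directories:

compressor = compress_multiple_pdfs('./pdfs', './out', 'gs')
total = next(compressor)  # the first yield is the number of files to process
for done, output_path in enumerate(compressor, start=1):
    print('%d/%d compressed -> %s' % (done, total, output_path))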
Returns a triplestore connection args: attr_name: The name the connection will be assigned in the config manager params: The parameters of the connection kwargs: log_level: logging level to use
def make_tstore_conn(params, **kwargs): log.setLevel(params.get('log_level', __LOG_LEVEL__)) log.debug("\n%s", params) params.update(kwargs) try: vendor = RdfwConnections['triplestore'][params.get('vendor')] except KeyError: vendor = RdfwConnections['triplestore']['blazegraph'] conn = vendor(**params) return conn
1,126,895
returns the specified connection args: conn_name: the name of the connection
def get(self, conn_name, default=None, **kwargs): if isinstance(conn_name, RdfwConnections): return conn_name try: return self.conns[conn_name] except KeyError: if default: return self.get(default, **kwargs) raise LookupError("'%s' connection has not been set" % conn_name)
1,126,903
Takes a list of connections and sets them in the manager args: conn_list: list of connection definitions
def load(self, conn_list, **kwargs): for conn in conn_list: conn['delay_check'] = kwargs.get('delay_check', False) self.set_conn(**conn) if kwargs.get('delay_check'): test = self.wait_for_conns(**kwargs) if not test: log.critical("\n\nEXITING:Unable to establish connections \n" "%s", test)
1,126,904
delays until all connections are working args: timeout: number of seconds to try connecting. Error out when timeout is reached start_delay: number of seconds to wait before checking status interval: number of seconds to wait between checks
def wait_for_conns(self, timeout=60, start_delay=0, interval=5, **kwargs): log.setLevel(kwargs.get('log_level',self.log_level)) timestamp = time.time() last_check = time.time() + start_delay - interval last_delay_notification = time.time() - interval timeout += 1 failing = True up_conns = {} # loop until the server is up or the timeout is reached while((time.time()-timestamp) < timeout) and failing: # if delaying, the start of the check, print waiting to start if start_delay > 0 and time.time() - timestamp < start_delay \ and (time.time()-last_delay_notification) > 5: print("Delaying server status check until %ss. Current time: %ss" \ % (start_delay, int(time.time() - timestamp))) last_delay_notification = time.time() # check status at the specified 'interval' until the server is up first_check = True while ((time.time()-last_check) > interval) and failing: msg = ["\tChecked status of servers at %ss" % \ int((time.time()-timestamp)), "\t** CONNECTION STATUS:"] last_check = time.time() failing = self.failing new_up = (self.active.keys() - failing.keys()) - \ up_conns.keys() msg += ["\t\t UP - %s: %s" % (key, self.conns[key]) for key in new_up] up_conns.update({key: self.conns[key] for key in new_up}) msg.append("\t*** '%s' connection(s) up" % len(up_conns)) msg += ["\t\t FAILING - %s: %s" % (key, self.conns[key]) for key in failing] log.info("** CONNECTION STATUS:\n%s", "\n".join(msg)) if not failing: log.info("**** Servers up at %ss" % \ int((time.time()-timestamp))) break if failing: raise RuntimeError("Unable to establish connection(s): ", failing) for conn in up_conns.values(): conn.delay_check_pass() return not failing
1,126,906
generate datasets list to activate args: settings: dictionary from settings file argv: list from sys.argv
def generate_datasets_list(settings, argv): datasets_string_list = settings["DATASETS_LIST"] datasets_list = [] if len(argv) == 2: try: datasets_items = datasets_string_list.iteritems() except AttributeError: datasets_items = datasets_string_list.items() for key, val in datasets_items: key_module = importlib.import_module( settings["PYTHON_MODULE"] + "." + key ) for element in val: datasets_list.append( (key, element, getattr(key_module, element)()) ) elif len(argv) > 2: arguments = argv[2:] for argument in arguments: argument_list = argument.split(".") key = ".".join(argument_list[:-1]) key_module = importlib.import_module( settings["PYTHON_MODULE"] + "." + key ) datasets_list.append( (key, argument_list[-1], getattr(key_module, argument_list[-1])()) ) else: print_help() return datasets_list
1,127,060
Return source of the `link` whether it is filename or url. Args: link (str): Filename or URL. Returns: str: Content. Raises: UserWarning: When the `link` couldn't be resolved.
def _get_source(link):
    if link.startswith("http://") or link.startswith("https://"):
        down = httpkie.Downloader()
        return down.download(link)
    if os.path.exists(link):
        with open(link) as f:
            return f.read()
    raise UserWarning("html: '%s' is neither URL or data!" % link)
1,127,085
Given a string containing an xx:xx:xx:xx:xx:xx address, return it as a byte sequence. Args: addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format. reverse (bool): True if the byte ordering should be reversed in the output. Returns: A bytearray containing the converted address.
def fmt_addr_raw(addr, reverse=True):
    addr = addr.replace(':', '')
    raw_addr = [int(addr[i:i + 2], 16) for i in range(0, len(addr), 2)]
    if reverse:
        raw_addr.reverse()
    # for Python 2, this needs to be a string instead of a bytearray
    if sys.version_info[0] == 2:
        return str(bytearray(raw_addr))
    return bytearray(raw_addr)
1,127,598
Return a pretty-printed (hex style) version of a binary string. Args: raw (bytes): any sequence of bytes reverse (bool): True if output should be in reverse order. Returns: Hex string corresponding to input byte sequence.
def pp_hex(raw, reverse=True):
    if not reverse:
        return ''.join(['{:02x}'.format(v) for v in bytearray(raw)])
    return ''.join(reversed(['{:02x}'.format(v) for v in bytearray(raw)]))
1,127,599
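A round-trip sketch for the two helpers above (fmt_addr_raw and pp_hex), assuming both are in scope along with the sys import they use; the address is illustrative:

raw = fmt_addr_raw('12:34:56:78:9a:bc')  # bytes in reversed (little-endian) order
print(pp_hex(raw))                       # '123456789abc' (reversed back for display)
print(pp_hex(raw, reverse=False))        # 'bc9a78563412' (raw storage order)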
Check whether a specified user is able to do a specified workshift. Parameters: workshift_profile is the workshift profile for a user shift is a weekly recurring workshift Returns: True if the user has enough free time between the shift's start time and end time to do the shift's required number of hours. False otherwise.
def is_available(workshift_profile, shift): if shift.week_long: return True start_time = ( shift.start_time if shift.start_time is not None else time(hour=0) ) end_time = ( shift.end_time if shift.end_time is not None else time(hour=23, minute=59) ) relevant_blocks = list() for block in workshift_profile.time_blocks.order_by('start_time'): if block.day == shift.day and block.preference == TimeBlock.BUSY \ and block.start_time < end_time \ and block.end_time > start_time: relevant_blocks.append(block) # Time blocks should be ordered; so go through and see if there is a wide # enough window for the workshifter to do the shift. If there is, # return True. if not relevant_blocks: return True hours_delta = timedelta(hours=float(shift.hours)) # Check the time between shift start and block start block = relevant_blocks.pop(0) start_delta = timedelta( hours=block.start_time.hour - start_time.hour, minutes=block.start_time.minute - start_time.minute, ) if start_delta >= hours_delta: return True while len(relevant_blocks) > 0: block, prev_block = relevant_blocks.pop(0), block # Check the time between the last block and the next block # is larger than the length of the shift start_end_delta = timedelta( hours=block.start_time.hour - prev_block.end_time.hour, minutes=block.start_time.minute - prev_block.end_time.minute, ) if start_end_delta >= hours_delta: return True # Check the time between the end of the time block to the end of the shift end_delta = timedelta( hours=end_time.hour - block.end_time.hour, minutes=end_time.minute - block.end_time.minute, ) if end_delta >= hours_delta: return True return False
1,127,614
Validate timestamp specified by request. See `validate.request` for additional info. Args: stamp: str. Time request was made as ISO 8601 timestamp. tolerance: int. Number of seconds request remains valid from timestamp. Returns bool: True if valid, False otherwise.
def timestamp(stamp, tolerance=150):
    try:
        tolerance = datetime.timedelta(0, tolerance)
        timestamp_low = dateutil.parser.parse(stamp)
        timestamp_high = timestamp_low + tolerance
        now = datetime.datetime.now(timestamp_low.tzinfo)
    except ValueError:
        return False
    return now >= timestamp_low and now <= timestamp_high
1,127,740
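A quick sketch of the tolerance check above, assuming the datetime and dateutil imports the function relies on; a fresh timestamp passes, one older than the 150-second window fails:

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
print(timestamp(now.isoformat()))    # True

stale = now - datetime.timedelta(seconds=300)
print(timestamp(stale.isoformat()))  # False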
Validate URL specified by SignatureCertChainUrl. See `validate.request` for additional info. Args: url: str. SignatureCertChainUrl header value sent by request. Returns: bool: True if valid, False otherwise.
def signature_cert_chain_url(url): r = urlparse(url) if not r.scheme.lower() == 'https': warnings.warn('Certificate URL scheme is invalid.') return False if not r.hostname.lower() == 's3.amazonaws.com': warnings.warn('Certificate URL hostname is invalid.') return False if not os.path.normpath(r.path).startswith('/echo.api/'): warnings.warn('Certificate URL path is invalid.') return False if r.port and not r.port == 443: warnings.warn('Certificate URL port is invalid.') return False return True
1,127,741
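A sketch exercising the URL checks above (scheme, hostname, path, optional port); the URLs are illustrative:

good = 'https://s3.amazonaws.com/echo.api/echo-api-cert.pem'
bad = 'http://s3.amazonaws.com/echo.api/echo-api-cert.pem'
print(signature_cert_chain_url(good))  # True
print(signature_cert_chain_url(bad))   # False, with a warning about the scheme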
Retrieve and parse PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: url: str. SignatureCertChainUrl header value sent by request. Returns: list or bool: If url is valid, returns the certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates where certs[0] is the first certificate in the file; if url is invalid, returns False.
def retrieve(url): try: pem_data = urlopen(url).read() except (ValueError, HTTPError): warnings.warn('Certificate URL is invalid.') return False if sys.version >= '3': try: pem_data = pem_data.decode() except(UnicodeDecodeError): warnings.warn('Certificate encoding is not utf-8.') return False return _parse_pem_data(pem_data)
1,127,742
Parse PEM-encoded X.509 certificate chain. Args: pem_data: str. PEM file retrieved from SignatureCertChainUrl. Returns: list or bool: If url is valid, returns the certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates where certs[0] is the first certificate in the file; if url is invalid, returns False.
def _parse_pem_data(pem_data): sep = '-----BEGIN CERTIFICATE-----' cert_chain = [six.b(sep + s) for s in pem_data.split(sep)[1:]] certs = [] load_cert = x509.load_pem_x509_certificate for cert in cert_chain: try: certs.append(load_cert(cert, default_backend())) except ValueError: warnings.warn('Certificate is invalid.') return False return certs
1,127,743
Validate PEM-encoded X.509 certificate chain. See `validate.request` for additional info. Args: certs: list. The certificate chain as a list of cryptography.hazmat.backends.openssl.x509._Certificate certificates. See `validate.retrieve` to create certs obj. Returns: bool: True if valid, False otherwise.
def cert_chain(certs): if len(certs) < 2: warnings.warn('Certificate chain contains < 3 certificates.') return False cert = certs[0] today = datetime.datetime.today() if not today > cert.not_valid_before: warnings.warn('Certificate Not Before date is invalid.') return False if not today < cert.not_valid_after: warnings.warn('Certificate Not After date is invalid.') return False oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME ext = cert.extensions.get_extension_for_oid(oid_san) sans = ext.value.get_values_for_type(x509.DNSName) if not 'echo-api.amazon.com' in sans: return False for i in range(len(certs) - 1): if not certs[i].issuer == certs[i + 1].subject: return False return True
1,127,744
Validate data request signature. See `validate.request` for additional info. Args: cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon signing certificate. sig: str. Signature header value sent by request. body: str. HTTPS request body. Returns: bool: True if valid, False otherwise.
def signature(cert, sig, body): body = six.b(body) sig = base64.decodestring(sig) padder = padding.PKCS1v15() public_key = cert.public_key() try: public_key.verify(sig, body, padder, hashes.SHA1()) return True except InvalidSignature: warnings.warn('Signature verification failed.') return False
1,127,745
Validate request application id matches true application id. Verifying the Application ID matches: https://goo.gl/qAdqe4. Args: app_id: str. Request application_id. Returns: bool: True if valid, False otherwise.
def application_id(self, app_id):
    if self.app_id != app_id:
        warnings.warn('Application ID is invalid.')
        return False
    return True
1,127,747
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``. Args: val: Some value. Returns: ``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`. Examples: >>> Option.maybe(0) Some(0) >>> Option.maybe(None) NONE
def maybe(cls, val: Optional[T]) -> 'Option[T]': return cast('Option[T]', NONE) if val is None else cls.Some(val)
1,127,763
Returns the contained value or computes it from ``callback``. Args: callback: The default callback. Returns: The contained value if the :py:class:`Option` is ``Some``, otherwise ``callback()``. Examples: >>> Some(0).unwrap_or_else(lambda: 111) 0 >>> NONE.unwrap_or_else(lambda: 'ha') 'ha'
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]: return self._val if self._is_some else callback()
1,127,766
Applies the ``callback`` with the contained value as its argument or returns :py:data:`NONE`. Args: callback: The callback to apply to the contained value. Returns: The ``callback`` result wrapped in an :class:`Option` if the contained value is ``Some``, otherwise :py:data:`NONE` Examples: >>> Some(10).map(lambda x: x * x) Some(100) >>> NONE.map(lambda x: x * x) NONE
def map(self, callback: Callable[[T], U]) -> 'Option[U]': return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
1,127,767
This function will add a file handler to a log with the provided level. Args: lvl (int): The severity level of messages printed to the file with the file handler, default = 1.
def addFileHandler(self,filename='', dr='',lvl=1): fname = self.name if filename != '': fname = filename if '.' not in fname: fname+='.log' fh = logging.FileHandler(os.path.join(dr,fname)) fh.setLevel(lvl) frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' fFrmt = logging.Formatter(frmtString) fh.setFormatter(fFrmt) self.addHandler(fh)
1,128,059
This function will add a stream handler to a log with the provided level. Args: lvl (int): The severity level of messages printed to the screen with the stream handler, default = 20.
def addStreamHandler(self,lvl=20): sh = logging.StreamHandler(sys.stdout) sh.setLevel(lvl) sFrmt = logging.Formatter('%(message)s') if False: #Another format example sFrmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s') sh.setFormatter(sFrmt) self.addHandler(sh)
1,128,060
Log all Key=value for every key in a dictionary. Args: d (dictionary): A standard python dictionary.
def logDict(self,d): keys = list(d.keys()) keys.sort() s = "\n"+"-"*78+"\n"+" "*20+"dictionary provided contains:\n"+"-"*78+"\n" for key in keys: s+=key+" = "+repr(d[key])+"\n" self.fileonly(s+"-"*78+"\n")
1,128,062
Creates a new object MFTHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: MFTHeader: New object using the binary stream as source
def create_from_binary(cls, ignore_signature_check, binary_view): sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \ usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \ cls._REPR.unpack(binary_view[:cls._REPR.size]) baad = None if not ignore_signature_check: if sig == b"FILE": baad = False elif sig == b"BAAD": baad = True else: raise HeaderError("Entry has no valid signature.", "MFTHeader") if fx_offset < MFTHeader._REPR.size: #header[1] is fx_offset raise HeaderError("Fix up array begins within the header.", "MFTHeader") if first_attr_offset < cls._REPR.size: #first attribute offset < header size raise HeaderError("First attribute offset points to inside of the header.", "MFTHeader") if entry_len > alloc_len: #entry_len > entry_alloc_len raise HeaderError("Logical size of the MFT is bigger than MFT allocated size.", "MFTHeader") file_ref, file_seq = get_file_reference(base_record) nw_obj = cls((baad, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, MftUsageFlags(usage_flags), entry_len, alloc_len, file_ref, file_seq, next_attr_id, record_n)) return nw_obj
1,128,358
Loads all the attributes of an entry. Once executed, all the attributes should have been loaded in the attribute *attrs* instance attribute. Args: mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells how the library will interpret data. attrs_view (memoryview(bytearray)) - A binary stream that starts at the first attribute until the end of the entry
def _load_attributes(self, mft_config, attrs_view): offset = 0 load_attrs = mft_config.attribute_load_list while (attrs_view[offset:offset+4] != b'\xff\xff\xff\xff'): attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:]) if attr_type in load_attrs: # pass all the information to the attr, as we don't know how # much content the attribute has attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:]) if not attr.header.attr_type_id is AttrTypes.DATA: self.attrs[attr.header.attr_type_id].append(attr) #add an attribute else: self._add_data_attribute(attr) offset += attr_len
1,128,371
Merge two entries. Allow the merging of two MFTEntries copying the attributes to the correct place and the datastreams. Args: source_entry (:obj:`MFTEntry`) - Source entry where the data will be copied from
def merge_entries(self, source_entry): #TODO should we change this to an overloaded iadd? #TODO I really don't like this. We are spending cycles to load things that are going to be discarted. Check another way. #copy the attributes for list_attr in source_entry.attrs.values(): for attr in list_attr: self.attrs[attr.header.attr_type_id].append(attr) #add an attribute #copy data_streams for stream in source_entry.data_streams: dest_stream = self._find_datastream(stream.name) if dest_stream is not None: dest_stream.add_from_datastream(stream) else: self.data_streams.append(stream)
1,128,372
Loads the new excel format files. Old format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
def get_data_xlsx(file_name, file_contents=None, on_demand=False): return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
1,128,425
Loads the old excel format files. New format files will automatically get loaded as well. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
def get_data_xls(file_name, file_contents=None, on_demand=False): def tuple_to_iso_date(tuple_date): (y,m,d, hh,mm,ss) = tuple_date non_zero = lambda n: n!=0 date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else '' time = "T%02d:%02d:%02d" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else '' return date+time def format_excel_val(book, val_type, value, want_tuple_date): # Data val_type Codes: # EMPTY 0 # TEXT 1 a Unicode string # NUMBER 2 float # DATE 3 float # BOOLEAN 4 int; 1 means TRUE, 0 means FALSE # ERROR 5 if val_type == 2: # TEXT if value == int(value): value = int(value) elif val_type == 3: # NUMBER datetuple = xlrd.xldate_as_tuple(value, book.datemode) value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple) elif val_type == 5: # ERROR value = xlrd.error_text_from_code[value] return value def xlrd_xsl_to_array(file_name, file_contents=None): book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand) formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False) row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r)))) data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)] if not on_demand: for sheet in data: sheet.load() book.release_resources() return data return xlrd_xsl_to_array(file_name, file_contents)
1,128,426
Loads xml excel format files. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy (will be ignored).
def get_data_excel_xml(file_name, file_contents=None, on_demand=False): # NOTE this method is inefficient and uses code that's not of the highest quality if file_contents: xml_file = BytesIO(file_contents) else: xml_file = file_name book = xmlparse.ParseExcelXMLFile(xml_file) row_builder = lambda s, r: list(s.row_values(r)) return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
1,128,427
Gets good old csv data from a file. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. encoding: Loads the file with the specified cell encoding. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False): def yield_csv(csv_contents, csv_file): try: for line in csv_contents: yield line finally: try: csv_file.close() except: pass def process_csv(csv_contents, csv_file): return [line for line in yield_csv(csv_contents, csv_file)] if file_contents: csv_file = BytesIO(file_contents) else: # Don't use 'open as' format, as on_demand loads shouldn't close the file early csv_file = open(file_name, 'rb') reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding) if on_demand: table = yield_csv(reader, csv_file) else: table = process_csv(reader, csv_file) return [table]
1,128,428
Writes 2D tables to file. Args: data: 2D list of tables/worksheets. file_name: Name of the output file (determines type). worksheet_names: A list of worksheet names (optional).
def write(data, file_name, worksheet_names=None): if re.search(XML_EXT_REGEX, file_name): return write_xml(data, file_name, worksheet_names=worksheet_names) elif re.search(XLSX_EXT_REGEX, file_name): return write_xlsx(data, file_name, worksheet_names=worksheet_names) elif re.search(XLS_EXT_REGEX, file_name): return write_xls(data, file_name, worksheet_names=worksheet_names) elif re.search(CSV_EXT_REGEX, file_name): return write_csv(data, file_name) else: return write_csv(data, file_name)
1,128,429
Writes out to old excel format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file. worksheet_names: A list of worksheet names (optional).
def write_xls(data, file_name, worksheet_names=None): workbook = xlwt.Workbook() for sheet_index, sheet_data in enumerate(data): if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]: name = worksheet_names[sheet_index] else: name = 'Worksheet {}'.format(sheet_index) sheet = workbook.add_sheet(name) for row_index, row in enumerate(sheet_data): for col_index, value in enumerate(row): sheet.write(row_index, col_index, value) workbook.save(file_name)
1,128,430
Writes out to csv format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file.
def write_csv(data, file_name, encoding='utf-8'): name_extension = len(data) > 1 root, ext = os.path.splitext(file_name) for i, sheet in enumerate(data): fname = file_name if not name_extension else root+"_"+str(i)+ext with open(fname, 'wb') as date_file: csv_file = csv.writer(date_file, encoding=encoding) for line in sheet: csv_file.writerow(line)
1,128,431
Registers a list of RML definition objects Args: ----- def_list: list of objects defining the rml definitions
def register_defs(self, def_list, **kwargs): for item in def_list: if isinstance(item, tuple): self.register_rml_def(*item, **kwargs) elif isinstance(item, dict): cp_kwargs = kwargs.copy() item.update(kwargs) self.register_rml_def(**item)
1,129,162
Registers the rml file locations for easy access Args: ----- location_type: ['package_all', 'package_file', 'directory', 'filepath'] location: The correlated location string based on the location_type filename: Optional, associated with 'package_file' location_type kwargs: ------- include_subfolders: Boolean
def register_rml_def(self, location_type, location, filename=None, **kwargs): if location_type == 'directory': self.register_directory(location, **kwargs) elif location_type == 'filepath': if not os.path.exists(location): raise OSError("File not found", location) if os.path.isfile(location): self.register_rml(location) elif filename: new_loc = os.path.join(location, filename) if not os.path.exists(new_loc): raise OSError("File not found", new_loc) elif os.path.isfile(new_loc): self.register_rml(new_loc) else: raise OSError("File not found", location) elif location_type.startswith('package'): pkg_path = \ importlib.util.find_spec(\ location).submodule_search_locations[0] if location_type.endswith('_all'): self.register_directory(pkg_path, **kwargs) elif location_type.endswith('_file'): filepath = os.path.join(pkg_path, filename) self.register_rml(filepath, **kwargs) else: raise NotImplementedError
1,129,163
Registers the filepath for an rml mapping Args: ----- filepath: the path to the rml file
def register_rml(self, filepath, **kwargs): name = os.path.split(filepath)[-1] if name in self.rml_maps and self.rml_maps[name] != filepath: raise Exception("RML name already registered. Filenames must be " "unique.", (self.rml_maps[name], filepath)) self.rml_maps[name] = filepath
1,129,164
Instantiates a RmlProcessor and registers it in the manager Args: ----- name: the name to register the processor mappings: the list of RML mapping definitions to use processor_type: the name of the RML processor to use
def make_processor(self, name, mappings, processor_type, **kwargs): from .processor import Processor if self.processors.get(name): raise LookupError("processor has already been created") if isinstance(mappings, list): mappings = [self.get_rml(item) for item in mappings] else: mappings = [self.get_rml(mappings)] self.processors[name] = Processor[processor_type](mappings, **kwargs) self.processors[name].name = name return self.processors[name]
1,129,166
Initialize :class:`RstToPdf` class. Args: style_path (str): Path to the style for the PDF. header (str, default None): Header which will be rendered to each page. footer (str, default FOOTER): Footer, which will be rendered to each page. See :attr:`FOOTER` for details. Returns: obj: Initialized object.
def _init_pdf(style_path, header=None, footer=FOOTER): return RstToPdf( language="cs", font_path=[ "/usr/share/fonts", "/usr/share/fonts/truetype/", '.', '/usr/local/lib/python2.7/dist-packages/rst2pdf/fonts' ], stylesheets=[ style_path ], breaklevel=0, splittables=True, header=header, footer=footer )
1,129,305
Generate code which makes sure that `tag_name` has enough items. Args: tag_name (str): Name of the container. index (int): Index of the item you want to obtain from container. notfoundmsg (str): Raise :class:`.UserWarning` with debug data and the following message. Returns: str: Python code.
def _required_idiom(tag_name, index, notfoundmsg): cond = "" if index > 0: cond = " or len(el) - 1 < %d" % index tag_name = str(tag_name) output = IND + "if not el%s:\n" % cond output += IND + IND + "raise UserWarning(\n" output += IND + IND + IND + "%s +\n" % repr(notfoundmsg.strip() + "\n") output += IND + IND + IND + repr("Tag name: " + tag_name) + " + '\\n' +\n" output += IND + IND + IND + "'El:' + str(el) + '\\n' +\n" output += IND + IND + IND + "'Dom:' + str(dom)\n" output += IND + IND + ")\n\n" return output + IND + "el = el[%d]\n\n" % index
1,129,520
Generate unittests for all of the generated code. Args: config (dict): Original configuration dictionary. See :mod:`~harvester.autoparser.conf_reader` for details. Returns: str: Python code.
def _unittest_template(config): output = "def test_parsers():\n" links = dict(map(lambda x: (x["link"], x["vars"]), config)) for link in links.keys(): output += IND + "# Test parsers against %s\n" % link output += IND + "html = handle_encodnig(\n" output += IND + IND + "_get_source(%s)\n" % repr(link) output += IND + ")\n" output += IND + "dom = dhtmlparser.parseString(html)\n" output += IND + "dhtmlparser.makeDoubleLinked(dom)\n\n" for var in links[link]: content = links[link][var]["data"].strip() output += IND + "%s = %s(dom)\n" % (var, _get_parser_name(var)) if "\n" in content: output += IND output += "assert %s.getContent().strip().split() == %s" % ( var, repr(content.split()) ) else: output += IND + "assert %s.getContent().strip() == %s" % ( var, repr(content) ) output += "\n\n" return output + "\n"
1,129,525
Generate parser for all `paths`. Args: config (dict): Original configuration dictionary used to get matches for unittests. See :mod:`~harvester.autoparser.conf_reader` for details. paths (dict): Output from :func:`.select_best_paths`. Returns: str: Python code containing all parsers for `paths`.
def generate_parsers(config, paths):
    output = ""  # assumed empty initial value

    # add source of neighbour picking functions from utils.py
    output += inspect.getsource(conf_reader._get_source) + "\n\n"
    output += inspect.getsource(utils._get_encoding) + "\n\n"
    output += inspect.getsource(utils.handle_encodnig) + "\n\n"
    output += inspect.getsource(utils.is_equal_tag) + "\n\n"
    output += inspect.getsource(utils.has_neigh) + "\n\n"

    output += "# Generated parsers\n"
    for name, path in paths.items():
        path = path[0]  # pick path with highest priority
        required = config[0]["vars"][name].get("required", False)
        notfoundmsg = config[0]["vars"][name].get("notfoundmsg", "")
        output += _generate_parser(name, path, required, notfoundmsg)

    output += "# Unittest\n"
    output += _unittest_template(config)

    output += "# Run tests of the parser\n"
    output += "if __name__ == '__main__':\n"
    output += IND + "test_parsers()"

    return output
1,129,526
Return common root of the two vectors. Args: vec1 (list/tuple): First vector. vec2 (list/tuple): Second vector. Usage example:: >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0]) [1, 2] Returns: list: Common part of two vectors or blank list.
def common_vector_root(vec1, vec2):
    root = []
    for v1, v2 in zip(vec1, vec2):
        if v1 == v2:
            root.append(v1)
        else:
            return root
    return root
1,129,556
Find root which is common for all `elements`. Args: elements (list): List of double-linked HTMLElement objects. Returns: list: Vector of HTMLElement containing path to common root.
def find_common_root(elements): if not elements: raise UserWarning("Can't find common root - no elements suplied.") root_path = el_to_path_vector(elements.pop()) for el in elements: el_path = el_to_path_vector(el) root_path = common_vector_root(root_path, el_path) if not root_path: raise UserWarning( "Vectors without common root:\n%s" % str(el_path) ) return root_path
1,129,557
Return list of all dirs and files inside given dir. Also can filter contents to return only dirs or files. Args: - dir_name: Which directory we need to scan (relative) - get_dirs: Return dirs list - get_files: Return files list - hide_ignored: Exclude files and dirs with initial underscore
def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False): if get_dirs is None and get_files is None: get_dirs = True get_files = True source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name) dirs = [] for dir_or_file_name in os.listdir(source_dir): path = os.path.join(source_dir, dir_or_file_name) if hide_ignored and dir_or_file_name.startswith('_'): continue is_dir = os.path.isdir(path) if get_dirs and is_dir or get_files and not is_dir: dirs.append(dir_or_file_name) return dirs
1,129,628
Object initialization Args: key: String name of an attributes key that represents the unique identity of the request attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond to the request attribute values
def __init__(self, key, attributes):
    self.key = key
    self.attributes = attributes
1,129,642
Convert communication namedtuple to this class. Args: pub (obj): :class:`.Archive` instance which will be converted. Returns: obj: :class:`DBArchive` instance.
def from_comm(cls, pub): filename = None if pub.b64_data: filename = cls._save_to_unique_filename(pub) return cls( isbn=pub.isbn, uuid=pub.uuid, aleph_id=pub.aleph_id, dir_pointer=filename )
1,129,655
Fetch a commit. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the commit to fetch. Returns: A dict with data about the commit.
def get_commit(profile, sha):
    resource = "/commits/" + sha
    data = api.get_request(profile, resource)
    return prepare(data)
1,129,686
Constructor. Args: name (str): Name of the periodical. sub_trees (list): List of other trees. sub_publications (list): List of sub-publication UUID's. aleph_id (str): ID used in aleph. issn (str): ISSN given to the periodical. is_public (bool): Is the tree public? Raises: ValueError: In case that `name` is not set, or `sub_trees` or `sub_publications` is not list/tuple.
def __init__(self, *args, **kwargs): super(self.__class__, self).__init__(*args, **kwargs) # type checks if not self.name.strip(): raise ValueError(".name property must be set!") if type(self.sub_trees) not in [list, tuple]: raise ValueError(".sub_trees property must contain list/tuple!") if type(self.sub_publications) not in [list, tuple]: raise ValueError(".sub_trees property must contain list/tuple!") if not self.path: self.path = self.name for sub_tree in self.sub_trees: sub_tree.path = os.path.join(self.path, sub_tree.name)
1,129,704
Returns a specific option specified in a config file Arguments: option_name -- Name of the option (example host_name) section_name -- Which section of the config (default: main) examples: >>> get_option("some option", default="default result") 'default result'
def get_option(option_name, section_name="main", default=_sentinel, cfg_file=cfg_file):
    defaults = get_defaults()

    # As a quality issue, we strictly disallow looking up an option that does not have a default
    # value specified in the code
    #if option_name not in defaults.get(section_name, {}) and default == _sentinel:
    #    raise ValueError("There is no default value for Option %s in section %s" % (option_name, section_name))

    # If default argument was provided, we set variable my_defaults to that
    # otherwise use the global nago defaults
    if default != _sentinel:
        my_defaults = {option_name: default}
    else:
        my_defaults = defaults.get(section_name, {})

    # Lets parse our configuration file and see what we get
    parser = get_parser(cfg_file)
    return parser.get(section_name, option_name, vars=my_defaults)
1,129,745
Write a new nago.ini config file from the defaults. Arguments: cfg_file -- File that is written to like /etc/nago/nago.ini defaults -- Dictionary with default values to use
def generate_configfile(cfg_file,defaults=defaults): # Create a directory if needed and write an empty file _mkdir_for_config(cfg_file=cfg_file) with open(cfg_file, 'w') as f: f.write('') for section in defaults.keys(): set_option(section, cfg_file=cfg_file, **defaults[section])
1,129,750
Convert a nested dictionary from one convention to another. Args: d (dict): dictionary (nested or not) to be converted. convert_function (func): function that takes the string in one convention and returns it in the other one. Returns: Dictionary with the new keys.
def morph_dict(d, convert_function): # Attribution: https://stackoverflow.com/a/33668421/633213 new = {} for k, v in six.iteritems(d): new_v = v if isinstance(v, dict): new_v = morph_dict(v, convert_function) elif isinstance(v, list): new_v = list() for x in v: new_v.append( morph_dict(x, convert_function) ) new[convert_function(k)] = new_v return new
1,129,823
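A sketch of morph_dict converting keys between naming conventions, assuming the function above (and its six dependency) is in scope; camel_to_snake is a hypothetical helper written for the demo:

def camel_to_snake(name):
    # naive camelCase -> snake_case conversion, good enough for a demo
    return ''.join('_' + c.lower() if c.isupper() else c for c in name).lstrip('_')

data = {'firstName': 'Ada', 'contactInfo': {'phoneNumber': '555-0100'}}
print(morph_dict(data, camel_to_snake))
# {'first_name': 'Ada', 'contact_info': {'phone_number': '555-0100'}}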
Summary: Initiate configuration menu to customize metal runtime options. Console script ```keyconfig``` invokes this option_configure directly in debug mode to display the contents of the local config file (if exists) Args: :path (str): full path to default local configuration file location :debug (bool): debug flag, when True prints out contents of local config file Returns: TYPE (bool): Configuration Success | Failure
def option_configure(debug=False, path=None): if CONFIG_SCRIPT in sys.argv[0]: debug = True # set debug mode if invoked from CONFIG_SCRIPT if path is None: path = local_config['PROJECT']['CONFIG_PATH'] if debug: if os.path.isfile(path): debug_mode('local_config file: ', local_config, debug, halt=True) else: msg = debug_mode(msg, {'CONFIG_PATH': path}, debug, halt=True) r = configuration.init(debug, path) return r
1,129,828
Format a long string into a block of newline separated text. Arguments: See iter_format_block().
def format( self, text=None, width=60, chars=False, fill=False, newlines=False, prepend=None, append=None, strip_first=False, strip_last=False, lstrip=False): # Basic usage of iter_format_block(), for convenience. return '\n'.join( self.iter_format_block( (self.text if text is None else text) or '', prepend=prepend, append=append, strip_first=strip_first, strip_last=strip_last, width=width, chars=chars, fill=fill, newlines=newlines, lstrip=lstrip ) )
1,129,841
Returns numerator / denominator, but instead of a ZeroDivisionError: 0 / 0 = 0. x / 0 = float('inf') This is not mathematically correct, but often practically OK. Args: numerator (float or int) denominator (float or int) Returns: (float) Raises: -
def div(numerator, denominator):
    try:
        return numerator/denominator
    except ZeroDivisionError:
        if numerator == 0:
            return 0.
        elif denominator == 0:
            return float('inf')  # return None
        else:
            return numerator/denominator
1,129,849
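A short sketch of the zero-tolerant division above (results assume Python 3 true division):

print(div(10, 4))  # 2.5
print(div(0, 0))   # 0.0
print(div(3, 0))   # inf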
reduces a multiline string to a single line of text. args: string: the text to reduce
def reduce_multiline(string):
    string = str(string)
    return " ".join([item.strip() for item in string.split("\n")
                     if item.strip()])
1,130,122
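A sketch of reduce_multiline collapsing indented, blank-line-separated text into one line, assuming the function above is in scope:

text = '''
    first line
      second line

    third line
'''
print(reduce_multiline(text))  # 'first line second line third line'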
Takes a list or multiline text string and formats it. * multiline text strings get converted to a single line * list entries are joined by a carriage return * params are passed into the string with a python format call args: text: list or string to format params: arguments for formatting the text kwargs: prepend: string to prepend to each line max_width: int of max width for a line indent: number of spaces to indent
def format_multiline(text, params={}, **kwargs): def format_kwargs(text, params={}, **kwargs): if params: if isinstance(params, dict): kwargs.update(params) else: kwargs = params try: return text.format(**kwargs) except (TypeError, IndexError): if isinstance(kwargs, str): return text.format(kwargs) else: return text.format(*kwargs) if isinstance(text, list): new_text = "\n".join([format_max_width( format_kwargs( reduce_multiline(item), params, **kwargs), **kwargs) for item in text]) else: new_text = format_max_width(format_kwargs(reduce_multiline(text), params, **kwargs), **kwargs) return new_text
1,130,123
Takes a string and formats it to a max width separated by carriage returns args: max_width: the max width for a line kwargs: indent: the number of spaces to add to the start of each line prepend: text to add to the start of each line
def format_max_width(text, max_width=None, **kwargs): ind = '' if kwargs.get("indent"): ind = ''.ljust(kwargs['indent'], ' ') prepend = ind + kwargs.get("prepend", "") if not max_width: return "{}{}".format(prepend, text) len_pre = len(kwargs.get("prepend", "")) + kwargs.get("indent", 0) test_words = text.split(" ") word_limit = max_width - len_pre if word_limit < 3: word_limit = 3 max_width = len_pre + word_limit words = [] for word in test_words: if len(word) + len_pre > max_width: n = max_width - len_pre words += [word[i:i + word_limit] for i in range(0, len(word), word_limit)] else: words.append(word) idx = 0 lines = [] idx_limit = len(words) - 1 sub_idx_limit = idx_limit while idx < idx_limit: current_len = len_pre line = prepend for i, word in enumerate(words[idx:]): if (current_len + len(word)) == max_width and line == prepend: idx += i or 1 line += word lines.append(line) if idx == idx_limit: idx -= 1 sub_idx_limit -= 1 del words[0] break if (current_len + len(word) + 1) > max_width: idx += i if idx == idx_limit: idx -= 1 sub_idx_limit -= 1 del words[0] if idx == 0: del words[0] lines.append(line) break if (i + idx) == sub_idx_limit: idx += i or 1 if line != prepend: line = " ".join([line, word]) elif word: line += word lines.append(line) else: if line != prepend: line = " ".join([line, word]) elif word: line += word current_len = len(line) return "\n".join(lines)
1,130,124
Sets up the ArgumentParser. Args: argv: an array of arguments
def setup(argv): parser = argparse.ArgumentParser( description='Compute Jekyl- and prose-aware wordcounts', epilog='Accepted filetypes: plaintext, markdown, markdown (Jekyll)') parser.add_argument('-S', '--split-hyphens', action='store_true', dest='split_hyphens', help='split hyphenated words rather than counting ' 'them as one word ("non-trivial" counts as two words ' 'rather than one)') parser.add_argument('-u', '--update', action='store_true', help='update the jekyll file in place with the counts.' ' Does nothing if the file is not a Jekyll markdown ' 'file. Implies format=yaml, invalid with input ' 'from STDIN and non-Jekyll files.') parser.add_argument('-f', '--format', nargs='?', choices=['yaml', 'json', 'default'], default='default', help='output format.') parser.add_argument('-i', '--indent', type=int, nargs='?', default=4, help='indentation depth (default: 4).') parser.add_argument('file', type=argparse.FileType('rb'), help='file to parse (or - for STDIN)') return parser.parse_args(argv)
1,130,208
Processes data provided to print a count object, or update a file. Args: args: an ArgumentParser object returned by setup()
def prose_wc(args): if args.file is None: return 1 if args.split_hyphens: INTERSTITIAL_PUNCTUATION.append(re.compile(r'-')) content = args.file.read().decode('utf-8') filename = args.file.name body = strip_frontmatter(content) parsed = markdown_to_text(body) result = wc(filename, body, parsed=parsed, is_jekyll=(body != content)) if (args.update and filename != '_stdin_' and result['counts']['type'] == 'jekyll'): update_file(filename, result, content, args.indent) else: _mockable_print({ 'yaml': yaml.safe_dump(result, default_flow_style=False, indent=args.indent), 'json': json.dumps(result, indent=args.indent), 'default': default_dump(result), }[args.format]) return 0
1,130,209
Converts markdown to text. Args: body: markdown (or plaintext, or maybe HTML) input Returns: Plaintext with all tags and frills removed
def markdown_to_text(body):
    # Turn our input into HTML
    md = markdown.markdown(body, extensions=[
        'markdown.extensions.extra'
    ])

    # Safely parse HTML so that we don't have to parse it ourselves
    soup = BeautifulSoup(md, 'html.parser')

    # Return just the text of the parsed HTML
    return soup.get_text()
1,130,210
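A sketch of markdown_to_text, assuming the markdown and BeautifulSoup (bs4) packages the function imports are installed:

md_source = '# Title\n\nSome *emphasised* text with a [link](http://example.com).'
print(markdown_to_text(md_source))
# Title
# Some emphasised text with a link.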
Count the words, characters, and paragraphs in a string. Args: contents: the original string to count filename (optional): the filename as provided to the CLI parsed (optional): a parsed string, expected to be plaintext only is_jekyll: whether the original contents were from a Jekyll file Returns: An object containing the various counts
def wc(filename, contents, parsed=None, is_jekyll=False):
    if is_jekyll:
        fmt = 'jekyll'
    else:
        fmt = 'md/txt'
    body = parsed.strip() if parsed else contents.strip()

    # Strip the body down to just words
    words = re.sub(r'\s+', ' ', body, flags=re.MULTILINE)
    for punctuation in INTERSTITIAL_PUNCTUATION:
        words = re.sub(punctuation, ' ', words)
    punct = re.compile(r'[^\w\s]', re.U)
    words = punct.sub('', words)

    # Retrieve only non-space characters
    real_characters = re.sub(r'\s', '', words)

    # Count paragraphs in an intelligent way
    paragraphs = [1 if len(x) == 0 else 0
                  for x in contents.strip().splitlines()]
    for index, paragraph in enumerate(paragraphs):
        if paragraph == 1 and paragraphs[index + 1] == 1:
            paragraphs[index] = 0

    return {
        'counts': {
            'file': filename,
            'type': fmt,
            'paragraphs': sum(paragraphs) + 1,
            'words': len(re.split(r'\s+', words)),
            'characters_real': len(real_characters),
            'characters_total': len(words),
        }
    }
1,130,211
Updates a Jekyll file to contain the counts from an object. This just converts the results to YAML and adds to the Jekyll frontmatter. Args: filename: the Jekyll file to update result: the results object from `wc` content: the contents of the original file indent: the indentation level for dumping YAML
def update_file(filename, result, content, indent): # Split the file into frontmatter and content parts = re.split('---+', content, 2) # Load the frontmatter into an object frontmatter = yaml.safe_load(parts[1]) # Add the counts entry in the results object to the frontmatter frontmatter['counts'] = result['counts'] # Set the frontmatter part backed to the stringified version of the # frontmatter object parts[1] = '\n{}'.format( yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent)) result = '---'.join(parts) # Write everything back to the file with open(filename, 'wb') as f: f.write(result.encode('utf-8')) print('{} updated.'.format(filename))
1,130,212
Add a new log entry to the nago log. Arguments: level - Arbitrary string, levels should be syslog style (debug,log,info,warning,error) message - Arbitrary string, the message that is to be logged.
def log(message, level="info"):
    now = time.time()
    entry = {}
    entry['level'] = level
    entry['message'] = message
    entry['timestamp'] = now
    _log_entries.append(entry)
1,130,213
Decorate other functions with this one to allow access Arguments: nago_access -- Type of access required to call this function By default only master is allowed to make that call nago_name -- What name this function will have to remote api Default is the same as the name of the function being decorated.
def nago_access(access_required="master", name=None): def real_decorator(func): func.nago_access = access_required func.nago_name = name or func.__name__ @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper return real_decorator
1,130,214
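A sketch of applying the decorator above, marking a function for master-only access under a custom remote name (the decorated function is hypothetical, and the decorator's functools.wraps import is assumed in scope):

@nago_access(access_required="master", name="get_status")
def status():
    return "ok"

print(status.nago_access, status.nago_name)  # master get_status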
Establishes an API client for one-way communication connection with an API Server Arguments: - connection (xbahn.connection.Connection) Keyword Arguments: - client_class (xbahn.api.Client): if supplied use this class to instantiate the client object. If omitted will use xbahn.api.Client. Returns: - client_class: client instance
def api_client(connection, client_class=xbahn.api.Client): return client_class( link=xbahn.connection.link.Link( # use the connection receive messages (server responses) receive=connection, # use the connection to send messages (initiate requests to server) send=connection ) )
1,130,764
Establishes an API Server on the supplied connection Arguments: - connection (xbahn.connection.Connection) - server_class (xbahn.api.Server) Returns: - server_class: server instance
def api_server(connection, server_class): # run api server on connection return server_class( link=xbahn.connection.link.Link( # use the connection to receive messages receive=connection, # use the connection to respond to received messages respond=connection ) )
1,130,766
Send an echo request to a nago host. Arguments: token_or_hostname -- The remote node to ping If node is not provided, simply return pong You can use the special node names "server" or "master"
def ping(token_or_hostname=None): if not token_or_hostname: return "Pong!" node = nago.core.get_node(token_or_hostname) if not node and token_or_hostname in ('master', 'server'): token_or_hostname = nago.settings.get_option('server') node = nago.core.get_node(token_or_hostname) if not node: try: address = socket.gethostbyname(token_or_hostname) node = nago.core.Node() node['host_name'] = token_or_hostname node['address'] = address node['access'] = 'node' if token_or_hostname == nago.settings.get_option('server'): node['access'] = 'master' node.save() except Exception: raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % token_or_hostname) return node.send_command('nodes', 'ping')
1,130,773
Scan `path` for viruses using ``clamd`` or ``clamscan`` (depends on :attr:`settings.USE_CLAMD`). Args: path (str): Relative or absolute path of file/directory you need to scan. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. Raises: ValueError: When the server is not running. AssertionError: When the internal file doesn't exist.
def scan_file(path):
    path = os.path.abspath(path)

    if settings.USE_CLAMD:
        return clamd.scan_file(path)
    else:
        return clamscan.scan_file(path)
1,130,931
Save `b64_data` to temporary file and scan it for viruses. Args: filename (str): Name of the file - used as basename for tmp file. b64_data (str): Content of the file encoded in base64. Returns: dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
def save_and_scan(filename, b64_data):
    with NTFile(suffix="_" + os.path.basename(filename), mode="wb") as ifile:
        ifile.write(
            b64decode(b64_data)
        )
        ifile.flush()

        os.chmod(ifile.name, 0o755)

        return scan_file(ifile.name)
1,130,932
returns a dictionary of rdfclasses based on a lowercase search args: value: the value to search by
def find(value):
    value = str(value).lower()
    rtn_dict = RegistryDictionary()
    for attr in dir(MODULE.rdfclass):
        if value in attr.lower():
            try:
                item = getattr(MODULE.rdfclass, attr)
                if issubclass(item, RdfClassBase):
                    rtn_dict[attr] = item
            except TypeError:
                pass
    return rtn_dict
1,131,184
Creates a list of the class hierarchy Args: ----- class_name: name of the current class bases: list/tuple of bases for the current class
def list_hierarchy(class_name, bases):
    class_list = [Uri(class_name)]
    for base in bases:
        if base.__name__ not in IGNORE_CLASSES:
            class_list.append(Uri(base.__name__))
    return list([i for i in set(class_list)])
1,131,186
Reads through the class defs and gets the related es class definitions Args: ----- class_defs: RdfDataset of class definitions
def es_get_class_defs(cls_def, cls_name):
    rtn_dict = {key: value for key, value in cls_def.items()
                if key.startswith("kds_es")}
    for key in rtn_dict:
        del cls_def[key]
    return rtn_dict
1,131,187
Returns the es_defs with the instantiated rml_processor Args: ----- es_defs: the rdf_class elasticsearch definitions cls_name: the name of the tied class
def get_rml_processors(es_defs): proc_defs = es_defs.get("kds_esRmlProcessor", []) if proc_defs: new_defs = [] for proc in proc_defs: params = proc['kds_rmlProcessorParams'][0] proc_kwargs = {} if params.get("kds_rtn_format"): proc_kwargs["rtn_format"] = params.get("kds_rtn_format")[0] new_def = dict(name=proc['rdfs_label'][0], subj=params["kds_subjectKwarg"][0], proc_kwargs=proc_kwargs, force=proc.get('kds_forceNested',[False])[0], processor=CFG.rml.get_processor(\ proc['rdfs_label'][0], proc['kds_esRmlMapping'], proc['rdf_type'][0])) new_defs.append(new_def) es_defs['kds_esRmlProcessor'] = new_defs return es_defs
1,131,188
adds a property and its value to the class instance args: pred: the predicate/property to add obj: the value/object to add obj_method: *** No longer used.
def add_property(self, pred, obj): pred = Uri(pred) try: self[pred].append(obj) # except AttributeError: # new_list = [self[pred]] # new_list.append(obj) # self[pred] = new_list except KeyError: try: new_prop = self.properties[pred] except AttributeError: self.properties = {} self.add_property(pred, obj) return except KeyError: try: new_prop = MODULE.rdfclass.properties[pred] except KeyError: new_prop = MODULE.rdfclass.make_property({}, pred, self.class_names) try: self.properties[pred] = new_prop except AttributeError: self.properties = {pred: new_prop} init_prop = new_prop(self, get_attr(self, "dataset")) setattr(self, pred, init_prop) self[pred] = init_prop self[pred].append(obj) if self.dataset: self.dataset.add_rmap_item(self, pred, obj)
1,131,197
converts the class to a json compatible python dictionary Args: uri_format('sparql_uri','pyuri'): The format in which uri values will be returned Returns: dict: a json compatible python dictionary
def conv_json(self, uri_format="sparql_uri", add_ids=False): def convert_item(ivalue): nvalue = ivalue if isinstance(ivalue, BaseRdfDataType): if ivalue.type == 'uri': if ivalue.startswith("pyuri") and uri_format == "pyuri": nvalue = getattr(ivalue, "sparql") else: nvalue = getattr(ivalue, uri_format) else: nvalue = ivalue.to_json elif isinstance(ivalue, RdfClassBase): if ivalue.subject.type == "uri": nvalue = ivalue.conv_json(uri_format, add_ids) elif ivalue.subject.type == "bnode": nvalue = ivalue.conv_json(uri_format, add_ids) elif isinstance(ivalue, list): nvalue = [] for item in ivalue: temp = convert_item(item) nvalue.append(temp) return nvalue rtn_val = {key: convert_item(value) for key, value in self.items()} #pdb.set_trace() if add_ids: if self.subject.type == 'uri': rtn_val['uri'] = self.subject.sparql_uri rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest() #return {key: convert_item(value) for key, value in self.items()} return rtn_val
1,131,198
Returns the es mapping for the class args: ----- base_class: The root class being indexed role: states how the class should be mapped depending upon whether it is used as a subject or an object. options are es_Nested or rdf_class
def es_mapping(cls, base_class=None, role='rdf_class', **kwargs): def _prop_filter(prop, value, **kwargs): try: use_prop = len(set(value.owl_inverseOf) - parent_props) > 0 except AttributeError: use_prop = True # if not use_prop: # print(prop) if prop in nested_props and use_prop: return True return False if not base_class: base_class = cls es_map = {} # pdb.set_trace() if kwargs.get("depth"): # and kwargs.get('class') == cls.__name__: kwargs['depth'] += 1 initial = False else: initial = True kwargs['depth'] = 1 kwargs['class'] = cls.__name__ kwargs['class_obj'] = cls if kwargs.get('class_obj'): parent_props = set(cls.properties) else: parent_props = set() if role == 'rdf_class': es_map = {} es_map = {prop: value.es_mapping(base_class) \ for prop, value in cls.properties.items()} elif role == 'es_Nested': # print(locals()) # pdb.set_trace() if cls == base_class: nested_props = LABEL_FIELDS else: nested_props = cls.es_defs.get('kds_esNestedProps', list(cls.properties.keys())) es_map = {prop: value.es_mapping(base_class, **kwargs) \ for prop, value in cls.properties.items() \ if _prop_filter(prop, value, **kwargs)} ref_map = { "type" : "keyword" } lower_map = { "type": "text", "fields": { "lower": es_idx_types['es_Lower']['lower'], 'keyword': {'type': 'keyword'} } } ignore_map = { "index": False, "type": "text" } if cls == base_class: es_map['label'] = ref_map es_map['value'] = lower_map if cls.cls_defs.get('kds_storageType',[None])[0] != "blanknode" \ and cls == base_class: es_map['id'] = ref_map es_map['uri'] = ref_map rml_procs = cls.es_defs.get("kds_esRmlProcessor", []) rml_procs = [proc for proc in rml_procs if role == 'rdf_class' or proc['force']] if rml_procs: rml_maps = {} for rml in rml_procs: rml_maps[rml['name']] = ignore_map if rml_maps: es_map['rml_map'] = {"properties": rml_maps} # es_map['turtle'] = ignore_map return es_map
1,131,199
Returns the es indexers for the class args: ----- base_class: The root class being indexed role: states how the class should be mapped depending upon whether it is used as a subject or an object. options are es_Nested or rdf_class
def es_indexers(cls, base_class=None, role='rdf_class', **kwargs): def _prop_filter(prop, value, **kwargs): try: use_prop = len(set(value.owl_inverseOf) - parent_props) > 0 except AttributeError: use_prop = True if prop in nested_props and use_prop: return True return False if not base_class: base_class = cls rtn_list = [] # pdb.set_trace() if kwargs.get("depth"): # and kwargs.get('class') == cls.__name__: kwargs['depth'] += 1 initial = False else: initial = True kwargs['depth'] = 1 kwargs['class'] = cls.__name__ kwargs['class_obj'] = cls if kwargs.get('class_obj'): parent_props = set(cls.properties) else: parent_props = set() if role == 'rdf_class': for value in cls.properties.values(): # pdb.set_trace() rtn_list += value.es_indexers(base_class, **kwargs) elif role == 'es_Nested': if cls == base_class: nested_props = LABEL_FIELDS else: nested_props = cls.es_defs.get('kds_esNestedProps', list(cls.properties.keys())) used_props = [value for prop, value in cls.properties.items() \ if _prop_filter(prop, value, **kwargs)] for value in cls.properties.values(): # pdb.set_trace() rtn_list += value.es_indexers(base_class, **kwargs) if cls.es_defs.get('kds_esIndex',[None])[0]: rtn_list += [cls] return list(set(rtn_list))
1,131,200
Returns a JSON object of the class for insertion into es args: role: states how the class data should be returned depending upon whether it is used as a subject or an object. options are kds_esNested or rdf_class remove_empty: True removes empty items from the es object
def es_json(self, role='rdf_class', remove_empty=True, **kwargs): def test_idx_status(cls_inst, **kwargs): if kwargs.get("force") == True: return False idx_time = cls_inst.get("kds_esIndexTime", [None])[0] mod_time = cls_inst.get("dcterm_modified", [None])[0] error_msg = cls_inst.get("kds_esIndexError", [None])[0] if (not idx_time) or \ error_msg or \ (idx_time and mod_time and idx_time < mod_time): return False return True # if self.__class__.__name__ == 'rdf_type': # pdb.set_trace() rtn_obj = {} if kwargs.get("depth"): kwargs['depth'] += 1 else: kwargs['depth'] = 1 if role == 'rdf_class': if test_idx_status(self, **kwargs): return None for prop, value in self.items(): if prop in ['kds_esIndexTime', 'kds_esIndexError']: continue new_val = value.es_json() rtn_method = get_attr(self[prop], 'kds_esObjectType', []) if 'kdr_Array' in rtn_method: rtn_obj[prop] = new_val elif (remove_empty and new_val) or not remove_empty: if len(new_val) == 1: rtn_obj[prop] = new_val[0] else: rtn_obj[prop] = new_val nested_props = None else: try: nested_props = self.es_defs.get('kds_esNestedProps', list(self.keys())).copy() except AttributeError: nested_props = list(self.keys()) for prop, value in self.items(): # if prop == 'bf_hasInstance': # pdb.set_trace() if prop in ['kds_esIndexTime', 'kds_esIndexError']: continue new_val = value.es_json(**kwargs) rtn_method = get_attr(self[prop], 'kds_esObjectType', []) if 'kdr_Array' in rtn_method: rtn_obj[prop] = new_val elif (remove_empty and new_val) or not remove_empty: if len(new_val) == 1: rtn_obj[prop] = new_val[0] \ if not isinstance(new_val, dict) \ else new_val else: rtn_obj[prop] = new_val # if 'bf_Work' in self.hierarchy: # pdb.set_trace() rtn_obj = get_es_label(rtn_obj, self) rtn_obj = get_es_value(rtn_obj, self) rtn_obj = get_es_ids(rtn_obj, self) if nested_props: nested_props += ['value', 'id', 'uri'] rtn_obj = {key: value for key, value in rtn_obj.items() if key in nested_props} # rml_procs = self.es_defs.get("kds_esRmlProcessor", []) # # if role == 'rdf_class': # # pdb.set_trace() # rml_procs = [proc for proc in rml_procs # if role == 'rdf_class' or # proc['force']] # if rml_procs: # rml_maps = {} # for rml in rml_procs: # proc_kwargs = {rml['subj']: self.subject, # "dataset": self.dataset} # proc_kwargs.update(rml['proc_kwargs']) # rml_maps[rml['name']] = rml['processor'](**proc_kwargs) # if rml_maps: # rtn_obj['rml_map'] = rml_maps rml_maps = self.get_all_rml(role=role) if rml_maps: rtn_obj['rml_map'] = rml_maps # if self.get('bf_contribution'): # pdb.set_trace() return rtn_obj
1,131,202
returns the rml mapping output for the specified mapping Args: ----- rml_def: The name of the mapping or a dictionary definition
def get_rml(self, rml_def, **kwargs): if isinstance(rml_def, str): rml_procs = self.es_defs.get("kds_esRmlProcessor", []) for item in rml_procs: if item['name'] == rml_def: rml_def = item break proc_kwargs = {rml_def['subj']: self.subject, "dataset": self.dataset} proc_kwargs.update(rml_def['proc_kwargs']) return rml_def['processor'](**proc_kwargs)
1,131,203
sets the subject value for the class instance Args: subject(dict, Uri, str): the subject for the class instance
def _set_subject(self, subject): # if not subject: # self.subject = def test_uri(value): # .__wrapped__ if not isinstance(value, (Uri, BlankNode)): try: if value.startswith("_:"): return BlankNode(value) else: return Uri(value) except: return BlankNode() else: return value if isinstance(subject, dict): self.subject = test_uri(subject['s']) if isinstance(subject['o'], list): for item in subject['o']: self.add_property(subject['p'], item) else: self.add_property(subject['p'], subject['o']) else: self.subject = test_uri(subject)
1,131,205
Creates an instance of the cache entry. Args: key: the unique key used to identify and locate the value. value: the cached value. timeout: time to live for the object in milliseconds
def __init__(self, key, value, timeout): self.key = key self.value = value self.expiration = time.clock() * 1000 + timeout
1,131,593
Changes the cached value and updates creation time. Args: value: the new cached value. timeout: time to live for the object in milliseconds Returns: None
def set_value(self, value, timeout): self.value = value self.expiration = time.clock() * 1000 + timeout
1,131,594
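A minimal, hedged reconstruction of the class these two methods appear to belong to. The class name `CacheEntry`, the `is_expired` helper and the usage below are assumptions, and `time.perf_counter()` stands in for `time.clock()`, which was removed in Python 3.8; the millisecond arithmetic is kept as in the snippets above.

import time

class CacheEntry(object):  # hypothetical name for the owning class
    def __init__(self, key, value, timeout):
        self.key = key
        self.value = value
        self.expiration = time.perf_counter() * 1000 + timeout

    def set_value(self, value, timeout):
        self.value = value
        self.expiration = time.perf_counter() * 1000 + timeout

    def is_expired(self):
        # stale once the stored millisecond deadline has passed
        return time.perf_counter() * 1000 > self.expiration

entry = CacheEntry("user:42", {"name": "demo"}, timeout=500)
print(entry.is_expired())        # False right after creation
entry.set_value({"name": "demo2"}, timeout=0)
time.sleep(0.01)
print(entry.is_expired())        # True once the timeout has elapsed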
Create the poller. At least one of the optional parameters must be provided. Parameters: ----------- interrupt_handle : HANDLE (int), optional If provided, the program will generate a Ctrl+C event when this handle is signaled. parent_handle : HANDLE (int), optional If provided, the program will terminate immediately when this handle is signaled.
def __init__(self, interrupt_handle=None, parent_handle=None): assert(interrupt_handle or parent_handle) super(ParentPollerWindows, self).__init__() if ctypes is None: raise ImportError("ParentPollerWindows requires ctypes") self.daemon = True self.interrupt_handle = interrupt_handle self.parent_handle = parent_handle
1,131,669
creates a namespace if it does not exist args: name: the name of the namespace ignore_errors(bool): if True, ignore the case where the namespace already exists returns: True if created, or if it already exists and ignore_errors is True; raises an error if the namespace already exists and ignore_errors is False
def create_namespace(self, name, ignore_errors=False): if not self.has_namespace(name): self.namespaces[name] = ConjunctiveGraph() return True elif ignore_errors: return True else: raise RuntimeError("namespace '%s' already exists" % name)
1,131,823
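A stripped-down stand-in for the manager class the method above belongs to, assuming rdflib is installed; the class name and the "kb" namespace are hypothetical, but the create/has pattern mirrors the snippet.

from rdflib import ConjunctiveGraph

class NamespaceStoreDemo(object):  # hypothetical owner of the namespaces dict
    def __init__(self):
        self.namespaces = {}

    def has_namespace(self, name):
        return name in self.namespaces

    def create_namespace(self, name, ignore_errors=False):
        if not self.has_namespace(name):
            self.namespaces[name] = ConjunctiveGraph()
            return True
        elif ignore_errors:
            return True
        raise RuntimeError("namespace '%s' already exists" % name)

store = NamespaceStoreDemo()
print(store.create_namespace("kb"))                       # True
print(store.create_namespace("kb", ignore_errors=True))   # True, duplicate ignored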
deletes a namespace args: name: the name of the namespace ignore_errors(bool): if True, ignore the case where the namespace does not exist returns: True if deleted, or if it does not exist and ignore_errors is True; raises an error if the namespace does not exist and ignore_errors is False
def delete_namespace(self, name, ignore_errors=False): if self.has_namespace(name): del self.namespaces[name] return True elif ignore_errors: return True else: raise RuntimeError("namespace '%s' does not exist" % name)
1,131,824
runs a sparql query and returns the results args: sparql: the sparql query to run namespace: the namespace to run the sparql query against mode: ['get'(default), 'update'] the type of sparql query rtn_format: ['json'(default), 'xml'] format of query results
def query(self, sparql, mode="get", namespace=None, rtn_format="json", **kwargs): if kwargs.get("debug"): log.setLevel(logging.DEBUG) conn = self.conn if namespace and namespace != self.namespace: conn = self.tstore.get_namespace(namespace) else: namespace = self.namespace if rtn_format not in self.qry_results_formats: raise KeyError("rtn_format was '%s'. Allowed values are %s" % \ (rtn_format, self.qry_results_formats)) if not sparql.strip().lower().startswith("prefix"): sparql = "%s\n%s" % (NSM.prefix(), sparql) start = datetime.datetime.now() if mode == "get": try: result = json.loads( \ conn.query(sparql).serialize(\ format=rtn_format).decode()).get('results', {}).get('bindings', []) except: print(sparql) raise if mode == "update": try: result = conn.update(sparql) except: print(sparql) raise log.debug("\nmode='%s', namespace='%s', rtn_format='%s'\n**** SPAQRL QUERY \n%s\nQuery Time: %s", mode, namespace, rtn_format, sparql, (datetime.datetime.now()-start)) return result
1,131,826
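A self-contained sketch of the decode path the query method takes in "get" mode, using a plain rdflib Graph in place of the managed connection; the triple, query and variable names are illustrative, and the isinstance guard covers rdflib versions whose result serializer returns str rather than bytes.

import json
from rdflib import Graph

# tiny in-memory graph standing in for the triplestore connection
g = Graph()
g.parse(data='<http://example.org/a> '
             '<http://www.w3.org/2000/01/rdf-schema#label> "demo" .',
        format="turtle")

sparql = ('SELECT ?s ?label WHERE '
          '{ ?s <http://www.w3.org/2000/01/rdf-schema#label> ?label }')

# serialize the result to SPARQL-results JSON, then pull out results.bindings
raw = g.query(sparql).serialize(format="json")
if isinstance(raw, bytes):
    raw = raw.decode()
bindings = json.loads(raw).get("results", {}).get("bindings", [])
for row in bindings:
    print(row["s"]["value"], row["label"]["value"])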
runs a sparql update query and returns the results args: sparql: the sparql query to run namespace: the namespace to run the sparql query against
def update_query(self, sparql, namespace=None, **kwargs): return self.query(sparql, "update", namespace, **kwargs)
1,131,827
loads data via file stream from python to the triplestore Args: data: The data or filepath to load datatype(['ttl', 'xml', 'rdf']): the type of data to load namespace: the namespace to use graph: the graph to load the data to. is_file(False): If True, python will read the data argument as a filepath, determine the datatype from the file extension, read the file and send it to blazegraph as a datastream
def load_data(self, data, datatype="ttl", namespace=None, graph=None, is_file=False, **kwargs): if kwargs.get('debug'): log.setLevel(logging.DEBUG) time_start = datetime.datetime.now() datatype_map = { 'ttl': 'turtle', 'xml': 'xml', 'rdf': 'xml', 'nt': 'nt', 'n3': 'n3', 'nquads': 'nquads', 'hturtle': 'hturtle' } if is_file: datatype = data.split(os.path.extsep)[-1] file_name = data log.debug('starting data load of %s', file_name) data = open(data, 'rb').read() try: content_type = datatype_map[datatype] except KeyError: raise NotImplementedError("'%s' is not an implemented data fromat", datatype) conn = self.conn if namespace: conn = self.tstore.get_namespace(namespace) else: namespace = self.namespace graph = pick(graph, self.graph) start = datetime.datetime.now() try: result = conn.parse(data=data, publicID=graph, format=content_type) except: if is_file: print("Datafile ", file_name) raise if is_file: log.info (" loaded %s into rdflib namespace '%s'", file_name, namespace) else: log.info(" loaded data into rdflib namespace '%s' in time: %s", namespace, (datetime.datetime.now() - start)) return result
1,131,828
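A short sketch of the same parse call that load_data issues for a local turtle file, written against plain rdflib so it stands alone; the file path and publicID are hypothetical.

from rdflib import ConjunctiveGraph

# equivalent of load_data(path, is_file=True) for a turtle file
conn = ConjunctiveGraph()
with open("data/sample.ttl", "rb") as src:
    conn.parse(data=src.read(), publicID="http://example.org/graph", format="turtle")
print(len(conn), "triples loaded")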
Uploads data stored in a local file to the Blazegraph triplestore args: file_path: full path to the file namespace: the Blazegraph namespace to load the data into graph: uri of the graph to load the data into. Default is None
def load_local_file(self, file_path, namespace=None, graph=None, **kwargs): return self.load_data(file_path, namespace=namespace, graph=graph, is_file=True, **kwargs)
1,131,830
Will delete and recreate the specified namespace args: namespace(str): Namespace to reset params(dict): params used to reset the namespace
def reset_namespace(self, namespace=None, params=None): namespace = pick(namespace, self.namespace) params = pick(params, self.namespace_params) log.warning(" Reseting namespace '%s' at host: %s", namespace, self.url) try: self.delete_namespace(namespace) except KeyError: pass self.create_namespace(namespace, params)
1,131,831
Generates a string with random characters. If no charset is specified, only letters and digits are used. Args: length (int) length of the returned string charset (string) list of characters to choose from Returns: (str) with random characters from charset Raises: -
def random_string(length=8, charset=None): if length < 1: raise ValueError('Length must be > 0') if not charset: charset = string.letters + string.digits return ''.join(random.choice(charset) for unused in xrange(length))
1,131,984
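A quick usage sketch; it assumes `random_string` is importable from the module above, and since the helper targets Python 2 (string.letters, xrange) the calls assume that interpreter.

token = random_string(16)                      # letters + digits by default
pin = random_string(4, charset='0123456789')   # digits only
print(token)
print(pin)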
Extracts a dict from a string. Args: str_in (string) that contains python dict Returns: (dict) or None if no valid dict was found Raises: -
def str2dict(str_in): dict_out = safe_eval(str_in) if not isinstance(dict_out, dict): dict_out = None return dict_out
1,131,985
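Illustrative calls, assuming `str2dict` is importable and that `safe_eval` in the same module behaves like a restricted literal eval; the input strings are made up.

print(str2dict("{'a': 1, 'b': 2}"))   # {'a': 1, 'b': 2}
print(str2dict("[1, 2, 3]"))          # None - the string is not a dict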
Extracts a tuple from a string. Args: str_in (string) that contains python tuple Returns: (tuple) or None if no valid tuple was found Raises: -
def str2tuple(str_in): tuple_out = safe_eval(str_in) if not isinstance(tuple_out, tuple): tuple_out = None return tuple_out
1,131,986
Extracts the keys from a string that represents a dict and returns them sorted by key. Args: str_in (string) that contains python dict Returns: (list) with keys or None if no valid dict was found Raises: -
def str2dict_keys(str_in): tmp_dict = str2dict(str_in) if tmp_dict is None: return None return sorted([k for k in tmp_dict])
1,131,987
Extracts the values from a string that represents a dict and returns them sorted by key. Args: str_in (string) that contains python dict Returns: (list) with values or None if no valid dict was found Raises: -
def str2dict_values(str_in): tmp_dict = str2dict(str_in) if tmp_dict is None: return None return [tmp_dict[key] for key in sorted(k for k in tmp_dict)]
1,131,988
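A combined usage sketch for the two dict helpers above, assuming both are importable; the dict literal is illustrative and shows that keys come back sorted and values follow key order.

src = "{'beta': 2, 'alpha': 1}"
print(str2dict_keys(src))     # ['alpha', 'beta']
print(str2dict_values(src))   # [1, 2] - values ordered by their keys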