def zip(self, *args):
    args = list(args)
    args.insert(0, self.obj)
    maxLen = _(args).chain().collect(lambda x, *args: len(x)).max().value()
    for i, v in enumerate(args):
        l = len(args[i])
        if l < maxLen:
            # pad shorter lists with None up to maxLen
            # (a stray no-op `args[i]` expression was removed here)
            for x in range(maxLen - l):
                args[i].append(None)
    return self._wrap(zip(*args))
Zip together multiple lists into a single array -- elements that share an index go together.
def zipObject(self, values):
    result = {}
    keys = self.obj
    i = 0
    l = len(keys)  # computed once; re-computing it inside the loop was redundant
    while i < l:
        result[keys[i]] = values[i]
        i += 1
    return self._wrap(result)
Zip together two arrays -- an array of keys and an array of values -- into a single object.
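A brief usage sketch with hypothetical values, assuming the `_` wrapper this library exposes:

keys = ["moe", "larry", "curly"]
ages = [30, 40, 50]
print(_(keys).zipObject(ages).value())  # {'moe': 30, 'larry': 40, 'curly': 50}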
def indexOf(self, item, isSorted=False):
    array = self.obj
    ret = -1
    if not (self._clean.isList() or self._clean.isTuple()):
        return self._wrap(-1)
    if isSorted:
        i = _.sortedIndex(array, item)
        # guard the index and compare by equality, not identity
        ret = i if i < len(array) and array[i] == item else -1
    else:
        i = 0
        l = len(array)
        while i < l:
            if array[i] == item:  # compare by equality, not identity
                return self._wrap(i)
            i += 1
    return self._wrap(ret)
Return the position of the first occurrence of an item in an array, or -1 if the item is not included in the array.
def lastIndexOf(self, item):
    if not (self._clean.isList() or self._clean.isTuple()):
        return self._wrap(-1)
    array = self.obj
    i = len(array) - 1
    while i > -1:
        if array[i] == item:  # compare by equality, not identity
            return self._wrap(i)
        i -= 1
    return self._wrap(-1)
Return the position of the last occurrence of an item in an array, or -1 if the item is not included in the array.
def range(self, *args):
    args = list(args)
    args.insert(0, self.obj)
    return self._wrap(range(*args))
Generate an integer Array containing an arithmetic progression.
def partial(self, *args):
    def part(*args2):
        args3 = args + args2
        return self.obj(*args3)
    return self._wrap(part)
Partially apply a function by creating a version that has had some of its arguments pre-filled, without changing its dynamic `this` context.
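For illustration, a minimal sketch of partial application (the function and values are hypothetical), assuming the `_` wrapper above:

def add(a, b):
    return a + b

add5 = _(add).partial(5).value()
print(add5(10))  # 15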
def memoize(self, hasher=None):
    ns = self.Namespace()
    ns.memo = {}
    if hasher is None:
        # default: key on the full (hashable) argument tuple; the old
        # single-argument default broke for multi-argument functions
        hasher = lambda *a: a

    def memoized(*args, **kwargs):
        key = hasher(*args)
        if key not in ns.memo:
            ns.memo[key] = self.obj(*args, **kwargs)
        return ns.memo[key]
    return self._wrap(memoized)
Memoize an expensive function by storing its results.
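A small usage sketch (function and values hypothetical), assuming the `_` wrapper above:

def slow_square(n):
    print("computing %d" % n)
    return n * n

fast_square = _(slow_square).memoize().value()
fast_square(4)  # prints "computing 4" and caches the result
fast_square(4)  # served from the cache, nothing printed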
def delay(self, wait, *args):
    def call_it():
        self.obj(*args)
    # Timer comes from the threading module; wait is given in milliseconds
    t = Timer(float(wait) / 1000.0, call_it)
    t.start()
    return self._wrap(self.obj)
Delays a function for the given number of milliseconds, and then calls it with the arguments supplied.
def throttle(self, wait):
    ns = self.Namespace()
    ns.timeout = None
    ns.throttling = None
    ns.more = None
    ns.result = None

    def done():
        ns.more = ns.throttling = False

    whenDone = _.debounce(done, wait)
    wait = float(wait) / 1000.0

    def throttled(*args, **kwargs):
        def later():
            ns.timeout = None
            if ns.more:
                self.obj(*args, **kwargs)
            whenDone()

        if not ns.timeout:
            ns.timeout = Timer(wait, later)
            ns.timeout.start()
        if ns.throttling:
            ns.more = True
        else:
            ns.throttling = True
            ns.result = self.obj(*args, **kwargs)
        whenDone()
        return ns.result
    return self._wrap(throttled)
Returns a function, that, when invoked, will only be triggered at most once during a given window of time.
def debounce(self, wait, immediate=None):
    # NOTE: `immediate` is accepted for API parity with the docstring
    # below but is not implemented here; only the trailing edge fires.
    wait = float(wait) / 1000.0

    def debounced(*args, **kwargs):
        def call_it():
            self.obj(*args, **kwargs)
        try:
            debounced.t.cancel()
        except AttributeError:
            pass
        debounced.t = Timer(wait, call_it)
        debounced.t.start()
    return self._wrap(debounced)
Returns a function, that, as long as it continues to be invoked, will not be triggered. The function will be called after it stops being called for N milliseconds. If `immediate` is passed, trigger the function on the leading edge, instead of the trailing.
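A sketch of the trailing-edge behaviour (names hypothetical), assuming the Timer-based implementation above:

def on_resize():
    print("resized")

debounced_resize = _(on_resize).debounce(300).value()
for i in range(10):
    debounced_resize()  # each call restarts the 300 ms timer; only the last one fires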
def once(self):
    ns = self.Namespace()
    ns.memo = None
    ns.run = False

    def work_once(*args, **kwargs):
        if ns.run is False:
            ns.memo = self.obj(*args, **kwargs)
            ns.run = True
        return ns.memo
    return self._wrap(work_once)
Returns a function that will be executed at most one time, no matter how often you call it. Useful for lazy initialization.
def wrap(self, wrapper):
    def wrapped(*args, **kwargs):
        if kwargs:
            kwargs["object"] = self.obj
        else:
            args = list(args)
            args.insert(0, self.obj)
        return wrapper(*args, **kwargs)
    return self._wrap(wrapped)
Returns the first function passed as an argument to the second, allowing you to adjust arguments, run code before and after, and conditionally execute the original function.
def compose(self, *args):
    args = list(args)

    def composed(*ar, **kwargs):
        lastRet = self.obj(*ar, **kwargs)
        for i in args:
            lastRet = i(lastRet)
        return lastRet
    return self._wrap(composed)
Returns a function that is the composition of a list of functions, each consuming the return value of the function that follows.
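To illustrate the call order (the wrapped function runs first, then each composed function in turn), a hypothetical sketch using the `_` wrapper:

greet = lambda name: "hi: " + name
shout = _(greet).compose(lambda s: s.upper()).value()
print(shout("moe"))  # "HI: MOE"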
def after(self, func):
    ns = self.Namespace()
    ns.times = self.obj
    if ns.times <= 0:
        return func()

    def work_after(*args):
        if ns.times <= 1:
            return func(*args)
        ns.times -= 1
    return self._wrap(work_after)
Returns a function that will only be executed after being called N times.
def pairs(self):
    keys = self._clean.keys()
    pairs = []
    for key in keys:
        pairs.append([key, self.obj[key]])
    return self._wrap(pairs)
Convert an object into a list of `[key, value]` pairs.
def invert(self):
    keys = self._clean.keys()
    inverted = {}
    for key in keys:
        inverted[self.obj[key]] = key
    return self._wrap(inverted)
Invert the keys and values of an object. The values must be serializable.
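A quick sketch with hypothetical data, assuming the `_` wrapper:

print(_({"Moe": "Moses", "Larry": "Louis"}).invert().value())
# {'Moses': 'Moe', 'Louis': 'Larry'}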
def functions(self):
    names = []
    for k in self.obj:
        if _(self.obj[k]).isCallable():
            names.append(k)
    return self._wrap(sorted(names))
Return a sorted list of the function names available on the object.
def extend(self, *args):
    args = list(args)
    for i in args:
        self.obj.update(i)
    return self._wrap(self.obj)
Extend a given object with all the properties in passed-in object(s).
def pick(self, *args):
    ns = self.Namespace()
    ns.result = {}

    def by(key, *args):
        if key in self.obj:
            ns.result[key] = self.obj[key]

    _.each(self._flatten(args, True, []), by)
    return self._wrap(ns.result)
Return a copy of the object only containing the whitelisted properties.
def defaults(self, *args):
    ns = self.Namespace()  # instantiate the namespace; the parens were missing
    ns.obj = self.obj

    def by(source, *ar):
        for i, prop in enumerate(source):
            if prop not in ns.obj:
                ns.obj[prop] = source[prop]

    _.each(args, by)
    return self._wrap(ns.obj)
Fill in a given object with default properties.
def tap(self, interceptor):
    interceptor(self.obj)
    return self._wrap(self.obj)
Invokes interceptor with the obj, and then returns obj. The primary purpose of this method is to "tap into" a method chain, in order to perform operations on intermediate results within the chain.
def isEmpty(self):
    if self.obj is None:
        return True
    if self._clean.isString():
        ret = self.obj.strip() == ""  # compare by value, not identity
    elif self._clean.isDict():
        ret = len(self.obj.keys()) == 0
    else:
        ret = len(self.obj) == 0
    return self._wrap(ret)
Is a given array, string, or object empty? An "empty" object has no enumerable own-properties.
def isFile(self):
    try:
        filetype = file  # Python 2
    except NameError:
        filetype = io.IOBase  # Python 3
    # isinstance() also matches io subclasses, which a strict
    # type-identity check would miss
    return self._wrap(isinstance(self.obj, filetype))
Check if the given object is a file
def join(self, glue=" "):
    j = glue.join([str(x) for x in self.obj])
    return self._wrap(j)
JavaScript's join implementation.
def matches(self):
    def ret(obj, *args):
        if self.obj is obj:
            return True  # avoid comparing an object to itself
        for key in self.obj:
            # a missing key counts as a mismatch instead of raising KeyError
            if key not in obj or self.obj[key] != obj[key]:
                return False
        return True
    return self._wrap(ret)
Returns a predicate for checking whether an object has a given set of `key:value` pairs.
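For example (hypothetical data), the returned predicate can filter dicts by the wrapped key/value pairs:

is_ready = _({"selected": True, "visible": True}).matches().value()
print(is_ready({"selected": True, "visible": True, "id": 1}))   # True
print(is_ready({"selected": False, "visible": True, "id": 2}))  # False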
def times(self, func, *args):
    n = self.obj
    i = 0
    # "while n is not 0" compared identity and could loop forever for
    # large or negative n; count down by value instead
    while n > 0:
        n -= 1
        func(i)
        i += 1
    return self._wrap(func)
Run a function **n** times.
def random(self, max_number=None):
    min_number = self.obj
    if max_number is None:
        min_number = 0
        max_number = self.obj
    # randint includes both endpoints, matching the documented
    # "inclusive" behaviour (randrange would exclude max)
    return random.randint(min_number, max_number)
Return a random integer between min and max (inclusive).
def result(self, property, *args):
    if self.obj is None:
        return self._wrap(self.obj)
    if hasattr(self.obj, property):
        value = getattr(self.obj, property)
    else:
        value = self.obj.get(property)
    if _.isCallable(value):
        return self._wrap(value(*args))
    return self._wrap(value)
If the value of the named property is a function then invoke it; otherwise, return it.
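A short sketch with hypothetical data; the output shown assumes the unchained wrapper returns plain values:

obj = {"cheese": "crumpets", "stuff": lambda: "nonsense"}
print(_(obj).result("cheese"))  # "crumpets"
print(_(obj).result("stuff"))   # "nonsense" (the callable is invoked)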
def mixin(self):
    methods = self.obj
    for k in methods:
        setattr(underscore, k, methods[k])
    self.makeStatic()
    return self._wrap(self.obj)
Add your own custom functions to the Underscore object, ensuring that they're correctly added to the OOP wrapper as well.
def uniqueId(self, prefix=""):
    _IdCounter.count += 1
    id = _IdCounter.count
    if prefix:
        return self._wrap(prefix + str(id))
    else:
        return self._wrap(id)
Generate a unique integer id (unique within the entire client session). Useful for temporary DOM ids.
def escape(self):
    # & must be handled first
    self.obj = self.obj.replace("&", self._html_escape_table["&"])
    for k, v in self._html_escape_table.items():
        if k != "&":  # compare by value, not identity
            self.obj = self.obj.replace(k, v)
    return self._wrap(self.obj)
Escape a string for HTML interpolation.
def unescape(self):
    for k, v in self._html_escape_table.items():
        self.obj = self.obj.replace(v, k)
    return self._wrap(self.obj)
Within an interpolation, evaluation, or escaping, remove HTML escaping that had been previously added.
def value(self):
    if self._wrapped is not self.Null:
        return self._wrapped
    else:
        return self.obj
Returns the wrapped object instead of the wrapper instance.
def makeStatic():
    p = lambda value: inspect.ismethod(value) or inspect.isfunction(value)
    for eachMethod in inspect.getmembers(underscore, predicate=p):
        m = eachMethod[0]
        if not hasattr(_, m):
            def caller(a):
                def execute(*args):
                    if len(args) == 1:
                        r = getattr(underscore(args[0]), a)()
                    elif len(args) > 1:
                        rargs = args[1:]
                        r = getattr(underscore(args[0]), a)(*rargs)
                    else:
                        r = getattr(underscore([]), a)()
                    return r
                return execute
            _.__setattr__(m, caller(m))
    # put the class itself as a parameter so that we can use it on outside
    _.__setattr__("underscore", underscore)
    _.templateSettings = {}
Provide static access to underscore class
def init():
    global _users, _names
    _configure_app(app)
    _users, _names = _init_login_manager(app)
    _configure_logger()
    init_scheduler(app.config.get('SQLALCHEMY_DATABASE_URI'))
    db.init(app.config.get('SQLALCHEMY_DATABASE_URI'))
Initialise and configure the app, database, scheduler, etc. This should be called once at application startup or at tests startup (and not e.g. called once for each test case).
def _configure_app(app_):
    app_.url_map.strict_slashes = False
    app_.config.from_object(default_settings)
    app_.config.from_envvar('JOB_CONFIG', silent=True)
    db_url = app_.config.get('SQLALCHEMY_DATABASE_URI')
    if not db_url:
        raise Exception('No db_url in config')
    app_.wsgi_app = ProxyFix(app_.wsgi_app)
    global SSL_VERIFY
    if app_.config.get('SSL_VERIFY') in ['False', 'FALSE', '0', False, 0]:
        SSL_VERIFY = False
    else:
        SSL_VERIFY = True
    return app_
Configure the Flask WSGI app.
def _init_login_manager(app_):
    login_manager = flogin.LoginManager()
    login_manager.setup_app(app_)
    login_manager.anonymous_user = Anonymous
    login_manager.login_view = "login"
    users = {app_.config['USERNAME']: User('Admin', 0)}
    names = dict((int(v.get_id()), k) for k, v in users.items())

    @login_manager.user_loader
    def load_user(userid):
        userid = int(userid)
        name = names.get(userid)
        return users.get(name)

    return users, names
Initialise and configure the login manager.
def _configure_logger_for_production(logger):
    if 'STDERR' in app.config:
        stderr_handler = logging.StreamHandler(sys.stderr)
        stderr_handler.setLevel(logging.INFO)
        logger.addHandler(stderr_handler)
    # Only build each handler when its config is present; constructing
    # RotatingFileHandler or SMTPHandler with missing settings would fail.
    if 'LOG_FILE' in app.config:
        file_handler = logging.handlers.RotatingFileHandler(
            app.config.get('LOG_FILE'), maxBytes=67108864, backupCount=5)
        file_handler.setLevel(logging.INFO)
        logger.addHandler(file_handler)
    if 'FROM_EMAIL' in app.config:
        mail_handler = logging.handlers.SMTPHandler(
            '127.0.0.1', app.config.get('FROM_EMAIL'),
            app.config.get('ADMINS', []), 'CKAN Service Error')
        mail_handler.setLevel(logging.ERROR)
        logger.addHandler(mail_handler)
Configure the given logger for production deployment. Logs to stderr and file, and emails errors to admins.
def _configure_logger():
    if not app.debug:
        _configure_logger_for_production(logging.getLogger())
    elif not app.testing:
        _configure_logger_for_debugging(logging.getLogger())
Configure the logging module.
def init_scheduler(db_uri):
    global scheduler
    scheduler = apscheduler.Scheduler()
    scheduler.misfire_grace_time = 3600
    scheduler.add_jobstore(
        sqlalchemy_store.SQLAlchemyJobStore(url=db_uri), 'default')
    scheduler.add_listener(
        job_listener,
        events.EVENT_JOB_EXECUTED | events.EVENT_JOB_MISSED |
        events.EVENT_JOB_ERROR)
    return scheduler
Initialise and configure the scheduler.
def user():
    '''Show information about the current user

    :rtype: A dictionary with the following keys
    :param id: User id
    :type id: int
    :param name: User name
    :type name: string
    :param is_active: Whether the user is currently active
    :type is_active: bool
    :param is_anonymous: The anonymous user is the default user if you
        are not logged in
    :type is_anonymous: bool
    '''
    user = flogin.current_user
    return flask.jsonify({
        'id': user.get_id(),
        'name': user.name,
        'is_active': user.is_active(),
        'is_anonymous': user.is_anonymous
    })
Show information about the current user :rtype: A dictionary with the following keys :param id: User id :type id: int :param name: User name :type name: string :param is_active: Whether the user is currently active :type is_active: bool :param is_anonymous: The anonymous user is the default user if you are not logged in :type is_anonymous: bool
def logout():
    flogin.logout_user()
    next = flask.request.args.get('next')
    return flask.redirect(next or flask.url_for("user"))
Log out the active user
def clear_jobs():
    '''Clear old jobs

    :param days: Jobs for how many days should be kept (default: 10)
    :type days: integer

    :statuscode 200: no error
    :statuscode 403: not authorized to delete jobs
    :statuscode 409: an error occurred
    '''
    if not is_authorized():
        return json.dumps({'error': 'not authorized'}), 403, headers
    days = flask.request.args.get('days', None)
    return _clear_jobs(days)
Clear old jobs :param days: Jobs for how many days should be kept (default: 10) :type days: integer :statuscode 200: no error :statuscode 403: not authorized to delete jobs :statuscode 409: an error occurred
def job_data(job_id):
    '''Get the raw data that the job returned. The mimetype will be the
    value provided in the metadata for the key ``mimetype``.

    **Results:**

    :rtype: string

    :statuscode 200: no error
    :statuscode 403: not authorized to view the job's data
    :statuscode 404: job id not found
    :statuscode 409: an error occurred
    '''
    job_dict = db.get_job(job_id)
    if not job_dict:
        return json.dumps({'error': 'job_id not found'}), 404, headers
    if not is_authorized(job_dict):
        return json.dumps({'error': 'not authorized'}), 403, headers
    if job_dict['error']:
        return json.dumps({'error': job_dict['error']}), 409, headers
    content_type = job_dict['metadata'].get('mimetype')
    return flask.Response(job_dict['data'], mimetype=content_type)
Get the raw data that the job returned. The mimetype will be the value provided in the metadata for the key ``mimetype``. **Results:** :rtype: string :statuscode 200: no error :statuscode 403: not authorized to view the job's data :statuscode 404: job id not found :statuscode 409: an error occurred
def is_authorized(job=None):
    '''Returns true if the request is authorized for the job if provided.
    If no job is provided, the user has to be admin to be authorized.
    '''
    if flogin.current_user.is_authenticated:
        return True
    if job:
        job_key = flask.request.headers.get('Authorization')
        if job_key == app.config.get('SECRET_KEY'):
            return True
        return job['job_key'] == job_key
    return False
Returns true if the request is authorized for the job if provided. If no job is provided, the user has to be admin to be authorized.
def init(uri, echo=False):
    global ENGINE, _METADATA, JOBS_TABLE, METADATA_TABLE, LOGS_TABLE
    ENGINE = sqlalchemy.create_engine(uri, echo=echo, convert_unicode=True)
    _METADATA = sqlalchemy.MetaData(ENGINE)
    JOBS_TABLE = _init_jobs_table()
    METADATA_TABLE = _init_metadata_table()
    LOGS_TABLE = _init_logs_table()
    _METADATA.create_all(ENGINE)
Initialise the database. Initialise the sqlalchemy engine, metadata and table objects that we use to connect to the database. Create the database and the database tables themselves if they don't already exist. :param uri: the sqlalchemy database URI :type uri: string :param echo: whether or not to have the sqlalchemy engine log all statements to stdout :type echo: bool
def _validate_error(error):
    if error is None:
        return None
    elif isinstance(error, basestring):
        return {"message": error}
    else:
        try:
            message = error["message"]
            if isinstance(message, basestring):
                return error
            else:
                raise InvalidErrorObjectError(
                    "error['message'] must be a string")
        except (TypeError, KeyError):
            raise InvalidErrorObjectError(
                "error must be either a string or a dict with a message key")
Validate and return the given error object. Based on the given error object, return either None or a dict with a "message" key whose value is a string (the dict may also have any other keys that it wants). The given "error" object can be: - None, in which case None is returned - A string, in which case a dict like this will be returned: {"message": error_string} - A dict with a "message" key whose value is a string, in which case the dict will be returned unchanged :param error: the error object to validate :raises InvalidErrorObjectError: If the error object doesn't match any of the allowed types
def _update_job(job_id, job_dict):
    # The unicode() calls below avoid SQLAlchemy "Unicode type received
    # non-unicode bind param value" warnings.
    if job_id:
        job_id = unicode(job_id)

    if "error" in job_dict:
        job_dict["error"] = _validate_error(job_dict["error"])
        job_dict["error"] = json.dumps(job_dict["error"])
        job_dict["error"] = unicode(job_dict["error"])

    if "data" in job_dict:
        job_dict["data"] = unicode(job_dict["data"])

    ENGINE.execute(
        JOBS_TABLE.update()
        .where(JOBS_TABLE.c.job_id == job_id)
        .values(**job_dict))
Update the database row for the given job_id with the given job_dict. All functions that update rows in the jobs table do it by calling this helper function. job_dict is a dict with values corresponding to the database columns that should be updated, e.g.: {"status": "complete", "data": ...}
def mark_job_as_completed(job_id, data=None):
    update_dict = {
        "status": "complete",
        "data": json.dumps(data),
        "finished_timestamp": datetime.datetime.now(),
    }
    _update_job(job_id, update_dict)
Mark a job as completed successfully. :param job_id: the job_id of the job to be updated :type job_id: unicode :param data: the output data returned by the job :type data: any JSON-serializable type (including None)
def mark_job_as_errored(job_id, error_object):
    update_dict = {
        "status": "error",
        "error": error_object,
        "finished_timestamp": datetime.datetime.now(),
    }
    _update_job(job_id, update_dict)
Mark a job as failed with an error. :param job_id: the job_id of the job to be updated :type job_id: unicode :param error_object: the error returned by the job :type error_object: either a string or a dict with a "message" key whose value is a string
def _init_jobs_table():
    _jobs_table = sqlalchemy.Table(
        'jobs', _METADATA,
        sqlalchemy.Column('job_id', sqlalchemy.UnicodeText, primary_key=True),
        sqlalchemy.Column('job_type', sqlalchemy.UnicodeText),
        sqlalchemy.Column('status', sqlalchemy.UnicodeText, index=True),
        sqlalchemy.Column('data', sqlalchemy.UnicodeText),
        sqlalchemy.Column('error', sqlalchemy.UnicodeText),
        sqlalchemy.Column('requested_timestamp', sqlalchemy.DateTime),
        sqlalchemy.Column('finished_timestamp', sqlalchemy.DateTime),
        sqlalchemy.Column('sent_data', sqlalchemy.UnicodeText),
        # Callback URL:
        sqlalchemy.Column('result_url', sqlalchemy.UnicodeText),
        # CKAN API key:
        sqlalchemy.Column('api_key', sqlalchemy.UnicodeText),
        # Key to administer job:
        sqlalchemy.Column('job_key', sqlalchemy.UnicodeText),
    )
    return _jobs_table
Initialise the "jobs" table in the db.
def _init_metadata_table():
    _metadata_table = sqlalchemy.Table(
        'metadata', _METADATA,
        sqlalchemy.Column(
            'job_id', sqlalchemy.ForeignKey("jobs.job_id", ondelete="CASCADE"),
            nullable=False, primary_key=True),
        sqlalchemy.Column('key', sqlalchemy.UnicodeText, primary_key=True),
        sqlalchemy.Column('value', sqlalchemy.UnicodeText, index=True),
        sqlalchemy.Column('type', sqlalchemy.UnicodeText),
    )
    return _metadata_table
Initialise the "metadata" table in the db.
def _init_logs_table():
    _logs_table = sqlalchemy.Table(
        'logs', _METADATA,
        sqlalchemy.Column(
            'job_id', sqlalchemy.ForeignKey("jobs.job_id", ondelete="CASCADE"),
            nullable=False),
        sqlalchemy.Column('timestamp', sqlalchemy.DateTime),
        sqlalchemy.Column('message', sqlalchemy.UnicodeText),
        sqlalchemy.Column('level', sqlalchemy.UnicodeText),
        sqlalchemy.Column('module', sqlalchemy.UnicodeText),
        sqlalchemy.Column('funcName', sqlalchemy.UnicodeText),
        sqlalchemy.Column('lineno', sqlalchemy.Integer)
    )
    return _logs_table
Initialise the "logs" table in the db.
def _get_metadata(job_id):
    # Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
    # warnings.
    job_id = unicode(job_id)
    results = ENGINE.execute(
        METADATA_TABLE.select().where(
            METADATA_TABLE.c.job_id == job_id)).fetchall()
    metadata = {}
    for row in results:
        value = row['value']
        if row['type'] == 'json':
            value = json.loads(value)
        metadata[row['key']] = value
    return metadata
Return any metadata for the given job_id from the metadata table.
def _get_logs(job_id):
    # Avoid SQLAlchemy "Unicode type received non-unicode bind param value"
    # warnings.
    job_id = unicode(job_id)
    results = ENGINE.execute(
        LOGS_TABLE.select().where(LOGS_TABLE.c.job_id == job_id)).fetchall()
    results = [dict(result) for result in results]
    for result in results:
        result.pop("job_id")
    return results
Return any logs for the given job_id from the logs table.
def check_node_attributes(pattern, node, *attributes):
    for attribute_name in attributes:
        attribute = node.get(attribute_name)
        if attribute is not None and pattern.search(attribute):
            return True
    return False
Searches the given attributes of the node against the pattern and returns True if any of them match.
def generate_hash_id(node):
    try:
        content = tostring(node)
    except Exception:
        logger.exception("Generating of hash failed")
        content = to_bytes(repr(node))

    hash_id = md5(content).hexdigest()
    return hash_id[:8]
Generates a hash_id for the node in question. :param node: lxml etree node
def get_link_density(node, node_text=None):
    if node_text is None:
        node_text = node.text_content()
    node_text = normalize_whitespace(node_text.strip())

    text_length = len(node_text)
    if text_length == 0:
        return 0.0

    links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
    # Give 50 bonus chars worth of length for each img.
    # Tweaking this 50 down a notch should help if we hit false positives.
    img_bonuses = 50 * len(node.findall(".//img"))
    links_length = max(0, links_length - img_bonuses)

    return links_length / text_length
Computes the ratio for text in given node and text in links contained in the node. It is computed from number of characters in the texts. :parameter Element node: HTML element in which links density is computed. :parameter string node_text: Text content of given node if it was obtained before. :returns float: Returns value of computed 0 <= density <= 1, where 0 means no links and 1 means that node contains only links.
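As a worked example of the formula above: a node containing 500 characters of text, 150 of which sit inside <a> tags, with one <img> would score max(0, 150 - 50) / 500 = 0.2.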
def get_class_weight(node):
    weight = 0
    if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "class"):
        weight -= 25
    if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "class"):
        weight += 25
    if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "id"):
        weight -= 25
    if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "id"):
        weight += 25
    return weight
Computes weight of element according to its class/id. We're using sets to help efficiently check for existence of matches.
def is_unlikely_node(node):
    unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
    maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")
    return bool(unlikely and not maybe and node.tag != "body")
Short helper for checking unlikely status. If the class or id are in the unlikely list, and there's not also a class/id in the likely list then it might need to be removed.
def cached_property(getter):
    def decorator(self):
        key = "_cached_property_" + getter.__name__
        if not hasattr(self, key):
            setattr(self, key, getter(self))
        return getattr(self, key)

    decorator.__name__ = getter.__name__
    decorator.__module__ = getter.__module__
    decorator.__doc__ = getter.__doc__
    return property(decorator)
Decorator that converts a method into memoized property. The decorator works as expected only for classes with attribute '__dict__' and immutable properties.
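A minimal usage sketch (the class and names are hypothetical):

class Circle(object):
    def __init__(self, r):
        self.r = r

    @cached_property
    def area(self):
        print("computing")
        return 3.14159 * self.r ** 2

c = Circle(2.0)
c.area  # prints "computing", stores the value in c._cached_property_area
c.area  # returned from the cached attribute, nothing printed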
def clear(self):
    self.filename = ''
    self.filehandler = 0

    # Station name, identification and revision year:
    self.station_name = ''
    self.rec_dev_id = ''
    self.rev_year = 0

    # Number and type of channels:
    self.TT = 0
    self.A = 0  # Number of analog channels.
    self.D = 0  # Number of digital channels.

    # Analog channel information:
    self.An = []
    self.Ach_id = []
    self.Aph = []
    self.Accbm = []
    self.uu = []
    self.a = []
    self.b = []
    self.skew = []
    self.min = []
    self.max = []
    self.primary = []
    self.secondary = []
    self.PS = []

    # Digital channel information:
    self.Dn = []
    self.Dch_id = []
    self.Dph = []
    self.Dccbm = []
    self.y = []

    # Line frequency:
    self.lf = 0

    # Sampling rate information:
    self.nrates = 0
    self.samp = []
    self.endsamp = []

    # Date/time stamps, defined by: [dd, mm, yyyy, hh, mm, ss.ssssss]
    self.start = [0, 0, 0, 0, 0, 0.0]
    self.trigger = [0, 0, 0, 0, 0, 0.0]

    # Data file type:
    self.ft = ''

    # Time stamp multiplication factor:
    self.timemult = 0.0

    self.DatFileContent = ''
Clear the internal (private) variables of the class.
def getTime(self):
    T = 1 / float(self.samp[self.nrates - 1])
    endtime = self.endsamp[self.nrates - 1] * T
    t = numpy.linspace(0, endtime, self.endsamp[self.nrates - 1])
    return t
Creates a time stamp vector based on the number of samples and the sample rate.
def getAnalogID(self, num):
    listidx = self.An.index(num)  # Get the position of the channel number.
    return self.Ach_id[listidx]
Returns the COMTRADE ID of a given channel number. The number given is the same as in the COMTRADE header.
def getDigitalID(self, num):
    listidx = self.Dn.index(num)  # Get the position of the channel number.
    return self.Dch_id[listidx]
Reads the COMTRADE ID of a given channel number. The number given is the same as in the COMTRADE header.
def getAnalogType(self, num):
    listidx = self.An.index(num)
    unit = self.uu[listidx]
    if unit == 'kV' or unit == 'V':
        return 'V'
    elif unit == 'A' or unit == 'kA':
        return 'I'
    else:
        print 'Unknown channel type'
        return 0
Returns the type of the channel 'num' based on its unit stored in the Comtrade header file. Returns 'V' for a voltage channel and 'I' for a current channel.
def getAnalogUnit(self, num):
    listidx = self.An.index(num)  # Get the position of the channel number.
    return self.uu[listidx]
Returns the COMTRADE channel unit (e.g., kV, V, kA, A) of a given channel number. The number given is the same as in the COMTRADE header.
def ReadDataFile(self):
    if os.path.isfile(self.filename[0:-4] + '.dat'):
        filename = self.filename[0:-4] + '.dat'
    elif os.path.isfile(self.filename[0:-4] + '.DAT'):
        filename = self.filename[0:-4] + '.DAT'
    else:
        print "Data file not found."
        return 0

    self.filehandler = open(filename, 'rb')
    self.DatFileContent = self.filehandler.read()
    # END READING .dat FILE.
    self.filehandler.close()  # Close file.
    return 1
Reads the contents of the COMTRADE .dat file and stores them in a private variable. To access data for a specific channel, see the getAnalogChannelData and getDigitalChannelData methods.
def getAnalogChannelData(self, ChNumber):
    if not self.DatFileContent:
        print "No data file content. Use the method ReadDataFile first"
        return 0
    if ChNumber > self.A:
        print "Channel number greater than the total number of channels."
        return 0
    # Formatting string for the struct module:
    str_struct = "ii%dh" % (self.A + int(numpy.ceil(float(self.D) / float(16))))
    # Number of bytes per sample:
    NB = 4 + 4 + self.A * 2 + int(numpy.ceil(float(self.D) / float(16))) * 2
    # Number of samples:
    N = self.getNumberOfSamples()
    # Empty column vector:
    values = numpy.empty((N, 1))
    ch_index = self.An.index(ChNumber)
    # Reading the values from the DatFileContent string:
    for i in range(N):
        data = struct.unpack(str_struct, self.DatFileContent[i * NB:(i * NB) + NB])
        # The first two numbers are the sample index and timestamp.
        values[i] = data[ChNumber + 1]
    values = values * self.a[ch_index]  # a factor
    values = values + self.b[ch_index]  # b factor
    return values
Returns an array of numbers containing the data values of the channel number "ChNumber". ChNumber is the number of the channel as in the .cfg file.
def getDigitalChannelData(self, ChNumber):
    if not self.DatFileContent:
        print "No data file content. Use the method ReadDataFile first"
        return 0
    if ChNumber > self.D:
        print "Digital channel number greater than the total number of channels."
        return 0
    # Formatting string for the struct module:
    str_struct = "ii%dh%dH" % (self.A, int(numpy.ceil(float(self.D) / float(16))))
    # Number of bytes per sample:
    NB = 4 + 4 + self.A * 2 + int(numpy.ceil(float(self.D) / float(16))) * 2
    # Number of samples:
    N = self.getNumberOfSamples()
    # Empty column vector:
    values = numpy.empty((N, 1))
    # Number of the 16-bit word where the digital channel lives. Every word
    # holds 16 digital channels:
    byte_number = int(numpy.ceil((ChNumber - 1) / 16) + 1)
    # Value of the digital channel. E.g. channel 1 has value 2^0 = 1,
    # channel 2 has value 2^1 = 2, channel 3 => 2^2 = 4, and so on.
    digital_ch_value = 1 << (ChNumber - 1 - (byte_number - 1) * 16)
    # Reading the values from the DatFileContent string:
    for i in range(N):
        data = struct.unpack(str_struct, self.DatFileContent[i * NB:(i * NB) + NB])
        # The first two numbers are the sample index and timestamp.
        # AND logic extracts a single channel from the 16-bit word;
        # dividing by the mask normalizes the output to 0 or 1.
        values[i] = (digital_ch_value & data[self.A + 1 + byte_number]) * 1 / digital_ch_value
    return values
Returns an array of numbers (0 or 1) containing the values of the digital channel status. ChNumber: digital channel number.
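As a worked example of the word/mask arithmetic above (Python 2 integer division, assuming numpy is imported as in the method), digital channel 18:

ChNumber = 18
byte_number = int(numpy.ceil((ChNumber - 1) / 16) + 1)           # -> 2: second 16-bit word
digital_ch_value = 1 << (ChNumber - 1 - (byte_number - 1) * 16)  # -> 2: mask for bit 1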
def initLogger():
    '''
    This code taken from Matt's Suspenders for initializing a logger
    '''
    global logger
    logger = logging.getLogger('root')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S")
    ch.setFormatter(formatter)
    logger.addHandler(ch)
This code taken from Matt's Suspenders for initializing a logger
def decompressBWTPoolProcess(tup):
    '''
    Individual process for decompression
    '''
    (inputDir, outputDir, startIndex, endIndex) = tup
    if startIndex == endIndex:
        return True

    # load the thing we'll be extracting from
    msbwt = MultiStringBWT.CompressedMSBWT()
    msbwt.loadMsbwt(inputDir, None)

    # open our output
    outputBwt = np.load(outputDir + '/msbwt.npy', 'r+')
    outputBwt[startIndex:endIndex] = msbwt.getBWTRange(startIndex, endIndex)
    return True
Individual process for decompression
def clearAuxiliaryData(dirName):
    '''
    This function removes auxiliary files associated with a given filename
    '''
    if dirName is not None:
        for fn in ('auxiliary.npy', 'totalCounts.p', 'totalCounts.npy',
                   'fmIndex.npy', 'comp_refIndex.npy', 'comp_fmIndex.npy',
                   'backrefs.npy'):
            if os.path.exists(dirName + '/' + fn):
                os.remove(dirName + '/' + fn)
This function removes auxiliary files associated with a given filename
def ok_embedded_video(node):
    good_keywords = ('youtube', 'blip.tv', 'vimeo')
    node_str = tounicode(node)
    for key in good_keywords:
        if key in node_str:
            return True
    return False
Check if this embed/video is an ok one to count.
def build_base_document(dom, return_fragment=True):
    body_element = dom.find(".//body")

    if body_element is None:
        fragment = fragment_fromstring('<div id="readabilityBody"/>')
        fragment.append(dom)
    else:
        body_element.tag = "div"
        body_element.set("id", "readabilityBody")
        fragment = body_element

    return document_from_fragment(fragment, return_fragment)
Builds a base document with the body as root. :param dom: Parsed lxml tree (Document Object Model). :param bool return_fragment: If True only <div> fragment is returned. Otherwise full HTML document is returned.
def check_siblings(candidate_node, candidate_list):
    candidate_css = candidate_node.node.get("class")
    potential_target = candidate_node.content_score * 0.2
    sibling_target_score = potential_target if potential_target > 10 else 10
    parent = candidate_node.node.getparent()
    siblings = parent.getchildren() if parent is not None else []

    for sibling in siblings:
        append = False
        content_bonus = 0

        if sibling is candidate_node.node:
            append = True

        # Give a bonus if sibling nodes and top candidates have the exact
        # same class name.
        if candidate_css and sibling.get("class") == candidate_css:
            content_bonus += candidate_node.content_score * 0.2

        if sibling in candidate_list:
            adjusted_score = \
                candidate_list[sibling].content_score + content_bonus
            if adjusted_score >= sibling_target_score:
                append = True

        if sibling.tag == "p":
            link_density = get_link_density(sibling)
            content = sibling.text_content()
            content_length = len(content)

            if content_length > 80 and link_density < 0.25:
                append = True
            elif content_length < 80 and link_density == 0:
                if ". " in content:
                    append = True

        if append:
            logger.debug(
                "Sibling appended: %s %r", sibling.tag, sibling.attrib)
            if sibling.tag not in ("div", "p"):
                # We have a node that isn't a common block level element,
                # like a form or td tag. Turn it into a div so it doesn't
                # get filtered out later by accident.
                sibling.tag = "div"
            if candidate_node.node != sibling:
                candidate_node.node.append(sibling)

    return candidate_node
Looks through siblings for content that might also be related. Things like preambles, content split by ads that we removed, etc.
def clean_document(node):
    if node is None or len(node) == 0:
        return None

    logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
    to_drop = []

    for n in node.iter():
        # clean out any in-line style properties
        if "style" in n.attrib:
            n.set("style", "")

        # remove embedded objects unless it's wanted video
        if n.tag in ("object", "embed") and not ok_embedded_video(n):
            logger.debug("Dropping node %s %r", n.tag, n.attrib)
            to_drop.append(n)

        # clean headings with bad css or high link density
        if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)

        if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)

        # drop block elements without content or children
        if n.tag in ("div", "p"):
            text_content = shrink_text(n.text_content())
            if len(text_content) < 5 and not n.getchildren():
                logger.debug(
                    "Dropping %s %r without content.", n.tag, n.attrib)
                to_drop.append(n)

        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)

    drop_nodes_with_parents(to_drop)
    return node
Cleans up the final document we return as the readable article.
def find_candidates(document):
    nodes_to_score = set()
    should_remove = set()

    for node in document.iter():
        if is_unlikely_node(node):
            logger.debug(
                "We should drop unlikely: %s %r", node.tag, node.attrib)
            should_remove.add(node)
        elif is_bad_link(node):
            logger.debug(
                "We should drop bad link: %s %r", node.tag, node.attrib)
            should_remove.add(node)
        elif node.tag in SCORABLE_TAGS:
            nodes_to_score.add(node)

    return score_candidates(nodes_to_score), should_remove
Finds candidate nodes for the readable version of the article. Here we remove unlikely nodes, score the rest, clean up, and return the final best match.
def is_bad_link(node):
    if node.tag != "a":
        return False

    name = node.get("name")
    href = node.get("href")
    if name and not href:
        return True

    if href:
        href_parts = href.split("#")
        if len(href_parts) == 2 and len(href_parts[1]) > 25:
            return True

    return False
Helper to determine if the node is link that is useless. We've hit articles with many multiple links that should be cleaned out because they're just there to pollute the space. See tests for examples.
def leaf_div_elements_into_paragraphs(document):
    for element in document.iter(tag="div"):
        child_tags = tuple(n.tag for n in element.getchildren())
        if "div" not in child_tags and "p" not in child_tags:
            logger.debug(
                "Changing leaf block element <%s> into <p>", element.tag)
            element.tag = "p"
    return document
Turn some block elements that don't have children block level elements into <p> elements. Since we can't change the tree as we iterate over it, we must do this before we process our document.
def dom(self):
    try:
        dom = self._original_document.dom
        # cleaning doesn't return anything, it wipes in place
        html_cleaner(dom)
        return leaf_div_elements_into_paragraphs(dom)
    except ValueError:
        return None
Parsed lxml tree (Document Object Model) of the given html.
def candidates(self):
    dom = self.dom
    if dom is None or len(dom) == 0:
        return None

    candidates, unlikely_candidates = find_candidates(dom)
    drop_nodes_with_parents(unlikely_candidates)
    return candidates
Generates list of candidates from the DOM.
def _readable(self):
    if not self.candidates:
        logger.info("No candidates found in document.")
        return self._handle_no_candidates()

    # right now we return the highest scoring candidate content
    best_candidates = sorted(
        (c for c in self.candidates.values()),
        key=attrgetter("content_score"), reverse=True)

    printer = PrettyPrinter(indent=2)
    logger.debug(printer.pformat(best_candidates))

    # since we have several candidates, check the winner's siblings
    # for extra content
    winner = best_candidates[0]
    updated_winner = check_siblings(winner, self.candidates)
    updated_winner.node = prep_article(updated_winner.node)
    if updated_winner.node is not None:
        dom = build_base_document(
            updated_winner.node, self._return_fragment)
    else:
        logger.info(
            'Had candidates but failed to find a cleaned winning DOM.')
        dom = self._handle_no_candidates()

    return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
The readable parsed article
def _handle_no_candidates(self):
    # since we've not found a good candidate, fall back to preparing the
    # whole document
    if self.dom is not None and len(self.dom):
        dom = prep_article(self.dom)
        dom = build_base_document(dom, self._return_fragment)
        return self._remove_orphans(
            dom.get_element_by_id("readabilityBody"))
    else:
        logger.info("No document to use.")
        return build_error_document(self._return_fragment)
If we fail to find a good candidate we need to find something else.
def parse(cls, dom):
    handler = cls()
    saxify(dom, handler)
    return handler.content
Converts DOM into paragraphs.
def fastaIterator(fastaFN):
    '''
    Iterator that yields tuples containing a sequence label and the sequence itself
    @param fastaFN - the FASTA filename to open and parse
    @return - an iterator yielding tuples of the form (label, sequence) from the FASTA file
    '''
    if fastaFN.endswith('.gz'):
        fp = gzip.open(fastaFN, 'r')
    else:
        fp = open(fastaFN, 'r')

    label = ''
    segments = []
    for line in fp:
        if line[0] == '>':
            if label != '':
                yield (label, ''.join(segments))
            label = (line.strip('\n')[1:]).split(' ')[0]
            segments = []
        else:
            segments.append(line.strip('\n'))

    if label != '' and len(segments) > 0:
        yield (label, ''.join(segments))
    fp.close()
Iterator that yields tuples containing a sequence label and the sequence itself @param fastaFN - the FASTA filename to open and parse @return - an iterator yielding tuples of the form (label, sequence) from the FASTA file
def loadBWT(bwtDir, logger=None):
    '''
    Generic load function, this is recommended for anyone wishing to use this code as it will
    automatically detect compression and assign the appropriate class, preferring the decompressed
    version if both exist.
    @return - a MultiStringBWT, CompressedMSBWT, or None if neither can be instantiated
    '''
    if os.path.exists(bwtDir + '/msbwt.npy'):
        msbwt = MultiStringBWT()
        msbwt.loadMsbwt(bwtDir, logger)
        return msbwt
    elif os.path.exists(bwtDir + '/comp_msbwt.npy'):
        msbwt = CompressedMSBWT()
        msbwt.loadMsbwt(bwtDir, logger)
        return msbwt
    else:
        if logger is not None:  # logger defaults to None, so guard the call
            logger.error('Invalid BWT directory.')
        return None
Generic load function, this is recommended for anyone wishing to use this code as it will automatically detect compression and assign the appropriate class preferring the decompressed version if both exist. @return - a MultiStringBWT, CompressedBWT, or none if neither can be instantiated
def parseProfileLine(fp):
    '''
    Helper function for profile parsing
    @param fp - the file pointer to get the next line from
    @return - (kmer, kmerCount) as (string, int)
    '''
    nextLine = fp.readline()
    if nextLine is None or nextLine == '':
        return (None, None)
    else:
        pieces = nextLine.strip('\n').split(',')
        return (pieces[0], int(pieces[1]))
Helper function for profile parsing @param fp - the file pointer to get the next line from @return - (kmer, kmerCount) as (string, int)
def reverseComplement(seq):
    '''
    Helper function for generating reverse-complements
    '''
    revComp = ''
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '$': '$'}
    for c in reversed(seq):
        revComp += complement[c]
    return revComp
Helper function for generating reverse-complements
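A quick check of the helper:

print(reverseComplement('ACGT$'))  # '$ACGT'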
def constructIndexing(self):
    '''
    This helper function calculates the start and end index for each character in the BWT.
    Basically, the information generated here is for quickly finding offsets. This is run
    AFTER self.constructTotalCounts(...)
    '''
    # mark starts and ends of key elements
    self.startIndex = [None] * self.vcLen
    self.endIndex = [None] * self.vcLen
    pos = 0

    # go through the 1-mers
    for c in xrange(0, self.vcLen):
        # build start and end indexes
        self.startIndex[c] = pos
        pos += self.totalCounts[c]
        self.endIndex[c] = pos
This helper function calculates the start and end index for each character in the BWT. Basically, the information generated here is for quickly finding offsets. This is run AFTER self.constructTotalCounts(...)
def getSequenceDollarID(self, strIndex, returnOffset=False):
    '''
    This will take a given index and work backwards until it encounters a '$',
    indicating which dollar ID is associated with this read
    @param strIndex - the index of the character to start with
    @return - an integer indicating the dollar ID of the string the given character belongs to
    '''
    # figure out the first hop backwards
    currIndex = strIndex
    prevChar = self.getCharAtIndex(currIndex)
    currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
    i = 0

    # while we haven't looped back to the start
    while prevChar != 0:
        # figure out where to go from here
        prevChar = self.getCharAtIndex(currIndex)
        currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
        i += 1

    if returnOffset:
        return (currIndex, i)
    else:
        return currIndex
This will take a given index and work backwards until it encounters a '$' indicating which dollar ID is associated with this read @param strIndex - the index of the character to start with @return - an integer indicating the dollar ID of the string the given character belongs to
def getOccurrenceOfCharAtIndex(self, sym, index):
    '''
    This function gets the FM-index value of a character at the specified position
    @param sym - the character to find the occurrence level
    @param index - the index we want to find the occurrence level at
    @return - the number of occurrences of char before the specified index
    '''
    # sampling method: get the bin we occupy
    binID = index >> self.bitPower

    # these two methods seem to have the same approximate run time
    if (binID << self.bitPower) == index:
        ret = self.partialFM[binID][sym]
    else:
        ret = self.partialFM[binID][sym] + np.bincount(self.bwt[binID << self.bitPower:index], minlength=6)[sym]
    return int(ret)
This functions gets the FM-index value of a character at the specified position @param sym - the character to find the occurrence level @param index - the index we want to find the occurrence level at @return - the number of occurrences of char before the specified index
def getFullFMAtIndex(self, index):
    '''
    This function creates a complete FM-index for a specific position in the BWT.
    Example using the BWT from above:

        BWT    Full FM-index
               $  A  C  G  T
        C      0  1  2  4  4
        $      0  1  3  4  4
        C      1  1  3  4  4
        A      1  1  4  4  4
               1  2  4  4  4

    @return - the above information in the form of an array that already
              incorporates the offset value into the counts
    '''
    # get the bin we occupy
    binID = index >> self.bitPower
    if binID << self.bitPower == index:
        ret = self.partialFM[binID]
    else:
        ret = self.partialFM[binID] + np.bincount(self.bwt[binID << self.bitPower:index], minlength=6)
    return ret
This function creates a complete FM-index for a specific position in the BWT. Example using the above example: BWT Full FM-index $ A C G T C 0 1 2 4 4 $ 0 1 3 4 4 C 1 1 3 4 4 A 1 1 4 4 4 1 2 4 4 4 @return - the above information in the form of an array that already incorporates the offset value into the counts
def loadMsbwt(self, dirName, logger):
    '''
    This function loads a BWT file and constructs total counts, indexes start
    positions, and constructs an FM index in memory
    @param dirName - the directory to load, inside should be '<DIR>/comp_msbwt.npy' or it will fail
    '''
    # open the file with our BWT in it
    self.dirName = dirName
    self.bwt = np.load(self.dirName + '/comp_msbwt.npy', 'r')

    # build auxiliary structures
    self.constructTotalCounts(logger)
    self.constructIndexing()
    self.constructFMIndex(logger)
This functions loads a BWT file and constructs total counts, indexes start positions, and constructs an FM index in memory @param dirName - the directory to load, inside should be '<DIR>/comp_msbwt.npy' or it will fail
def getBWTRange(self, start, end):
    '''
    This function masks the complexity of retrieving a chunk of the BWT from the compressed format
    @param start - the beginning of the range to retrieve
    @param end - the end of the range in normal python notation (bwt[end] is not part of the return)
    @return - a range of integers representing the characters in the bwt from start to end
    '''
    # set aside an array block to fill
    startBlockIndex = start >> self.bitPower
    endBlockIndex = int(math.floor(float(end) / self.binSize))
    trueStart = startBlockIndex * self.binSize

    # first we will extract the range of blocks
    return self.decompressBlocks(startBlockIndex, endBlockIndex)[start - trueStart:end - trueStart]
This function masks the complexity of retrieving a chunk of the BWT from the compressed format @param start - the beginning of the range to retrieve @param end - the end of the range in normal python notation (bwt[end] is not part of the return) @return - a range of integers representing the characters in the bwt from start to end
def decode_html(html):
    if isinstance(html, unicode):
        return html

    match = CHARSET_META_TAG_PATTERN.search(html)
    if match:
        declared_encoding = match.group(1).decode("ASCII")
        # treat an unknown declared encoding as if it wasn't found at all
        with ignored(LookupError):
            return html.decode(declared_encoding, "ignore")

    # try to enforce UTF-8 first
    with ignored(UnicodeDecodeError):
        return html.decode("utf8")

    text = TAG_MARK_PATTERN.sub(to_bytes(" "), html)
    diff = text.decode("utf8", "ignore").encode("utf8")
    sizes = len(diff), len(text)

    # 99% of the text is UTF-8
    if abs(len(text) - len(diff)) < max(sizes) * 0.01:
        return html.decode("utf8", "ignore")

    # try to detect the encoding
    encoding = "utf8"
    encoding_detector = chardet.detect(text)
    if encoding_detector["encoding"]:
        encoding = encoding_detector["encoding"]

    return html.decode(encoding, "ignore")
Converts a bytes stream containing an HTML page into Unicode. Tries to guess the character encoding from the meta tag or by using the "chardet" library.
def build_document(html_content, base_href=None):
    assert html_content is not None
    if isinstance(html_content, unicode):
        html_content = html_content.encode("utf8", "xmlcharrefreplace")

    try:
        document = document_fromstring(html_content, parser=UTF8_PARSER)
    except (ParserError, XMLSyntaxError):
        raise ValueError("Failed to parse document contents.")

    if base_href:
        document.make_links_absolute(base_href, resolve_base_href=True)
    else:
        document.resolve_base_href()

    return document
Requires that the `html_content` not be None
def _parse_properties(self):
    props_dict = self.data.get('properties', {})
    for prop_name in self.KNOWN_PROPERTIES:
        if prop_name in props_dict:
            setattr(self, prop_name, props_dict.get(prop_name))
        else:
            setattr(self, prop_name, None)
Nodes have properties, which are facts like the name, description, url, etc. Loop through each of them and set them as attributes on this node so that we can make calls like company.name or person.description.
def _parse_relationship(self):
    rs_dict = self.data.get('relationships', {})
    for rs_name in self.KNOWN_RELATIONSHIPS:
        if rs_name in rs_dict:
            setattr(
                self, rs_name, Relationship(rs_name, rs_dict.get(rs_name)))
        else:
            # fill in other relationships with None values
            setattr(self, rs_name, NoneRelationshipSingleton)
Nodes have relationships and, similarly to properties, we set each as an attribute on the Organization so we can make calls like company.current_team or person.degrees.
def open(self):
    self.startTime = datetime.datetime.now()
    self.offset = 0
    return self
Reset time and counts.