def _or_join(self, terms):
    """Joins terms using the OR operator.

    Args:
        terms (list): terms to join.

    Examples:
        self._or_join(['term1', 'term2']) -> 'term1 | term2'

    Returns:
        str
    """
    from six import text_type

    if isinstance(terms, (tuple, list)):
        if len(terms) > 1:
            return ' | '.join(text_type(t) for t in terms)
        return terms[0]
    return terms

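A quick behavioural check of this join, as a standalone sketch (the bare function below is a hypothetical re-implementation for illustration, not the library's class method):

def _or_join(terms):
    if isinstance(terms, (tuple, list)):
        return ' | '.join(str(t) for t in terms) if len(terms) > 1 else terms[0]
    return terms

assert _or_join(['diabetes', 'obesity']) == 'diabetes | obesity'  # PostgreSQL tsquery OR
assert _or_join(['single']) == 'single'
assert _or_join('raw') == 'raw'
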
def search(self, search_phrase, limit=None):
    """Finds datasets by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to return. None means no limit.

    Returns:
        list of DatasetSearchResult instances.
    """
    query, query_params = self._make_query_from_terms(search_phrase, limit=limit)
    self._parsed_query = (str(query), query_params)
    assert isinstance(query, TextClause)

    datasets = {}

    def make_result(vid=None, b_score=0, p_score=0):
        res = DatasetSearchResult()
        res.b_score = b_score
        res.p_score = p_score
        res.partitions = set()
        res.vid = vid
        return res

    if query_params:
        results = self.execute(query, **query_params)
        for result in results:
            vid, dataset_score = result
            datasets[vid] = make_result(vid, b_score=dataset_score)

    logger.debug('Extending datasets with partitions.')
    for partition in self.backend.partition_index.search(search_phrase):
        if partition.dataset_vid not in datasets:
            datasets[partition.dataset_vid] = make_result(partition.dataset_vid)
        datasets[partition.dataset_vid].p_score += partition.score
        datasets[partition.dataset_vid].partitions.add(partition)

    return list(datasets.values())

def _make_query_from_terms(self, terms, limit=None):
    """Creates a dataset query from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        tuple of (TextClause, dict): the first element is the FTS query, the second
            holds the query parameters. Each row of the query result is a pair:
            (vid, score).
    """
    expanded_terms = self._expand_terms(terms)

    if expanded_terms['doc']:
        # create query with real score.
        query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) AS score"]
        if expanded_terms['doc'] and expanded_terms['keywords']:
            query_parts = [
                "SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc))"
                " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
                ' AS score']
    else:
        # create query with score = 1 because the query will not touch the doc field.
        query_parts = ['SELECT vid, 1 AS score']

    query_parts.append('FROM dataset_index')
    query_params = {}
    where_counter = 0

    if expanded_terms['doc']:
        where_counter += 1
        query_parts.append('WHERE doc @@ to_tsquery(:doc)')
        query_params['doc'] = self.backend._and_join(expanded_terms['doc'])

    if expanded_terms['keywords']:
        query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
        kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
        query_parts.append(('AND ' if where_counter else 'WHERE ') + kw_q)

    query_parts.append('ORDER BY score DESC')

    if limit:
        query_parts.append('LIMIT :limit')
        query_params['limit'] = limit

    query_parts.append(';')
    logger.debug(
        'Dataset terms conversion: `{}` terms converted to `{}` query with `{}` params.'
        .format(terms, query_parts, query_params))
    q = text('\n'.join(query_parts)), query_params
    logger.debug('Dataset search query: {}'.format(q))
    return q

def _delete(self, vid=None):
    """Deletes the given dataset from the index.

    Args:
        vid (str): dataset vid.
    """
    assert vid is not None
    # NOTE: the SQL string was elided in the source; a plausible reconstruction:
    query = text('DELETE FROM dataset_index WHERE vid = :vid;')
    self.execute(query, vid=vid)

def _make_query_from_terms(self, terms, limit=None):
    """Creates a partition query from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        tuple of (TextClause, dict): the first element is the FTS query, the second
            holds the query parameters. Each row of the query result is a triple:
            (vid, dataset_vid, score).
    """
    expanded_terms = self._expand_terms(terms)
    terms_used = 0

    if expanded_terms['doc']:
        # create query with real score.
        query_parts = [
            "SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) AS score"]
        if expanded_terms['doc'] and expanded_terms['keywords']:
            query_parts = [
                "SELECT vid, dataset_vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc))"
                " + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
                ' AS score']
    else:
        # create query with score = 1 because the query will not touch the doc field.
        query_parts = ['SELECT vid, dataset_vid, 1 AS score']

    query_parts.append('FROM partition_index')
    query_params = {}
    where_count = 0

    if expanded_terms['doc']:
        query_parts.append('WHERE doc @@ to_tsquery(:doc)')
        query_params['doc'] = self.backend._and_join(expanded_terms['doc'])
        where_count += 1
        terms_used += 1

    if expanded_terms['keywords']:
        query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
        kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
        query_parts.append(('AND ' if where_count else 'WHERE ') + kw_q)
        where_count += 1
        terms_used += 1

    if expanded_terms['from']:
        query_parts.append(('AND ' if where_count else 'WHERE ') + 'from_year >= :from_year')
        query_params['from_year'] = expanded_terms['from']
        where_count += 1
        terms_used += 1

    if expanded_terms['to']:
        query_parts.append(('AND ' if where_count else 'WHERE ') + 'to_year <= :to_year')
        query_params['to_year'] = expanded_terms['to']
        where_count += 1
        terms_used += 1

    query_parts.append('ORDER BY score DESC')

    if limit:
        query_parts.append('LIMIT :limit')
        query_params['limit'] = limit

    if not terms_used:
        logger.debug('No terms used; not creating query')
        return None, None

    query_parts.append(';')
    # was logged as 'Dataset terms conversion', apparently copied from the dataset index.
    logger.debug(
        'Partition terms conversion: `{}` terms converted to `{}` query with `{}` params.'
        .format(terms, query_parts, query_params))
    return text('\n'.join(query_parts)), query_params

def search(self, search_phrase, limit=None):
    """Finds partitions by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to generate. None means no limit.

    Yields:
        PartitionSearchResult instances.
    """
    query, query_params = self._make_query_from_terms(search_phrase, limit=limit)
    self._parsed_query = (str(query), query_params)

    if query is not None:
        self.backend.library.database.set_connection_search_path()
        results = self.execute(query, **query_params)
        for result in results:
            vid, dataset_vid, score = result
            yield PartitionSearchResult(vid=vid, dataset_vid=dataset_vid, score=score)

def _as_document(self, partition):
    """Converts a partition to a document indexed by the FTS index.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict whose structure matches BasePartitionIndex._schema.
    """
    doc = super(self.__class__, self)._as_document(partition)
    # pass time_coverage through to _index_document.
    doc['time_coverage'] = partition.time_coverage
    return doc

def search(self, search_phrase, limit=None):
    """Finds identifiers by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to return. None means no limit.

    Yields:
        IdentifierSearchResult instances.
    """
    query_parts = [
        'SELECT identifier, type, name, similarity(name, :word) AS sml',
        'FROM identifier_index',
        'WHERE name % :word',
        'ORDER BY sml DESC, name']
    query_params = {'word': search_phrase}

    if limit:
        query_parts.append('LIMIT :limit')
        query_params['limit'] = limit

    query_parts.append(';')
    query = text('\n'.join(query_parts))

    self.backend.library.database.set_connection_search_path()
    results = self.execute(query, **query_params).fetchall()
    for result in results:
        vid, type, name, score = result
        yield IdentifierSearchResult(score=score, vid=vid, type=type, name=name)

def _delete(self, identifier=None):
    """Deletes the given identifier from the index.

    Args:
        identifier (str): identifier of the document to delete.
    """
    # NOTE: the SQL string was elided in the source; a plausible reconstruction:
    query = text('DELETE FROM identifier_index WHERE identifier = :identifier;')
    self.execute(query, identifier=identifier)

def new_partition(self, table, **kwargs):
    """Creates a new partition and returns it.

    Args:
        table (orm.Table):

    Returns:
        orm.Partition
    """
    from . import Partition

    # Create the basic partition record, with a sequence ID.
    if isinstance(table, string_types):
        table = self.table(table)

    if 'sequence_id' in kwargs:
        sequence_id = kwargs.pop('sequence_id')
    else:
        sequence_id = self._database.next_sequence_id(Dataset, self.vid, Partition)

    p = Partition(
        t_vid=table.vid,
        table_name=table.name,
        sequence_id=sequence_id,
        dataset=self,
        d_vid=self.vid,
        **kwargs
    )
    p.update_id()
    return p

def _set_value(instance_to_path_map, path_to_instance_map, prop_tree, config_instance):
    """Finds the appropriate term in the prop_tree and sets its value from config_instance.

    Args:
        instance_to_path_map (dict): maps Config instances to their paths (a cache of the configs).
        path_to_instance_map (dict): maps paths to Config instances.
        prop_tree (PropertyDictTree): property tree to populate.
        config_instance (Config):
    """
    path = instance_to_path_map[config_instance]

    # find group
    group = prop_tree
    for elem in path[:-1]:
        group = getattr(group, elem)

    assert group._key == config_instance.parent.key
    setattr(group, config_instance.key, config_instance.value)

    # bind config to the term
    # FIXME: Make all the terms store the config instance the same way.
    term = getattr(group, config_instance.key)

    try:
        if hasattr(term, '_term'):
            # ScalarTermS and ScalarTermU case
            term._term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find a way to simplify this.
        pass

    try:
        if hasattr(term, '_config'):
            term._config = config_instance
            return
    except KeyError:
        # python3 case. TODO: Find a way to simplify this.
        pass

def get_or_create(session, model, **kwargs):
    """Gets or creates a sqlalchemy instance.

    Args:
        session (Sqlalchemy session):
        model (sqlalchemy model):
        kwargs (dict): kwargs to look up or create the instance.

    Returns:
        tuple: the first element is the found or created instance, the second is a
            boolean - True if the instance was created, False if it was found.
    """
    instance = session.query(model).filter_by(**kwargs).first()
    if instance:
        return instance, False

    instance = model(**kwargs)
    if 'dataset' in kwargs:
        instance.update_sequence_id(session, kwargs['dataset'])
    session.add(instance)
    session.commit()
    return instance, True

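A self-contained sketch of the get-or-create pattern with SQLAlchemy. The `User` model and in-memory engine are hypothetical, and the ambry-specific 'dataset' branch is omitted:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):  # hypothetical model, for illustration only
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

def get_or_create(session, model, **kwargs):
    # same shape as above, minus the ambry-specific 'dataset' branch
    instance = session.query(model).filter_by(**kwargs).first()
    if instance:
        return instance, False
    instance = model(**kwargs)
    session.add(instance)
    session.commit()
    return instance, True

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    u1, created1 = get_or_create(session, User, name='alice')
    u2, created2 = get_or_create(session, User, name='alice')
    assert created1 and not created2 and u1.id == u2.id
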
def _get_config_instance(group_or_term, session, **kwargs):
    """Finds the appropriate config instance and returns it.

    Args:
        group_or_term (Group or Term):
        session (Sqlalchemy session):
        kwargs (dict): kwargs to pass to get_or_create.

    Returns:
        tuple of (Config, bool):
    """
    path = group_or_term._get_path()
    cached = group_or_term._top._cached_configs.get(path)

    if cached:
        config = cached
        created = False
    else:
        # does not exist or is not yet cached
        config, created = get_or_create(session, Config, **kwargs)

    return config, created

def _preprocess_sqlite_index(asql_query, library, backend, connection):
    """Creates a materialized view for each indexed partition found in the query.

    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend):
        connection (apsw.Connection):

    Returns:
        str: the converted asql if it contains an index query; otherwise asql_query as is.
    """
    new_query = None

    if asql_query.strip().lower().startswith('index'):
        logger.debug(
            '_preprocess_index: create index query found.\n asql query: {}'
            .format(asql_query))
        index = parse_index(asql_query)
        partition = library.partition(index.source)
        table = backend.install(connection, partition, materialize=True)
        index_name = '{}_{}_ind'.format(partition.vid, '_'.join(index.columns))
        new_query = 'CREATE INDEX IF NOT EXISTS {index} ON {table} ({columns});'.format(
            index=index_name, table=table, columns=','.join(index.columns))
        logger.debug(
            '_preprocess_index: preprocess finished.\n asql query: {}\n new query: {}'
            .format(asql_query, new_query))

    return new_query or asql_query

def install(self, connection, partition, table_name=None, index_columns=None,
            materialize=False, logger=None):
    """Creates a virtual table or a read-only table for a partition.

    Args:
        connection (apsw.Connection):
        partition (orm.Partition): partition to install.
        table_name (str, optional): name to install under; defaults to the partition vid.
        index_columns (list of str, optional): columns to index after installing.
        materialize (boolean): if True, create a read-only table; if False, create a
            virtual table.
        logger (optional):

    Returns:
        str: name of the created table.
    """
    virtual_table = partition.vid
    table = partition.vid if not table_name else table_name

    if self._relation_exists(connection, table):
        if logger:
            logger.debug("Skipping '{}'; already installed".format(table))
        return table  # was a bare `return`; the docstring promises the table name
    else:
        if logger:
            logger.info("Installing '{}'".format(table))

    partition.localize()
    virtual_table = partition.vid + '_vt'
    self._add_partition(connection, partition)

    if materialize:
        if self._relation_exists(connection, table):
            debug_logger.debug(
                'Materialized table of the partition already exists.\n partition: {}, table: {}'
                .format(partition.name, table))
        else:
            cursor = connection.cursor()

            # create table
            create_query = self.__class__._get_create_query(partition, table)
            debug_logger.debug(
                'Creating new materialized view for partition mpr.'
                '\n partition: {}, view: {}, query: {}'
                .format(partition.name, table, create_query))
            cursor.execute(create_query)

            # populate the just-created table with data from the virtual table.
            # NOTE: the SQL template was elided in the source; a plausible reconstruction:
            copy_query = 'INSERT INTO {} SELECT * FROM {};'.format(table, virtual_table)
            debug_logger.debug(
                'Populating sqlite table with rows from partition mpr.'
                '\n partition: {}, view: {}, query: {}'
                .format(partition.name, table, copy_query))
            cursor.execute(copy_query)
            cursor.close()
    else:
        cursor = connection.cursor()
        view_q = 'CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {}'.format(
            partition.vid, virtual_table)
        cursor.execute(view_q)
        cursor.close()

    if index_columns is not None:
        self.index(connection, table, index_columns)

    return table

def index(self, connection, partition, columns):
    """Creates an index on the columns.

    Args:
        connection (apsw.Connection): connection to the sqlite database that stores
            the mpr table or view.
        partition (orm.Partition):
        columns (list of str):
    """
    import hashlib

    # NOTE: the SQL template was elided in the source; a plausible reconstruction:
    query_tmpl = 'CREATE INDEX IF NOT EXISTS {index_name} ON {table_name} ({columns});'

    if not isinstance(columns, (list, tuple)):
        columns = [columns]

    col_list = ','.join('"{}"'.format(col) for col in columns)
    col_hash = hashlib.md5(col_list.encode('utf-8')).hexdigest()  # md5 needs bytes on python3

    try:
        table_name = partition.vid
    except AttributeError:
        table_name = partition  # it's really a table name

    query = query_tmpl.format(
        index_name='{}_{}_i'.format(table_name, col_hash),
        table_name=table_name,
        columns=col_list)

    logger.debug('Creating sqlite index: query: {}'.format(query))
    cursor = connection.cursor()
    cursor.execute(query)

def _get_mpr_view(self, connection, table):
    """Finds and returns the view name in the sqlite db represented by the given connection.

    Args:
        connection: connection to the sqlite db where to look for the partition table.
        table (orm.Table):

    Raises:
        MissingViewError: if the database does not have a view for the table.

    Returns:
        str: database view storing the partition data.
    """
    logger.debug('Looking for view of the table.\n table: {}'.format(table.vid))
    view = self.get_view_name(table)
    if self._relation_exists(connection, view):
        logger.debug(
            'View of the table exists.\n table: {}, view: {}'.format(table.vid, view))
        return view

    raise MissingViewError(
        'sqlite database does not have view for {} table.'.format(table.vid))

def _get_mpr_table(self, connection, partition):
    """Returns the name of the sqlite table that stores the mpr data.

    Args:
        connection (apsw.Connection): connection to the sqlite database that stores the mpr data.
        partition (orm.Partition):

    Returns:
        str:

    Raises:
        MissingTableError: if the partition table is not found in the db.
    """
    # TODO: This is the first candidate for optimization. Add a field to the partition
    # with the table name and update it during table creation.
    #
    # Optimized version:
    #     return partition.mpr_table or raise exception
    #
    # Non-optimized version: first check whether the partition has a read-only table.
    virtual_table = partition.vid
    table = '{}_v'.format(virtual_table)

    logger.debug(
        'Looking for materialized table of the partition.\n partition: {}'.format(partition.name))
    if self._relation_exists(connection, table):
        logger.debug(
            'Materialized table of the partition found.\n partition: {}, table: {}'
            .format(partition.name, table))
        return table

    # now check for the virtual table
    logger.debug(
        'Looking for a virtual table of the partition.\n partition: {}'.format(partition.name))
    if self._relation_exists(connection, virtual_table):
        # was logging `table` here, which is the materialized name, not the virtual one
        logger.debug(
            'Virtual table of the partition found.\n partition: {}, table: {}'
            .format(partition.name, virtual_table))
        return virtual_table

    raise MissingTableError(
        'sqlite database does not have table for mpr of {} partition.'.format(partition.vid))

def _relation_exists(self, connection, relation):
    """Returns True if the relation (table or view) exists in the sqlite db, False otherwise.

    Args:
        connection (apsw.Connection): connection to the sqlite database that stores the mpr data.
        relation (str): name of the table or view.

    Returns:
        boolean: True if the relation exists, False otherwise.
    """
    query = "SELECT 1 FROM sqlite_master WHERE (type='table' OR type='view') AND name=?;"
    cursor = connection.cursor()
    cursor.execute(query, [relation])
    result = cursor.fetchall()
    return result == [(1,)]

def _get_create_query(partition, tablename, include=None):
    """Creates and returns a `CREATE TABLE ...` sql statement for the given mprows.

    Args:
        partition (orm.Partition):
        tablename (str): name of the table in the returned create query.
        include (list of str, optional): list of columns to include in the query.

    Returns:
        str: create table query.
    """
    TYPE_MAP = {
        'int': 'INTEGER',
        'float': 'REAL',
        six.binary_type.__name__: 'TEXT',
        six.text_type.__name__: 'TEXT',
        'date': 'DATE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
    }
    columns_types = []

    if not include:
        include = []

    for column in sorted(partition.datafile.reader.columns, key=lambda x: x['pos']):
        if include and column['name'] not in include:
            continue
        sqlite_type = TYPE_MAP.get(column['type'])
        if not sqlite_type:
            raise Exception('Do not know how to convert {} to sql column.'.format(column['type']))
        columns_types.append(' "{}" {}'.format(column['name'], sqlite_type))

    columns_types_str = ',\n'.join(columns_types)
    return 'CREATE TABLE IF NOT EXISTS {}(\n{})'.format(tablename, columns_types_str)

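To see the shape of the generated statement, here is a quick sketch with a hypothetical stand-in for partition.datafile.reader.columns (the partition vid is also made up):

columns = [
    {'pos': 0, 'name': 'id', 'type': 'int'},
    {'pos': 1, 'name': 'name', 'type': 'str'},
    {'pos': 2, 'name': 'score', 'type': 'float'},
]
TYPE_MAP = {'int': 'INTEGER', 'str': 'TEXT', 'float': 'REAL'}
cols = ',\n'.join(' "{}" {}'.format(c['name'], TYPE_MAP[c['type']])
                  for c in sorted(columns, key=lambda x: x['pos']))
print('CREATE TABLE IF NOT EXISTS {}(\n{})'.format('p00000100001', cols))
# CREATE TABLE IF NOT EXISTS p00000100001(
#  "id" INTEGER,
#  "name" TEXT,
#  "score" REAL)
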
def _add_partition(self, connection, partition):
    """Creates a sqlite virtual table for the mpr file of the given partition.

    Args:
        connection: connection to the sqlite db that stores the mpr data.
        partition (orm.Partition):
    """
    logger.debug('Creating virtual table for partition.\n partition: {}'.format(partition.name))
    sqlite_med.add_partition(connection, partition.datafile, partition.vid + '_vt')

def _execute(self, connection, query, fetch=True):
    """Executes the given query using the given connection.

    Args:
        connection (apsw.Connection): connection to the sqlite db that stores the mpr data.
        query (str): sql query
        fetch (boolean, optional): if True, fetch the query result and return it;
            if False, return the cursor.

    Returns:
        iterable with the query result.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
    except Exception as e:
        from ambry.mprlib.exceptions import BadSQLError
        raise BadSQLError('Failed to execute query: {}; {}'.format(query, e))
    if fetch:
        return cursor.fetchall()
    return cursor

def authenticate(self):
    """Authenticates the user by any available means.

    Returns:
        tuple (is_valid, user): is_valid is True if the user is valid, False if not.
    """
    basic_auth = request.authorization
    is_valid = False
    user = None

    if basic_auth:
        is_valid, user = self.check_basic_auth(basic_auth.username, basic_auth.password)
    else:  # Try token auth
        token = request.headers.get('Authorization', None)
        param_token = request.args.get('access_token')
        if token or param_token:
            if token:
                # slice off the 'token ' piece of the header (following github style):
                token = token[6:]
            else:
                # grab it from the query dict instead
                token = param_token
            log.debug('Received token: %s', token)
            is_valid, user = self.check_token_auth(token)

    return (is_valid, user)

def base62_decode(cls, string):
    """Decode a base-62 encoded string into a number.

    Arguments:
    - `string`: The encoded string (the alphabet is fixed to base 62 internally).

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)
    strlen = len(string)
    num = 0

    for idx, char in enumerate(string):
        power = strlen - (idx + 1)
        try:
            num += alphabet.index(char) * (base ** power)
        except ValueError:
            raise Base62DecodeError("Failed to decode char: '{}'".format(char))

    return num

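A quick round-trip check. The encoder below is a hypothetical counterpart, included only to exercise the decoding logic:

ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

def base62_encode(num):
    # hypothetical counterpart to base62_decode, for the round-trip test
    if num == 0:
        return ALPHABET[0]
    digits = []
    while num:
        num, rem = divmod(num, 62)
        digits.append(ALPHABET[rem])
    return ''.join(reversed(digits))

def base62_decode(string):
    return sum(ALPHABET.index(ch) * 62 ** (len(string) - i - 1)
               for i, ch in enumerate(string))

assert base62_decode('10') == 62
assert base62_decode(base62_encode(988485)) == 988485
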
def _get_sqlite_columns(connection, table):
    """Returns a list of tuples containing the columns of the table.

    Args:
        connection: sqlalchemy connection to the sqlite database.
        table (str): name of the table.

    Returns:
        list of (name, datatype, position): where name is the column name, datatype is
            the python type of the column, and position is the ordinal position of the column.
    """
    # TODO: Move to the sqlite wrapper.
    # TODO: Consider sqlalchemy mapping.
    SQL_TO_PYTHON_TYPES = {
        'INT': int,
        'INTEGER': int,
        'TINYINT': int,
        'SMALLINT': int,
        'MEDIUMINT': int,
        'BIGINT': int,
        'UNSIGNED BIG INT': int,
        'INT8': int,
        'NUMERIC': float,
        'REAL': float,
        'FLOAT': float,
        'DOUBLE': float,
        'BOOLEAN': bool,
        'CHARACTER': str,
        'VARCHAR': str,
        'TEXT': str,
    }
    query = "PRAGMA table_info('{}');"
    result = connection.execute(query.format(table))

    ret = []
    for row in result:
        position = row[0] + 1
        name = row[1]
        datatype = row[2]
        try:
            datatype = SQL_TO_PYTHON_TYPES[datatype]
        except KeyError:
            raise Exception(
                'Do not know how to convert {} sql datatype to python data type.'
                .format(datatype))
        ret.append((name, datatype, position))
    return ret

def _convert_bundle(bundle):
    """Converts an ambry bundle to a dict ready to send to the CKAN API.

    Args:
        bundle (ambry.bundle.Bundle): bundle to convert.

    Returns:
        dict: dict to send to CKAN to create the dataset.
            See http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.package_create
    """
    # shortcut for metadata
    meta = bundle.dataset.config.metadata
    notes = ''
    for f in bundle.dataset.files:
        if f.path.endswith('documentation.md'):
            contents = f.unpacked_contents
            if isinstance(contents, six.binary_type):
                contents = contents.decode('utf-8')
            notes = json.dumps(contents)
            break

    return {
        'name': bundle.dataset.vid.lower(),
        'title': meta.about.title,
        'author': meta.contacts.wrangler.name,
        'author_email': meta.contacts.wrangler.email,
        'maintainer': meta.contacts.maintainer.name,
        'maintainer_email': meta.contacts.maintainer.email,
        'license_id': '',
        'notes': notes,
        'url': meta.identity.source,
        'version': bundle.dataset.version,
        'state': 'active',
        'owner_org': CKAN_CONFIG['organization'],
    }

def table(self, ref):
    """Finds a table by ref and returns it.

    Args:
        ref (str): id, vid (versioned id) or name of the table.

    Raises:
        NotFoundError: if a table with the given ref is not found.

    Returns:
        orm.Table
    """
    try:
        obj_number = ObjectNumber.parse(ref)
        ds_obj_number = obj_number.as_dataset
        dataset = self._db.dataset(ds_obj_number)
        # Could do it in one SQL query, but this is easier.
        table = dataset.table(ref)
    except NotObjectNumberError:
        q = self.database.session.query(Table)\
            .filter(Table.name == str(ref))\
            .order_by(Table.vid.desc())
        table = q.first()
        if not table:
            raise NotFoundError("No table for ref: '{}'".format(ref))
    return table

def index_library_datasets(self, tick_f=None):
    """Indexes all datasets of the library.

    Args:
        tick_f (callable, optional): callable of one argument; gets a string with the index state.
    """
    dataset_n = 0
    partition_n = 0

    def tick(d, p):
        if tick_f:
            tick_f('datasets: {} partitions: {}'.format(d, p))

    for dataset in self.library.datasets:
        if self.backend.dataset_index.index_one(dataset):
            # dataset added to the index
            dataset_n += 1
            tick(dataset_n, partition_n)
            for partition in dataset.partitions:
                self.backend.partition_index.index_one(partition)
                partition_n += 1
                tick(dataset_n, partition_n)
        else:
            # dataset already indexed
            pass

def install(self, ref, table_name=None, index_columns=None, logger=None):
    """Finds a partition by reference and installs it to the warehouse db.

    Args:
        ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
    """
    try:
        obj_number = ObjectNumber.parse(ref)
        if isinstance(obj_number, TableNumber):
            table = self._library.table(ref)
            connection = self._backend._get_connection()
            return self._backend.install_table(connection, table, logger=logger)
        else:
            # assume it's a partition
            raise NotObjectNumberError
    except NotObjectNumberError:
        # assume it's a partition.
        partition = self._library.partition(ref)
        connection = self._backend._get_connection()
        return self._backend.install(
            connection, partition,
            table_name=table_name,
            index_columns=index_columns,
            logger=logger)

def materialize(self, ref, table_name=None, index_columns=None, logger=None):
    """Creates a materialized table for the given partition reference.

    Args:
        ref (str): id, vid, name or vname of the partition.

    Returns:
        str: name of the partition table in the database.
    """
    from ambry.library import Library
    assert isinstance(self._library, Library)

    logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref))
    partition = self._library.partition(ref)
    connection = self._backend._get_connection()
    return self._backend.install(
        connection, partition,
        table_name=table_name,
        index_columns=index_columns,
        materialize=True,
        logger=logger)

def index(self, ref, columns):
    """Creates an index on the columns.

    Args:
        ref (str): id, vid, name or versioned name of the partition.
        columns (list of str): names of the columns that need indexes.
    """
    from ambry.orm.exc import NotFoundError

    logger.debug('Creating index for partition.\n ref: {}, columns: {}'.format(ref, columns))
    connection = self._backend._get_connection()
    try:
        table_or_partition = self._library.partition(ref)
    except NotFoundError:
        table_or_partition = ref
    self._backend.index(connection, table_or_partition, columns)

def parse_sql(self, asql):
    """Parses all sql statements from asql and runs each through the backend's preprocessors.

    Args:
        asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140
            for details.
    """
    import sqlparse

    statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True))
    parsed_statements = []
    for statement in statements:
        statement_str = statement.to_unicode().strip()
        for preprocessor in self._backend.sql_processors():
            statement_str = preprocessor(statement_str, self._library, self._backend, self.connection)
        parsed_statements.append(statement_str)
    return parsed_statements

def get_url_param(self, index, default=None):
    """Returns the url parameter with the given index.

    Args:
        index: starts from zero, and comes after the controller and action names in the url.
    """
    params = self.get_url_params()
    return params[index] if index < len(params) else default

def _tzinfome(tzinfo):
    """Gets a tzinfo object from a string.

    Args:
        tzinfo: A string (or string-like) object, or a datetime.tzinfo object.

    Returns:
        A datetime.tzinfo object.

    Raises:
        UnknownTimeZoneError: If the timezone given can't be decoded.
    """
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
            assert tzinfo.zone in pytz.all_timezones
        except AttributeError:
            raise pytz.UnknownTimeZoneError('Unknown timezone! %s' % tzinfo)
    return tzinfo

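A small usage sketch with pytz; the bare function below is a standalone re-implementation for illustration (it also catches UnknownTimeZoneError explicitly, where the original lets pytz's own error propagate):

import datetime
import pytz

def _tzinfome(tzinfo):
    if not isinstance(tzinfo, datetime.tzinfo):
        try:
            tzinfo = pytz.timezone(tzinfo)
        except (AttributeError, pytz.UnknownTimeZoneError):
            raise pytz.UnknownTimeZoneError('Unknown timezone! %s' % tzinfo)
    return tzinfo

print(_tzinfome('US/Pacific'))   # <DstTzInfo 'US/Pacific' ...>
print(_tzinfome(pytz.utc))       # passes through unchanged
try:
    _tzinfome('Not/AZone')
except pytz.UnknownTimeZoneError as e:
    print('rejected:', e)
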
def localize(dt, force_to_local=True):
    """Localize a datetime to the local timezone.

    If dt is naive, returns the same datetime with the local timezone; otherwise uses
    astimezone to convert.

    Args:
        dt: datetime object.
        force_to_local: Force all results to be in local time.

    Returns:
        A datetime_tz object.
    """
    if not isinstance(dt, datetime_tz):
        if not dt.tzinfo:
            return datetime_tz(dt, tzinfo=localtz())
        dt = datetime_tz(dt)
    if force_to_local:
        return dt.astimezone(localtz())
    return dt

def get_naive(dt):
    """Gets a naive datetime from a datetime.

    datetime_tz objects can't just have tzinfo replaced with None; you need to call
    asdatetime.

    Args:
        dt: datetime object.

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        return dt
    if hasattr(dt, 'asdatetime'):
        return dt.asdatetime()
    return dt.replace(tzinfo=None)

def _wrap_method(name):
    """Wraps a method.

    Patches a method which might return a datetime.datetime to return a
    datetime_tz.datetime_tz instead.

    Args:
        name: The name of the method to patch.
    """
    method = getattr(datetime.datetime, name)

    # Have to give the second argument as the method has no __module__ attribute.
    @functools.wraps(method, ('__name__', '__doc__'), ())
    def wrapper(self, *args, **kw):
        r = method(self, *args, **kw)
        if isinstance(r, datetime.datetime) and not isinstance(r, type(self)):
            r = type(self)(r)
        return r

    setattr(datetime_tz, name, wrapper)

def asdatetime(self, naive=True):
    """Returns this datetime_tz as a datetime object.

    Args:
        naive: Return *without* any tz info.

    Returns:
        This datetime_tz as a datetime object.
    """
    args = list(self.timetuple()[0:6]) + [self.microsecond]
    if not naive:
        args.append(self.tzinfo)
    return datetime.datetime(*args)

def astimezone(self, tzinfo):
    """Returns a version of this timestamp converted to the given timezone.

    Args:
        tzinfo: Either a datetime.tzinfo object or a string (which will be looked up
            in pytz).

    Returns:
        A datetime_tz object in the given timezone.
    """
    # Assert we are not a naive datetime object
    assert self.tzinfo is not None

    tzinfo = _tzinfome(tzinfo)
    d = self.asdatetime(naive=False).astimezone(tzinfo)
    return type(self)(d)

def replace(self, **kw):
    """Returns a datetime with the specified fields replaced by the given arguments.

    For example, dt.replace(days=4) would return a new datetime_tz object exactly the
    same as dt but with the days attribute equal to 4. Any attribute can be replaced,
    but tzinfo can not be set to None.

    Args:
        Any datetime_tz attribute.

    Returns:
        A datetime_tz object with the attributes replaced.

    Raises:
        TypeError: If the given replacement is invalid.
    """
    if 'tzinfo' in kw:
        if kw['tzinfo'] is None:
            raise TypeError('Can not remove the timezone use asdatetime()')
        tzinfo = kw.pop('tzinfo')
    else:
        tzinfo = None

    if 'is_dst' in kw:
        is_dst = kw.pop('is_dst')
    else:
        # Use our own DST setting.
        is_dst = self.is_dst

    replaced = self.asdatetime().replace(**kw)
    return type(self)(replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)

def days(start, end=None):
    """Iterates over the days between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at. If not given, the iterator will never terminate.

    Returns:
        An iterator which generates datetime_tz objects a day apart.
    """
    return iterate.between(start, datetime.timedelta(days=1), end)

def hours(start, end=None):
    """Iterates over the hours between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at. If not given, the iterator will never terminate.

    Returns:
        An iterator which generates datetime_tz objects an hour apart.
    """
    return iterate.between(start, datetime.timedelta(hours=1), end)

def minutes(start, end=None):
    """Iterates over the minutes between the given datetime_tzs.

    Args:
        start: datetime_tz to start from.
        end: (Optional) Date to end at. If not given, the iterator will never terminate.

    Returns:
        An iterator which generates datetime_tz objects a minute apart.
    """
    return iterate.between(start, datetime.timedelta(minutes=1), end)

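These three helpers all delegate to iterate.between. A minimal sketch of that generator, under the obvious assumption about its contract (yield start, then keep adding the delta until end is reached):

import datetime

def between(start, delta, end=None):
    # minimal sketch of the assumed iterate.between contract
    current = start
    while end is None or current < end:
        yield current
        current += delta

start = datetime.datetime(2016, 1, 1)
end = datetime.datetime(2016, 1, 1, 3)
print(list(between(start, datetime.timedelta(hours=1), end)))
# [datetime(2016, 1, 1, 0, 0), datetime(2016, 1, 1, 1, 0), datetime(2016, 1, 1, 2, 0)]
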
def parse_view(query):
    """Parses an asql query to a view object.

    Args:
        query (str): asql query

    Returns:
        View instance: parsed view.
    """
    try:
        idx = query.lower().index('where')
        query = query[:idx]
    except ValueError:
        pass

    if not query.endswith(';'):
        query = query.strip()
        query += ';'

    result = _view_stmt.parseString(query)
    return View(result)

def update(self, rec=None, drop=None, tables=None, install=None, materialize=None,
           indexes=None, joins=0, views=0):
    """Updates the current record.

    Args:
        rec (FIMRecord):
    """
    if not drop:
        drop = []
    if not tables:
        tables = set()
    if not install:
        install = set()
    if not materialize:
        materialize = set()
    if not indexes:
        indexes = set()

    if rec:
        self.update(
            drop=rec.drop,
            tables=rec.tables,
            install=rec.install,
            materialize=rec.materialize,
            indexes=rec.indexes,
            joins=rec.joins
        )

    self.drop += drop
    self.tables |= set(tables)
    self.install |= set(install)
    self.materialize |= set(materialize)
    self.indexes |= set(indexes)
    self.joins += joins
    self.views += views

    # Joins or views promote installed partitions to materialized partitions
    if self.joins > 0 or self.views > 0:
        self.materialize |= self.install
        self.install = set()

def make_acro(past, prefix, s):  # pragma: no cover
    """Creates a three-letter acronym from the input string s.

    Args:
        past: A set object, for storing acronyms that have already been created.
        prefix: A prefix added to the acronym before storing in the set.
        s: The string to create the acronym from.
    """
    def _make_acro(s, t=0):
        # Really should cache these ...
        v = ['a', 'e', 'i', 'o', 'u', 'y']
        c = [chr(x) for x in six_xrange(ord('a'), ord('z') + 1) if chr(x) not in v]

        s = re.sub(r'\W+', '', s.lower())

        vx = [x for x in s if x in v]  # Vowels in the input string
        cx = [x for x in s if x in c]  # Consonants in the input string

        if s.startswith('mc'):  # was 'Mc', which can never match the lower-cased string
            if t < 1:
                return 'Mc' + v[0]
            if t < 2:
                return 'Mc' + c[0]

        if s[0] in v:  # Starts with a vowel
            if t < 1:
                return vx[0] + cx[0] + cx[1]
            if t < 2:
                return vx[0] + vx[1] + cx[0]

        if s[0] in c and s[1] in c:  # Two first consonants
            if t < 1:
                return cx[0] + cx[1] + vx[0]
            if t < 2:
                return cx[0] + cx[1] + cx[2]

        if t < 3:
            return cx[0] + vx[0] + cx[1]
        if t < 4:
            return cx[0] + cx[1] + cx[2]
        if t < 5:
            return cx[0] + vx[0] + vx[1]
        if t < 6:
            return cx[0] + cx[1] + cx[-1]

        # These are punts; just take a substring
        if t < 7:
            return s[0:3]
        if t < 8:
            return s[1:4]
        if t < 9:
            return s[2:5]
        if t < 10:
            return s[3:6]

        return None

    for t in six_xrange(11):  # Try multiple forms until one isn't in the past acronyms
        try:
            a = _make_acro(s, t)
            if a is not None:
                aps = prefix + a if prefix else a
                if aps not in past:
                    past.add(aps)
                    return a
        except IndexError:
            pass

    raise Exception('Could not get acronym.')

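Usage sketch, assuming the function above is importable; the `past` set makes repeated calls collision-free:

past = set()
assert make_acro(past, '', 'California') == 'cal'  # cx[0] + vx[0] + cx[1]
assert make_acro(past, '', 'Colorado') == 'col'
assert make_acro(past, '', 'Colfax') != 'col'      # 'col' is taken, so a later form is tried
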
def __init__(self, dataset_vid=None, vid=None, score=None):
    """Initializes partition search result fields.

    Args:
        dataset_vid (str): vid of the partition's dataset.
        vid (str): partition vid.
        score (int): score of the search result.
    """
    assert vid is not None, 'vid can not be None.'
    assert dataset_vid is not None, 'dataset_vid can not be None.'
    assert score is not None, 'score can not be None.'
    self.dataset_vid = dataset_vid
    self.vid = vid
    self.score = score

def _or_join(self, terms):
    """Joins terms using the OR operator.

    Args:
        terms (list): terms to join.

    Examples:
        self._or_join(['term1', 'term2']) -> '(term1 OR term2)'

    Returns:
        str
    """
    if isinstance(terms, (tuple, list)):
        if len(terms) > 1:
            return '(' + ' OR '.join(terms) + ')'
        return terms[0]
    return terms

def _and_join(self, terms):
    """Joins terms using the AND operator.

    Args:
        terms (list): terms to join.

    Examples:
        self._and_join(['term1']) -> 'term1'
        self._and_join(['term1', 'term2']) -> 'term1 AND term2'
        self._and_join(['term1', 'term2', 'term3']) -> 'term1 AND term2 AND term3'

    Returns:
        str
    """
    if len(terms) > 1:
        return ' AND '.join([self._or_join(t) for t in terms])
    return self._or_join(terms[0])

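A standalone sketch of how these two joins combine (module-level re-implementations for illustration): nested lists become OR groups joined by AND.

def _or_join(terms):
    if isinstance(terms, (tuple, list)):
        return '(' + ' OR '.join(terms) + ')' if len(terms) > 1 else terms[0]
    return terms

def _and_join(terms):
    return ' AND '.join(_or_join(t) for t in terms) if len(terms) > 1 else _or_join(terms[0])

assert _and_join([['red', 'blue'], 'car']) == '(red OR blue) AND car'
assert _and_join(['diabetes']) == 'diabetes'
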
def index_one(self, instance, force=False):
    """Indexes exactly one object of the Ambry system.

    Args:
        instance (any): instance to index.
        force (boolean): if True, replace the document in the index.

    Returns:
        boolean: True if the document was added to the index, False if it already exists.
    """
    # The original condition was `not self.is_indexed(instance) and not force`, which
    # skipped indexing when force was True - the opposite of the documented behaviour.
    if force or not self.is_indexed(instance):
        doc = self._as_document(instance)
        self._index_document(doc, force=force)
        logger.debug('{} indexed as\n {}'.format(instance.__class__, pformat(doc)))
        return True
    logger.debug('{} already indexed.'.format(instance.__class__))
    return False

def _as_document(self, dataset):
    """Converts a dataset to a document indexed by the FTS index.

    Args:
        dataset (orm.Dataset): dataset to convert.

    Returns:
        dict whose structure matches BaseDatasetIndex._schema.
    """
    # find tables.
    assert isinstance(dataset, Dataset)

    execute = object_session(dataset).connection().execute

    query = text()  # NOTE: the SQL string was elided in the source.

    columns = u('\n').join(
        [u(' ').join(list(text_type(e) for e in t))
         for t in execute(query, dataset_vid=str(dataset.identity.vid))])

    doc = '\n'.join([u('{}').format(x) for x in [
        dataset.config.metadata.about.title,
        dataset.config.metadata.about.summary,
        dataset.identity.id_,
        dataset.identity.vid,
        dataset.identity.source,
        dataset.identity.name,
        dataset.identity.vname,
        columns]])

    # From the source, make a variety of combinations for keywords:
    # foo.bar.com -> "foo foo.bar foo.bar.com bar.com"
    parts = u('{}').format(dataset.identity.source).split('.')
    sources = (['.'.join(g) for g in [parts[-i:] for i in range(2, len(parts) + 1)]]
               + ['.'.join(g) for g in [parts[:i] for i in range(0, len(parts))]])

    # Re-calculate the summarization of grains, since the geoid 0.0.7 package had a bug
    # where state-level summaries had the same value as state-level allvals.
    def resum(g):
        try:
            return str(GVid.parse(g).summarize())
        except (KeyError, ValueError):
            return g

    def as_list(value):
        if not value:
            return []
        if isinstance(value, string_types):
            lst = [value]
        else:
            try:
                lst = list(value)
            except TypeError:
                lst = [value]
        return lst

    about_time = as_list(dataset.config.metadata.about.time)
    about_grain = as_list(dataset.config.metadata.about.grain)

    keywords = (
        list(dataset.config.metadata.about.groups)
        + list(dataset.config.metadata.about.tags)
        + about_time
        + [resum(g) for g in about_grain]
        + sources)

    document = dict(
        vid=u('{}').format(dataset.identity.vid),
        title=u('{} {}').format(dataset.identity.name, dataset.config.metadata.about.title),
        doc=u('{}').format(doc),
        keywords=' '.join(u('{}').format(x) for x in keywords)
    )
    return document

def _expand_terms(self, terms):
    """Expands dataset terms to the appropriate fields.

    It parses the search phrase and returns only the search term components that are
    applicable to a Dataset query.

    Args:
        terms (dict or str):

    Returns:
        dict: keys are field names, values are query strings.
    """
    ret = {
        'keywords': list(),
        'doc': list()}

    if not isinstance(terms, dict):
        stp = SearchTermParser()
        terms = stp.parse(terms, term_join=self.backend._and_join)

    if 'about' in terms:
        ret['doc'].append(terms['about'])

    if 'source' in terms:
        ret['keywords'].append(terms['source'])

    return ret

def _as_document(self, partition):
    """Converts the given partition to a document indexed by the FTS backend.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict whose structure matches BasePartitionIndex._schema.
    """
    schema = ' '.join(
        u'{} {} {} {} {}'.format(c.id, c.vid, c.name, c.altname, c.description)
        for c in partition.table.columns)

    values = ''
    for stat in partition.stats:
        if stat.uvalues:
            # Some geometry values are super long. They should not be in uvalues,
            # but when they are, they need to be cut down.
            values += ' '.join(e[:200] for e in stat.uvalues) + '\n'

    # Re-calculate the summarization of grains, since the geoid 0.0.7 package had a bug
    # where state-level summaries had the same value as state-level allvals.
    def resum(g):
        try:
            return str(GVid.parse(g).summarize())
        except KeyError:
            return g
        except ValueError:
            logger.debug(
                "Failed to parse gvid '{}' from partition '{}' grain coverage"
                .format(g, partition.identity.vname))
            return g

    keywords = (
        ' '.join(partition.space_coverage) + ' '
        + ' '.join([resum(g) for g in partition.grain_coverage if resum(g)]) + ' '
        + ' '.join(str(x) for x in partition.time_coverage)
    )

    # NOTE: the original format string had six placeholders for eight arguments, which
    # silently dropped the time and geo descriptions; extended to eight.
    doc_field = u('{} {} {} {} {} {} {} {}').format(
        values,
        schema,
        ' '.join([
            u('{}').format(partition.identity.vid),
            u('{}').format(partition.identity.id_),
            u('{}').format(partition.identity.name),
            u('{}').format(partition.identity.vname)]),
        partition.display.title,
        partition.display.description,
        partition.display.sub_description,
        partition.display.time_description,
        partition.display.geo_description
    )

    document = dict(
        vid=u('{}').format(partition.identity.vid),
        dataset_vid=u('{}').format(partition.identity.as_dataset().vid),
        title=u('{}').format(partition.table.description),
        keywords=u('{}').format(keywords),
        doc=doc_field)

    return document

def _expand_terms(self, terms):
    """Expands partition terms to the appropriate fields.

    Args:
        terms (dict or str):

    Returns:
        dict: keys are field names, values are query strings.
    """
    ret = {
        'keywords': list(),
        'doc': list(),
        'from': None,
        'to': None}

    if not isinstance(terms, dict):
        stp = SearchTermParser()
        terms = stp.parse(terms, term_join=self.backend._and_join)

    if 'about' in terms:
        ret['doc'].append(terms['about'])

    if 'with' in terms:
        ret['doc'].append(terms['with'])

    if 'in' in terms:
        place_vids = self._expand_place_ids(terms['in'])
        ret['keywords'].append(place_vids)

    if 'by' in terms:
        ret['keywords'].append(terms['by'])

    ret['from'] = terms.get('from', None)
    ret['to'] = terms.get('to', None)

    return ret

def _expand_place_ids(self, terms):
    """Looks up all of the place identifiers to get gvids.

    Args:
        terms (str or unicode): terms to look up.

    Returns:
        str or list: the given terms if no identifiers were found, otherwise a list
            of identifiers.
    """
    place_vids = []
    first_type = None

    for result in self.backend.identifier_index.search(terms):
        if not first_type:
            first_type = result.type
        if result.type != first_type:
            # Ignore ones that aren't the same type as the best match
            continue
        place_vids.append(result.vid)

    if place_vids:
        # Add the 'all region' gvids for the higher level
        all_set = set(itertools.chain.from_iterable(
            iallval(GVid.parse(x)) for x in place_vids))
        place_vids += list(str(x) for x in all_set)
        return place_vids

    return terms

def _as_document(self, identifier):
    """Converts the given identifier to a document indexed by the FTS backend.

    Args:
        identifier (dict): identifier to convert. The dict contains at least
            'identifier', 'type' and 'name' keys.

    Returns:
        dict whose structure matches BaseIdentifierIndex._schema.
    """
    return {
        'identifier': u('{}').format(identifier['identifier']),
        'type': u('{}').format(identifier['type']),
        'name': u('{}').format(identifier['name'])
    }

def parse(self, s, term_join=None):
    """Parses a search phrase into a dict of terms grouped by marker.

    Args:
        s (str): string with the search phrase.
        term_join (callable): function to join 'OR' terms.

    Returns:
        dict: all of the terms grouped by marker. Key is a marker, value is a term.

    Example:
        >>> SearchTermParser().parse('table2 from 1978 to 1979 in california')
        {'to': 1979, 'about': 'table2', 'from': 1978, 'in': 'california'}
    """
    if not term_join:
        term_join = lambda x: '(' + ' OR '.join(x) + ')'

    toks = self.scan(s)

    # Example: start with this query:
    #   diabetes from 2014 to 2016 source healthindicators.gov

    # Assume the first term is ABOUT, if it is not marked with a marker.
    if toks and toks[0] and (toks[0][0] == self.TERM or toks[0][0] == self.QUOTEDTERM):
        toks = [(self.MARKER, 'about')] + toks

    # The example query produces this list of tokens:
    # [(3, 'about'),
    #  (0, 'diabetes'),
    #  (3, 'from'),
    #  (4, 2014),
    #  (3, 'to'),
    #  (4, 2016),
    #  (3, 'source'),
    #  (0, 'healthindicators.gov')]

    # Group the terms by their marker.
    bymarker = []
    for t in toks:
        if t[0] == self.MARKER:
            bymarker.append((t[1], []))
        else:
            bymarker[-1][1].append(t)

    # After grouping tokens by their markers:
    # [('about', [(0, 'diabetes')]),
    #  ('from', [(4, 2014)]),
    #  ('to', [(4, 2016)]),
    #  ('source', [(0, 'healthindicators.gov')])]

    # Convert some of the markers based on their contents. This just changes the marker
    # type for keywords; we'll do more adjustments later.
    comps = []
    for t in bymarker:
        t = list(t)

        if t[0] == 'in' and len(t[1]) == 1 and isinstance(t[1][0][1], string_types) \
                and self.stem(t[1][0][1]) in self.geograins.keys():
            t[0] = 'by'

        # If the from term isn't an integer, then it is really a source.
        if t[0] == 'from' and len(t[1]) == 1 and t[1][0][0] != self.YEAR:
            t[0] = 'source'

        comps.append(t)

    # After conversions:
    # [['about', [(0, 'diabetes')]],
    #  ['from', [(4, 2014)]],
    #  ['to', [(4, 2016)]],
    #  ['source', [(0, 'healthindicators.gov')]]]

    # Join all of the terms into single marker groups
    groups = {marker: [] for marker, _ in comps}

    for marker, terms in comps:
        groups[marker] += [term for marker, term in terms]

    # At this point the groups dict is formed, but it will have a list for each marker
    # that has multiple terms. Only a few of the markers should have more than one term,
    # so move the extras to the about group.
    for marker, group in groups.items():
        if marker == 'about':
            continue
        if len(group) > 1 and marker not in self.multiterms:
            groups[marker], extras = [group[0]], group[1:]
            if 'about' not in groups:
                groups['about'] = extras
            else:
                groups['about'] += extras
        if marker == 'by':
            groups['by'] = [self.geograins.get(self.stem(e)) for e in group]

    for marker, terms in iteritems(groups):
        if len(terms) > 1:
            if marker == 'in':
                groups[marker] = ' '.join(terms)
            else:
                groups[marker] = term_join(terms)
        elif len(terms) == 1:
            groups[marker] = terms[0]

    # After grouping:
    # {'to': 2016,
    #  'about': 'diabetes',
    #  'from': 2014,
    #  'source': 'healthindicators.gov'}

    # If there were any markers with multiple terms, they are cast in the term_join form.
    return groups

def _get_table_names(statement):
    """Returns table names found in the query.

    NOTE: This routine would use the sqlparse parse tree, but vnames don't parse
    very well.

    Args:
        statement (sqlparse.sql.Statement): sql statement parsed by sqlparse.

    Returns:
        list of str
    """
    parts = statement.to_unicode().split()
    tables = set()
    for i, token in enumerate(parts):
        if token.lower() == 'from' or token.lower().endswith('join'):
            tables.add(parts[i + 1].rstrip(';'))
    return list(tables)

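A usage sketch with sqlparse. The standalone re-implementation below uses str(statement); the library's to_unicode() is the py2-era sqlparse equivalent:

import sqlparse

def _get_table_names(statement):
    parts = str(statement).split()
    tables = set()
    for i, token in enumerate(parts):
        if token.lower() == 'from' or token.lower().endswith('join'):
            tables.add(parts[i + 1].rstrip(';'))
    return list(tables)

stmt = sqlparse.parse('SELECT a.x FROM t1 AS a JOIN t2 ON a.id = t2.id;')[0]
print(sorted(_get_table_names(stmt)))  # ['t1', 't2']
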
def install(self, connection, partition, table_name=None, index_columns=None,
            materialize=False, logger=None):
    """Installs a partition's mpr in the database so that sql queries can be executed over it.

    Args:
        connection:
        partition (orm.Partition):
        materialize (boolean): if True, create a generic table; if False, create a MED
            over the mpr.

    Returns:
        str: name of the created table.
    """
    raise NotImplementedError

def install_table(self, connection, table, logger=None):
    """Installs all partitions of the table and creates a view with the union of all partitions.

    Args:
        connection: connection to the database that stores the mpr data.
        table (orm.Table):
    """
    # first install all partitions of the table
    queries = []
    query_tmpl = 'SELECT * FROM {}'

    for partition in table.partitions:
        partition.localize()
        installed_name = self.install(connection, partition)
        queries.append(query_tmpl.format(installed_name))

    # now create a view with the union of all partitions.
    query = 'CREATE VIEW {} AS {} '.format(table.vid, '\nUNION ALL\n'.join(queries))
    logger.debug('Creating view for table.\n table: {}\n query: {}'.format(table.vid, query))
    self._execute(connection, query, fetch=False)

def query(self, connection, query, fetch=True):
    """Creates virtual tables for all partitions found in the query and executes the query.

    Args:
        query (str): sql query
        fetch (bool): fetch the result from the database if True; do not fetch otherwise.
    """
    self.install_module(connection)

    statements = sqlparse.parse(sqlparse.format(query, strip_comments=True))

    # install all partitions and replace table names in the query.
    # logger.debug('Finding and installing all partitions from query. \n query: {}'.format(query))

    new_query = []

    if len(statements) > 1:
        raise BadSQLError('Can only query a single statement')

    if len(statements) == 0:
        raise BadSQLError("Didn't get any statements in '{}'".format(query))

    statement = statements[0]

    logger.debug(
        'Searching statement for partition ref.\n statement: {}'.format(statement.to_unicode()))

    # statement = self.install_statement(connection, statement.to_unicode())

    logger.debug(
        'Executing updated query after partition install.'
        '\n query before update: {}\n query to execute (updated query): {}'
        .format(statement, new_query))

    return self._execute(connection, statement.to_unicode(), fetch=fetch)

def _init_index(root_dir, schema, index_name):
    """Creates a new index or opens an existing one.

    Args:
        root_dir (str): root dir where to find or create the index.
        schema (whoosh.fields.Schema): schema of the index to create or open.
        index_name (str): name of the index.

    Returns:
        tuple of (whoosh.index.FileIndex, str): the first element is the index, the
            second is the index directory.
    """
    index_dir = os.path.join(root_dir, index_name)

    try:
        if not os.path.exists(index_dir):
            os.makedirs(index_dir)
            return create_in(index_dir, schema), index_dir
        return open_dir(index_dir), index_dir
    except Exception as e:
        logger.error("Init error: failed to open search index at: '{}': {}".format(index_dir, e))
        raise

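A minimal usage sketch with whoosh; the schema fields and the index directory below are hypothetical, not the library's actual schema:

import os
from whoosh.fields import Schema, ID, TEXT
from whoosh.index import create_in, open_dir

def _init_index(root_dir, schema, index_name):
    index_dir = os.path.join(root_dir, index_name)
    if not os.path.exists(index_dir):
        os.makedirs(index_dir)
        return create_in(index_dir, schema), index_dir
    return open_dir(index_dir), index_dir

schema = Schema(vid=ID(stored=True, unique=True), doc=TEXT)
idx, idx_dir = _init_index('/tmp/ambry-search', schema, 'datasets')

writer = idx.writer()
writer.add_document(vid=u'd000', doc=u'diabetes health indicators')
writer.commit()
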
def search(self, search_phrase, limit=None):
    """Finds datasets by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to return. None means no limit.

    Returns:
        list of DatasetSearchResult instances.
    """
    query_string = self._make_query_from_terms(search_phrase)
    self._parsed_query = query_string

    schema = self._get_generic_schema()
    parser = QueryParser('doc', schema=schema)
    query = parser.parse(query_string)

    datasets = defaultdict(DatasetSearchResult)

    # collect all datasets
    logger.debug('Searching datasets using `{}` query.'.format(query))
    with self.index.searcher() as searcher:
        results = searcher.search(query, limit=limit)
        for hit in results:
            vid = hit['vid']
            datasets[vid].vid = hit['vid']
            datasets[vid].b_score += hit.score

    # extend datasets with partitions
    logger.debug('Extending datasets with partitions.')
    for partition in self.backend.partition_index.search(search_phrase):
        datasets[partition.dataset_vid].p_score += partition.score
        datasets[partition.dataset_vid].partitions.add(partition)

    return list(datasets.values())

def _make_query_from_terms(self, terms):
    """Creates a dataset query from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        str: the FTS query string.
    """
    expanded_terms = self._expand_terms(terms)
    cterms = ''

    if expanded_terms['doc']:
        cterms = self.backend._and_join(expanded_terms['doc'])

    if expanded_terms['keywords']:
        keywords_q = self.backend._join_keywords(expanded_terms['keywords'])
        if cterms:
            # was `_and_join(cterms, keywords_q)`; _and_join takes a single list argument.
            cterms = self.backend._and_join([cterms, keywords_q])
        else:
            cterms = keywords_q

    logger.debug('Dataset terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))
    return cterms

def _delete(self, vid=None):
    """Deletes the given dataset from the index.

    Args:
        vid (str): dataset vid.
    """
    assert vid is not None, 'vid argument can not be None.'
    writer = self.index.writer()
    writer.delete_by_term('vid', vid)
    writer.commit()

def _delete(self, identifier=None):
    """Deletes the given identifier from the index.

    Args:
        identifier (str): identifier of the document to delete.
    """
    assert identifier is not None, 'identifier argument can not be None.'
    writer = self.index.writer()
    writer.delete_by_term('identifier', identifier)
    writer.commit()

def search(self, search_phrase, limit=None):
    """Finds partitions by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to generate. None means no limit.

    Yields:
        PartitionSearchResult instances.
    """
    query_string = self._make_query_from_terms(search_phrase)
    self._parsed_query = query_string

    schema = self._get_generic_schema()
    parser = QueryParser('doc', schema=schema)
    query = parser.parse(query_string)

    logger.debug('Searching partitions using `{}` query.'.format(query))
    with self.index.searcher() as searcher:
        results = searcher.search(query, limit=limit)
        for hit in results:
            yield PartitionSearchResult(
                vid=hit['vid'], dataset_vid=hit['dataset_vid'], score=hit.score)

def _make_query_from_terms(self, terms):
    """Returns an FTS query for partitions, created from decomposed search terms.

    Args:
        terms (dict or str):

    Returns:
        str containing the FTS query.
    """
    expanded_terms = self._expand_terms(terms)
    cterms = ''

    if expanded_terms['doc']:
        cterms = self.backend._or_join(expanded_terms['doc'])

    # `keywords` aliases the list in expanded_terms, so appending the from/to range
    # term mutates both.
    keywords = expanded_terms['keywords']
    frm_to = self._from_to_as_term(expanded_terms['from'], expanded_terms['to'])

    if frm_to:
        keywords.append(frm_to)

    if keywords:
        if cterms:
            cterms = self.backend._and_join(
                [cterms, self.backend._field_term('keywords', keywords)])
        else:
            cterms = self.backend._field_term('keywords', keywords)

    logger.debug('Partition terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))
    return cterms

def _from_to_as_term(self, frm, to):
    """Turns from and to years into the query format.

    Args:
        frm (str): from year
        to (str): to year

    Returns:
        FTS query str with the years range.
    """
    # The wackiness with the conversion to int and str, and adding ' ', is because there
    # can't be a space between the 'TO' and the brackets in the time range when one end
    # is open.
    from_year = ''
    to_year = ''

    def year_or_empty(prefix, year, suffix):
        try:
            return prefix + str(int(year)) + suffix
        except (ValueError, TypeError):
            return ''

    if frm:
        from_year = year_or_empty('', frm, ' ')
    if to:
        to_year = year_or_empty(' ', to, '')

    if bool(from_year) or bool(to_year):
        return '[{}TO{}]'.format(from_year, to_year)
    return None

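The resulting range terms look like this; a quick check with a standalone re-implementation of the method above, for illustration:

def _from_to_as_term(frm, to):
    def year_or_empty(prefix, year, suffix):
        try:
            return prefix + str(int(year)) + suffix
        except (ValueError, TypeError):
            return ''
    from_year = year_or_empty('', frm, ' ') if frm else ''
    to_year = year_or_empty(' ', to, '') if to else ''
    return '[{}TO{}]'.format(from_year, to_year) if (from_year or to_year) else None

assert _from_to_as_term('1978', '1979') == '[1978 TO 1979]'  # closed range
assert _from_to_as_term('1978', None) == '[1978 TO]'         # open upper end
assert _from_to_as_term(None, '1979') == '[TO 1979]'         # open lower end
assert _from_to_as_term(None, None) is None                  # no range term
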
def _to_ascii(s):
    """Converts the given string to ascii, ignoring non-ascii characters.

    Args:
        s (text or binary):

    Returns:
        str:
    """
    # TODO: Always use unicode within ambry.
    from six import text_type, binary_type

    if isinstance(s, text_type):
        ascii_ = s.encode('ascii', 'ignore')
    elif isinstance(s, binary_type):
        ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
    else:
        raise Exception('Unknown text type - {}'.format(type(s)))
    return ascii_

def install(self, connection, partition, table_name=None, columns=None, materialize=False,
            logger=None):
    """Creates an FDW or materialized view for the given partition.

    Args:
        connection: connection to postgresql
        partition (orm.Partition):
        materialize (boolean): if True, create a read-only table; if False, create a
            virtual (foreign) table.

    Returns:
        str: name of the created table.
    """
    partition.localize()
    self._add_partition(connection, partition)
    fdw_table = partition.vid
    view_table = '{}_v'.format(fdw_table)

    if materialize:
        with connection.cursor() as cursor:
            view_exists = self._relation_exists(connection, view_table)
            if view_exists:
                logger.debug(
                    'Materialized view of the partition already exists.\n partition: {}, view: {}'
                    .format(partition.name, view_table))
            else:
                query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'\
                    .format(view_table, fdw_table)
                logger.debug(
                    'Creating new materialized view of the partition.'
                    '\n partition: {}, view: {}, query: {}'
                    .format(partition.name, view_table, query))
                cursor.execute(query)
                cursor.execute('COMMIT;')

    final_table = view_table if materialize else fdw_table
    with connection.cursor() as cursor:
        # NOTE: postgres has no CREATE VIEW IF NOT EXISTS (which the original used);
        # CREATE OR REPLACE VIEW is the closest valid form.
        view_q = 'CREATE OR REPLACE VIEW {} AS SELECT * FROM {}'.format(partition.vid, final_table)
        cursor.execute(view_q)
        cursor.execute('COMMIT;')

    return partition.vid

def index(self, connection, partition, columns):
    """Creates an index on the columns.

    Args:
        connection:
        partition (orm.Partition):
        columns (list of str):
    """
    query_tmpl = 'CREATE INDEX ON {table_name} ({column});'
    table_name = '{}_v'.format(partition.vid)

    for column in columns:
        query = query_tmpl.format(table_name=table_name, column=column)
        logger.debug('Creating postgres index.\n column: {}, query: {}'.format(column, query))
        with connection.cursor() as cursor:
            cursor.execute(query)
            cursor.execute('COMMIT;')

def _get_mpr_table(self, connection, partition):
    """Returns the name of the postgres table that stores the mpr data.

    Args:
        connection: connection to the postgres db that stores the mpr data.
        partition (orm.Partition):

    Returns:
        str:

    Raises:
        MissingTableError: if the partition table is not found in the db.
    """
    # TODO: This is the first candidate for optimization. Add a field to the partition
    # with the table name and update it during table creation.
    #
    # Optimized version:
    #     return partition.mpr_table or raise exception
    #
    # Non-optimized version: first check whether the partition has a materialized view.
    logger.debug(
        'Looking for materialized view of the partition.\n partition: {}'.format(partition.name))
    foreign_table = partition.vid
    view_table = '{}_v'.format(foreign_table)
    if self._relation_exists(connection, view_table):
        logger.debug(
            'Materialized view of the partition found.\n partition: {}, view: {}'
            .format(partition.name, view_table))
        return view_table

    # now check for the fdw/virtual table
    logger.debug(
        'Looking for foreign table of the partition.\n partition: {}'.format(partition.name))
    if self._relation_exists(connection, foreign_table):
        logger.debug(
            'Foreign table of the partition found.\n partition: {}, foreign table: {}'
            .format(partition.name, foreign_table))
        return foreign_table

    raise MissingTableError(
        'postgres database does not have table for {} partition.'.format(partition.vid))

def _add_partition(self, connection, partition):
    """Creates an FDW for the partition.

    Args:
        connection:
        partition (orm.Partition):
    """
    logger.debug('Creating foreign table for partition.\n partition: {}'.format(partition.name))
    with connection.cursor() as cursor:
        postgres_med.add_partition(cursor, partition.datafile, partition.vid)

def _execute(self, connection, query, fetch=True):
    """Executes the given query and returns the result.

    Args:
        connection: connection to the postgres database that stores the mpr data.
        query (str): sql query
        fetch (boolean, optional): if True, fetch the query result and return it;
            if False, just commit.

    Returns:
        iterable with the query result, or None if fetch is False.
    """
    with connection.cursor() as cursor:
        cursor.execute(query)
        if fetch:
            return cursor.fetchall()
        cursor.execute('COMMIT;')

Returns True if the relation exists in the postgres db, otherwise returns False.

    Args:
        connection: connection to the postgres database that stores the mpr data.
        relation (str): schema-qualified name of the table, view or materialized view.

    Note:
        'relation' means table, view or materialized view here.

    Returns:
        bool: True if the relation exists, False otherwise.

def _relation_exists(cls, connection, relation):
        schema_name, table_name = relation.split('.')

        # The original query text was lost in extraction; this pg_catalog lookup is a
        # reconstruction matching the documented behaviour, since pg_class covers tables,
        # views, materialized views and foreign tables alike.
        exists_query = '''
            SELECT 1
            FROM pg_catalog.pg_class c
            JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE n.nspname = %s AND c.relname = %s;
        '''
        with connection.cursor() as cursor:
            cursor.execute(exists_query, [schema_name, table_name])
            result = cursor.fetchall()
            return result == [(1,)]
990,334
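For illustration, assuming the method is exposed as a classmethod on a hypothetical PostgresWarehouse class, and using a made-up schema-qualified view name:

    if PostgresWarehouse._relation_exists(conn, 'partitions.p000001001_v'):
        print('materialized view already installed')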
Creates a map between Windows and Olson timezone names.

    Args:
        windows_zones_xml: The CLDR XML mapping.

    Yields:
        (win32_name, territory, olson_name, comment)

def create_win32tz_map(windows_zones_xml):
    coming_comment = None
    win32_name = None
    territory = None
    parser = genshi.input.XMLParser(StringIO(windows_zones_xml))
    map_zones = {}
    zone_comments = {}

    for kind, data, _ in parser:
        if kind == genshi.core.START and str(data[0]) == "mapZone":
            attrs = data[1]
            win32_name, territory, olson_name = (
                attrs.get("other"), attrs.get("territory"),
                attrs.get("type").split(" ")[0])

            map_zones[(win32_name, territory)] = olson_name
        elif kind == genshi.core.END and str(data) == "mapZone" and win32_name:
            if coming_comment:
                zone_comments[(win32_name, territory)] = coming_comment
                coming_comment = None
            win32_name = None
        elif kind == genshi.core.COMMENT:
            coming_comment = data.strip()
        elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
            # Any other markup event invalidates a pending comment.
            coming_comment = None

    for win32_name, territory in sorted(map_zones):
        yield (win32_name, territory, map_zones[(win32_name, territory)],
               zone_comments.get((win32_name, territory), None))
990,553
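A short driver, assuming a local copy of the CLDR windowsZones.xml file:

    with open('windowsZones.xml') as f:
        xml = f.read()

    for win32_name, territory, olson_name, comment in create_win32tz_map(xml):
        print('{} ({}) -> {}'.format(win32_name, territory, olson_name))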
Collects all migrations and applies the missed ones.

    Args:
        connection (sqlalchemy connection):
        dsn (str): database connect string, used here for error reporting.

def migrate(connection, dsn):
    all_migrations = _get_all_migrations()
    logger.debug('Collected migrations: {}'.format(all_migrations))

    for version, modname in all_migrations:
        if _is_missed(connection, version) and version <= SCHEMA_VERSION:
            logger.info('Missed migration: {} migration is missed. Migrating...'.format(version))
            module = __import__(modname, fromlist='dummy')

            # Run each migration in its own transaction. This allows us to apply the valid
            # migrations and stop on the first invalid one.
            trans = connection.begin()
            try:
                module.Migration().migrate(connection)
                _update_version(connection, version)
                trans.commit()
            except:
                trans.rollback()
                logger.error("Failed to migrate '{}' on {} ".format(version, dsn))
                raise
990,690
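A sketch of how this is typically driven, assuming SQLAlchemy; the dsn value is illustrative:

    from sqlalchemy import create_engine

    dsn = 'sqlite:////tmp/library.db'
    engine = create_engine(dsn)
    with engine.connect() as connection:
        migrate(connection, dsn)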
Creates a migration file. Returns the created file name.

    Args:
        name (str): name of the migration.

    Returns:
        str: name of the migration file.

def create_migration_template(name):
    assert name, 'Name of the migration can not be empty.'

    from . import migrations

    #
    # Find next number
    #
    package = migrations
    prefix = package.__name__ + '.'
    all_versions = []
    for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
        version = int(modname.split('.')[-1].split('_')[0])
        all_versions.append(version)
    next_number = max(all_versions) + 1

    #
    # Generate next migration name
    #
    next_migration_name = '{}_{}.py'.format(next_number, name)
    migration_fullname = os.path.join(package.__path__[0], next_migration_name)

    #
    # Write next migration file content.
    #
    with open(migration_fullname, 'w') as f:
        f.write(MIGRATION_TEMPLATE)
    return migration_fullname
990,691
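For example (the printed number depends on the migrations already present in the package):

    path = create_migration_template('add_user_table')
    print(path)  # .../migrations/<next_number>_add_user_table.py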
Returns the database version.

    Args:
        connection (sqlalchemy connection):

    Note:
        Assumes the user_version pragma (sqlite case) or the user_version table
        (postgresql case) exists, because it is created with the database.

    Raises:
        VersionIsNotStored: if the database does not have a stored version.

    Returns:
        int: version of the database.

def get_stored_version(connection):
    if connection.engine.name == 'sqlite':
        version = connection.execute('PRAGMA user_version').fetchone()[0]
        if version == 0:
            raise VersionIsNotStored
        return version
    elif connection.engine.name == 'postgresql':
        try:
            r = connection\
                .execute('SELECT version FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME))\
                .fetchone()
            if not r:
                raise VersionIsNotStored
            version = r[0]
        except ProgrammingError:
            # This happens when the user_version table does not exist.
            raise VersionIsNotStored
        return version
    else:
        raise DatabaseError('Do not know how to get version from {} engine.'.format(connection.engine.name))
990,692
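Callers are expected to treat a missing version as a fresh database; a sketch:

    try:
        version = get_stored_version(connection)
    except VersionIsNotStored:
        version = None  # no migrations recorded yet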
Updates the version in the db to the given version.

    Args:
        connection (sqlalchemy connection): sqlalchemy session in which to update the version.
        version (int): version of the migration.

def _update_version(connection, version):
    if connection.engine.name == 'sqlite':
        connection.execute('PRAGMA user_version = {}'.format(version))
    elif connection.engine.name == 'postgresql':
        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))
        connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))

        connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'
                           .format(POSTGRES_SCHEMA_NAME))

        # upsert
        if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():
            # update
            connection.execute('UPDATE {}.user_version SET version = {};'
                               .format(POSTGRES_SCHEMA_NAME, version))
        else:
            # insert
            connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'
                               .format(POSTGRES_SCHEMA_NAME, version))
    else:
        # Report engine.name, matching the branching above; engine.driver names the DBAPI,
        # not the engine.
        raise DatabaseMissingError('Do not know how to migrate {} engine.'
                                   .format(connection.engine.name))
990,695
Initializes the database.

    Args:
        dsn (str): database connect string, 'sqlite://' for example.
        echo (bool): echo parameter of create_engine.
        foreign_keys (bool):
        engine_kwargs (dict): parameters to pass to SQLAlchemy's create_engine.
        application_prefix (str):

def __init__(self, dsn, echo=False, foreign_keys=True, engine_kwargs=None, application_prefix='ambry'):
        self.dsn = dsn

        d = parse_url_to_dict(self.dsn)
        self.path = d['path'].replace('//', '/')

        self.driver = d['scheme']
        self.engine_kwargs = engine_kwargs or {}

        self.Session = None
        self._session = None
        self._engine = None
        self._connection = None
        self._echo = echo
        self._foreign_keys = foreign_keys

        self._raise_on_commit = False  # For debugging

        if self.driver in ['postgres', 'postgresql', 'postgresql+psycopg2', 'postgis']:
            self.driver = 'postgres'
            self._schema = POSTGRES_SCHEMA_NAME
        else:
            self._schema = None

        self.logger = logger

        self.library = None  # Set externally when checking in.

        self._application_prefix = application_prefix
990,697
Get a partition by the id number.

    Arguments:
        id_ -- a partition id value

    Returns:
        A partitions.Partition object

    Throws:
        a Sqlalchemy exception if the partition either does not exist or
        is not unique

    Because this method works on the bundle, the id_ (without version
    information) is equivalent to the vid (with version information).

def partition(self, id_):
        from ..orm import Partition as OrmPartition
        from sqlalchemy import or_
        from ..identity import PartialPartitionName

        if isinstance(id_, PartitionIdentity):
            id_ = id_.id_
        elif isinstance(id_, PartialPartitionName):
            id_ = id_.promote(self.bundle.identity.name)

        session = self.bundle.dataset._database.session
        q = session\
            .query(OrmPartition)\
            .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
            .filter(or_(OrmPartition.id == str(id_).encode('ascii'),
                        OrmPartition.vid == str(id_).encode('ascii')))

        try:
            orm_partition = q.one()
            return self.bundle.wrap_partition(orm_partition)
        except NoResultFound:
            orm_partition = None

        if not orm_partition:
            q = session\
                .query(OrmPartition)\
                .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\
                .filter(OrmPartition.name == str(id_).encode('ascii'))

            try:
                orm_partition = q.one()
                return self.bundle.wrap_partition(orm_partition)
            except NoResultFound:
                orm_partition = None

        return orm_partition
990,835
Finds datasets by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to return. None means without limit.

    Returns:
        list of DatasetSearchResult instances.

def search(self, search_phrase, limit=None):
        # SQLite FTS can't find terms with `-`, so all hyphens were replaced with underscores
        # before saving. To get the appropriate result we need to replace all hyphens in the
        # search phrase too.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        search_phrase = search_phrase.replace('-', '_')

        query, query_params = self._make_query_from_terms(search_phrase)
        self._parsed_query = (query, query_params)

        connection = self.backend.library.database.connection

        # Operate on the raw connection.
        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))

        logger.debug('Searching datasets using `{}` query.'.format(query))

        # Query on the Sqlite proxy to the raw connection.
        results = connection.execute(query, **query_params).fetchall()

        datasets = defaultdict(DatasetSearchResult)
        for result in results:
            vid, score = result
            datasets[vid] = DatasetSearchResult()
            datasets[vid].vid = vid
            datasets[vid].b_score = score

        logger.debug('Extending datasets with partitions.')
        for partition in self.backend.partition_index.search(search_phrase):
            datasets[partition.dataset_vid].p_score += partition.score
            datasets[partition.dataset_vid].partitions.add(partition)

        return list(datasets.values())
991,006
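A hedged usage sketch; `index` stands for the dataset-index instance that defines search():

    for ds in index.search('population from 2000 to 2010', limit=10):
        print(ds.vid, ds.b_score, ds.p_score, len(ds.partitions))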
Converts a dataset to a document indexed by the FTS index.

    Args:
        dataset (orm.Dataset): dataset to convert.

    Returns:
        dict with structure matching BaseDatasetIndex._schema.

def _as_document(self, dataset):
        assert isinstance(dataset, Dataset)
        doc = super(self.__class__, self)._as_document(dataset)

        # SQLite FTS can't find terms with `-`; replace them with underscores here and
        # while searching.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        doc['keywords'] = doc['keywords'].replace('-', '_')
        doc['doc'] = doc['doc'].replace('-', '_')
        doc['title'] = doc['title'].replace('-', '_')
        return doc
991,007
Deletes the given dataset from the index.

    Args:
        vid (str): dataset vid.

def _delete(self, vid=None):
        # The query text was lost in extraction; this DELETE is the obvious reconstruction
        # given the index table and the bound :vid parameter.
        query = text('''
            DELETE FROM dataset_index
            WHERE vid = :vid;
        ''')
        self.backend.library.database.connection.execute(query, vid=vid)
991,009
Generates vids of all indexed identifiers.

    Args:
        limit (int, optional): if not empty, the maximum number of results to return.

    Generates:
        str: vid of the document.

def list_documents(self, limit=None):
        limit_str = ''
        if limit:
            try:
                limit_str = 'LIMIT {}'.format(int(limit))
            except (TypeError, ValueError):
                pass

        query = ('SELECT identifier FROM identifier_index ' + limit_str)
        for row in self.backend.library.database.connection.execute(query).fetchall():
            yield row['identifier']
991,013
Deletes the given identifier from the index.

    Args:
        identifier (str): identifier of the document to delete.

def _delete(self, identifier=None):
        # The query text was lost in extraction; reconstructed from the index table and the
        # bound :identifier parameter.
        query = text('''
            DELETE FROM identifier_index
            WHERE identifier = :identifier;
        ''')
        self.backend.library.database.connection.execute(query, identifier=identifier)
991,015
Finds partitions by search phrase.

    Args:
        search_phrase (str or unicode):
        limit (int, optional): how many results to generate. None means without limit.

    Generates:
        PartitionSearchResult instances.

def search(self, search_phrase, limit=None):
        # SQLite FTS can't find terms with `-`, so all hyphens were replaced with underscores
        # before saving. To make a proper query we need to replace all hyphens in the search
        # phrase too.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        search_phrase = search_phrase.replace('-', '_')
        terms = SearchTermParser().parse(search_phrase)
        from_year = terms.pop('from', None)
        to_year = terms.pop('to', None)

        query, query_params = self._make_query_from_terms(terms)
        self._parsed_query = (query, query_params)

        connection = self.backend.library.database.connection
        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))

        # The SQLite FTS implementation does not allow indexes on FTS tables.
        # See https://sqlite.org/fts3.html, 1.5. Summary, p 1:
        #     ... it is not possible to create indices ...
        #
        # So, filter the years range here.
        results = connection.execute(query, query_params).fetchall()
        for result in results:
            vid, dataset_vid, score, db_from_year, db_to_year = result
            if from_year and from_year < db_from_year:
                continue
            if to_year and to_year > db_to_year:
                continue
            yield PartitionSearchResult(
                vid=vid, dataset_vid=dataset_vid, score=score)
991,018
Converts a partition to a document indexed by the FTS index.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict with structure matching BasePartitionIndex._schema.

def _as_document(self, partition):
        doc = super(self.__class__, self)._as_document(partition)

        # SQLite FTS can't find terms with `-`; replace them with underscores here and
        # while searching.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        doc['keywords'] = doc['keywords'].replace('-', '_')
        doc['doc'] = doc['doc'].replace('-', '_')
        doc['title'] = doc['title'].replace('-', '_')

        # Pass time_coverage to _index_document.
        doc['time_coverage'] = partition.time_coverage
        return doc
991,019
Creates a query for a partition from decomposed search terms.

    Args:
        terms (dict or unicode or string):

    Returns:
        tuple of (str, dict): First element is the FTS query, second is the
            parameters of the query.

def _make_query_from_terms(self, terms):
        match_query = ''

        expanded_terms = self._expand_terms(terms)

        if expanded_terms['doc']:
            match_query = self.backend._and_join(expanded_terms['doc'])

        if expanded_terms['keywords']:
            if match_query:
                match_query = self.backend._and_join(
                    [match_query, self.backend._join_keywords(expanded_terms['keywords'])])
            else:
                match_query = self.backend._join_keywords(expanded_terms['keywords'])

        # The query texts were lost in extraction; both SELECTs below are reconstructions
        # that return the five columns unpacked by search():
        # (vid, dataset_vid, score, from_year, to_year).
        if match_query:
            query = text('''
                SELECT vid, dataset_vid, rank(matchinfo(partition_index)) AS score,
                       from_year, to_year
                FROM partition_index
                WHERE partition_index MATCH :match_query;
            ''')
            query_params = {
                'match_query': match_query}
        else:
            query = text('''
                SELECT vid, dataset_vid, 1 AS score, from_year, to_year
                FROM partition_index;
            ''')
            query_params = {}
        return query, query_params
991,020
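To see what the method builds before it hits FTS, inspect the returned parameters; the exact string depends on the backend's _and_join and _join_keywords, so the comment is only indicative:

    query, params = index._make_query_from_terms('population census')
    print(params.get('match_query'))  # e.g. 'population AND census'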
Sends an email.

    Args:
        recipients (list of str):
        subject (str):
        message (str):
        attachments (list of str): list of full paths (txt files only) to attach to the email.

def send_email(recipients, subject, message, attachments=None):
    if not attachments:
        attachments = []

    if os.path.exists(EMAIL_SETTINGS_FILE):
        with open(EMAIL_SETTINGS_FILE) as f:
            email_settings = json.load(f)
        sender = email_settings.get('sender', 'ambry@localhost')
        use_tls = email_settings.get('use_tls')
        username = email_settings['username']
        password = email_settings['password']
        server = email_settings['server']
    else:
        # use local smtp
        server = 'localhost'
        username = None
        password = None
        sender = 'ambry@localhost'
        use_tls = False  # was left undefined on this branch, causing a NameError below

    # Create the container (outer) email message.
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ','.join(recipients)
    msg.attach(MIMEText(message, 'plain'))

    # Add attachments.
    for file_name in attachments:
        if os.path.exists(file_name):
            with open(file_name, 'r') as fp:
                attachment = MIMEBase('application', 'text')
                attachment.set_payload(fp.read())
            attachment.add_header(
                'Content-Disposition',
                'attachment; filename="{}"'.format(os.path.basename(file_name)))
            msg.attach(attachment)

    # The actual mail send.
    srv = smtplib.SMTP(server)
    if use_tls:
        srv.starttls()
    if username:
        srv.login(username, password)
    srv.sendmail(sender, ','.join(recipients), msg.as_string())
    srv.quit()
991,082
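For example (addresses and paths are illustrative):

    send_email(
        ['ops@example.com'],
        'Nightly build failed',
        'See the attached log for details.',
        attachments=['/var/log/ambry/build.log'])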
A set of rules that applies to one or more directories within a Layout.

    Args:
        config (dict): The configuration dictionary that defines the
            entities and paths for the current domain. Must contain a
            'name' key giving the name of the Domain.

def __init__(self, config):

        self.name = config['name']
        self.config = config
        self.entities = {}
        self.files = []

        self.include = listify(self.config.get('include', []))
        self.exclude = listify(self.config.get('exclude', []))
        if self.include and self.exclude:
            raise ValueError("The 'include' and 'exclude' arguments cannot "
                             "both be set. Please pass at most one of these "
                             "for domain '%s'." % self.name)

        self.path_patterns = listify(config.get('default_path_patterns', []))
991,420
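A minimal configuration sketch; only 'name' is required by this constructor, and the other keys are illustrative:

    config = {
        'name': 'mri',
        'include': ['func', 'anat'],
        'default_path_patterns': ['{subject}/{session}/{type}.nii.gz'],
    }
    domain = Domain(config)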
Determine whether the passed file matches the Entity.

    Args:
        f (File): The File instance to match against.
        update_file (bool): Currently unused.

    Returns:
        the matched value if a match was found, otherwise None.

def match_file(self, f, update_file=False):
        if self.map_func is not None:
            val = self.map_func(f)
        else:
            m = self.regex.search(f.path)
            val = m.group(1) if m is not None else None

        return self._astype(val)
991,422
Return a domain if one already exists, or create a new one if not.

    Args:
        domain (str, dict): Can be one of:
            - The name of the Domain to return (fails if none exists)
            - A path to the Domain configuration file
            - A dictionary containing configuration information

def _get_or_load_domain(self, domain):
        if isinstance(domain, six.string_types):
            if domain in self.domains:
                return self.domains[domain]
            elif exists(domain):
                with open(domain, 'r') as fobj:
                    domain = json.load(fobj)
            else:
                raise ValueError("No domain could be found/loaded from input "
                                 "'{}'; value must be either the name of an "
                                 "existing Domain, or a valid path to a "
                                 "configuration file.".format(domain))

        # At this point, domain is a dict
        name = domain['name']

        if name in self.domains:
            msg = ("Domain with name '{}' already exists; returning existing "
                   "Domain configuration.".format(name))
            warnings.warn(msg)
            return self.domains[name]

        entities = domain.get('entities', [])
        domain = Domain(domain)
        for e in entities:
            self.add_entity(domain=domain, **e)

        self.domains[name] = domain
        return self.domains[name]
991,424
Save the current Layout's index to a .json file.

    Args:
        filename (str): Filename to write to.

    Note: At the moment, this won't serialize directory-specific config
        files. This means reconstructed indexes will only work properly in
        cases where there aren't multiple layout specs within a project.

def save_index(self, filename):
        data = {}
        for f in self.files.values():
            entities = {v.entity.id: v.value for k, v in f.tags.items()}
            data[f.path] = {'domains': f.domains, 'entities': entities}
        with open(filename, 'w') as outfile:
            json.dump(data, outfile)
991,431
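A sketch of a save/restore round trip, assuming the Layout class also exposes a matching load_index() (hypothetical here):

    layout.save_index('/tmp/layout_index.json')
    # later, on a fresh Layout instance:
    layout.load_index('/tmp/layout_index.json')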
Return the count of unique values or files for the named entity.

    Args:
        entity (str): The name of the entity.
        files (bool): If True, counts the number of filenames that contain
            at least one value of the entity, rather than the number of
            unique values of the entity.

def count(self, entity, files=False):
        return self._find_entity(entity).count(files)
991,435
Return information for all Files tracked in the Layout as a pandas
    DataFrame.

    Args:
        kwargs: Optional keyword arguments passed on to get(). This allows
            one to easily select only a subset of files for export.

    Returns:
        A pandas DataFrame, where each row is a file, and each column is
            a tracked entity. NaNs are injected whenever a file has no
            value for a given attribute.

def as_data_frame(self, **kwargs):
        try:
            import pandas as pd
        except ImportError:
            raise ImportError("What are you doing trying to export a Layout "
                              "as a pandas DataFrame when you don't have "
                              "pandas installed? Eh? Eh?")
        if kwargs:
            files = self.get(return_type='obj', **kwargs)
        else:
            files = self.files.values()
        data = pd.DataFrame.from_records([f.entities for f in files])
        data.insert(0, 'path', [f.path for f in files])
        return data
991,436
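For example (the entity columns depend on what the Layout tracks; 'subject' is illustrative):

    df = layout.as_data_frame(subject='01')  # kwargs are forwarded to get()
    print(df[['path', 'subject']].head())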
Build FM-index

    Params:
        <iterator> | <generator> docs
        <str> filename

def build(self, docs=None, filename=None):
        if docs:
            if hasattr(docs, 'items'):
                for (idx, doc) in sorted(docs.items(), key=lambda x: x[0]):
                    self.fm.push_back(doc)
            else:
                for doc in filter(bool, docs):
                    self.fm.push_back(doc)
        self.fm.build()
        if filename:
            self.fm.write(filename)
991,748
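A hedged sketch; `FMIndex` is a stand-in name for the wrapper class that defines build():

    fm = FMIndex()
    fm.build(docs=['hello world', 'hello fm-index'], filename='/tmp/docs.fm')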
Merge filtered search results.

    Params:
        <Sequential> search_results
        <bool> _or

    Return:
        <list> computed_dids

def _merge_search_result(self, search_results, _or=False):
        all_docids = reduce(add, [list(x.keys()) for x in search_results])
        if _or:
            # Union, preserving the order of first appearance.
            return sorted(set(all_docids), key=all_docids.index)
        # Intersection: a docid must appear in every result set.
        # (The original `> 1` test only behaves as an AND for exactly two result sets.)
        return [docid for docid in set(all_docids)
                if all_docids.count(docid) == len(search_results)]
991,749
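Worked example with two docid -> score maps (`searcher` is a hypothetical instance):

    results = [{1: 0.9, 2: 0.5}, {2: 0.7, 3: 0.4}]
    searcher._merge_search_result(results)            # AND -> [2]
    searcher._merge_search_result(results, _or=True)  # OR  -> [1, 2, 3]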