INSTRUCTION | RESPONSE
---|---
Toggle favorite stars on Slices and Dashboard | def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count})) |
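A minimal sketch of exercising this endpoint from a client, assuming it is exposed at /superset/favstar/<class_name>/<obj_id>/<action>/ (path, host, and authentication handling are assumptions here):

import requests

# Hypothetical host and route; a real call also needs an authenticated session.
base = 'http://localhost:8088/superset/favstar'
resp = requests.get(f'{base}/Dashboard/42/select/')    # star dashboard 42
print(resp.json())   # {'count': 1}
resp = requests.get(f'{base}/Dashboard/42/unselect/')  # remove the star
print(resp.json())   # {'count': 0}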
Server side rendering for a dashboard | def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one_or_none()
if not dash:
abort(404)
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
f'dashboard_id={dash.id}&')
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset')
dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
superset_can_explore = security_manager.can_access('can_explore', 'Superset')
superset_can_csv = security_manager.can_access('can_csv', 'Superset')
slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')
standalone_mode = request.args.get('standalone') == 'true'
edit_mode = request.args.get('edit') == 'true'
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(
dashboard_id=dash.id,
dashboard_version='v2',
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode)
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
'superset_can_explore': superset_can_explore,
'superset_can_csv': superset_can_csv,
'slice_can_edit': slice_can_edit,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': edit_mode,
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
) |
Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionaries). Each metric consists of
2 attributes: type and name. Type can be count,
etc. The `count` type is stored internally as longSum;
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
} | def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionaries). Each metric consists of
2 attributes: type and name. Type can be count,
etc. The `count` type is stored internally as longSum;
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201) |
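A sketch of the JSON payload a client might POST to this endpoint, following the docstring above; the path, host, and cluster name are assumptions:

import requests

payload = {
    'user': 'admin',               # user name to perform the operation as
    'cluster': 'druid_analytics',  # hypothetical druid cluster name
    'config': {
        'name': 'test_click',
        'metrics_spec': [{'type': 'count', 'name': 'count'}],
        'dimensions': ['affiliate_id', 'campaign', 'first_seen'],
    },
}
resp = requests.post('http://localhost:8088/superset/sync_druid/', json=payload)
assert resp.status_code == 201  # the view returns Response(status=201) on success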
Returns whether a key exists in the cache | def cache_key_exist(self, key):
"""Returns whether a key exists in the cache"""
key_exist = bool(cache.get(key))
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status) |
Serves a key off of the results backend | def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
'sqllab.query.results_backend_read',
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(security_manager.get_table_access_error_msg(
'{}'.format(rejected_tables)), status=403)
payload = utils.zlib_decompress_to_string(blob)
display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(
payload_json,
default=utils.json_iso_dttm_ser,
ignore_nan=True,
),
) |
Runs arbitrary sql and returns it as json | def sql_json(self):
"""Runs arbitrary sql and returns it as json"""
async_ = request.form.get('runAsync') == 'true'
sql = request.form.get('sql')
database_id = request.form.get('database_id')
schema = request.form.get('schema') or None
template_params = json.loads(
request.form.get('templateParams') or '{}')
limit = int(request.form.get('queryLimit', 0))
if limit < 0:
logging.warning(
'Invalid limit of {} specified. Defaulting to max limit.'.format(limit))
limit = 0
limit = limit or app.config.get('SQL_MAX_ROW')
session = db.session()
mydb = session.query(models.Database).filter_by(id=database_id).first()
if not mydb:
return json_error_response(
'Database with id {} is missing.'.format(database_id))
rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)
if rejected_tables:
return json_error_response(
security_manager.get_table_access_error_msg(rejected_tables),
link=security_manager.get_table_access_link(rejected_tables),
status=403)
session.commit()
select_as_cta = request.form.get('select_as_cta') == 'true'
tmp_table_name = request.form.get('tmp_table_name')
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = '{}.{}'.format(
mydb.force_ctas_schema,
tmp_table_name,
)
client_id = request.form.get('client_id') or utils.shortid()[:10]
query = Query(
database_id=int(database_id),
sql=sql,
schema=schema,
select_as_cta=select_as_cta,
start_time=now_as_float(),
tab_name=request.form.get('tab'),
status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,
sql_editor_id=request.form.get('sql_editor_id'),
tmp_table_name=tmp_table_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
if not query_id:
raise Exception(_('Query record was not created as expected.'))
logging.info('Triggering query_id: {}'.format(query_id))
try:
template_processor = get_template_processor(
database=query.database, query=query)
rendered_query = template_processor.process_template(
query.sql,
**template_params)
except Exception as e:
return json_error_response(
'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))
# set LIMIT after template processing
limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
query.limit = min(lim for lim in limits if lim is not None)
# Async request.
if async_:
logging.info('Running query on a Celery worker')
# Ignore the celery future object; waiting on it could make the request time out.
try:
sql_lab.get_sql_results.delay(
query_id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username if g.user else None,
start_time=now_as_float())
except Exception as e:
logging.exception(e)
msg = _(
'Failed to start remote query on a worker. '
'Tell your administrator to verify the availability of '
'the message queue.')
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response('{}'.format(msg))
resp = json_success(json.dumps(
{'query': query.to_dict()}, default=utils.json_int_dttm_ser,
ignore_nan=True), status=202)
session.commit()
return resp
# Sync request.
try:
timeout = config.get('SQLLAB_TIMEOUT')
timeout_msg = (
f'The query exceeded the {timeout} seconds timeout.')
with utils.timeout(seconds=timeout,
error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query_id,
rendered_query,
return_results=True,
user_name=g.user.username if g.user else None)
payload = json.dumps(
data,
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except Exception as e:
logging.exception(e)
return json_error_response('{}'.format(e))
if data.get('status') == QueryStatus.FAILED:
return json_error_response(payload=data)
return json_success(payload) |
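A sketch of submitting a query as form data, mirroring the request.form keys read above (path, host, and authentication are assumptions):

import requests

form = {
    'sql': 'SELECT 1',
    'database_id': '1',
    'runAsync': 'false',   # 'true' would enqueue the query on a Celery worker
    'queryLimit': '100',
    'select_as_cta': 'false',
}
resp = requests.post('http://localhost:8088/superset/sql_json/', data=form)
print(resp.json())  # for sync requests: status, data, columns, query, ...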
Download the query results as csv. | def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'
logging.info('Ready to return response')
return response |
Get the updated queries. | def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [
QueryStatus.PENDING,
QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
if queries_to_timeout:
db.session.execute(
update(Query).where(
and_(
Query.user_id == g.user.get_id(),
Query.client_id.in_(queries_to_timeout),
),
).values(state=QueryStatus.TIMED_OUT),
)
db.session.commit()
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser)) |
Search for previously run sqllab queries. Used for Sqllab Query Search
page /superset/sqllab#search.
Custom permission can_only_access_owned_queries restricts the results
to queries run by the current user.
:returns: Response with list of sql query dicts | def search_queries(self) -> Response:
"""
Search for previously run sqllab queries. Used for Sqllab Query Search
page /superset/sqllab#search.
Custom permission can_only_access_owned_queries restricts the results
to queries run by the current user.
:returns: Response with list of sql query dicts
"""
query = db.session.query(Query)
if security_manager.can_only_access_owned_queries():
search_user_id = g.user.get_user_id()
else:
search_user_id = request.args.get('user_id')
database_id = request.args.get('database_id')
search_text = request.args.get('search_text')
status = request.args.get('status')
# From and To timestamps should be epoch timestamps in seconds
from_time = request.args.get('from')
to_time = request.args.get('to')
if search_user_id:
# Filter on user_id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query \
.filter(Query.sql.like('%{}%'.format(search_text)))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)
sql_queries = (
query.order_by(Query.start_time.asc())
.limit(query_limit)
.all()
)
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype='application/json') |
Personalized welcome page | def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session
.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
) |
User profile page | def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=_("%(user)s's profile", user=username),
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
) |
SQL Editor | def sqllab(self):
"""SQL Editor"""
d = {
'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='sqllab',
bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),
) |
This method exposes an API endpoint to
get the database query string for this slice | def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_datasource_permission(viz_obj.datasource)
return self.get_query_string_response(viz_obj) |
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database | def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get('db_id'):
return json_error_response(
'No database is allowed for your csv upload')
db_id = int(request.args.get('db_id'))
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False)
return self.json_response(schemas_allowed_processed)
except Exception:
return json_error_response((
'Failed to fetch schemas allowed for csv upload in this database! '
'Please contact Superset Admin!\n\n'
'The error message returned was:\n{}').format(traceback.format_exc())) |
Time a block of operations and report the elapsed time to the stats logger. | def stats_timing(stats_key, stats_logger):
"""Time a block of operations and report the elapsed time to the stats logger."""
start_ts = now_as_float()
try:
yield start_ts
except Exception as e:
raise e
finally:
stats_logger.timing(stats_key, now_as_float() - start_ts) |
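Since the function yields, it is presumably wrapped with contextlib.contextmanager where it is defined; a usage sketch, assuming a stats_logger that exposes timing(key, seconds):

with stats_timing('sqllab.query.results_backend_read', stats_logger):
    blob = results_backend.get(key)  # elapsed time is reported in the finally block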
A decorator for caching views and handling etag conditional requests.
The decorator adds headers to GET requests that help with caching:
Last-Modified, Expires and ETag. It also handles conditional requests, when the
client sends conditional request headers such as If-None-Match.
If a cache is set, the decorator will cache GET responses, bypassing the
dataframe serialization. POST requests will still benefit from the
dataframe cache for requests that produce the same SQL. | def etag_cache(max_age, check_perms=bool):
"""
A decorator for caching views and handling etag conditional requests.
The decorator adds headers to GET requests that help with caching:
Last-Modified, Expires and ETag. It also handles conditional requests, when the
client sends conditional request headers such as If-None-Match.
If a cache is set, the decorator will cache GET responses, bypassing the
dataframe serialization. POST requests will still benefit from the
dataframe cache for requests that produce the same SQL.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# check if the user can access the resource
check_perms(*args, **kwargs)
# for POST requests we can't set cache headers, use the response
# cache nor use conditional requests; this will still use the
# dataframe cache in `superset/viz.py`, though.
if request.method == 'POST':
return f(*args, **kwargs)
response = None
if cache:
try:
# build the cache key from the function arguments and any
# other additional GET arguments (like `form_data`, eg).
key_args = list(args)
key_kwargs = kwargs.copy()
key_kwargs.update(request.args)
cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs)
response = cache.get(cache_key)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
# if no response was cached, compute it using the wrapped function
if response is None:
response = f(*args, **kwargs)
# add headers for caching: Last Modified, Expires and ETag
response.cache_control.public = True
response.last_modified = datetime.utcnow()
expiration = max_age if max_age != 0 else FAR_FUTURE
response.expires = \
response.last_modified + timedelta(seconds=expiration)
response.add_etag()
# if we have a cache, store the response from the request
if cache:
try:
cache.set(cache_key, response, timeout=max_age)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
return response.make_conditional(request)
if cache:
wrapper.uncached = f
wrapper.cache_timeout = max_age
wrapper.make_cache_key = \
cache._memoize_make_cache_key( # pylint: disable=protected-access
make_name=None, timeout=max_age)
return wrapper
return decorator |
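A usage sketch; check_slice_perms is a hypothetical callable matching the check_perms contract (it receives the view's arguments and raises if access is denied):

@etag_cache(max_age=600, check_perms=check_slice_perms)
def slice_json(self, slice_id):
    ...  # GET responses are cached and served with ETag/Last-Modified/Expires headers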
Alters the SQL statement to apply a LIMIT clause | def apply_limit_to_sql(cls, sql, limit, database):
"""Alters the SQL statement to apply a LIMIT clause"""
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip('\t\n ;')
qry = (
select('*')
.select_from(
TextAsFrom(text(sql), ['*']).alias('inner_qry'),
)
.limit(limit)
)
return database.compile_sqla_query(qry)
elif cls.limit_method == LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql |
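An illustrative before/after for the two branches (a sketch; the exact compiled SQL depends on the dialect):

# WRAP_SQL:    SELECT a, b FROM t;
#          ->  SELECT * FROM (SELECT a, b FROM t) AS inner_qry LIMIT 1000
# FORCE_LIMIT: SELECT a, b FROM t
#          ->  SELECT a, b FROM t LIMIT 1000 (an existing LIMIT is rewritten)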
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username | def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username |
Conditionally mutate and/or quote a sql column/expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select query
and query results have same case. Otherwise return the mutated label as a
regular string. If the maximum supported column name length is exceeded,
generate a truncated label by calling truncate_label(). | def make_label_compatible(cls, label):
"""
Conditionally mutate and/or quote a sql column/expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select query
and query results have same case. Otherwise return the mutated label as a
regular string. If the maximum supported column name length is exceeded,
generate a truncated label by calling truncate_label().
"""
label_mutated = cls.mutate_label(label)
if cls.max_column_name_length and len(label_mutated) > cls.max_column_name_length:
label_mutated = cls.truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated |
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash. | def truncate_label(cls, label):
"""
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
"""
label = hashlib.md5(label.encode('utf-8')).hexdigest()
# truncate hash if it exceeds max length
if cls.max_column_name_length and len(label) > cls.max_column_name_length:
label = label[:cls.max_column_name_length]
return label |
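The hash is deterministic, so the same oversized label always truncates to the same name; a quick sketch of the arithmetic:

import hashlib

label = 'a_very_long_expression_label_that_exceeds_the_engine_limit'
hashed = hashlib.md5(label.encode('utf-8')).hexdigest()  # always 32 hex chars
print(hashed[:30])  # truncated further when max_column_name_length < 32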
Need to consider foreign tables for PostgreSQL | def get_table_names(cls, inspector, schema):
"""Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables) |
Postgres is unable to identify mixed case column names unless they
are quoted. | def get_timestamp_column(expression, column_name):
"""Postgres is unable to identify mixed case column names unless they
are quoted."""
if expression:
return expression
elif column_name.lower() != column_name:
return f'"{column_name}"'
return column_name |
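Illustrative behavior (a sketch of the three cases):

get_timestamp_column(None, 'created_at')        # -> 'created_at'
get_timestamp_column(None, 'createdAt')         # -> '"createdAt"' (quoted, mixed case)
get_timestamp_column('CAST(ts AS DATE)', 'ts')  # -> 'CAST(ts AS DATE)'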
Extract error message for queries | def extract_error_message(cls, e):
"""Extract error message for queries"""
message = str(e)
try:
if isinstance(e.args, tuple) and len(e.args) > 1:
message = e.args[1]
except Exception:
pass
return message |
Returns a list of tables [schema1.table1, schema2.table2, ...]
Datasource_type can be 'table' or 'view'.
An empty schema corresponds to the list of full names of all
tables or views: <schema>.<result_set_name>. | def fetch_result_sets(cls, db, datasource_type):
"""Returns a list of tables [schema1.table1, schema2.table2, ...]
Datasource_type can be 'table' or 'view'.
An empty schema corresponds to the list of full names of all
tables or views: <schema>.<result_set_name>.
"""
result_set_df = db.get_df(
"""SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
ORDER BY concat(table_schema, '.', table_name)""".format(
datasource_type.upper(),
),
None)
result_sets = []
for unused, row in result_set_df.iterrows():
result_sets.append('{}.{}'.format(
row['table_schema'], row['table_name']))
return result_sets |
Updates progress information | def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
logging.info('Polling the cursor for progress')
polled = cursor.poll()
# poll returns dict -- JSON status information or ``None``
# if the query is done
# https://github.com/dropbox/PyHive/blob/
# b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
while polled:
# Update the object and wait for the kill signal.
stats = polled.get('stats', {})
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
cursor.cancel()
break
if stats:
state = stats.get('state')
# if already finished, then stop polling
if state == 'FINISHED':
break
completed_splits = float(stats.get('completedSplits'))
total_splits = float(stats.get('totalSplits'))
if total_splits and completed_splits:
progress = 100 * (completed_splits / total_splits)
logging.info(
'Query progress: {} / {} '
'splits'.format(completed_splits, total_splits))
if progress > query.progress:
query.progress = progress
session.commit()
time.sleep(1)
logging.info('Polling the cursor for progress')
polled = cursor.poll() |
Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations | def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
l = [] # noqa: E741
for field, desc in order_by:
l.append(field + (' DESC' if desc else ''))
order_by_clause = 'ORDER BY ' + ', '.join(l)
where_clause = ''
if filters:
l = [] # noqa: E741
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = 'WHERE ' + ' AND '.join(l)
sql = textwrap.dedent(f"""\
SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
""")
return sql |
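For example, a call such as the following would render roughly the SQL shown (table and field names are hypothetical):

# _partition_query('logs', limit=1, order_by=[('ds', True)],
#                  filters={'ds': '2019-01-01'})
# renders approximately:
#   SELECT * FROM "logs$partitions"
#   WHERE ds = '2019-01-01'
#   ORDER BY ds DESC
#   LIMIT 1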
Updates progress information | def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
from pyhive import hive # pylint: disable=no-name-in-module
unfinished_states = (
hive.ttypes.TOperationState.INITIALIZED_STATE,
hive.ttypes.TOperationState.RUNNING_STATE,
)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while polled.operationState in unfinished_states:
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status == QueryStatus.STOPPED:
cursor.cancel()
break
log = cursor.fetch_logs() or ''
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if progress > query.progress:
query.progress = progress
needs_commit = True
if not tracking_url:
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[-2]
logging.info(
'Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info(
'Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if job_id and len(log_lines) > last_log_line:
# Wait for job id before logging things out
# this allows for prefixing all log lines and becoming
# searchable in something like Kibana
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll() |
Uploads a csv file and creates a superset datasource in Hive. | def create_table_from_csv(form, table):
"""Uploads a csv file and creates a superset datasource in Hive."""
def convert_to_hive_type(col_type):
"""maps tableschema's types to hive types"""
tableschema_to_hive_types = {
'boolean': 'BOOLEAN',
'integer': 'INT',
'number': 'DOUBLE',
'string': 'STRING',
}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if not bucket_path:
logging.info('No upload bucket specified')
raise Exception(
'No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if '.' in table_name or schema_name:
raise Exception(
"You can't specify a namespace. "
'All tables will be uploaded to the `{}` namespace'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if '.' in table_name and schema_name:
raise Exception(
"You can't specify a namespace both in the name of the table "
'and in the schema field. Please remove one')
full_table_name = '{}.{}'.format(
schema_name, table_name) if schema_name else table_name
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = config['UPLOAD_FOLDER'] + \
secure_filename(filename)
# Optional dependency
from tableschema import Table # pylint: disable=import-error
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append(
'`{}` {}'.format(
column_info['name'],
convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
# Optional dependency
import boto3 # pylint: disable=import-error
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(
upload_path, bucket_path,
os.path.join(upload_prefix, table_name, filename))
sql = f"""CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')"""
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql) |
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation | def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation
"""
configuration = {}
url = make_url(uri)
backend_name = url.get_backend_name()
# Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
if (backend_name == 'hive' and 'auth' in url.query.keys() and
impersonate_user is True and username is not None):
configuration['hive.server2.proxy.user'] = username
return configuration |
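Illustrative input/output, assuming the classmethod is reached through the Hive engine spec (the class name used here is an assumption):

conf = HiveEngineSpec.get_configuration_for_impersonation(
    'hive://localhost:10000/default?auth=KERBEROS',
    True,      # impersonate_user
    'alice',   # effective username
)
# conf == {'hive.server2.proxy.user': 'alice'}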
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param str label: the original label which might include unsupported characters
:return: String that is supported by the database | def mutate_label(label):
"""
BigQuery field_name should start with a letter or underscore and contain only
alphanumeric characters. Labels that start with a number are prefixed with an
underscore. Any unsupported characters are replaced with underscores and an
md5 hash is added to the end of the label to avoid possible collisions.
:param str label: the original label which might include unsupported characters
:return: String that is supported by the database
"""
label_hashed = '_' + hashlib.md5(label.encode('utf-8')).hexdigest()
# if label starts with number, add underscore as first character
label_mutated = '_' + label if re.match(r'^\d', label) else label
# replace non-alphanumeric characters with underscores
label_mutated = re.sub(r'[^\w]+', '_', label_mutated)
if label_mutated != label:
# add md5 hash to label to avoid possible collisions
label_mutated += label_hashed
return label_mutated |
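Tracing the logic for a label that both starts with a digit and contains a space (a sketch; the md5 suffix is abbreviated):

# mutate_label('9col name')
#   prefixed  -> '_9col name'   (starts with a digit)
#   sanitized -> '_9col_name'   (non-alphanumerics replaced with underscores)
#   differs from the input, so the md5 suffix is appended:
#   result    -> '_9col_name_' + md5('9col name').hexdigest()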
BigQuery dialect requires that we not use backticks in nested field names;
using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
We also explicitly specify column names so we don't encounter duplicate
column names in the result. | def _get_fields(cls, cols):
"""
BigQuery dialect requires that we not use backticks in nested field names;
using literal_column handles that issue.
https://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column
We also explicitly specify column names so we don't encounter duplicate
column names in the result.
"""
return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__'))
for c in cols] |
Loads time series data from a gzipped JSON file in the repo | def load_multiformat_time_series():
"""Loads time series data from a gzipped JSON file in the repo"""
data = get_example_data('multiformat_time_series.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s')
pdf.to_sql(
'multiformat_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': Date,
'ds2': DateTime,
'epoch_s': BigInteger,
'epoch_ms': BigInteger,
'string0': String(100),
'string1': String(100),
'string2': String(100),
'string3': String(100),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [multiformat_time_series] reference')
obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first()
if not obj:
obj = TBL(table_name='multiformat_time_series')
obj.main_dttm_col = 'ds'
obj.database = utils.get_or_create_main_db()
dttm_and_expr_dict = {
'ds': [None, None],
'ds2': [None, None],
'epoch_s': ['epoch_s', None],
'epoch_ms': ['epoch_ms', None],
'string2': ['%Y%m%d-%H%M%S', None],
'string1': ['%Y-%m-%d^%H:%M:%S', None],
'string0': ['%Y-%m-%d %H:%M:%S.%f', None],
'string3': ['%Y/%m/%d%H:%M:%S.%f', None],
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.database_expression = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print('Creating Heatmap charts')
for i, col in enumerate(tbl.columns):
slice_data = {
'metrics': ['count'],
'granularity_sqla': col.column_name,
'row_limit': config.get('ROW_LIMIT'),
'since': '2015',
'until': '2016',
'where': '',
'viz_type': 'cal_heatmap',
'domain_granularity': 'month',
'subdomain_granularity': 'day',
}
slc = Slice(
slice_name=f'Calendar Heatmap multiformat {i}',
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.add('Calendar Heatmap multiformat 0') |
Imports dashboards from a stream to databases | def import_dashboards(session, data_stream, import_time=None):
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(data_stream.read(), object_hook=decode_dashboards)
# TODO: import DRUID datasources
for table in data['datasources']:
type(table).import_obj(table, import_time=import_time)
session.commit()
for dashboard in data['dashboards']:
Dashboard.import_obj(
dashboard, import_time=import_time)
session.commit() |
Returns all dashboards metadata as a json dump | def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info('Starting export')
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:
dashboard_ids.append(dashboard.id)
data = Dashboard.export_dashboards(dashboard_ids)
return data |
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now"). | def cache_key(self, **extra):
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
"""
cache_dict = self.to_dict()
cache_dict.update(extra)
for k in ['from_dttm', 'to_dttm']:
del cache_dict[k]
if self.time_range:
cache_dict['time_range'] = self.time_range
json_data = self.json_dumps(cache_dict, sort_keys=True)
return hashlib.md5(json_data.encode('utf-8')).hexdigest() |
Local method for handling errors while processing the SQL | def handle_query_error(msg, query, session, payload=None):
"""Local method for handling errors while processing the SQL"""
payload = payload or {}
troubleshooting_link = config['TROUBLESHOOTING_LINK']
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
'error': msg,
})
if troubleshooting_link:
payload['link'] = troubleshooting_link
return payload |
Attempts to get the query, retrying if it cannot be retrieved | def get_query(query_id, session, retry_count=5):
"""Attempts to get the query, retrying if it cannot be retrieved"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query |
Provide a transactional scope around a series of operations. | def session_scope(nullpool):
"""Provide a transactional scope around a series of operations."""
if nullpool:
engine = sqlalchemy.create_engine(
app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
session_class = sessionmaker()
session_class.configure(bind=engine)
session = session_class()
else:
session = db.session()
session.commit() # HACK
try:
yield session
session.commit()
except Exception as e:
session.rollback()
logging.exception(e)
raise
finally:
session.close() |
Executes the sql query returns the results. | def get_sql_results(
ctask, query_id, rendered_query, return_results=True, store_results=False,
user_name=None, start_time=None):
"""Executes the sql query returns the results."""
with session_scope(not ctask.request.called_directly) as session:
try:
return execute_sql_statements(
ctask, query_id, rendered_query, return_results, store_results, user_name,
session=session, start_time=start_time)
except Exception as e:
logging.exception(e)
stats_logger.incr('error_sqllab_unhandled')
query = get_query(query_id, session)
return handle_query_error(str(e), query, session) |
Executes a single SQL statement | def execute_sql_statement(sql_statement, query, user_name, session, cursor):
"""Executes a single SQL statement"""
database = query.database
db_engine_spec = database.db_engine_spec
parsed_query = ParsedQuery(sql_statement)
sql = parsed_query.stripped()
SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
if not parsed_query.is_readonly() and not database.allow_dml:
raise SqlLabSecurityException(
_('Only `SELECT` statements are allowed against this database'))
if query.select_as_cta:
if not parsed_query.is_select():
raise SqlLabException(_(
'Only `SELECT` statements can be used with the CREATE TABLE '
'feature.'))
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
sql = parsed_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
if parsed_query.is_select():
if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
query.limit = SQL_MAX_ROWS
if query.limit:
sql = database.apply_limit_to_sql(sql, query.limit)
# Hook to allow environment-specific mutation (usually comments) to the SQL
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
try:
if log_query:
log_query(
query.database.sqlalchemy_uri,
query.executed_sql,
query.schema,
user_name,
__name__,
security_manager,
)
query.executed_sql = sql
with stats_timing('sqllab.query.time_executing_query', stats_logger):
logging.info('Running query: \n{}'.format(sql))
db_engine_spec.execute(cursor, sql, async_=True)
logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
with stats_timing('sqllab.query.time_fetching_results', stats_logger):
logging.debug('Fetching data for query object: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
raise SqlLabTimeoutException(
"SQL Lab timeout. This environment's policy is to kill queries "
'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
raise SqlLabException(db_engine_spec.extract_error_message(e))
logging.debug('Fetching cursor description')
cursor_description = cursor.description
return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec) |
Executes the sql query returns the results. | def execute_sql_statements(
ctask, query_id, rendered_query, return_results=True, store_results=False,
user_name=None, session=None, start_time=None,
):
"""Executes the sql query returns the results."""
if store_results and start_time:
# only asynchronous queries
stats_logger.timing(
'sqllab.query.time_pending', now_as_float() - start_time)
query = get_query(query_id, session)
payload = dict(query_id=query_id)
database = query.database
db_engine_spec = database.db_engine_spec
db_engine_spec.patch()
if store_results and not results_backend:
raise SqlLabException("Results backend isn't configured.")
# Breaking down into multiple statements
parsed_query = ParsedQuery(rendered_query)
statements = parsed_query.get_statements()
logging.info(f'Executing {len(statements)} statement(s)')
logging.info("Set query to 'running'")
query.status = QueryStatus.RUNNING
query.start_running_time = now_as_float()
engine = database.get_sqla_engine(
schema=query.schema,
nullpool=True,
user_name=user_name,
source=sources.get('sql_lab', None),
)
# Sharing a single connection and cursor across the
# execution of all statements (if many)
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
statement_count = len(statements)
for i, statement in enumerate(statements):
# TODO CHECK IF STOPPED
msg = f'Running statement {i+1} out of {statement_count}'
logging.info(msg)
query.set_extra_json_key('progress', msg)
session.commit()
try:
cdf = execute_sql_statement(
statement, query, user_name, session, cursor)
msg = f'Running statement {i+1} out of {statement_count}'
except Exception as e:
msg = str(e)
if statement_count > 1:
msg = f'[Statement {i+1} out of {statement_count}] ' + msg
payload = handle_query_error(msg, query, session, payload)
return payload
# Success, updating the query entry in database
query.rows = cdf.size
query.progress = 100
query.set_extra_json_key('progress', None)
query.status = QueryStatus.SUCCESS
if query.select_as_cta:
query.select_sql = database.select_star(
query.tmp_table_name,
limit=query.limit,
schema=database.force_ctas_schema,
show_cols=False,
latest_partition=False)
query.end_time = now_as_float()
payload.update({
'status': query.status,
'data': cdf.data if cdf.data else [],
'columns': cdf.columns if cdf.columns else [],
'query': query.to_dict(),
})
if store_results:
key = str(uuid.uuid4())
logging.info(f'Storing results in results backend, key: {key}')
with stats_timing('sqllab.query.results_backend_write', stats_logger):
json_payload = json.dumps(
payload, default=json_iso_dttm_ser, ignore_nan=True)
cache_timeout = database.cache_timeout
if cache_timeout is None:
cache_timeout = config.get('CACHE_DEFAULT_TIMEOUT', 0)
results_backend.set(key, zlib_compress(json_payload), cache_timeout)
query.results_key = key
session.commit()
if return_results:
return payload |
Flask's flash if available, logging call if not | def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg) |
Converts a string to an int/float
Returns ``None`` if it can't be converted
>>> string_to_num('5')
5
>>> string_to_num('5.2')
5.2
>>> string_to_num(10)
10
>>> string_to_num(10.1)
10.1
>>> string_to_num('this is not a string') is None
True | def string_to_num(s: str):
"""Converts a string to an int/float
Returns ``None`` if it can't be converted
>>> string_to_num('5')
5
>>> string_to_num('5.2')
5.2
>>> string_to_num(10)
10
>>> string_to_num(10.1)
10.1
>>> string_to_num('this is not a string') is None
True
"""
if isinstance(s, (int, float)):
return s
if s.isdigit():
return int(s)
try:
return float(s)
except ValueError:
return None |
Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3] | def list_minus(l: List, minus: List) -> List:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus] |
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True | def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm |
Function to be passed into json.loads' object_hook parameter.
Recreates the dashboard object from a json representation. | def decode_dashboards(o):
"""
Function to be passed into json.loads' object_hook parameter.
Recreates the dashboard object from a json representation.
"""
import superset.models.core as models
from superset.connectors.sqla.models import (
SqlaTable, SqlMetric, TableColumn,
)
if '__Dashboard__' in o:
d = models.Dashboard()
d.__dict__.update(o['__Dashboard__'])
return d
elif '__Slice__' in o:
d = models.Slice()
d.__dict__.update(o['__Slice__'])
return d
elif '__TableColumn__' in o:
d = TableColumn()
d.__dict__.update(o['__TableColumn__'])
return d
elif '__SqlaTable__' in o:
d = SqlaTable()
d.__dict__.update(o['__SqlaTable__'])
return d
elif '__SqlMetric__' in o:
d = SqlMetric()
d.__dict__.update(o['__SqlMetric__'])
return d
elif '__datetime__' in o:
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
else:
return o |
Returns ``datetime.timedelta`` from natural language time deltas
>>> parse_human_timedelta('1 day') == timedelta(days=1)
True | def parse_human_timedelta(s: str):
"""
Returns ``datetime.timedelta`` from natural language time deltas
>>> parse_human_timedelta('1 day') == timedelta(days=1)
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s or '', dttm)[0]
d = datetime(d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm |
Formats datetime to take less room when it is recent | def datetime_f(dttm):
"""Formats datetime to take less room when it is recent"""
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return '<nobr>{}</nobr>'.format(dttm) |
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}' | def json_iso_dttm_ser(obj, pessimistic: Optional[bool] = False):
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, date, time, pd.Timestamp)):
obj = obj.isoformat()
else:
if pessimistic:
return 'Unserializable [{}]'.format(type(obj))
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj |
json serializer that deals with dates | def json_int_dttm_ser(obj):
"""json serializer that deals with dates"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, pd.Timestamp)):
obj = datetime_to_epoch(obj)
elif isinstance(obj, date):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError(
'Unserializable object {} of type {}'.format(obj, type(obj)))
return obj |
Translate exception into error message
Databases have different ways to handle exceptions. This function attempts
to make sense of the exception object and construct a human readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function. | def error_msg_from_exception(e):
"""Translate exception into error message
Databases have different ways to handle exceptions. This function attempts
to make sense of the exception object and construct a human readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ''
if hasattr(e, 'message'):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
msg = '{}'.format(e.message)
return msg or '{}'.format(e) |
Utility to find a constraint name in alembic migrations | def generic_find_constraint_name(table, columns, referenced, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for fk in t.foreign_key_constraints:
if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
return fk.name |
Utility to find a foreign-key constraint name in alembic migrations | def generic_find_fk_constraint_name(table, columns, referenced, insp):
"""Utility to find a foreign-key constraint name in alembic migrations"""
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
return fk['name'] |
Utility to find foreign-key constraint names in alembic migrations | def generic_find_fk_constraint_names(table, columns, referenced, insp):
"""Utility to find foreign-key constraint names in alembic migrations"""
names = set()
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
names.add(fk['name'])
return names |
Utility to find a unique constraint name in alembic migrations | def generic_find_uq_constraint_name(table, columns, insp):
"""Utility to find a unique constraint name in alembic migrations"""
for uq in insp.get_unique_constraints(table):
if columns == set(uq['column_names']):
return uq['name'] |
Utility to find a constraint name in alembic migrations | def table_has_constraint(table, name, db):
"""Utility to find a constraint name in alembic migrations"""
t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
for c in t.constraints:
if c.name == name:
return True
return False |
Send an email with html content, eg:
send_email_smtp(
'[email protected]', 'foo', '<b>Foo</b> bar',['/dev/null'], dryrun=True) | def send_email_smtp(to, subject, html_content, config,
files=None, data=None, images=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'):
"""
Send an email with html content, eg:
send_email_smtp(
'[email protected]', 'foo', '<b>Foo</b> bar',['/dev/null'], dryrun=True)
"""
smtp_mail_from = config.get('SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ', '.join(to)
msg.preamble = 'This is a multi-part message in MIME format.'
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ', '.join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html')
msg.attach(mime_text)
# Attach files by reading them from disk
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, 'rb') as f:
msg.attach(
MIMEApplication(
f.read(),
Content_Disposition="attachment; filename='%s'" % basename,
Name=basename))
# Attach any files passed directly
for name, body in (data or {}).items():
msg.attach(
MIMEApplication(
body,
Content_Disposition="attachment; filename='%s'" % name,
Name=name,
))
# Attach any inline images, which may be required for display in
# HTML content (inline)
for msgid, body in (images or {}).items():
image = MIMEImage(body)
image.add_header('Content-ID', '<%s>' % msgid)
image.add_header('Content-Disposition', 'inline')
msg.attach(image)
send_MIME_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun) |
Set up Flask-Cache on a Flask app | def setup_cache(app: Flask, cache_config) -> Optional[Cache]:
"""Set up Flask-Cache on a Flask app"""
if cache_config and cache_config.get('CACHE_TYPE') != 'null':
return Cache(app, config=cache_config)
return None |
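A minimal usage sketch; the config dict below is hypothetical and any Flask-Cache backend would do:
cache = setup_cache(app, {'CACHE_TYPE': 'simple', 'CACHE_DEFAULT_TIMEOUT': 300})
# cache is None when cache_config is falsy or CACHE_TYPE is 'null'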
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str) | def zlib_compress(data):
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if PY3K:
if isinstance(data, str):
return zlib.compress(bytes(data, 'utf-8'))
return zlib.compress(data)
return zlib.compress(data) |
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress_to_string(blob)
>>> got_str == json_str
True | def zlib_decompress_to_string(blob):
"""
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress_to_string(blob)
>>> got_str == json_str
True
"""
if PY3K:
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
decompressed = zlib.decompress(bytes(blob, 'utf-8'))
return decompressed.decode('utf-8')
return zlib.decompress(blob) |
Given a user ORM FAB object, returns a label | def user_label(user: User) -> Optional[str]:
"""Given a user ORM FAB object, returns a label"""
if user:
if user.first_name and user.last_name:
return user.first_name + ' ' + user.last_name
else:
return user.username
return None |
Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years | def get_since_until(time_range: Optional[str] = None,
since: Optional[str] = None,
until: Optional[str] = None,
time_shift: Optional[str] = None,
relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
"""Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years
"""
separator = ' : '
relative_end = parse_human_datetime(relative_end if relative_end else 'today')
common_time_frames = {
'Last day': (relative_end - relativedelta(days=1), relative_end), # noqa: T400
'Last week': (relative_end - relativedelta(weeks=1), relative_end), # noqa: T400
'Last month': (relative_end - relativedelta(months=1), relative_end), # noqa: E501, T400
'Last quarter': (relative_end - relativedelta(months=3), relative_end), # noqa: E501, T400
'Last year': (relative_end - relativedelta(years=1), relative_end), # noqa: T400
}
if time_range:
if separator in time_range:
since, until = time_range.split(separator, 1)
if since and since not in common_time_frames:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until)
elif time_range in common_time_frames:
since, until = common_time_frames[time_range]
elif time_range == 'No filter':
since = until = None
else:
rel, num, grain = time_range.split()
if rel == 'Last':
since = relative_end - relativedelta(**{grain: int(num)}) # noqa: T400
until = relative_end
else: # rel == 'Next'
since = relative_end
until = relative_end + relativedelta(**{grain: int(num)}) # noqa: T400
else:
since = since or ''
if since:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until) if until else relative_end
if time_shift:
time_shift = parse_human_timedelta(time_shift)
since = since if since is None else (since - time_shift) # noqa: T400
until = until if until is None else (until - time_shift) # noqa: T400
if since and until and since > until:
raise ValueError(_('From date cannot be larger than to date'))
return since, until |
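A usage sketch; the exact datetimes depend on the current date:
since, until = get_since_until('Last week')
# -> (today - 1 week, today)
since, until = get_since_until('2018-01-01 : 2018-12-31')
# -> (datetime(2018, 1, 1), datetime(2018, 12, 31))
since, until = get_since_until(since='7 days', until='now')
# -> ('7 days ago' parsed relative to now, now); see add_ago_to_since below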
Backwards compatibility hack. Without this, slices with `since: 7 days` would
be treated as 7 days in the future.
:param str since:
:returns: Since with ago added if necessary
:rtype: str | def add_ago_to_since(since: str) -> str:
"""
Backwards compatibility hack. Without this, slices with `since: 7 days` would
be treated as 7 days in the future.
:param str since:
:returns: Since with ago added if necessary
:rtype: str
"""
since_words = since.split(' ')
grains = ['days', 'years', 'hours', 'day', 'year', 'weeks']
if (len(since_words) == 2 and since_words[1] in grains):
since += ' ago'
return since |
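Doctest-style examples of the expected behavior:
>>> add_ago_to_since('7 days')
'7 days ago'
>>> add_ago_to_since('7 days ago')
'7 days ago'
>>> add_ago_to_since('2018-01-01')
'2018-01-01'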
Mutates form data to restructure the adhoc filters in the form of the four base
filters, `where`, `having`, `filters`, and `having_filters` which represent
free form where sql, free form having sql, structured where clauses and structured
having clauses. | def split_adhoc_filters_into_base_filters(fd):
"""
Mutates form data to restructure the adhoc filters in the form of the four base
filters, `where`, `having`, `filters`, and `having_filters` which represent
free form where sql, free form having sql, structured where clauses and structured
having clauses.
"""
adhoc_filters = fd.get('adhoc_filters')
if isinstance(adhoc_filters, list):
simple_where_filters = []
simple_having_filters = []
sql_where_filters = []
sql_having_filters = []
for adhoc_filter in adhoc_filters:
expression_type = adhoc_filter.get('expressionType')
clause = adhoc_filter.get('clause')
if expression_type == 'SIMPLE':
if clause == 'WHERE':
simple_where_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif clause == 'HAVING':
simple_having_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif expression_type == 'SQL':
if clause == 'WHERE':
sql_where_filters.append(adhoc_filter.get('sqlExpression'))
elif clause == 'HAVING':
sql_having_filters.append(adhoc_filter.get('sqlExpression'))
fd['where'] = ' AND '.join(['({})'.format(sql) for sql in sql_where_filters])
fd['having'] = ' AND '.join(['({})'.format(sql) for sql in sql_having_filters])
fd['having_filters'] = simple_having_filters
fd['filters'] = simple_where_filters |
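A sketch of the mutation on a hypothetical form-data dict:
fd = {'adhoc_filters': [
    {'expressionType': 'SIMPLE', 'clause': 'WHERE',
     'subject': 'gender', 'operator': '==', 'comparator': 'girl'},
    {'expressionType': 'SQL', 'clause': 'HAVING',
     'sqlExpression': 'SUM(num) > 100'},
]}
split_adhoc_filters_into_base_filters(fd)
# fd['filters'] == [{'col': 'gender', 'op': '==', 'val': 'girl'}]
# fd['having'] == '(SUM(num) > 100)'
# fd['where'] == '' and fd['having_filters'] == []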
Loads an energy related dataset to use with sankey and graphs | def load_energy():
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = 'energy_usage'
data = get_example_data('energy.json.gz')
pdf = pd.read_json(data)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
print('Creating table [energy_usage] reference')
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = 'Energy consumption'
tbl.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'sum__value' for col in tbl.metrics):
tbl.metrics.append(SqlMetric(
metric_name='sum__value',
expression='SUM(value)',
))
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc = Slice(
slice_name='Energy Sankey',
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Energy Force Layout',
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Heatmap',
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc) |
Loads random time series data from a gzipped JSON file in the repo | def load_random_time_series_data():
"""Loads random time series data from a gzipped JSON file in the repo"""
data = get_example_data('random_time_series.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.to_sql(
'random_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [random_time_series] reference')
obj = db.session.query(TBL).filter_by(table_name='random_time_series').first()
if not obj:
obj = TBL(table_name='random_time_series')
obj.main_dttm_col = 'ds'
obj.database = utils.get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
'granularity_sqla': 'day',
'row_limit': config.get('ROW_LIMIT'),
'since': '1 year ago',
'until': 'now',
'metric': 'count',
'where': '',
'viz_type': 'cal_heatmap',
'domain_granularity': 'month',
'subdomain_granularity': 'day',
}
print('Creating a slice')
slc = Slice(
slice_name='Calendar Heatmap',
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc) |
Starts a Superset web server. | def runserver(debug, console_log, use_reloader, address, port, timeout, workers, socket):
"""Starts a Superset web server."""
debug = debug or config.get('DEBUG') or console_log
if debug:
print(Fore.BLUE + '-=' * 20)
print(
Fore.YELLOW + 'Starting Superset server in ' +
Fore.RED + 'DEBUG' +
Fore.YELLOW + ' mode')
print(Fore.BLUE + '-=' * 20)
print(Style.RESET_ALL)
if console_log:
console_log_run(app, port, use_reloader)
else:
debug_run(app, port, use_reloader)
else:
logging.info(
"The Gunicorn 'superset runserver' command is deprecated. Please "
"use the 'gunicorn' command instead.")
addr_str = f' unix:{socket} ' if socket else f' {address}:{port} '
cmd = (
'gunicorn '
f'-w {workers} '
f'--timeout {timeout} '
f'-b {addr_str} '
'--limit-request-line 0 '
'--limit-request-field_size 0 '
'superset:app'
)
print(Fore.GREEN + 'Starting server with command: ')
print(Fore.YELLOW + cmd)
print(Style.RESET_ALL)
Popen(cmd, shell=True).wait() |
Prints the current version number | def version(verbose):
"""Prints the current version number"""
print(Fore.BLUE + '-=' * 15)
print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
version=config.get('VERSION_STRING')))
print(Fore.BLUE + '-=' * 15)
if verbose:
print('[DB] : ' + '{}'.format(db.engine))
print(Style.RESET_ALL) |
Refresh druid datasources | def refresh_druid(datasource, merge):
"""Refresh druid datasources"""
session = db.session()
from superset.connectors.druid.models import DruidCluster
for cluster in session.query(DruidCluster).all():
try:
cluster.refresh_datasources(datasource_name=datasource,
merge_flag=merge)
except Exception as e:
print(
"Error while processing cluster '{}'\n{}".format(
cluster, str(e)))
logging.exception(e)
cluster.metadata_last_refreshed = datetime.now()
print(
'Refreshed metadata from cluster '
'[' + cluster.cluster_name + ']')
session.commit() |
Import dashboards from JSON | def import_dashboards(path, recursive):
"""Import dashboards from JSON"""
p = Path(path)
files = []
if p.is_file():
files.append(p)
elif p.exists() and not recursive:
files.extend(p.glob('*.json'))
elif p.exists() and recursive:
files.extend(p.rglob('*.json'))
for f in files:
logging.info('Importing dashboard from file %s', f)
try:
with f.open() as data_stream:
dashboard_import_export.import_dashboards(
db.session, data_stream)
except Exception as e:
logging.error('Error when importing dashboard from file %s', f)
logging.error(e) |
Export dashboards to JSON | def export_dashboards(print_stdout, dashboard_file):
"""Export dashboards to JSON"""
data = dashboard_import_export.export_dashboards(db.session)
if print_stdout or not dashboard_file:
print(data)
if dashboard_file:
logging.info('Exporting dashboards to %s', dashboard_file)
with open(dashboard_file, 'w') as data_stream:
data_stream.write(data) |
Import datasources from YAML | def import_datasources(path, sync, recursive):
"""Import datasources from YAML"""
sync_array = sync.split(',')
p = Path(path)
files = []
if p.is_file():
files.append(p)
elif p.exists() and not recursive:
files.extend(p.glob('*.yaml'))
files.extend(p.glob('*.yml'))
elif p.exists() and recursive:
files.extend(p.rglob('*.yaml'))
files.extend(p.rglob('*.yml'))
for f in files:
logging.info('Importing datasources from file %s', f)
try:
with f.open() as data_stream:
dict_import_export.import_from_dict(
db.session,
yaml.safe_load(data_stream),
sync=sync_array)
except Exception as e:
logging.error('Error when importing datasources from file %s', f)
logging.error(e) |
Export datasources to YAML | def export_datasources(print_stdout, datasource_file,
back_references, include_defaults):
"""Export datasources to YAML"""
data = dict_import_export.export_to_dict(
session=db.session,
recursive=True,
back_references=back_references,
include_defaults=include_defaults)
if print_stdout or not datasource_file:
yaml.safe_dump(data, stdout, default_flow_style=False)
if datasource_file:
logging.info('Exporting datasources to %s', datasource_file)
with open(datasource_file, 'w') as data_stream:
yaml.safe_dump(data, data_stream, default_flow_style=False) |
Export datasource YAML schema to stdout | def export_datasource_schema(back_references):
"""Export datasource YAML schema to stdout"""
data = dict_import_export.export_schema_to_dict(
back_references=back_references)
yaml.safe_dump(data, stdout, default_flow_style=False) |
Refresh sqllab datasources cache | def update_datasources_cache():
"""Refresh sqllab datasources cache"""
from superset.models.core import Database
for database in db.session.query(Database).all():
if database.allow_multi_schema_metadata_fetch:
print('Fetching {} datasources ...'.format(database.name))
try:
database.all_table_names_in_database(
force=True, cache=True, cache_timeout=24 * 60 * 60)
database.all_view_names_in_database(
force=True, cache=True, cache_timeout=24 * 60 * 60)
except Exception as e:
print('{}'.format(str(e))) |
Starts a Superset worker for async SQL query execution. | def worker(workers):
"""Starts a Superset worker for async SQL query execution."""
logging.info(
"The 'superset worker' command is deprecated. Please use the 'celery "
"worker' command instead.")
if workers:
celery_app.conf.update(CELERYD_CONCURRENCY=workers)
elif config.get('SUPERSET_CELERY_WORKERS'):
celery_app.conf.update(
CELERYD_CONCURRENCY=config.get('SUPERSET_CELERY_WORKERS'))
worker = celery_app.Worker(optimization='fair')
worker.start() |
Runs a Celery Flower web server
Celery Flower is a UI to monitor the Celery operation on a given
broker | def flower(port, address):
"""Runs a Celery Flower web server
Celery Flower is a UI to monitor the Celery operation on a given
broker"""
BROKER_URL = celery_app.conf.BROKER_URL
cmd = (
'celery flower '
f'--broker={BROKER_URL} '
f'--port={port} '
f'--address={address} '
)
logging.info(
"The 'superset flower' command is deprecated. Please use the 'celery "
"flower' command instead.")
print(Fore.GREEN + 'Starting a Celery Flower instance')
print(Fore.BLUE + '-=' * 40)
print(Fore.YELLOW + cmd)
print(Fore.BLUE + '-=' * 40)
Popen(cmd, shell=True).wait() |
Loads the flights dataset (plus airport locations) from gzipped CSV files in the repo | def load_flights():
"""Loads the flights dataset (plus airport locations) from gzipped CSV files in the repo"""
tbl_name = 'flights'
data = get_example_data('flight_data.csv.gz', make_bytes=True)
pdf = pd.read_csv(data, encoding='latin-1')
# Loading airports info to join and get lat/long
airports_bytes = get_example_data('airports.csv.gz', make_bytes=True)
airports = pd.read_csv(airports_bytes, encoding='latin-1')
airports = airports.set_index('IATA_CODE')
pdf['ds'] = pdf.YEAR.map(str) + '-0' + pdf.MONTH.map(str) + '-0' + pdf.DAY.map(str)
pdf.ds = pd.to_datetime(pdf.ds)
del pdf['YEAR']
del pdf['MONTH']
del pdf['DAY']
pdf = pdf.join(airports, on='ORIGIN_AIRPORT', rsuffix='_ORIG')
pdf = pdf.join(airports, on='DESTINATION_AIRPORT', rsuffix='_DEST')
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
},
index=False)
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = 'Random set of flights in the US'
tbl.database = utils.get_or_create_main_db()
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
print('Done loading table!') |
Loads the birth name dataset from a gzipped JSON file in the repo | def load_birth_names():
"""Loads the birth name dataset from a gzipped JSON file in the repo"""
data = get_example_data('birth_names.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='ms')
pdf.to_sql(
'birth_names',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
'gender': String(16),
'state': String(10),
'name': String(255),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [birth_names] reference')
obj = db.session.query(TBL).filter_by(table_name='birth_names').first()
if not obj:
obj = TBL(table_name='birth_names')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
obj.filter_select_enabled = True
if not any(col.column_name == 'num_california' for col in obj.columns):
obj.columns.append(TableColumn(
column_name='num_california',
expression="CASE WHEN state = 'CA' THEN num ELSE 0 END",
))
if not any(col.metric_name == 'sum__num' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='sum__num',
expression='SUM(num)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
defaults = {
'compare_lag': '10',
'compare_suffix': 'o10Y',
'limit': '25',
'granularity_sqla': 'ds',
'groupby': [],
'metric': 'sum__num',
'metrics': ['sum__num'],
'row_limit': config.get('ROW_LIMIT'),
'since': '100 years ago',
'until': 'now',
'viz_type': 'table',
'where': '',
'markup_type': 'markdown',
}
admin = security_manager.find_user('admin')
print('Creating some slices')
slices = [
Slice(
slice_name='Girls',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
row_limit=50,
timeseries_limit_metric='sum__num')),
Slice(
slice_name='Boys',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['boy'],
}],
row_limit=50)),
Slice(
slice_name='Participants',
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number', granularity_sqla='ds',
compare_lag='5', compare_suffix='over 5Y')),
Slice(
slice_name='Genders',
viz_type='pie',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pie', groupby=['gender'])),
Slice(
slice_name='Genders by State',
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
adhoc_filters=[
{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '2745eae5',
'comparator': ['other'],
'operator': 'not in',
'subject': 'state',
},
],
viz_type='dist_bar',
metrics=[
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_boys',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Boys',
'optionName': 'metric_11',
},
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_girls',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Girls',
'optionName': 'metric_12',
},
],
groupby=['state'])),
Slice(
slice_name='Trends',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line', groupby=['name'],
granularity_sqla='ds', rich_tooltip=True, show_legend=True)),
Slice(
slice_name='Average and Sum Trends',
viz_type='dual_line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='dual_line',
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num',
'type': 'BIGINT(20)',
},
'aggregate': 'AVG',
'label': 'AVG(num)',
'optionName': 'metric_vgops097wej_g8uff99zhk7',
},
metric_2='sum__num',
granularity_sqla='ds')),
Slice(
slice_name='Title',
viz_type='markup',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='markup', markup_type='html',
code="""\
<div style='text-align:center'>
<h1>Birth Names Dashboard</h1>
<p>
The source dataset came from
<a href='https://github.com/hadley/babynames' target='_blank'>[here]</a>
</p>
<img src='/static/assets/images/babytux.jpg'>
</div>
""")),
Slice(
slice_name='Name Cloud',
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='word_cloud', size_from='10',
series='name', size_to='70', rotation='square',
limit='100')),
Slice(
slice_name='Pivot Table',
viz_type='pivot_table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pivot_table', metrics=['sum__num'],
groupby=['name'], columns=['state'])),
Slice(
slice_name='Number of Girls',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number_total', granularity_sqla='ds',
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
subheader='total female participants')),
Slice(
slice_name='Number of California Births',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
viz_type='big_number_total',
granularity_sqla='ds')),
Slice(
slice_name='Top 10 California Names Timeseries',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metrics=[{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
}],
viz_type='line',
granularity_sqla='ds',
groupby=['name'],
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
limit='10')),
Slice(
slice_name='Names Sorted by Num in California',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
row_limit=50,
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
})),
Slice(
slice_name='Num Births Trend',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line')),
Slice(
slice_name='Daily Totals',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
created_by=admin,
params=get_slice_json(
defaults,
groupby=['ds'],
since='40 years ago',
until='now',
viz_type='table')),
]
for slc in slices:
merge_slice(slc)
print('Creating a dashboard')
dash = db.session.query(Dash).filter_by(dashboard_title='Births').first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-0dd270f0": {
"meta": {
"chartId": 51,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-0dd270f0",
"children": []
},
"CHART-a3c21bcc": {
"meta": {
"chartId": 52,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-a3c21bcc",
"children": []
},
"CHART-976960a5": {
"meta": {
"chartId": 53,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-976960a5",
"children": []
},
"CHART-58575537": {
"meta": {
"chartId": 54,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-58575537",
"children": []
},
"CHART-e9cd8f0b": {
"meta": {
"chartId": 55,
"width": 8,
"height": 38
},
"type": "CHART",
"id": "CHART-e9cd8f0b",
"children": []
},
"CHART-e440d205": {
"meta": {
"chartId": 56,
"width": 8,
"height": 50
},
"type": "CHART",
"id": "CHART-e440d205",
"children": []
},
"CHART-59444e0b": {
"meta": {
"chartId": 57,
"width": 3,
"height": 38
},
"type": "CHART",
"id": "CHART-59444e0b",
"children": []
},
"CHART-e2cb4997": {
"meta": {
"chartId": 59,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-e2cb4997",
"children": []
},
"CHART-e8774b49": {
"meta": {
"chartId": 60,
"width": 12,
"height": 50
},
"type": "CHART",
"id": "CHART-e8774b49",
"children": []
},
"CHART-985bfd1e": {
"meta": {
"chartId": 61,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-985bfd1e",
"children": []
},
"CHART-17f13246": {
"meta": {
"chartId": 62,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-17f13246",
"children": []
},
"CHART-729324f6": {
"meta": {
"chartId": 63,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-729324f6",
"children": []
},
"COLUMN-25a865d6": {
"meta": {
"width": 4,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-25a865d6",
"children": [
"ROW-cc97c6ac",
"CHART-e2cb4997"
]
},
"COLUMN-4557b6ba": {
"meta": {
"width": 8,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-4557b6ba",
"children": [
"ROW-d2e78e59",
"CHART-e9cd8f0b"
]
},
"GRID_ID": {
"type": "GRID",
"id": "GRID_ID",
"children": [
"ROW-8515ace3",
"ROW-1890385f",
"ROW-f0b64094",
"ROW-be9526b8"
]
},
"HEADER_ID": {
"meta": {
"text": "Births"
},
"type": "HEADER",
"id": "HEADER_ID"
},
"MARKDOWN-00178c27": {
"meta": {
"width": 5,
"code": "<div style=\\"text-align:center\\">\\n <h1>Birth Names Dashboard</h1>\\n <p>\\n The source dataset came from\\n <a href=\\"https://github.com/hadley/babynames\\" target=\\"_blank\\">[here]</a>\\n </p>\\n <img src=\\"/static/assets/images/babytux.jpg\\">\\n</div>\\n",
"height": 38
},
"type": "MARKDOWN",
"id": "MARKDOWN-00178c27",
"children": []
},
"ROOT_ID": {
"type": "ROOT",
"id": "ROOT_ID",
"children": [
"GRID_ID"
]
},
"ROW-1890385f": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-1890385f",
"children": [
"CHART-e440d205",
"CHART-0dd270f0",
"CHART-a3c21bcc"
]
},
"ROW-8515ace3": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-8515ace3",
"children": [
"COLUMN-25a865d6",
"COLUMN-4557b6ba"
]
},
"ROW-be9526b8": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-be9526b8",
"children": [
"CHART-985bfd1e",
"CHART-17f13246",
"CHART-729324f6"
]
},
"ROW-cc97c6ac": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-cc97c6ac",
"children": [
"CHART-976960a5",
"CHART-58575537"
]
},
"ROW-d2e78e59": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-d2e78e59",
"children": [
"MARKDOWN-00178c27",
"CHART-59444e0b"
]
},
"ROW-f0b64094": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-f0b64094",
"children": [
"CHART-e8774b49"
]
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
# dashboard v2 doesn't allow adding markup slices
dash.slices = [slc for slc in slices if slc.viz_type != 'markup']
update_slice_ids(pos, dash.slices)
dash.dashboard_title = 'Births'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = 'births'
db.session.merge(dash)
db.session.commit() |
Endpoint that refreshes Druid datasource metadata | def refresh_datasources(self, refreshAll=True):
"""Endpoint that refreshes Druid datasource metadata"""
session = db.session()
DruidCluster = ConnectorRegistry.sources['druid'].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refreshAll=refreshAll)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)),
'danger')
logging.exception(e)
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_('Refreshed metadata from cluster [{}]').format(
cluster.cluster_name),
'info')
session.commit()
return redirect('/druiddatasourcemodelview/list/') |
converts a non-negative integer into a (reversed) linked list.
for example: given 112
result 2 -> 1 -> 1 | def convert_to_list(number: int) -> Node:
"""
converts a non-negative integer into a (reversed) linked list.
for example: given 112
result 2 -> 1 -> 1
"""
if number >= 0:
head = Node(0)
current = head
remainder = number % 10
quotient = number // 10
while quotient != 0:
current.next = Node(remainder)
current = current.next
remainder = quotient % 10
quotient //= 10
current.next = Node(remainder)
return head.next
else:
raise ValueError('number must be non-negative')
converts the non-negative number list into a string. | def convert_to_str(l: Node) -> str:
"""
converts the non-negative number list into a string.
"""
result = ""
while l:
result += str(l.val)
l = l.next
return result |
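A doctest-style round trip through the two helpers above, assuming the module's Node class exposes val and next:
>>> convert_to_str(convert_to_list(112))
'211'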
:type root: TreeNode
:rtype: int | def longest_consecutive(root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
# Use a one-element list so the (external) dfs helper can record into it;
# a bare int would not be mutated across the recursive calls.
max_len = [0]
dfs(root, 0, root.val, max_len)
return max_len[0]
:param array: List[int]
:return: Set[ Tuple[int, int, int] ] | def three_sum(array):
"""
:param array: List[int]
:return: Set[ Tuple[int, int, int] ]
"""
res = set()
array.sort()
for i in range(len(array) - 2):
if i > 0 and array[i] == array[i - 1]:
continue
l, r = i + 1, len(array) - 1
while l < r:
s = array[i] + array[l] + array[r]
if s > 0:
r -= 1
elif s < 0:
l += 1
else:
# found three sum
res.add((array[i], array[l], array[r]))
# remove duplicates
while l < r and array[l] == array[l + 1]:
l += 1
while l < r and array[r] == array[r - 1]:
r -= 1
l += 1
r -= 1
return res |
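A quick doctest-style check (a set is returned, so compare rather than print):
>>> three_sum([-1, 0, 1, 2, -1, -4]) == {(-1, -1, 2), (-1, 0, 1)}
True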
Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V) | def top_sort_recursive(graph):
""" Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V)
"""
order, enter, state = [], set(graph), {}
def dfs(node):
state[node] = GRAY
for k in graph.get(node, ()):
sk = state.get(k, None)
if sk == GRAY:
raise ValueError("cycle")
if sk == BLACK:
continue
enter.discard(k)
dfs(k)
order.append(node)
state[node] = BLACK
while enter: dfs(enter.pop())
return order |
Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V) | def top_sort(graph):
""" Time complexity is the same as DFS, which is O(V + E)
Space complexity: O(V)
"""
order, enter, state = [], set(graph), {}
def is_ready(node):
lst = graph.get(node, ())
if len(lst) == 0:
return True
for k in lst:
sk = state.get(k, None)
if sk == GRAY:
raise ValueError("cycle")
if sk != BLACK:
return False
return True
while enter:
node = enter.pop()
stack = []
while True:
state[node] = GRAY
stack.append(node)
for k in graph.get(node, ()):
sk = state.get(k, None)
if sk == GRAY:
raise ValueError("cycle")
if sk == BLACK:
continue
enter.discard(k)
stack.append(k)
while stack and is_ready(stack[-1]):
node = stack.pop()
order.append(node)
state[node] = BLACK
if len(stack) == 0:
break
node = stack.pop()
return order |
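A usage sketch for both variants; GRAY and BLACK are assumed to be module-level marker constants, and edges point from a node to its prerequisites:
graph = {'cook': ['shop'], 'shop': ['plan'], 'plan': []}
top_sort(graph)            # ['plan', 'shop', 'cook']
top_sort_recursive(graph)  # same order; both raise ValueError on a cycle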
:type nums: List[int]
:rtype: int | def max_product(nums):
"""
:type nums: List[int]
:rtype: int
"""
lmin = lmax = gmax = nums[0]
for i in range(1, len(nums)):
t1 = nums[i] * lmax
t2 = nums[i] * lmin
lmax = max(max(t1, t2), nums[i])
lmin = min(min(t1, t2), nums[i])
gmax = max(gmax, lmax)
return gmax
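Doctest-style checks of the expected behavior:
>>> max_product([2, 3, -2, 4])
6
>>> max_product([-2, 0, -1])
0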
arr is list of positive/negative numbers | def subarray_with_max_product(arr):
''' arr is list of positive/negative numbers '''
l = len(arr)
product_so_far = max_product_end = 1
max_start_i = 0
so_far_start_i = so_far_end_i = 0
all_negative_flag = True
for i in range(l):
max_product_end *= arr[i]
if arr[i] > 0:
all_negative_flag = False
if max_product_end <= 0:
max_product_end = arr[i]
max_start_i = i
if product_so_far <= max_product_end:
product_so_far = max_product_end
so_far_end_i = i
so_far_start_i = max_start_i
if all_negative_flag:
print("max_product_so_far: %s, %s" %
(reduce(lambda x, y: x * y, arr), arr))
else:
print("max_product_so_far: %s, %s" %
(product_so_far, arr[so_far_start_i:so_far_end_i + 1])) |
:type words: list
:type max_width: int
:rtype: list | def text_justification(words, max_width):
'''
:type words: list
:type max_width: int
:rtype: list
'''
ret = [] # return value
row_len = 0 # current length of strs in a row
row_words = [] # current words in a row
index = 0 # the index of current word in words
is_first_word = True # is current word the first in a row
while index < len(words):
while row_len <= max_width and index < len(words):
if len(words[index]) > max_width:
raise ValueError('there exists a word whose length is larger than max_width')
tmp = row_len
row_words.append(words[index])
tmp += len(words[index])
if not is_first_word:
tmp += 1 # except for the first word, each word should have at least a ' ' before it.
if tmp > max_width:
row_words.pop()
break
row_len = tmp
index += 1
is_first_word = False
# We now have a full row of words; pad with spaces so its length equals max_width.
row = ""
# if the row is the last
if index == len(words):
for word in row_words:
row += (word + ' ')
row = row[:-1]
row += ' ' * (max_width - len(row))
# not the last row and more than one word
elif len(row_words) != 1:
space_num = max_width - row_len
space_num_of_each_interval = space_num // (len(row_words) - 1)
space_num_rest = space_num - space_num_of_each_interval * (len(row_words) - 1)
for j in range(len(row_words)):
row += row_words[j]
if j != len(row_words) - 1:
row += ' ' * (1 + space_num_of_each_interval)
if space_num_rest > 0:
row += ' '
space_num_rest -= 1
# row with only one word
else:
row += row_words[0]
row += ' ' * (max_width - len(row))
ret.append(row)
# after a row , reset those value
row_len = 0
row_words = []
is_first_word = True
return ret |
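A quick doctest-style check (the classic example for a width of 16):
>>> text_justification(['This', 'is', 'an', 'example', 'of', 'text', 'justification.'], 16)
['This    is    an', 'example  of text', 'justification.  ']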
Insertion Sort
Complexity: O(n^2) | def insertion_sort(arr, simulation=False):
""" Insertion Sort
Complexity: O(n^2)
"""
iteration = 0
if simulation:
print("iteration",iteration,":",*arr)
for i in range(len(arr)):
cursor = arr[i]
pos = i
while pos > 0 and arr[pos - 1] > cursor:
# Swap the number down the list
arr[pos] = arr[pos - 1]
pos = pos - 1
# Break and do the final swap
arr[pos] = cursor
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr |
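A quick doctest-style check:
>>> insertion_sort([5, 2, 4, 6, 1, 3])
[1, 2, 3, 4, 5, 6]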
cycle_sort
Based on the idea that the permutation to be sorted
can be decomposed into cycles,
and each cycle can be rotated individually to place its items.
reference: https://en.wikipedia.org/wiki/Cycle_sort
Average time complexity : O(N^2)
Worst case time complexity : O(N^2) | def cycle_sort(arr):
"""
cycle_sort
Based on the idea that the permutation to be sorted
can be decomposed into cycles,
and each cycle can be rotated individually to place its items.
reference: https://en.wikipedia.org/wiki/Cycle_sort
Average time complexity : O(N^2)
Worst case time complexity : O(N^2)
"""
len_arr = len(arr)
# Finding cycle to rotate.
for cur in range(len_arr - 1):
item = arr[cur]
# Find the index where the item belongs.
index = cur
for i in range(cur + 1, len_arr):
if arr[i] < item:
index += 1
# If the item is already in place, there is no cycle to rotate.
if index == cur:
continue
# Skip duplicates so the item lands immediately after them.
while item == arr[index]:
index += 1
arr[index], item = item, arr[index]
# Rotating the remaining cycle.
while index != cur:
# Finding where to put the item.
index = cur
for i in range(cur + 1, len_arr):
if arr[i] < item:
index += 1
# Skip duplicates again before placing the item.
while item == arr[index]:
index += 1
arr[index], item = item, arr[index]
return arr |
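A quick doctest-style check:
>>> cycle_sort([5, 3, 1, 4, 2])
[1, 2, 3, 4, 5]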
Cocktail_shaker_sort
Sorting a given array
mutation of bubble sort
reference: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
Worst-case performance: O(N^2) | def cocktail_shaker_sort(arr):
"""
Cocktail_shaker_sort
Sorting a given array
mutation of bubble sort
reference: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
Worst-case performance: O(N^2)
"""
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
while swapped:
swapped = False
for i in range(1, n):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
if not swapped:
return arr
swapped = False
for i in range(n-1,0,-1):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
return arr |
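A quick doctest-style check:
>>> cocktail_shaker_sort([2, 3, 5, 1, 4])
[1, 2, 3, 4, 5]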
:type people: List[List[int]]
:rtype: List[List[int]] | def reconstruct_queue(people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
queue = []
people.sort(key=lambda x: (-x[0], x[1]))
for h, k in people:
queue.insert(k, [h, k])
return queue |
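A quick doctest-style check (the classic queue-reconstruction example):
>>> reconstruct_queue([[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]])
[[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]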
:type root: TreeNode
:rtype: int | def min_depth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
# If a child is missing, the depth must be taken through the other child.
if root.left is None or root.right is None:
return max(self.min_depth(root.left), self.min_depth(root.right)) + 1
return min(self.min_depth(root.left), self.min_depth(root.right)) + 1
:type s: str
:type t: str
:rtype: bool | def is_one_edit(s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) > len(t):
return is_one_edit(t, s)
if len(t) - len(s) > 1 or t == s:
return False
for i in range(len(s)):
if s[i] != t[i]:
return s[i+1:] == t[i+1:] or s[i:] == t[i+1:]
return True |
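Doctest-style checks of the expected behavior:
>>> is_one_edit('cat', 'cats')
True
>>> is_one_edit('cat', 'cut')
True
>>> is_one_edit('cat', 'cat')
False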
Shell Sort
Complexity: O(n^2) | def shell_sort(arr):
''' Shell Sort
Complexity: O(n^2)
'''
n = len(arr)
# Initialize size of the gap
gap = n//2
while gap > 0:
y_index = gap
while y_index < len(arr):
y = arr[y_index]
x_index = y_index - gap
while x_index >= 0 and y < arr[x_index]:
arr[x_index + gap] = arr[x_index]
x_index = x_index - gap
arr[x_index + gap] = y
y_index = y_index + 1
gap = gap//2
return arr |
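A quick doctest-style check:
>>> shell_sort([12, 34, 54, 2, 3])
[2, 3, 12, 34, 54]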
Return the common prefix of two strings | def common_prefix(s1, s2):
"Return the common prefix of two strings"
k = 0
while k < len(s1) and k < len(s2) and s1[k] == s2[k]:
k = k + 1
return s1[0:k]
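Doctest-style checks of the expected behavior:
>>> common_prefix('flower', 'flow')
'flow'
>>> common_prefix('dog', 'racecar')
''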
Euler's totient function or Phi function.
Time Complexity: O(sqrt(n)). | def euler_totient(n):
"""Euler's totient function or Phi function.
Time Complexity: O(sqrt(n))."""
result = n
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
while n % i == 0:
n //= i
result -= result // i
if n > 1:
result -= result // n;
return result; |
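Doctest-style checks (phi(10) counts 1, 3, 7, 9; phi of a prime p is p - 1):
>>> euler_totient(10)
4
>>> euler_totient(13)
12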
This function builds up a dictionary where the keys are the values of the list,
and the values are the positions at which these values occur in the list.
We then iterate over the dict and if there is more than one key with an odd
number of occurrences, bail out and return False.
Otherwise, we want to ensure that the positions of occurrence sum to the
value of the length of the list - 1, working from the outside of the list inward.
For example:
Input: 1 -> 1 -> 2 -> 3 -> 2 -> 1 -> 1
d = {1: [0,1,5,6], 2: [2,4], 3: [3]}
'3' is the middle outlier, 2+4=6, 0+6=6 and 5+1=6 so we have a palindrome. | def is_palindrome_dict(head):
"""
This function builds up a dictionary where the keys are the values of the list,
and the values are the positions at which these values occur in the list.
We then iterate over the dict and if there is more than one key with an odd
number of occurrences, bail out and return False.
Otherwise, we want to ensure that the positions of occurrence sum to the
value of the length of the list - 1, working from the outside of the list inward.
For example:
Input: 1 -> 1 -> 2 -> 3 -> 2 -> 1 -> 1
d = {1: [0,1,5,6], 2: [2,4], 3: [3]}
'3' is the middle outlier, 2+4=6, 0+6=6 and 5+1=6 so we have a palindrome.
"""
if not head or not head.next:
return True
d = {}
pos = 0
while head:
if head.val in d.keys():
d[head.val].append(pos)
else:
d[head.val] = [pos]
head = head.next
pos += 1
checksum = pos - 1
middle = 0
for v in d.values():
if len(v) % 2 != 0:
middle += 1
else:
step = 0
for i in range(0, len(v)):
if v[i] + v[len(v) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True |
Computes the n-th Fibonacci number in O(n) time
using bottom-up dynamic programming.
Arguments:
n {int} -- index of the Fibonacci number to compute
Returns:
int -- the n-th Fibonacci number | def fib_list(n):
"""[summary]
This algorithm computes the n-th fibbonacci number
very quick. approximate O(n)
The algorithm use dynamic programming.
Arguments:
n {[int]} -- [description]
Returns:
[int] -- [description]
"""
# precondition
assert n >= 0, 'n must be non-negative'
list_results = [0, 1]
for i in range(2, n+1):
list_results.append(list_results[i-1] + list_results[i-2])
return list_results[n] |
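A quick doctest-style check:
>>> fib_list(10)
55
>>> fib_list(0)
0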