Columns: text_prompt (string, lengths 100 to 17.7k) and code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Sends a shutdown signal to the unity environment, and closes the socket connection. <END_TASK> <USER_TASK:> Description: def close(self): """ Sends a shutdown signal to the unity environment, and closes the socket connection. """
if self._socket is not None and self._conn is not None:
    message_input = UnityMessage()
    message_input.header.status = 400
    self._communicator_send(message_input.SerializeToString())
if self._socket is not None:
    self._socket.close()
    self._socket = None
if self._conn is not None:
    self._conn.close()
    self._conn = None
<SYSTEM_TASK:> Increment the step count of the trainer and update the last reward <END_TASK> <USER_TASK:> Description: def increment_step_and_update_last_reward(self): """ Increment the step count of the trainer and update the last reward """
if len(self.stats['Environment/Cumulative Reward']) > 0:
    mean_reward = np.mean(self.stats['Environment/Cumulative Reward'])
    self.policy.update_reward(mean_reward)
self.policy.increment_step()
self.step = self.policy.get_current_step()
<SYSTEM_TASK:> Uses demonstration_buffer to update the policy. <END_TASK> <USER_TASK:> Description: def update_policy(self): """ Uses demonstration_buffer to update the policy. """
self.trainer_metrics.start_policy_update_timer( number_experiences=len(self.training_buffer.update_buffer['actions']), mean_return=float(np.mean(self.cumulative_returns_since_policy_update))) n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1) value_total, policy_total, forward_total, inverse_total = [], [], [], [] advantages = self.training_buffer.update_buffer['advantages'].get_batch() self.training_buffer.update_buffer['advantages'].set( (advantages - advantages.mean()) / (advantages.std() + 1e-10)) num_epoch = self.trainer_parameters['num_epoch'] for _ in range(num_epoch): self.training_buffer.update_buffer.shuffle() buffer = self.training_buffer.update_buffer for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences): start = l * n_sequences end = (l + 1) * n_sequences run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences) value_total.append(run_out['value_loss']) policy_total.append(np.abs(run_out['policy_loss'])) if self.use_curiosity: inverse_total.append(run_out['inverse_loss']) forward_total.append(run_out['forward_loss']) self.stats['Losses/Value Loss'].append(np.mean(value_total)) self.stats['Losses/Policy Loss'].append(np.mean(policy_total)) if self.use_curiosity: self.stats['Losses/Forward Loss'].append(np.mean(forward_total)) self.stats['Losses/Inverse Loss'].append(np.mean(inverse_total)) self.training_buffer.reset_update_buffer() self.trainer_metrics.end_policy_update()
<SYSTEM_TASK:> Attempts to bind to the requested communicator port, checking if it is already in use. <END_TASK> <USER_TASK:> Description: def check_port(self, port): """ Attempts to bind to the requested communicator port, checking if it is already in use. """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind(("localhost", port))
except socket.error:
    raise UnityWorkerInUseException(self.worker_id)
finally:
    s.close()
<SYSTEM_TASK:> Sends a shutdown signal to the unity environment, and closes the grpc connection. <END_TASK> <USER_TASK:> Description: def close(self): """ Sends a shutdown signal to the unity environment, and closes the grpc connection. """
if self.is_open:
    message_input = UnityMessage()
    message_input.header.status = 400
    self.unity_to_external.parent_conn.send(message_input)
    self.unity_to_external.parent_conn.close()
    self.server.stop(False)
    self.is_open = False
<SYSTEM_TASK:> Converts list of agent infos to BrainInfo. <END_TASK> <USER_TASK:> Description: def from_agent_proto(agent_info_list, brain_params): """ Converts list of agent infos to BrainInfo. """
vis_obs = [] for i in range(brain_params.number_visual_observations): obs = [BrainInfo.process_pixels(x.visual_observations[i], brain_params.camera_resolutions[i]['blackAndWhite']) for x in agent_info_list] vis_obs += [obs] if len(agent_info_list) == 0: memory_size = 0 else: memory_size = max([len(x.memories) for x in agent_info_list]) if memory_size == 0: memory = np.zeros((0, 0)) else: [x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list] memory = np.array([list(x.memories) for x in agent_info_list]) total_num_actions = sum(brain_params.vector_action_space_size) mask_actions = np.ones((len(agent_info_list), total_num_actions)) for agent_index, agent_info in enumerate(agent_info_list): if agent_info.action_mask is not None: if len(agent_info.action_mask) == total_num_actions: mask_actions[agent_index, :] = [ 0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)] if any([np.isnan(x.reward) for x in agent_info_list]): logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name) if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]): logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name) if len(agent_info_list) == 0: vector_obs = np.zeros( (0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations) ) else: vector_obs = np.nan_to_num( np.array([x.stacked_vector_observation for x in agent_info_list]) ) brain_info = BrainInfo( visual_observation=vis_obs, vector_observation=vector_obs, text_observations=[x.text_observation for x in agent_info_list], memory=memory, reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list], agents=[x.id for x in agent_info_list], local_done=[x.done for x in agent_info_list], vector_action=np.array([x.stored_vector_actions for x in agent_info_list]), text_action=[list(x.stored_text_actions) for x in agent_info_list], max_reached=[x.max_step_reached for x in agent_info_list], custom_observations=[x.custom_observation for x in agent_info_list], action_mask=mask_actions ) return brain_info
<SYSTEM_TASK:> Creates a new, blank dashboard and redirects to it in edit mode <END_TASK> <USER_TASK:> Description: def new(self): """Creates a new, blank dashboard and redirects to it in edit mode"""
new_dashboard = models.Dashboard(
    dashboard_title='[ untitled dashboard ]',
    owners=[g.user],
)
db.session.add(new_dashboard)
db.session.commit()
return redirect(f'/superset/dashboard/{new_dashboard.id}/?edit=true')
<SYSTEM_TASK:> List all tags a given object has. <END_TASK> <USER_TASK:> Description: def get(self, object_type, object_id): """List all tags a given object has."""
if object_id == 0:
    return json_success(json.dumps([]))
query = db.session.query(TaggedObject).filter(and_(
    TaggedObject.object_type == object_type,
    TaggedObject.object_id == object_id))
tags = [{'id': obj.tag.id, 'name': obj.tag.name} for obj in query]
return json_success(json.dumps(tags))
<SYSTEM_TASK:> This is the data object serialized to the js layer <END_TASK> <USER_TASK:> Description: def data(self): """This is the data object serialized to the js layer"""
content = {
    'form_data': self.form_data,
    'token': self.token,
    'viz_name': self.viz_type,
    'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
<SYSTEM_TASK:> Returns the query object for this visualization <END_TASK> <USER_TASK:> Description: def query_obj(self): """Returns the query object for this visualization"""
d = super().query_obj()
d['row_limit'] = self.form_data.get(
    'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_columns = self.form_data.get('all_columns_x')
if numeric_columns is None:
    raise Exception(_('Must have at least one numeric column specified'))
self.columns = numeric_columns
d['columns'] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d['groupby'] = []
return d
<SYSTEM_TASK:> Compute the partition at each `level` from the dataframe. <END_TASK> <USER_TASK:> Description: def levels_for(self, time_op, groups, df): """ Compute the partition at each `level` from the dataframe. """
levels = {}
for i in range(0, len(groups) + 1):
    agg_df = df.groupby(groups[:i]) if i else df
    levels[i] = (
        agg_df.mean() if time_op == 'agg_mean'
        else agg_df.sum(numeric_only=True))
return levels
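A minimal sketch of what levels_for builds, using an invented toy dataframe and the sum path only (column names and values are illustrative, not from the source): level 0 is the grand total, and each further level aggregates by one more grouping column.

import pandas as pd

# Hypothetical input: two grouping columns and one metric column.
df = pd.DataFrame({
    'region': ['EU', 'EU', 'US'],
    'city': ['Paris', 'Berlin', 'NYC'],
    'sales': [10, 20, 30],
})
groups = ['region', 'city']

levels = {}
for i in range(0, len(groups) + 1):
    agg_df = df.groupby(groups[:i]) if i else df
    levels[i] = agg_df.sum(numeric_only=True)

print(levels[0]['sales'])                  # 60  -> grand total
print(levels[1]['sales']['EU'])            # 30  -> per region
print(levels[2]['sales']['EU', 'Paris'])   # 10  -> per (region, city)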
<SYSTEM_TASK:> Nest values at each level on the back-end with <END_TASK> <USER_TASK:> Description: def nest_values(self, levels, level=0, metric=None, dims=()): """ Nest values at each level on the back-end with access and setting, instead of summing from the bottom. """
if not level: return [{ 'name': m, 'val': levels[0][m], 'children': self.nest_values(levels, 1, m), } for m in levels[0].index] if level == 1: return [{ 'name': i, 'val': levels[1][metric][i], 'children': self.nest_values(levels, 2, metric, (i,)), } for i in levels[1][metric].index] if level >= len(levels): return [] return [{ 'name': i, 'val': levels[level][metric][dims][i], 'children': self.nest_values( levels, level + 1, metric, dims + (i,), ), } for i in levels[level][metric][dims].index]
<SYSTEM_TASK:> Update ORM one-to-many list from object list <END_TASK> <USER_TASK:> Description: def get_fk_many_from_list( self, object_list, fkmany, fkmany_class, key_attr): """Update ORM one-to-many list from object list Used for syncing metrics and columns using the same code"""
object_dict = {o.get(key_attr): o for o in object_list} object_keys = [o.get(key_attr) for o in object_list] # delete fks that have been removed fkmany = [o for o in fkmany if getattr(o, key_attr) in object_keys] # sync existing fks for fk in fkmany: obj = object_dict.get(getattr(fk, key_attr)) for attr in fkmany_class.update_from_object_fields: setattr(fk, attr, obj.get(attr)) # create new fks new_fks = [] orm_keys = [getattr(o, key_attr) for o in fkmany] for obj in object_list: key = obj.get(key_attr) if key not in orm_keys: del obj['id'] orm_kwargs = {} for k in obj: if ( k in fkmany_class.update_from_object_fields and k in obj ): orm_kwargs[k] = obj[k] new_obj = fkmany_class(**orm_kwargs) new_fks.append(new_obj) fkmany += new_fks return fkmany
<SYSTEM_TASK:> Update datasource from a data structure <END_TASK> <USER_TASK:> Description: def update_from_object(self, obj): """Update datasource from a data structure The UI's table editor crafts a complex data structure that contains most of the datasource's properties as well as an array of metrics and columns objects. This method receives the object from the UI and syncs the datasource to match it. Since the fields are different for the different connectors, the implementation uses ``update_from_object_fields`` which can be defined for each connector and defines which fields should be synced"""
for attr in self.update_from_object_fields:
    setattr(self, attr, obj.get(attr))

self.owners = obj.get('owners', [])

# Syncing metrics
metrics = self.get_fk_many_from_list(
    obj.get('metrics'), self.metrics, self.metric_class, 'metric_name')
self.metrics = metrics

# Syncing columns
self.columns = self.get_fk_many_from_list(
    obj.get('columns'), self.columns, self.column_class, 'column_name')
<SYSTEM_TASK:> Converting metrics to numeric when pandas.read_sql cannot <END_TASK> <USER_TASK:> Description: def df_metrics_to_num(self, df, query_object): """Converting metrics to numeric when pandas.read_sql cannot"""
metrics = [metric for metric in query_object.metrics]
for col, dtype in df.dtypes.items():
    if dtype.type == np.object_ and col in metrics:
        df[col] = pd.to_numeric(df[col], errors='coerce')
<SYSTEM_TASK:> Returns a payload of metadata and data <END_TASK> <USER_TASK:> Description: def get_single_payload(self, query_obj): """Returns a payload of metadata and data"""
payload = self.get_df_payload(query_obj)
df = payload.get('df')
status = payload.get('status')
if status != utils.QueryStatus.FAILED:
    if df is not None and df.empty:
        payload['error'] = 'No data'
    else:
        payload['data'] = self.get_data(df)
if 'df' in payload:
    del payload['df']
return payload
<SYSTEM_TASK:> Data used to render slice in templates <END_TASK> <USER_TASK:> Description: def data(self): """Data used to render slice in templates"""
d = {} self.token = '' try: d = self.viz.data self.token = d.get('token') except Exception as e: logging.exception(e) d['error'] = str(e) return { 'datasource': self.datasource_name, 'description': self.description, 'description_markeddown': self.description_markeddown, 'edit_url': self.edit_url, 'form_data': self.form_data, 'slice_id': self.id, 'slice_name': self.slice_name, 'slice_url': self.slice_url, 'modified': self.modified(), 'changed_on_humanized': self.changed_on_humanized, 'changed_on': self.changed_on.isoformat(), }
<SYSTEM_TASK:> Inserts or overrides slc in the database. <END_TASK> <USER_TASK:> Description: def import_obj(cls, slc_to_import, slc_to_override, import_time=None): """Inserts or overrides slc in the database. remote_id and import_time fields in params_dict are set to track the slice origin and ensure correct overrides for multiple imports. Slice.perm is used to find the datasources and connect them. :param Slice slc_to_import: Slice object to import :param Slice slc_to_override: Slice to replace, id matches remote_id :returns: The resulting id for the imported slice :rtype: int """
session = db.session make_transient(slc_to_import) slc_to_import.dashboards = [] slc_to_import.alter_params( remote_id=slc_to_import.id, import_time=import_time) slc_to_import = slc_to_import.copy() params = slc_to_import.params_dict slc_to_import.datasource_id = ConnectorRegistry.get_datasource_by_name( session, slc_to_import.datasource_type, params['datasource_name'], params['schema'], params['database_name']).id if slc_to_override: slc_to_override.override(slc_to_import) session.flush() return slc_to_override.id session.add(slc_to_import) logging.info('Final slice: {}'.format(slc_to_import.to_json())) session.flush() return slc_to_import.id
<SYSTEM_TASK:> Allows looking up a grain by either label or duration <END_TASK> <USER_TASK:> Description: def grains_dict(self): """Allows looking up a grain by either label or duration For backward compatibility"""
d = {grain.duration: grain for grain in self.grains()}
d.update({grain.label: grain for grain in self.grains()})
return d
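A small illustration of the double-keyed lookup. The Grain namedtuple below is a hypothetical stand-in for the engine-spec grain objects; only the label and duration attributes accessed above are assumed.

from collections import namedtuple

# Hypothetical stand-in for the engine-spec grain objects.
Grain = namedtuple('Grain', ['label', 'duration'])

grains = [Grain('Day', 'P1D'), Grain('Week', 'P1W')]

# Same construction as grains_dict(): keyed by duration, then by label.
d = {g.duration: g for g in grains}
d.update({g.label: g for g in grains})

assert d['P1D'] is d['Day']   # both keys resolve to the same grain object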
<SYSTEM_TASK:> Decorator to log user actions <END_TASK> <USER_TASK:> Description: def log_this(cls, f): """Decorator to log user actions"""
@functools.wraps(f) def wrapper(*args, **kwargs): user_id = None if g.user: user_id = g.user.get_id() d = request.form.to_dict() or {} # request parameters can overwrite post body request_params = request.args.to_dict() d.update(request_params) d.update(kwargs) slice_id = d.get('slice_id') dashboard_id = d.get('dashboard_id') try: slice_id = int( slice_id or json.loads(d.get('form_data')).get('slice_id')) except (ValueError, TypeError): slice_id = 0 stats_logger.incr(f.__name__) start_dttm = datetime.now() value = f(*args, **kwargs) duration_ms = (datetime.now() - start_dttm).total_seconds() * 1000 # bulk insert try: explode_by = d.get('explode') records = json.loads(d.get(explode_by)) except Exception: records = [d] referrer = request.referrer[:1000] if request.referrer else None logs = [] for record in records: try: json_string = json.dumps(record) except Exception: json_string = None log = cls( action=f.__name__, json=json_string, dashboard_id=dashboard_id, slice_id=slice_id, duration_ms=duration_ms, referrer=referrer, user_id=user_id) logs.append(log) sesh = db.session() sesh.bulk_save_objects(logs) sesh.commit() return value return wrapper
<SYSTEM_TASK:> A decorator to label an endpoint as an API. Catches uncaught exceptions and <END_TASK> <USER_TASK:> Description: def api(f): """ A decorator to label an endpoint as an API. Catches uncaught exceptions and return the response in the JSON format """
def wraps(self, *args, **kwargs):
    try:
        return f(self, *args, **kwargs)
    except Exception as e:
        logging.exception(e)
        return json_error_response(get_error_msg())

return functools.update_wrapper(wraps, f)
<SYSTEM_TASK:> A decorator to catch superset exceptions. Use it after the @api decorator above <END_TASK> <USER_TASK:> Description: def handle_api_exception(f): """ A decorator to catch superset exceptions. Use it after the @api decorator above so superset exception handler is triggered before the handler for generic exceptions. """
def wraps(self, *args, **kwargs): try: return f(self, *args, **kwargs) except SupersetSecurityException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), status=e.status, stacktrace=traceback.format_exc(), link=e.link) except SupersetException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc(), status=e.status) except Exception as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc()) return functools.update_wrapper(wraps, f)
<SYSTEM_TASK:> Meant to be used in `pre_update` hooks on models to enforce ownership <END_TASK> <USER_TASK:> Description: def check_ownership(obj, raise_if_false=True): """Meant to be used in `pre_update` hooks on models to enforce ownership Admin have all access, and other users need to be referenced on either the created_by field that comes with the ``AuditMixin``, or in a field named ``owners`` which is expected to be a one-to-many with the User model. It is meant to be used in the ModelView's pre_update hook in which raising will abort the update. """
if not obj: return False security_exception = SupersetSecurityException( "You don't have the rights to alter [{}]".format(obj)) if g.user.is_anonymous: if raise_if_false: raise security_exception return False roles = [r.name for r in get_user_roles()] if 'Admin' in roles: return True session = db.create_scoped_session() orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first() # Making a list of owners that works across ORM models owners = [] if hasattr(orig_obj, 'owners'): owners += orig_obj.owners if hasattr(orig_obj, 'owner'): owners += [orig_obj.owner] if hasattr(orig_obj, 'created_by'): owners += [orig_obj.created_by] owner_names = [o.username for o in owners if o] if ( g.user and hasattr(g.user, 'username') and g.user.username in owner_names): return True if raise_if_false: raise security_exception else: return False
<SYSTEM_TASK:> Customize how fields are bound by stripping all whitespace. <END_TASK> <USER_TASK:> Description: def bind_field( self, form: DynamicForm, unbound_field: UnboundField, options: Dict[Any, Any], ) -> Field: """ Customize how fields are bound by stripping all whitespace. :param form: The form :param unbound_field: The unbound field :param options: The field options :returns: The bound field """
filters = unbound_field.kwargs.get('filters', [])
filters.append(lambda x: x.strip() if isinstance(x, str) else x)
return unbound_field.bind(form=form, filters=filters, **options)
<SYSTEM_TASK:> Common data always sent to the client <END_TASK> <USER_TASK:> Description: def common_bootsrap_payload(self): """Common data always sent to the client"""
messages = get_flashed_messages(with_categories=True)
locale = str(get_locale())
return {
    'flash_messages': messages,
    'conf': {k: conf.get(k) for k in FRONTEND_CONF_KEYS},
    'locale': locale,
    'language_pack': get_language_pack(locale),
    'feature_flags': get_feature_flags(),
}
<SYSTEM_TASK:> Delete function logic, override to implement different logic <END_TASK> <USER_TASK:> Description: def _delete(self, pk): """ Delete function logic, override to implement different logic deletes the record with primary_key = pk :param pk: record primary key to delete """
item = self.datamodel.get(pk, self._base_filters) if not item: abort(404) try: self.pre_delete(item) except Exception as e: flash(str(e), 'danger') else: view_menu = security_manager.find_view_menu(item.get_perm()) pvs = security_manager.get_session.query( security_manager.permissionview_model).filter_by( view_menu=view_menu).all() schema_view_menu = None if hasattr(item, 'schema_perm'): schema_view_menu = security_manager.find_view_menu(item.schema_perm) pvs.extend(security_manager.get_session.query( security_manager.permissionview_model).filter_by( view_menu=schema_view_menu).all()) if self.datamodel.delete(item): self.post_delete(item) for pv in pvs: security_manager.get_session.delete(pv) if view_menu: security_manager.get_session.delete(view_menu) if schema_view_menu: security_manager.get_session.delete(schema_view_menu) security_manager.get_session.commit() flash(*self.datamodel.message) self.update_redirect()
<SYSTEM_TASK:> Returns a set of tuples with the perm name and view menu name <END_TASK> <USER_TASK:> Description: def get_all_permissions(self): """Returns a set of tuples with the perm name and view menu name"""
perms = set()
for role in self.get_user_roles():
    for perm_view in role.permissions:
        t = (perm_view.permission.name, perm_view.view_menu.name)
        perms.add(t)
return perms
<SYSTEM_TASK:> Returns the details of view_menus for a perm name <END_TASK> <USER_TASK:> Description: def get_view_menus(self, permission_name): """Returns the details of view_menus for a perm name"""
vm = set()
for perm_name, vm_name in self.get_all_permissions():
    if perm_name == permission_name:
        vm.add(vm_name)
return vm
<SYSTEM_TASK:> Given a schedule, deliver the dashboard as an email report <END_TASK> <USER_TASK:> Description: def deliver_dashboard(schedule): """ Given a schedule, deliver the dashboard as an email report """
dashboard = schedule.dashboard dashboard_url = _get_url_path( 'Superset.dashboard', dashboard_id=dashboard.id, ) # Create a driver, fetch the page, wait for the page to render driver = create_webdriver() window = config.get('WEBDRIVER_WINDOW')['dashboard'] driver.set_window_size(*window) driver.get(dashboard_url) time.sleep(PAGE_RENDER_WAIT) # Set up a function to retry once for the element. # This is buggy in certain selenium versions with firefox driver get_element = getattr(driver, 'find_element_by_class_name') element = retry_call( get_element, fargs=['grid-container'], tries=2, delay=PAGE_RENDER_WAIT, ) try: screenshot = element.screenshot_as_png except WebDriverException: # Some webdrivers do not support screenshots for elements. # In such cases, take a screenshot of the entire page. screenshot = driver.screenshot() # pylint: disable=no-member finally: destroy_webdriver(driver) # Generate the email body and attachments email = _generate_mail_content( schedule, screenshot, dashboard.dashboard_title, dashboard_url, ) subject = __( '%(prefix)s %(title)s', prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'), title=dashboard.dashboard_title, ) _deliver_email(schedule, subject, email)
<SYSTEM_TASK:> Given a schedule, deliver the slice as an email report <END_TASK> <USER_TASK:> Description: def deliver_slice(schedule): """ Given a schedule, deliver the slice as an email report """
if schedule.email_format == SliceEmailReportFormat.data:
    email = _get_slice_data(schedule)
elif schedule.email_format == SliceEmailReportFormat.visualization:
    email = _get_slice_visualization(schedule)
else:
    raise RuntimeError('Unknown email report format')

subject = __(
    '%(prefix)s %(title)s',
    prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
    title=schedule.slice.slice_name,
)
_deliver_email(schedule, subject, email)
<SYSTEM_TASK:> Celery beat job meant to be invoked hourly <END_TASK> <USER_TASK:> Description: def schedule_hourly(): """ Celery beat job meant to be invoked hourly """
if not config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
    logging.info('Scheduled email reports not enabled in config')
    return

resolution = config.get('EMAIL_REPORTS_CRON_RESOLUTION', 0) * 60

# Get the top of the hour
start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)
schedule_window(ScheduleType.dashboard.value, start_at, stop_at, resolution)
schedule_window(ScheduleType.slice.value, start_at, stop_at, resolution)
<SYSTEM_TASK:> De-duplicates a list of string by suffixing a counter <END_TASK> <USER_TASK:> Description: def dedup(l, suffix='__', case_sensitive=True): """De-duplicates a list of string by suffixing a counter Always returns the same number of entries as provided, and always returns unique values. Case sensitive comparison by default. >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar']))) foo,bar,bar__1,bar__2,Bar >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False))) foo,bar,bar__1,bar__2,Bar__3 """
new_l = []
seen = {}
for s in l:
    s_fixed_case = s if case_sensitive else s.lower()
    if s_fixed_case in seen:
        seen[s_fixed_case] += 1
        s += suffix + str(seen[s_fixed_case])
    else:
        seen[s_fixed_case] = 0
    new_l.append(s)
return new_l
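One typical use for a helper like this is making duplicate dataframe column labels unique; the sketch below is illustrative, not from the source, and repeats the same logic in a standalone function so it runs on its own.

import pandas as pd

def dedup(l, suffix='__', case_sensitive=True):
    # Same logic as above, repeated here so the example is self-contained.
    new_l, seen = [], {}
    for s in l:
        key = s if case_sensitive else s.lower()
        if key in seen:
            seen[key] += 1
            s += suffix + str(seen[key])
        else:
            seen[key] = 0
        new_l.append(s)
    return new_l

# A SQL result can legally contain duplicate column labels;
# pandas columns are just a list, so they can be renamed in place.
df = pd.DataFrame([[1, 2, 3]], columns=['id', 'name', 'name'])
df.columns = dedup(list(df.columns))
print(list(df.columns))   # ['id', 'name', 'name__1']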
<SYSTEM_TASK:> Provides metadata about columns for data visualization. <END_TASK> <USER_TASK:> Description: def columns(self): """Provides metadata about columns for data visualization. :return: dict, with the fields name, type, is_date, is_dim and agg. """
if self.df.empty: return None columns = [] sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index)) sample = self.df if sample_size: sample = self.df.sample(sample_size) for col in self.df.dtypes.keys(): db_type_str = ( self._type_dict.get(col) or self.db_type(self.df.dtypes[col]) ) column = { 'name': col, 'agg': self.agg_func(self.df.dtypes[col], col), 'type': db_type_str, 'is_date': self.is_date(self.df.dtypes[col], db_type_str), 'is_dim': self.is_dimension(self.df.dtypes[col], col), } if not db_type_str or db_type_str.upper() == 'OBJECT': v = sample[col].iloc[0] if not sample[col].empty else None if isinstance(v, str): column['type'] = 'STRING' elif isinstance(v, int): column['type'] = 'INT' elif isinstance(v, float): column['type'] = 'FLOAT' elif isinstance(v, (datetime, date)): column['type'] = 'DATETIME' column['is_date'] = True column['is_dim'] = False # check if encoded datetime if ( column['type'] == 'STRING' and self.datetime_conversion_rate(sample[col]) > INFER_COL_TYPES_THRESHOLD): column.update({ 'is_date': True, 'is_dim': False, 'agg': None, }) # 'agg' is optional attribute if not column['agg']: column.pop('agg', None) columns.append(column) return columns
<SYSTEM_TASK:> Getting the time component of the query <END_TASK> <USER_TASK:> Description: def get_timestamp_expression(self, time_grain): """Getting the time component of the query"""
label = utils.DTTM_ALIAS db = self.table.database pdf = self.python_date_format is_epoch = pdf in ('epoch_s', 'epoch_ms') if not self.expression and not time_grain and not is_epoch: sqla_col = column(self.column_name, type_=DateTime) return self.table.make_sqla_column_compatible(sqla_col, label) grain = None if time_grain: grain = db.grains_dict().get(time_grain) if not grain: raise NotImplementedError( f'No grain spec for {time_grain} for database {db.database_name}') col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name) expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain) sqla_col = literal_column(expr, type_=DateTime) return self.table.make_sqla_column_compatible(sqla_col, label)
<SYSTEM_TASK:> Convert datetime object to a SQL expression string <END_TASK> <USER_TASK:> Description: def dttm_sql_literal(self, dttm, is_epoch_in_utc): """Convert datetime object to a SQL expression string If database_expression is empty, the internal dttm will be parsed as the string with the pattern that the user inputted (python_date_format) If database_expression is not empty, the internal dttm will be parsed as the sql sentence for the database to convert """
tf = self.python_date_format if self.database_expression: return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S')) elif tf: if is_epoch_in_utc: seconds_since_epoch = dttm.timestamp() else: seconds_since_epoch = (dttm - datetime(1970, 1, 1)).total_seconds() seconds_since_epoch = int(seconds_since_epoch) if tf == 'epoch_s': return str(seconds_since_epoch) elif tf == 'epoch_ms': return str(seconds_since_epoch * 1000) return "'{}'".format(dttm.strftime(tf)) else: s = self.table.database.db_engine_spec.convert_dttm( self.type or '', dttm) return s or "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))
<SYSTEM_TASK:> Runs query against sqla to retrieve some <END_TASK> <USER_TASK:> Description: def values_for_column(self, column_name, limit=10000): """Runs query against sqla to retrieve some sample values for the given column. """
cols = {col.column_name: col for col in self.columns} target_col = cols[column_name] tp = self.get_template_processor() qry = ( select([target_col.get_sqla_col()]) .select_from(self.get_from_clause(tp)) .distinct() ) if limit: qry = qry.limit(limit) if self.fetch_values_predicate: tp = self.get_template_processor() qry = qry.where(tp.process_template(self.fetch_values_predicate)) engine = self.database.get_sqla_engine() sql = '{}'.format( qry.compile(engine, compile_kwargs={'literal_binds': True}), ) sql = self.mutate_query_from_config(sql) df = pd.read_sql_query(sql=sql, con=engine) return [row[0] for row in df.to_records(index=False)]
<SYSTEM_TASK:> Apply config's SQL_QUERY_MUTATOR <END_TASK> <USER_TASK:> Description: def mutate_query_from_config(self, sql): """Apply config's SQL_QUERY_MUTATOR Typically adds comments to the query with context"""
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
    username = utils.get_username()
    sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)
return sql
<SYSTEM_TASK:> Turn an adhoc metric into a sqlalchemy column. <END_TASK> <USER_TASK:> Description: def adhoc_metric_to_sqla(self, metric, cols): """ Turn an adhoc metric into a sqlalchemy column. :param dict metric: Adhoc metric definition :param dict cols: Columns for the current table :returns: The metric defined as a sqlalchemy column :rtype: sqlalchemy.sql.column """
expression_type = metric.get('expressionType') label = utils.get_metric_name(metric) if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']: column_name = metric.get('column').get('column_name') table_column = cols.get(column_name) if table_column: sqla_column = table_column.get_sqla_col() else: sqla_column = column(column_name) sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column) elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']: sqla_metric = literal_column(metric.get('sqlExpression')) else: return None return self.make_sqla_column_compatible(sqla_metric, label)
<SYSTEM_TASK:> Fetches the metadata for the table and merges it in <END_TASK> <USER_TASK:> Description: def fetch_metadata(self): """Fetches the metadata for the table and merges it in"""
try: table = self.get_sqla_table_object() except Exception as e: logging.exception(e) raise Exception(_( "Table [{}] doesn't seem to exist in the specified database, " "couldn't fetch column information").format(self.table_name)) M = SqlMetric # noqa metrics = [] any_date_col = None db_engine_spec = self.database.db_engine_spec db_dialect = self.database.get_dialect() dbcols = ( db.session.query(TableColumn) .filter(TableColumn.table == self) .filter(or_(TableColumn.column_name == col.name for col in table.columns))) dbcols = {dbcol.column_name: dbcol for dbcol in dbcols} for col in table.columns: try: datatype = col.type.compile(dialect=db_dialect).upper() except Exception as e: datatype = 'UNKNOWN' logging.error( 'Unrecognized data type in {}.{}'.format(table, col.name)) logging.exception(e) dbcol = dbcols.get(col.name, None) if not dbcol: dbcol = TableColumn(column_name=col.name, type=datatype) dbcol.sum = dbcol.is_num dbcol.avg = dbcol.is_num dbcol.is_dttm = dbcol.is_time db_engine_spec.alter_new_orm_column(dbcol) else: dbcol.type = datatype dbcol.groupby = True dbcol.filterable = True self.columns.append(dbcol) if not any_date_col and dbcol.is_time: any_date_col = col.name metrics.append(M( metric_name='count', verbose_name='COUNT(*)', metric_type='count', expression='COUNT(*)', )) if not self.main_dttm_col: self.main_dttm_col = any_date_col self.add_missing_metrics(metrics) db.session.merge(self) db.session.commit()
<SYSTEM_TASK:> Returns a list of non empty values or None <END_TASK> <USER_TASK:> Description: def filter_not_empty_values(value): """Returns a list of non empty values or None"""
if not value:
    return None
data = [x for x in value if x]
if not data:
    return None
return data
<SYSTEM_TASK:> If the user has access to the database or all datasource <END_TASK> <USER_TASK:> Description: def at_least_one_schema_is_allowed(database): """ If the user has access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is able to upload csv without specifying schema name b) if database supports schema user is able to upload csv to any schema 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and upload will fail b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload elif the user does not access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is unable to upload csv b) if database supports schema user is unable to upload csv 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and user is unable to upload csv b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload """
if (security_manager.database_access(database) or
        security_manager.all_datasource_access()):
    return True
schemas = database.get_schema_access_for_csv_upload()
if (schemas and
        security_manager.schemas_accessible_by_user(
            database, schemas, False)):
    return True
return False
<SYSTEM_TASK:> Filter queries to only those owned by current user if <END_TASK> <USER_TASK:> Description: def apply( self, query: BaseQuery, func: Callable) -> BaseQuery: """ Filter queries to only those owned by current user if can_only_access_owned_queries permission is set. :returns: query """
if security_manager.can_only_access_owned_queries():
    query = (
        query
        .filter(Query.user_id == g.user.get_user_id())
    )
return query
<SYSTEM_TASK:> Simple hack to redirect to explore view after saving <END_TASK> <USER_TASK:> Description: def edit(self, pk): """Simple hack to redirect to explore view after saving"""
resp = super(TableModelView, self).edit(pk)
if isinstance(resp, str):
    return resp
return redirect('/superset/explore/table/{}/'.format(pk))
<SYSTEM_TASK:> Build `form_data` for chart GET request from dashboard's `default_filters`. <END_TASK> <USER_TASK:> Description: def get_form_data(chart_id, dashboard=None): """ Build `form_data` for chart GET request from dashboard's `default_filters`. When a dashboard has `default_filters` they need to be added as extra filters in the GET request for charts. """
form_data = {'slice_id': chart_id}

if dashboard is None or not dashboard.json_metadata:
    return form_data

json_metadata = json.loads(dashboard.json_metadata)

# do not apply filters if chart is immune to them
if chart_id in json_metadata.get('filter_immune_slices', []):
    return form_data

default_filters = json.loads(json_metadata.get('default_filters', 'null'))
if not default_filters:
    return form_data

# are some of the fields in the chart immune to filters?
filter_immune_slice_fields = json_metadata.get('filter_immune_slice_fields', {})
immune_fields = filter_immune_slice_fields.get(str(chart_id), [])

extra_filters = []
for filters in default_filters.values():
    for col, val in filters.items():
        if col not in immune_fields:
            extra_filters.append({'col': col, 'op': 'in', 'val': val})
if extra_filters:
    form_data['extra_filters'] = extra_filters

return form_data
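To make the input and output shapes concrete, here is an illustrative call that reuses get_form_data from above with a minimal stand-in dashboard object; the metadata keys match the ones read by the function, but the slice ids and filter values are invented.

import json
from types import SimpleNamespace

# Hypothetical dashboard: a filter box (slice 101) defaults to region=US and
# country=USA, while slice 202 is immune to the "country" field only.
dashboard = SimpleNamespace(json_metadata=json.dumps({
    'filter_immune_slices': [],
    'filter_immune_slice_fields': {'202': ['country']},
    'default_filters': json.dumps({'101': {'region': ['US'], 'country': ['USA']}}),
}))

print(get_form_data(202, dashboard))
# Expected: {'slice_id': 202,
#            'extra_filters': [{'col': 'region', 'op': 'in', 'val': ['US']}]}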
<SYSTEM_TASK:> Warm up cache. <END_TASK> <USER_TASK:> Description: def cache_warmup(strategy_name, *args, **kwargs): """ Warm up cache. This task periodically hits charts to warm up the cache. """
logger.info('Loading strategy') class_ = None for class_ in strategies: if class_.name == strategy_name: break else: message = f'No strategy {strategy_name} found!' logger.error(message) return message logger.info(f'Loading {class_.__name__}') try: strategy = class_(*args, **kwargs) logger.info('Success!') except TypeError: message = 'Error loading strategy!' logger.exception(message) return message results = {'success': [], 'errors': []} for url in strategy.get_urls(): try: logger.info(f'Fetching {url}') requests.get(url) results['success'].append(url) except RequestException: logger.exception('Error warming up cache!') results['errors'].append(url) return results
<SYSTEM_TASK:> Refresh metadata of all datasources in the cluster <END_TASK> <USER_TASK:> Description: def refresh_datasources( self, datasource_name=None, merge_flag=True, refreshAll=True): """Refresh metadata of all datasources in the cluster If ``datasource_name`` is specified, only that datasource is updated """
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
    ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
    ds_refresh.append(datasource_name)
else:
    return
self.refresh(ds_refresh, merge_flag, refreshAll)
<SYSTEM_TASK:> Fetches metadata for the specified datasources and <END_TASK> <USER_TASK:> Description: def refresh(self, datasource_names, merge_flag, refreshAll): """ Fetches metadata for the specified datasources and merges to the Superset database """
session = db.session ds_list = ( session.query(DruidDatasource) .filter(DruidDatasource.cluster_name == self.cluster_name) .filter(DruidDatasource.datasource_name.in_(datasource_names)) ) ds_map = {ds.name: ds for ds in ds_list} for ds_name in datasource_names: datasource = ds_map.get(ds_name, None) if not datasource: datasource = DruidDatasource(datasource_name=ds_name) with session.no_autoflush: session.add(datasource) flasher( _('Adding new datasource [{}]').format(ds_name), 'success') ds_map[ds_name] = datasource elif refreshAll: flasher( _('Refreshing datasource [{}]').format(ds_name), 'info') else: del ds_map[ds_name] continue datasource.cluster = self datasource.merge_flag = merge_flag session.flush() # Prepare multithreaded executation pool = ThreadPool() ds_refresh = list(ds_map.values()) metadata = pool.map(_fetch_metadata_for, ds_refresh) pool.close() pool.join() for i in range(0, len(ds_refresh)): datasource = ds_refresh[i] cols = metadata[i] if cols: col_objs_list = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(cols.keys())) ) col_objs = {col.column_name: col for col in col_objs_list} for col in cols: if col == '__time': # skip the time column continue col_obj = col_objs.get(col) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=col) with session.no_autoflush: session.add(col_obj) col_obj.type = cols[col]['type'] col_obj.datasource = datasource if col_obj.type == 'STRING': col_obj.groupby = True col_obj.filterable = True datasource.refresh_metrics() session.commit()
<SYSTEM_TASK:> Refresh metrics based on the column metadata <END_TASK> <USER_TASK:> Description: def refresh_metrics(self): """Refresh metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
    db.session.query(DruidMetric)
    .filter(DruidMetric.datasource_id == self.datasource_id)
    .filter(DruidMetric.metric_name.in_(metrics.keys()))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
    dbmetric = dbmetrics.get(metric.metric_name)
    if dbmetric:
        for attr in ['json', 'metric_type']:
            setattr(dbmetric, attr, getattr(metric, attr))
    else:
        with db.session.no_autoflush:
            metric.datasource_id = self.datasource_id
            db.session.add(metric)
<SYSTEM_TASK:> Merges the ds config from druid_config into one stored in the db. <END_TASK> <USER_TASK:> Description: def sync_to_db_from_config( cls, druid_config, user, cluster, refresh=True): """Merges the ds config from druid_config into one stored in the db."""
session = db.session datasource = ( session.query(cls) .filter_by(datasource_name=druid_config['name']) .first() ) # Create a new datasource. if not datasource: datasource = cls( datasource_name=druid_config['name'], cluster=cluster, owners=[user], changed_by_fk=user.id, created_by_fk=user.id, ) session.add(datasource) elif not refresh: return dimensions = druid_config['dimensions'] col_objs = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(dimensions)) ) col_objs = {col.column_name: col for col in col_objs} for dim in dimensions: col_obj = col_objs.get(dim, None) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=dim, groupby=True, filterable=True, # TODO: fetch type from Hive. type='STRING', datasource=datasource, ) session.add(col_obj) # Import Druid metrics metric_objs = ( session.query(DruidMetric) .filter(DruidMetric.datasource_id == datasource.id) .filter(DruidMetric.metric_name.in_( spec['name'] for spec in druid_config['metrics_spec'] )) ) metric_objs = {metric.metric_name: metric for metric in metric_objs} for metric_spec in druid_config['metrics_spec']: metric_name = metric_spec['name'] metric_type = metric_spec['type'] metric_json = json.dumps(metric_spec) if metric_type == 'count': metric_type = 'longSum' metric_json = json.dumps({ 'type': 'longSum', 'name': metric_name, 'fieldName': metric_name, }) metric_obj = metric_objs.get(metric_name, None) if not metric_obj: metric_obj = DruidMetric( metric_name=metric_name, metric_type=metric_type, verbose_name='%s(%s)' % (metric_type, metric_name), datasource=datasource, json=metric_json, description=( 'Imported from the airolap config dir for %s' % druid_config['name']), ) session.add(metric_obj) session.commit()
<SYSTEM_TASK:> For a metric specified as `postagg` returns the <END_TASK> <USER_TASK:> Description: def get_post_agg(mconf): """ For a metric specified as `postagg` returns the kind of post aggregation for pydruid. """
if mconf.get('type') == 'javascript': return JavascriptPostAggregator( name=mconf.get('name', ''), field_names=mconf.get('fieldNames', []), function=mconf.get('function', '')) elif mconf.get('type') == 'quantile': return Quantile( mconf.get('name', ''), mconf.get('probability', ''), ) elif mconf.get('type') == 'quantiles': return Quantiles( mconf.get('name', ''), mconf.get('probabilities', ''), ) elif mconf.get('type') == 'fieldAccess': return Field(mconf.get('name')) elif mconf.get('type') == 'constant': return Const( mconf.get('value'), output_name=mconf.get('name', ''), ) elif mconf.get('type') == 'hyperUniqueCardinality': return HyperUniqueCardinality( mconf.get('name'), ) elif mconf.get('type') == 'arithmetic': return Postaggregator( mconf.get('fn', '/'), mconf.get('fields', []), mconf.get('name', '')) else: return CustomPostAggregator( mconf.get('name', ''), mconf)
<SYSTEM_TASK:> Return a list of metrics that are post aggregations <END_TASK> <USER_TASK:> Description: def find_postaggs_for(postagg_names, metrics_dict): """Return a list of metrics that are post aggregations"""
postagg_metrics = [
    metrics_dict[name] for name in postagg_names
    if metrics_dict[name].metric_type == POST_AGG_TYPE
]
# Remove post aggregations that were found
for postagg in postagg_metrics:
    postagg_names.remove(postagg.metric_name)
return postagg_metrics
<SYSTEM_TASK:> Retrieve some values for the given column <END_TASK> <USER_TASK:> Description: def values_for_column(self, column_name, limit=10000): """Retrieve some values for the given column"""
logging.info( 'Getting values for columns [{}] limited to [{}]' .format(column_name, limit)) # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid if self.fetch_values_from: from_dttm = utils.parse_human_datetime(self.fetch_values_from) else: from_dttm = datetime(1970, 1, 1) qry = dict( datasource=self.datasource_name, granularity='all', intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(), aggregations=dict(count=count('count')), dimension=column_name, metric='count', threshold=limit, ) client = self.cluster.get_pydruid_client() client.topn(**qry) df = client.export_pandas() return [row[column_name] for row in df.to_records(index=False)]
<SYSTEM_TASK:> Returns a dictionary of aggregation metric names to aggregation json objects <END_TASK> <USER_TASK:> Description: def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]): """ Returns a dictionary of aggregation metric names to aggregation json objects :param metrics_dict: dictionary of all the metrics :param saved_metrics: list of saved metric names :param adhoc_metrics: list of adhoc metric names :raise SupersetException: if one or more metric names are not aggregations """
aggregations = OrderedDict() invalid_metric_names = [] for metric_name in saved_metrics: if metric_name in metrics_dict: metric = metrics_dict[metric_name] if metric.metric_type == POST_AGG_TYPE: invalid_metric_names.append(metric_name) else: aggregations[metric_name] = metric.json_obj else: invalid_metric_names.append(metric_name) if len(invalid_metric_names) > 0: raise SupersetException( _('Metric(s) {} must be aggregations.').format(invalid_metric_names)) for adhoc_metric in adhoc_metrics: aggregations[adhoc_metric['label']] = { 'fieldName': adhoc_metric['column']['column_name'], 'fieldNames': [adhoc_metric['column']['column_name']], 'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric), 'name': adhoc_metric['label'], } return aggregations
<SYSTEM_TASK:> Replace dimensions specs with their `dimension` <END_TASK> <USER_TASK:> Description: def _dimensions_to_values(dimensions): """ Replace dimensions specs with their `dimension` values, and ignore those without """
values = []
for dimension in dimensions:
    if isinstance(dimension, dict):
        if 'extractionFn' in dimension:
            values.append(dimension)
        elif 'dimension' in dimension:
            values.append(dimension['dimension'])
    else:
        values.append(dimension)
return values
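An illustrative input/output pair; the dimension specs are invented but follow the shapes the loop checks for, and the loop is repeated inline so the snippet runs on its own.

# Hypothetical Druid dimension specs; only their shapes matter here.
dimensions = [
    'country',                                                     # plain column name
    {'dimension': 'city', 'outputName': 'city'},                   # spec without extractionFn
    {'dimension': 'ts', 'extractionFn': {'type': 'timeFormat'}},   # kept whole
    {'outputName': 'orphan'},                                      # neither key: ignored
]

values = []
for dimension in dimensions:   # same logic as the helper above
    if isinstance(dimension, dict):
        if 'extractionFn' in dimension:
            values.append(dimension)
        elif 'dimension' in dimension:
            values.append(dimension['dimension'])
    else:
        values.append(dimension)

print(values)
# ['country', 'city', {'dimension': 'ts', 'extractionFn': {'type': 'timeFormat'}}]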
<SYSTEM_TASK:> Converting all GROUPBY columns to strings <END_TASK> <USER_TASK:> Description: def homogenize_types(df, groupby_cols): """Converting all GROUPBY columns to strings When grouping by a numeric (say FLOAT) column, pydruid returns strings in the dataframe. This creates issues downstream related to having mixed types in the dataframe Here we replace None with <NULL> and make the whole series a str instead of an object. """
for col in groupby_cols:
    df[col] = df[col].fillna('<NULL>').astype('unicode')
return df
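A small sketch of the effect on a toy dataframe; the values are invented, and astype(str) is used here for portability with the same intent as the astype('unicode') call above.

import pandas as pd

df = pd.DataFrame({'bucket': [1.0, 2.0, None], 'count': [10, 20, 30]})

for col in ['bucket']:
    # Missing values become the literal string '<NULL>', then the whole
    # column is coerced to strings so groupby keys have a single type.
    df[col] = df[col].fillna('<NULL>').astype(str)

print(df['bucket'].tolist())   # ['1.0', '2.0', '<NULL>']
print(df['bucket'].dtype)      # object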
<SYSTEM_TASK:> Get the environment variable or raise exception. <END_TASK> <USER_TASK:> Description: def get_env_variable(var_name, default=None): """Get the environment variable or raise exception."""
try:
    return os.environ[var_name]
except KeyError:
    if default is not None:
        return default
    else:
        error_msg = 'The environment variable {} was missing, abort...'\
            .format(var_name)
        raise EnvironmentError(error_msg)
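A typical usage sketch, reusing get_env_variable from above with invented variable names: present variables are returned, a missing variable falls back to the given default, and a missing variable with no default aborts.

import os

os.environ['EXAMPLE_DB_HOST'] = 'localhost'   # pretend this came from the shell

db_host = get_env_variable('EXAMPLE_DB_HOST')             # 'localhost'
db_port = get_env_variable('EXAMPLE_DB_PORT', '5432')     # falls back to default

try:
    get_env_variable('EXAMPLE_DB_PASSWORD')               # no default -> raises
except EnvironmentError as e:
    print(e)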
<SYSTEM_TASK:> Loads the data for the country map example <END_TASK> <USER_TASK:> Description: def load_country_map_data(): """Loads the data for the country map example"""
csv_bytes = get_example_data( 'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True) data = pd.read_csv(csv_bytes, encoding='utf-8') data['dttm'] = datetime.datetime.now().date() data.to_sql( # pylint: disable=no-member 'birth_france_by_region', db.engine, if_exists='replace', chunksize=500, dtype={ 'DEPT_ID': String(10), '2003': BigInteger, '2004': BigInteger, '2005': BigInteger, '2006': BigInteger, '2007': BigInteger, '2008': BigInteger, '2009': BigInteger, '2010': BigInteger, '2011': BigInteger, '2012': BigInteger, '2013': BigInteger, '2014': BigInteger, 'dttm': Date(), }, index=False) print('Done loading table!') print('-' * 80) print('Creating table reference') obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first() if not obj: obj = TBL(table_name='birth_france_by_region') obj.main_dttm_col = 'dttm' obj.database = utils.get_or_create_main_db() if not any(col.metric_name == 'avg__2004' for col in obj.metrics): obj.metrics.append(SqlMetric( metric_name='avg__2004', expression='AVG(2004)', )) db.session.merge(obj) db.session.commit() obj.fetch_metadata() tbl = obj slice_data = { 'granularity_sqla': '', 'since': '', 'until': '', 'where': '', 'viz_type': 'country_map', 'entity': 'DEPT_ID', 'metric': { 'expressionType': 'SIMPLE', 'column': { 'type': 'INT', 'column_name': '2004', }, 'aggregate': 'AVG', 'label': 'Boys', 'optionName': 'metric_112342', }, 'row_limit': 500000, } print('Creating a slice') slc = Slice( slice_name='Birth in France by department in 2016', viz_type='country_map', datasource_type='table', datasource_id=tbl.id, params=get_slice_json(slice_data), ) misc_dash_slices.add(slc.slice_name) merge_slice(slc)
<SYSTEM_TASK:> Returns a list of SQL statements as strings, stripped <END_TASK> <USER_TASK:> Description: def get_statements(self): """Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
    if statement:
        sql = str(statement).strip(' \n;\t')
        if sql:
            statements.append(sql)
return statements
<SYSTEM_TASK:> Reformats the query into the create table as query. <END_TASK> <USER_TASK:> Description: def as_create_table(self, table_name, overwrite=False): """Reformats the query into the create table as query. Works only for the single select SQL statements, in all other cases the sql query is not modified. :param superset_query: string, sql query that will be executed :param table_name: string, will contain the results of the query execution :param overwrite, boolean, table table_name will be dropped if true :return: string, create table as query """
exec_sql = ''
sql = self.stripped()
if overwrite:
    exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql
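A rough sketch of the string this builds for a single SELECT; the table name and query below are invented, and the string assembly is repeated inline rather than going through the class.

table_name = 'tmp_table'               # hypothetical target table
sql = 'SELECT * FROM some_table'       # what self.stripped() would return here
overwrite = True

exec_sql = f'DROP TABLE IF EXISTS {table_name};\n' if overwrite else ''
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
print(exec_sql)
# DROP TABLE IF EXISTS tmp_table;
# CREATE TABLE tmp_table AS
# SELECT * FROM some_table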
<SYSTEM_TASK:> returns the query with the specified limit <END_TASK> <USER_TASK:> Description: def get_query_with_new_limit(self, new_limit): """returns the query with the specified limit"""
"""does not change the underlying query""" if not self._limit: return self.sql + ' LIMIT ' + str(new_limit) limit_pos = None tokens = self._parsed[0].tokens # Add all items to before_str until there is a limit for pos, item in enumerate(tokens): if item.ttype in Keyword and item.value.lower() == 'limit': limit_pos = pos break limit = tokens[limit_pos + 2] if limit.ttype == sqlparse.tokens.Literal.Number.Integer: tokens[limit_pos + 2].value = new_limit elif limit.is_group: tokens[limit_pos + 2].value = ( '{}, {}'.format(next(limit.get_identifiers()), new_limit) ) str_res = '' for i in tokens: str_res += str(i.value) return str_res
<SYSTEM_TASK:> Read a url or post parameter and use it in your SQL Lab query <END_TASK> <USER_TASK:> Description: def url_param(param, default=None): """Read a url or post parameter and use it in your SQL Lab query When in SQL Lab, it's possible to add arbitrary URL "query string" parameters, and use those in your SQL code. For instance you can alter your url and add `?foo=bar`, as in `{domain}/superset/sqllab?foo=bar`. Then if your query is something like SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at runtime and replaced by the value in the URL. As you create a visualization form this SQL Lab query, you can pass parameters in the explore view as well as from the dashboard, and it should carry through to your queries. :param param: the parameter to lookup :type param: str :param default: the value to return in the absence of the parameter :type default: str """
if request.args.get(param):
    return request.args.get(param, default)
# Supporting POST as well as get
if request.form.get('form_data'):
    form_data = json.loads(request.form.get('form_data'))
    url_params = form_data.get('url_params') or {}
    return url_params.get(param, default)
return default
<SYSTEM_TASK:> Gets the values for a particular filter as a list <END_TASK> <USER_TASK:> Description: def filter_values(column, default=None): """ Gets the values for a particular filter as a list This is useful if: - you want to use a filter box to filter a query where the name of filter box column doesn't match the one in the select statement - you want to have the ability to filter inside the main query for speed purposes This searches for "filters" and "extra_filters" in form_data for a match Usage example: SELECT action, count(*) as times FROM logs WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} ) GROUP BY 1 :param column: column/filter name to lookup :type column: str :param default: default value to return if there's no matching columns :type default: str :return: returns a list of filter values :type: list """
form_data = json.loads(request.form.get('form_data', '{}'))
return_val = []
for filter_type in ['filters', 'extra_filters']:
    if filter_type not in form_data:
        continue
    for f in form_data[filter_type]:
        if f['col'] == column:
            for v in f['val']:
                return_val.append(v)
if return_val:
    return return_val
if default:
    return [default]
else:
    return []
<SYSTEM_TASK:> Compatibility layer for handling of datasource info <END_TASK> <USER_TASK:> Description: def get_datasource_info(datasource_id, datasource_type, form_data): """Compatibility layer for handling of datasource info datasource_id & datasource_type used to be passed in the URL directory, now they should come as part of the form_data, This function allows supporting both without duplicating code"""
datasource = form_data.get('datasource', '')
if '__' in datasource:
    datasource_id, datasource_type = datasource.split('__')
    # The case where the datasource has been deleted
    datasource_id = None if datasource_id == 'None' else datasource_id

if not datasource_id:
    raise Exception(
        'The datasource associated with this chart no longer exists')

datasource_id = int(datasource_id)
return datasource_id, datasource_type
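Two illustrative calls, reusing get_datasource_info from above with made-up ids: a combined `datasource` key in `form_data` takes precedence over the positional arguments, and a deleted datasource (its id serialized as the string 'None') surfaces as an error.

# Newer-style call: the id and type are embedded in form_data as "<id>__<type>".
print(get_datasource_info(None, None, {'datasource': '7__table'}))
# -> (7, 'table')

# Deleted datasource: the id comes through as the string 'None'.
try:
    get_datasource_info(None, None, {'datasource': 'None__table'})
except Exception as e:
    print(e)   # The datasource associated with this chart no longer exists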
<SYSTEM_TASK:> FAB leaves faulty permissions that need to be cleaned up <END_TASK> <USER_TASK:> Description: def clean_perms(self): """FAB leaves faulty permissions that need to be cleaned up"""
logging.info('Cleaning faulty perms')
sesh = self.get_session
pvms = (
    sesh.query(ab_models.PermissionView)
    .filter(or_(
        ab_models.PermissionView.permission == None,  # NOQA
        ab_models.PermissionView.view_menu == None,  # NOQA
    ))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
    logging.info('Deleted {} faulty permissions'.format(deleted_count))
<SYSTEM_TASK:> Inits the Superset application with security roles and such <END_TASK> <USER_TASK:> Description: def sync_role_definitions(self): """Inits the Superset application with security roles and such"""
from superset import conf
logging.info('Syncing role definition')

self.create_custom_permissions()

# Creating default roles
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)

if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
    self.set_role('Public', self.is_gamma_pvm)

self.create_missing_perms()

# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
<SYSTEM_TASK:> Exports databases and druid clusters to a dictionary <END_TASK> <USER_TASK:> Description: def export_to_dict(session, recursive, back_references, include_defaults): """Exports databases and druid clusters to a dictionary"""
logging.info('Starting export') dbs = session.query(Database) databases = [database.export_to_dict(recursive=recursive, include_parent_ref=back_references, include_defaults=include_defaults) for database in dbs] logging.info('Exported %d %s', len(databases), DATABASES_KEY) cls = session.query(DruidCluster) clusters = [cluster.export_to_dict(recursive=recursive, include_parent_ref=back_references, include_defaults=include_defaults) for cluster in cls] logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY) data = dict() if databases: data[DATABASES_KEY] = databases if clusters: data[DRUID_CLUSTERS_KEY] = clusters return data
<SYSTEM_TASK:> Imports databases and druid clusters from dictionary <END_TASK> <USER_TASK:> Description: def import_from_dict(session, data, sync=[]): """Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
    logging.info('Importing %d %s',
                 len(data.get(DATABASES_KEY, [])),
                 DATABASES_KEY)
    for database in data.get(DATABASES_KEY, []):
        Database.import_from_dict(session, database, sync=sync)

    logging.info('Importing %d %s',
                 len(data.get(DRUID_CLUSTERS_KEY, [])),
                 DRUID_CLUSTERS_KEY)
    for datasource in data.get(DRUID_CLUSTERS_KEY, []):
        DruidCluster.import_from_dict(session, datasource, sync=sync)
    session.commit()
else:
    logging.info('Supplied object is not a dictionary.')
<SYSTEM_TASK:> Loads 2 css templates to demonstrate the feature <END_TASK> <USER_TASK:> Description: def load_css_templates(): """Loads 2 css templates to demonstrate the feature"""
print('Creating default CSS templates') obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first() if not obj: obj = CssTemplate(template_name='Flat') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #FAFAFA; border: 1px solid #CCC; box-shadow: none; border-radius: 0px; } .gridster div.widget:hover { border: 1px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit() obj = ( db.session.query(CssTemplate).filter_by(template_name='Courier Black').first()) if not obj: obj = CssTemplate(template_name='Courier Black') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #EEE; border: 2px solid #444; border-radius: 15px; box-shadow: none; } h2 { color: white; font-size: 52px; } .navbar { box-shadow: none; } .gridster div.widget:hover { border: 2px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } .nvd3 text { font-size: 12px; font-family: inherit; } body{ background: #000; font-family: Courier, Monaco, monospace;; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit()
<SYSTEM_TASK:> Get a mapping of foreign name to the local name of foreign keys <END_TASK> <USER_TASK:> Description: def _parent_foreign_key_mappings(cls): """Get a mapping of foreign name to the local name of foreign keys"""
parent_rel = cls.__mapper__.relationships.get(cls.export_parent) if parent_rel: return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs} return {}
<SYSTEM_TASK:> Export schema as a dictionary <END_TASK> <USER_TASK:> Description: def export_schema(cls, recursive=True, include_parent_ref=False): """Export schema as a dictionary"""
parent_excludes = {} if not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} def formatter(c): return ('{0} Default ({1})'.format( str(c.type), c.default.arg) if c.default else str(c.type)) schema = {c.name: formatter(c) for c in cls.__table__.columns if (c.name in cls.export_fields and c.name not in parent_excludes)} if recursive: for c in cls.export_children: child_class = cls.__mapper__.relationships[c].argument.class_ schema[c] = [child_class.export_schema(recursive=recursive, include_parent_ref=include_parent_ref)] return schema
<SYSTEM_TASK:> Export obj to dictionary <END_TASK> <USER_TASK:> Description: def export_to_dict(self, recursive=True, include_parent_ref=False, include_defaults=False): """Export obj to dictionary"""
cls = self.__class__ parent_excludes = {} if recursive and not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} dict_rep = {c.name: getattr(self, c.name) for c in cls.__table__.columns if (c.name in self.export_fields and c.name not in parent_excludes and (include_defaults or ( getattr(self, c.name) is not None and (not c.default or getattr(self, c.name) != c.default.arg)))) } if recursive: for c in self.export_children: # sorting to make lists of children stable dict_rep[c] = sorted( [ child.export_to_dict( recursive=recursive, include_parent_ref=include_parent_ref, include_defaults=include_defaults, ) for child in getattr(self, c) ], key=lambda k: sorted(k.items())) return dict_rep
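For orientation, a sketch of what the export might look like for a database object; the exact keys depend on export_fields and export_children of the concrete model, so treat the values as illustrative.

exported = database.export_to_dict(recursive=True, include_defaults=False)
# e.g. {'database_name': 'examples',
#       'sqlalchemy_uri': 'sqlite:////tmp/examples.db',
#       'tables': [{'table_name': 'birth_names', 'columns': [...], 'metrics': [...]}]}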
<SYSTEM_TASK:> Overrides the plain fields of the dashboard. <END_TASK> <USER_TASK:> Description: def override(self, obj): """Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields: setattr(self, field, getattr(obj, field))
<SYSTEM_TASK:> Move since and until to time_range. <END_TASK> <USER_TASK:> Description: def update_time_range(form_data): """Move since and until to time_range."""
if 'since' in form_data or 'until' in form_data: form_data['time_range'] = '{} : {}'.format( form_data.pop('since', '') or '', form_data.pop('until', '') or '', )
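A small worked example of the in-place migration (values are arbitrary):

form_data = {'since': '7 days ago', 'until': 'now', 'viz_type': 'table'}
update_time_range(form_data)
# form_data == {'viz_type': 'table', 'time_range': '7 days ago : now'}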
<SYSTEM_TASK:>
Use this decorator to cache functions that have a predefined first arg.
<END_TASK>
<USER_TASK:>
Description:
def memoized_func(key=view_cache_key, attribute_in_key=None):
    """Use this decorator to cache functions that have a predefined first arg.

    enable_cache is treated as True by default,
    unless enable_cache = False is passed to the decorated function.

    force determines whether to force-refresh the cache and is treated as False
    by default, unless force = True is passed to the decorated function.

    The cache timeout is set to 600 seconds by default,
    unless cache_timeout = {timeout in seconds} is passed to the decorated function.

    memoized_func uses simple_cache and stores the data in memory.
    key is a callable that takes the function arguments and returns
    the caching key.
    """
def wrap(f): if tables_cache: def wrapped_f(self, *args, **kwargs): if not kwargs.get('cache', True): return f(self, *args, **kwargs) if attribute_in_key: cache_key = key(*args, **kwargs).format( getattr(self, attribute_in_key)) else: cache_key = key(*args, **kwargs) o = tables_cache.get(cache_key) if not kwargs.get('force') and o is not None: return o o = f(self, *args, **kwargs) tables_cache.set(cache_key, o, timeout=kwargs.get('cache_timeout')) return o else: # noop def wrapped_f(self, *args, **kwargs): return f(self, *args, **kwargs) return wrapped_f return wrap
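A minimal usage sketch; the class, attribute and key template below are hypothetical, but they mirror the intended pattern of a key callable combined with attribute_in_key. It assumes tables_cache has been configured, otherwise the decorator is a no-op.

class FakeDatabase(object):
    id = 42

    @memoized_func(
        key=lambda *args, **kwargs: 'db:{}:schema_list',
        attribute_in_key='id')
    def all_schema_names(self, cache=False, cache_timeout=None, force=False):
        # the expensive inspector call would normally go here
        return ['public', 'analytics']

# FakeDatabase().all_schema_names(cache=True, cache_timeout=600) stores the
# result under 'db:42:schema_list'; passing force=True recomputes and refreshes it.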
<SYSTEM_TASK:>
Check if user can access a cached response from explore_json.
<END_TASK>
<USER_TASK:>
Description:
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
    """
    Check if user can access a cached response from explore_json.

    This function takes `self` since it must have the same signature as
    the decorated method.

    """
form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
<SYSTEM_TASK:>
Check if user can access a cached response from slice_json.
<END_TASK>
<USER_TASK:>
Description:
def check_slice_perms(self, slice_id):
    """
    Check if user can access a cached response from slice_json.

    This function takes `self` since it must have the same signature as
    the decorated method.

    """
form_data, slc = get_form_data(slice_id, use_slice_data=True) datasource_type = slc.datasource.type datasource_id = slc.datasource.id viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
<SYSTEM_TASK:> Applies the configuration's http headers to all responses <END_TASK> <USER_TASK:> Description: def apply_caching(response): """Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items(): response.headers[k] = v return response
<SYSTEM_TASK:>
Updates the role with the given datasource permissions.
<END_TASK>
<USER_TASK:>
Description:
def override_role_permissions(self):
    """Updates the role with the given datasource permissions.

    Permissions not in the request will be revoked. This endpoint should
    be available to admins only. Expects JSON in the format:
        {
            'role_name': '{role_name}',
            'database': [{
                'datasource_type': '{table|druid}',
                'name': '{database_name}',
                'schema': [{
                    'name': '{schema_name}',
                    'datasources': ['{datasource name}, {datasource name}']
                }]
            }]
        }
    """
data = request.get_json(force=True) role_name = data['role_name'] databases = data['database'] db_ds_names = set() for dbs in databases: for schema in dbs['schema']: for ds_name in schema['datasources']: fullname = utils.get_datasource_full_name( dbs['name'], ds_name, schema=schema['name']) db_ds_names.add(fullname) existing_datasources = ConnectorRegistry.get_all_datasources(db.session) datasources = [ d for d in existing_datasources if d.full_name in db_ds_names] role = security_manager.find_role(role_name) # remove all permissions role.permissions = [] # grant permissions to the list of datasources granted_perms = [] for datasource in datasources: view_menu_perm = security_manager.find_permission_view_menu( view_menu_name=datasource.perm, permission_name='datasource_access') # prevent creating empty permissions if view_menu_perm and view_menu_perm.view_menu: role.permissions.append(view_menu_perm) granted_perms.append(view_menu_perm.view_menu.name) db.session.commit() return self.json_response({ 'granted': granted_perms, 'requested': list(db_ds_names), }, status=201)
<SYSTEM_TASK:>
Serves all requests that GET or POST form_data
<END_TASK>
<USER_TASK:>
Description:
def explore_json(self, datasource_type=None, datasource_id=None):
    """Serves all requests that GET or POST form_data

    This endpoint evolved to be the entry point of many different
    requests that GET or POST a form_data.

    `self.generate_json` receives this input and returns different
    payloads based on the request args in the first block

    TODO: break into one endpoint for each return shape"""
csv = request.args.get('csv') == 'true' query = request.args.get('query') == 'true' results = request.args.get('results') == 'true' samples = request.args.get('samples') == 'true' force = request.args.get('force') == 'true' form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=force, ) return self.generate_json( viz_obj, csv=csv, query=query, results=results, samples=samples, )
<SYSTEM_TASK:> Overrides the dashboards using json instances from the file. <END_TASK> <USER_TASK:> Description: def import_dashboards(self): """Overrides the dashboards using json instances from the file."""
f = request.files.get('file') if request.method == 'POST' and f: dashboard_import_export.import_dashboards(db.session, f.stream) return redirect('/dashboard/list/') return self.render_template('superset/import_dashboards.html')
<SYSTEM_TASK:> Deprecated endpoint, here for backward compatibility of urls <END_TASK> <USER_TASK:> Description: def explorev2(self, datasource_type, datasource_id): """Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for( 'Superset.explore', datasource_type=datasource_type, datasource_id=datasource_id, **request.args))
<SYSTEM_TASK:> Save or overwrite a slice <END_TASK> <USER_TASK:> Description: def save_or_overwrite_slice( self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm, datasource_id, datasource_type, datasource_name): """Save or overwrite a slice"""
slice_name = args.get('slice_name') action = args.get('action') form_data = get_form_data()[0] if action in ('saveas'): if 'slice_id' in form_data: form_data.pop('slice_id') # don't save old slice_id slc = models.Slice(owners=[g.user] if g.user else []) slc.params = json.dumps(form_data, indent=2, sort_keys=True) slc.datasource_name = datasource_name slc.viz_type = form_data['viz_type'] slc.datasource_type = datasource_type slc.datasource_id = datasource_id slc.slice_name = slice_name if action in ('saveas') and slice_add_perm: self.save_slice(slc) elif action == 'overwrite' and slice_overwrite_perm: self.overwrite_slice(slc) # Adding slice to a dashboard if requested dash = None if request.args.get('add_to_dash') == 'existing': dash = ( db.session.query(models.Dashboard) .filter_by(id=int(request.args.get('save_to_dashboard_id'))) .one() ) # check edit dashboard permissions dash_overwrite_perm = check_ownership(dash, raise_if_false=False) if not dash_overwrite_perm: return json_error_response( _('You don\'t have the rights to ') + _('alter this ') + _('dashboard'), status=400) flash( _('Chart [{}] was added to dashboard [{}]').format( slc.slice_name, dash.dashboard_title), 'info') elif request.args.get('add_to_dash') == 'new': # check create dashboard permissions dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView') if not dash_add_perm: return json_error_response( _('You don\'t have the rights to ') + _('create a ') + _('dashboard'), status=400) dash = models.Dashboard( dashboard_title=request.args.get('new_dashboard_name'), owners=[g.user] if g.user else []) flash( _('Dashboard [{}] just got created and chart [{}] was added ' 'to it').format( dash.dashboard_title, slc.slice_name), 'info') if dash and slc not in dash.slices: dash.slices.append(slc) db.session.commit() response = { 'can_add': slice_add_perm, 'can_download': slice_download_perm, 'can_overwrite': is_owner(slc, g.user), 'form_data': slc.form_data, 'slice': slc.data, 'dashboard_id': dash.id if dash else None, } if request.args.get('goto_dash') == 'true': response.update({'dashboard': dash.url}) return json_success(json.dumps(response))
<SYSTEM_TASK:> Endpoint to fetch the list of tables for given database <END_TASK> <USER_TASK:> Description: def tables(self, db_id, schema, substr, force_refresh='false'): """Endpoint to fetch the list of tables for given database"""
db_id = int(db_id) force_refresh = force_refresh.lower() == 'true' schema = utils.js_string_to_python(schema) substr = utils.js_string_to_python(substr) database = db.session.query(models.Database).filter_by(id=db_id).one() if schema: table_names = database.all_table_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) view_names = database.all_view_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) else: table_names = database.all_table_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) view_names = database.all_view_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) table_names = security_manager.accessible_by_user(database, table_names, schema) view_names = security_manager.accessible_by_user(database, view_names, schema) if substr: table_names = [tn for tn in table_names if substr in tn] view_names = [vn for vn in view_names if substr in vn] if not schema and database.default_schemas: def get_schema(tbl_or_view_name): return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None user_schema = g.user.email.split('@')[0] valid_schemas = set(database.default_schemas + [user_schema]) table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas] view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas] max_items = config.get('MAX_TABLE_NAMES') or len(table_names) total_items = len(table_names) + len(view_names) max_tables = len(table_names) max_views = len(view_names) if total_items and substr: max_tables = max_items * len(table_names) // total_items max_views = max_items * len(view_names) // total_items table_options = [{'value': tn, 'label': tn} for tn in table_names[:max_tables]] table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)} for vn in view_names[:max_views]]) payload = { 'tableLength': len(table_names) + len(view_names), 'options': table_options, } return json_success(json.dumps(payload))
<SYSTEM_TASK:> Add and save slices to a dashboard <END_TASK> <USER_TASK:> Description: def add_slices(self, dashboard_id): """Add and save slices to a dashboard"""
data = json.loads(request.form.get('data')) session = db.session() Slice = models.Slice # noqa dash = ( session.query(models.Dashboard).filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) new_slices = session.query(Slice).filter( Slice.id.in_(data['slice_ids'])) dash.slices += new_slices session.merge(dash) session.commit() session.close() return 'SLICES ADDED'
<SYSTEM_TASK:> List of slices a user created, or faved <END_TASK> <USER_TASK:> Description: def user_slices(self, user_id=None): """List of slices a user created, or faved"""
if not user_id: user_id = g.user.id Slice = models.Slice # noqa FavStar = models.FavStar # noqa qry = ( db.session.query(Slice, FavStar.dttm).join( models.FavStar, sqla.and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id, ), isouter=True).filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, FavStar.user_id == user_id, ), ) .order_by(Slice.slice_name.asc()) ) payload = [{ 'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'data': o.Slice.form_data, 'dttm': o.dttm if o.dttm else o.Slice.changed_on, 'viz_type': o.Slice.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
<SYSTEM_TASK:> List of slices created by this user <END_TASK> <USER_TASK:> Description: def created_slices(self, user_id=None): """List of slices created by this user"""
if not user_id: user_id = g.user.id Slice = models.Slice # noqa qry = ( db.session.query(Slice) .filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, ), ) .order_by(Slice.changed_on.desc()) ) payload = [{ 'id': o.id, 'title': o.slice_name, 'url': o.slice_url, 'dttm': o.changed_on, 'viz_type': o.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
<SYSTEM_TASK:> Warms up the cache for the slice or table. <END_TASK> <USER_TASK:> Description: def warm_up_cache(self): """Warms up the cache for the slice or table. Note for slices a force refresh occurs. """
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')

if not slice_id and not (table_name and db_name):
    return json_error_response(__(
        'Malformed request. slice_id or table_name and db_name '
        'arguments are expected'), status=400)
if slice_id:
    slices = session.query(models.Slice).filter_by(id=slice_id).all()
    if not slices:
        return json_error_response(__(
            'Chart %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
    SqlaTable = ConnectorRegistry.sources['table']
    table = (
        session.query(SqlaTable)
        .join(models.Database)
        .filter(
            # both conditions must hold; a Python `or` here would silently
            # drop the database filter
            models.Database.database_name == db_name,
            SqlaTable.table_name == table_name)
    ).first()
    if not table:
        return json_error_response(__(
            "Table %(t)s wasn't found in the database %(d)s",
            t=table_name, d=db_name), status=404)
    slices = session.query(models.Slice).filter_by(
        datasource_id=table.id,
        datasource_type=table.type).all()

for slc in slices:
    try:
        form_data = get_form_data(slc.id, use_slice_data=True)[0]
        obj = get_viz(
            datasource_type=slc.datasource.type,
            datasource_id=slc.datasource.id,
            form_data=form_data,
            force=True,
        )
        obj.get_json()
    except Exception as e:
        return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
    [{'slice_id': slc.id, 'slice_name': slc.slice_name}
     for slc in slices]))
<SYSTEM_TASK:> Syncs the druid datasource in main db with the provided config. <END_TASK> <USER_TASK:> Description: def sync_druid_source(self): """Syncs the druid datasource in main db with the provided config. The endpoint takes 3 arguments: user - user name to perform the operation as cluster - name of the druid cluster config - configuration stored in json that contains: name: druid datasource name dimensions: list of the dimensions, they become druid columns with the type STRING metrics_spec: list of metrics (dictionary). Metric consists of 2 attributes: type and name. Type can be count, etc. `count` type is stored internally as longSum other fields will be ignored. Example: { 'name': 'test_click', 'metrics_spec': [{'type': 'count', 'name': 'count'}], 'dimensions': ['affiliate_id', 'campaign', 'first_seen'] } """
payload = request.get_json(force=True) druid_config = payload['config'] user_name = payload['user'] cluster_name = payload['cluster'] user = security_manager.find_user(username=user_name) DruidDatasource = ConnectorRegistry.sources['druid'] DruidCluster = DruidDatasource.cluster_class if not user: err_msg = __("Can't find User '%(name)s', please ask your admin " 'to create one.', name=user_name) logging.error(err_msg) return json_error_response(err_msg) cluster = db.session.query(DruidCluster).filter_by( cluster_name=cluster_name).first() if not cluster: err_msg = __("Can't find DruidCluster with cluster_name = " "'%(name)s'", name=cluster_name) logging.error(err_msg) return json_error_response(err_msg) try: DruidDatasource.sync_to_db_from_config( druid_config, user, cluster) except Exception as e: logging.exception(utils.error_msg_from_exception(e)) return json_error_response(utils.error_msg_from_exception(e)) return Response(status=201)
<SYSTEM_TASK:>
Returns whether a key exists in the cache
<END_TASK>
<USER_TASK:>
Description:
def cache_key_exist(self, key):
    """Returns whether a key exists in the cache"""
key_exist = True if cache.get(key) else False status = 200 if key_exist else 404 return json_success(json.dumps({'key_exist': key_exist}), status=status)
<SYSTEM_TASK:> Serves a key off of the results backend <END_TASK> <USER_TASK:> Description: def results(self, key): """Serves a key off of the results backend"""
if not results_backend: return json_error_response("Results backend isn't configured") read_from_results_backend_start = now_as_float() blob = results_backend.get(key) stats_logger.timing( 'sqllab.query.results_backend_read', now_as_float() - read_from_results_backend_start, ) if not blob: return json_error_response( 'Data could not be retrieved. ' 'You may want to re-run the query.', status=410, ) query = db.session.query(Query).filter_by(results_key=key).one() rejected_tables = security_manager.rejected_datasources( query.sql, query.database, query.schema) if rejected_tables: return json_error_response(security_manager.get_table_access_error_msg( '{}'.format(rejected_tables)), status=403) payload = utils.zlib_decompress_to_string(blob) display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None) if display_limit: payload_json = json.loads(payload) payload_json['data'] = payload_json['data'][:display_limit] return json_success( json.dumps( payload_json, default=utils.json_iso_dttm_ser, ignore_nan=True, ), )
<SYSTEM_TASK:> Download the query results as csv. <END_TASK> <USER_TASK:> Description: def csv(self, client_id): """Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id)) query = ( db.session.query(Query) .filter_by(client_id=client_id) .one() ) rejected_tables = security_manager.rejected_datasources( query.sql, query.database, query.schema) if rejected_tables: flash( security_manager.get_table_access_error_msg('{}'.format(rejected_tables))) return redirect('/') blob = None if results_backend and query.results_key: logging.info( 'Fetching CSV from results backend ' '[{}]'.format(query.results_key)) blob = results_backend.get(query.results_key) if blob: logging.info('Decompressing') json_payload = utils.zlib_decompress_to_string(blob) obj = json.loads(json_payload) columns = [c['name'] for c in obj['columns']] df = pd.DataFrame.from_records(obj['data'], columns=columns) logging.info('Using pandas to convert to CSV') csv = df.to_csv(index=False, **config.get('CSV_EXPORT')) else: logging.info('Running a query to turn into CSV') sql = query.select_sql or query.executed_sql df = query.database.get_df(sql, query.schema) # TODO(bkyryliuk): add compression=gzip for big files. csv = df.to_csv(index=False, **config.get('CSV_EXPORT')) response = Response(csv, mimetype='text/csv') response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv' logging.info('Ready to return response') return response
<SYSTEM_TASK:> This method exposes an API endpoint to <END_TASK> <USER_TASK:> Description: def slice_query(self, slice_id): """ This method exposes an API endpoint to get the database query string for this slice """
viz_obj = get_viz(slice_id) security_manager.assert_datasource_permission(viz_obj.datasource) return self.get_query_string_response(viz_obj)
<SYSTEM_TASK:> This method exposes an API endpoint to <END_TASK> <USER_TASK:> Description: def schemas_access_for_csv_upload(self): """ This method exposes an API endpoint to get the schema access control settings for csv upload in this database """
if not request.args.get('db_id'): return json_error_response( 'No database is allowed for your csv upload') db_id = int(request.args.get('db_id')) database = ( db.session .query(models.Database) .filter_by(id=db_id) .one() ) try: schemas_allowed = database.get_schema_access_for_csv_upload() if (security_manager.database_access(database) or security_manager.all_datasource_access()): return self.json_response(schemas_allowed) # the list schemas_allowed should not be empty here # and the list schemas_allowed_processed returned from security_manager # should not be empty either, # otherwise the database should have been filtered out # in CsvToDatabaseForm schemas_allowed_processed = security_manager.schemas_accessible_by_user( database, schemas_allowed, False) return self.json_response(schemas_allowed_processed) except Exception: return json_error_response(( 'Failed to fetch schemas allowed for csv upload in this database! ' 'Please contact Superset Admin!\n\n' 'The error message returned was:\n{}').format(traceback.format_exc()))
<SYSTEM_TASK:>
A decorator for caching views and handling etag conditional requests.
<END_TASK>
<USER_TASK:>
Description:
def etag_cache(max_age, check_perms=bool):
    """
    A decorator for caching views and handling etag conditional requests.

    The decorator adds headers to GET requests that help with caching:
    Last-Modified, Expires and ETag. It also handles conditional requests,
    when the client sends an If-None-Match header.

    If a cache is set, the decorator will cache GET responses, bypassing the
    dataframe serialization. POST requests will still benefit from the
    dataframe cache for requests that produce the same SQL.

    """
def decorator(f): @wraps(f) def wrapper(*args, **kwargs): # check if the user can access the resource check_perms(*args, **kwargs) # for POST requests we can't set cache headers, use the response # cache nor use conditional requests; this will still use the # dataframe cache in `superset/viz.py`, though. if request.method == 'POST': return f(*args, **kwargs) response = None if cache: try: # build the cache key from the function arguments and any # other additional GET arguments (like `form_data`, eg). key_args = list(args) key_kwargs = kwargs.copy() key_kwargs.update(request.args) cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs) response = cache.get(cache_key) except Exception: # pylint: disable=broad-except if app.debug: raise logging.exception('Exception possibly due to cache backend.') # if no response was cached, compute it using the wrapped function if response is None: response = f(*args, **kwargs) # add headers for caching: Last Modified, Expires and ETag response.cache_control.public = True response.last_modified = datetime.utcnow() expiration = max_age if max_age != 0 else FAR_FUTURE response.expires = \ response.last_modified + timedelta(seconds=expiration) response.add_etag() # if we have a cache, store the response from the request if cache: try: cache.set(cache_key, response, timeout=max_age) except Exception: # pylint: disable=broad-except if app.debug: raise logging.exception('Exception possibly due to cache backend.') return response.make_conditional(request) if cache: wrapper.uncached = f wrapper.cache_timeout = max_age wrapper.make_cache_key = \ cache._memoize_make_cache_key( # pylint: disable=protected-access make_name=None, timeout=max_age) return wrapper return decorator
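A hedged sketch of wiring the decorator onto a view; the timeout and the permission callable below are illustrative choices rather than the project's configured defaults.

@etag_cache(max_age=600, check_perms=check_datasource_perms)
def explore_json(self, datasource_type=None, datasource_id=None):
    ...  # GET responses gain Last-Modified/Expires/ETag headers and may be cached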
<SYSTEM_TASK:> In the case that a label exceeds the max length supported by the engine, <END_TASK> <USER_TASK:> Description: def truncate_label(cls, label): """ In the case that a label exceeds the max length supported by the engine, this method is used to construct a deterministic and unique label based on an md5 hash. """
label = hashlib.md5(label.encode('utf-8')).hexdigest() # truncate hash if it exceeds max length if cls.max_column_name_length and len(label) > cls.max_column_name_length: label = label[:cls.max_column_name_length] return label
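A worked illustration of the hashing rule, assuming a backend whose max_column_name_length is 30:

import hashlib

label = 'sum__some_very_long_generated_metric_label'
hashed = hashlib.md5(label.encode('utf-8')).hexdigest()  # 32 hex characters
truncated = hashed[:30]                                   # deterministic per input label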
<SYSTEM_TASK:> Need to consider foreign tables for PostgreSQL <END_TASK> <USER_TASK:> Description: def get_table_names(cls, inspector, schema): """Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema) tables.extend(inspector.get_foreign_table_names(schema)) return sorted(tables)
<SYSTEM_TASK:> Postgres is unable to identify mixed case column names unless they <END_TASK> <USER_TASK:> Description: def get_timestamp_column(expression, column_name): """Postgres is unable to identify mixed case column names unless they are quoted."""
if expression: return expression elif column_name.lower() != column_name: return f'"{column_name}"' return column_name
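A quick illustration of the quoting rule (column names are made up):

get_timestamp_column(None, 'ds')                       # -> 'ds' (already lower case)
get_timestamp_column(None, 'createdAt')                # -> '"createdAt"' (mixed case gets quoted)
get_timestamp_column("DATE_TRUNC('day', ds)", 'ds')    # -> the expression is returned as-is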
<SYSTEM_TASK:> Returns a partition query <END_TASK> <USER_TASK:> Description: def _partition_query( cls, table_name, limit=0, order_by=None, filters=None): """Returns a partition query :param table_name: the name of the table to get partitions from :type table_name: str :param limit: the number of partitions to be returned :type limit: int :param order_by: a list of tuples of field name and a boolean that determines if that field should be sorted in descending order :type order_by: list of (str, bool) tuples :param filters: dict of field name and filter value combinations """
limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
    l = []  # noqa: E741
    for field, desc in order_by:
        # parenthesize the conditional so ascending fields keep their name
        # instead of collapsing to an empty string
        l.append(field + (' DESC' if desc else ''))
    order_by_clause = 'ORDER BY ' + ', '.join(l)
where_clause = ''
if filters:
    l = []  # noqa: E741
    for field, value in filters.items():
        l.append(f"{field} = '{value}'")
    where_clause = 'WHERE ' + ' AND '.join(l)
sql = textwrap.dedent(f"""\
    SELECT * FROM "{table_name}$partitions"

    {where_clause}
    {order_by_clause}
    {limit_clause}
""")
return sql
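For reference, a call like the one below (the engine spec class name, table and filter values are hypothetical) would render roughly this partitions query:

# 'SomeEngineSpec' stands in for whatever engine spec class defines this classmethod.
sql = SomeEngineSpec._partition_query(
    'logs', limit=1, order_by=[('ds', True)], filters={'ds': '2018-01-01'})
# SELECT * FROM "logs$partitions"
#
# WHERE ds = '2018-01-01'
# ORDER BY ds DESC
# LIMIT 1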
<SYSTEM_TASK:>
Loads time series data from a gzipped JSON file in the repo
<END_TASK>
<USER_TASK:>
Description:
def load_multiformat_time_series():
    """Loads time series data from a gzipped JSON file in the repo"""
data = get_example_data('multiformat_time_series.json.gz') pdf = pd.read_json(data) pdf.ds = pd.to_datetime(pdf.ds, unit='s') pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s') pdf.to_sql( 'multiformat_time_series', db.engine, if_exists='replace', chunksize=500, dtype={ 'ds': Date, 'ds2': DateTime, 'epoch_s': BigInteger, 'epoch_ms': BigInteger, 'string0': String(100), 'string1': String(100), 'string2': String(100), 'string3': String(100), }, index=False) print('Done loading table!') print('-' * 80) print('Creating table [multiformat_time_series] reference') obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first() if not obj: obj = TBL(table_name='multiformat_time_series') obj.main_dttm_col = 'ds' obj.database = utils.get_or_create_main_db() dttm_and_expr_dict = { 'ds': [None, None], 'ds2': [None, None], 'epoch_s': ['epoch_s', None], 'epoch_ms': ['epoch_ms', None], 'string2': ['%Y%m%d-%H%M%S', None], 'string1': ['%Y-%m-%d^%H:%M:%S', None], 'string0': ['%Y-%m-%d %H:%M:%S.%f', None], 'string3': ['%Y/%m/%d%H:%M:%S.%f', None], } for col in obj.columns: dttm_and_expr = dttm_and_expr_dict[col.column_name] col.python_date_format = dttm_and_expr[0] col.dbatabase_expr = dttm_and_expr[1] col.is_dttm = True db.session.merge(obj) db.session.commit() obj.fetch_metadata() tbl = obj print('Creating Heatmap charts') for i, col in enumerate(tbl.columns): slice_data = { 'metrics': ['count'], 'granularity_sqla': col.column_name, 'row_limit': config.get('ROW_LIMIT'), 'since': '2015', 'until': '2016', 'where': '', 'viz_type': 'cal_heatmap', 'domain_granularity': 'month', 'subdomain_granularity': 'day', } slc = Slice( slice_name=f'Calendar Heatmap multiformat {i}', viz_type='cal_heatmap', datasource_type='table', datasource_id=tbl.id, params=get_slice_json(slice_data), ) merge_slice(slc) misc_dash_slices.add('Calendar Heatmap multiformat 0')
<SYSTEM_TASK:> Imports dashboards from a stream to databases <END_TASK> <USER_TASK:> Description: def import_dashboards(session, data_stream, import_time=None): """Imports dashboards from a stream to databases"""
current_tt = int(time.time()) import_time = current_tt if import_time is None else import_time data = json.loads(data_stream.read(), object_hook=decode_dashboards) # TODO: import DRUID datasources for table in data['datasources']: type(table).import_obj(table, import_time=import_time) session.commit() for dashboard in data['dashboards']: Dashboard.import_obj( dashboard, import_time=import_time) session.commit()
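A minimal usage sketch; the file name is illustrative.

with open('exported_dashboards.json') as data_stream:
    import_dashboards(db.session, data_stream)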