schema | key | description | object |
---|---|---|---|
datahub_ingestion_schema.json | models_pattern | Regex patterns for ML models to filter in ingestion. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | include_projections | Whether projections should be ingested. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | include_models | Whether models should be ingested. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | include_view_lineage | If the source supports it, include view lineage to the underlying storage location. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | include_projection_lineage | If the source supports it, include projection lineage to the underlying storage location. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | connect_uri | Redash base URL. | {"default": "http://localhost:5000", "type": "string"} |
datahub_ingestion_schema.json | api_key | Redash user API key. | {"default": "REDASH_API_KEY", "type": "string"} |
datahub_ingestion_schema.json | dashboard_patterns | Regex patterns for dashboards to filter in ingestion. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | chart_patterns | Regex patterns for charts to filter in ingestion. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | skip_draft | Only ingest published dashboards and charts. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | page_size | Limit on number of items to be queried at once. | {"default": 25, "type": "integer"} |
datahub_ingestion_schema.json | api_page_limit | Limit on number of pages queried for ingesting dashboards and charts API during pagination. | {"default": 9223372036854775807, "type": "integer"} |
datahub_ingestion_schema.json | parallelism | Parallelism to use while processing. | {"default": 1, "type": "integer"} |
datahub_ingestion_schema.json | parse_table_names_from_sql | See note below. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | sql_parser | Custom SQL parser. See note below for details. | {"default": "datahub.utilities.sql_parser.DefaultSQLParser", "type": "string"} |
datahub_ingestion_schema.json | env | Environment to use in namespace when constructing URNs. | {"default": "PROD", "type": "string"} |
datahub_ingestion_schema.json | dbt_config | Base configuration class for stateful ingestion for source configs to inherit from. | {"type": "object", "properties": {"incremental_lineage": {"default": true, "type": "boolean"}, "sql_parser_use_external_process": {"default": false, "type": "boolean"}, "env": {"default": "PROD", "type": "string"}, "platform_instance": {"type": "string"}, "stateful_ingestion": {"allOf": [{}]}, "target_platform": {"type": "string"}, "target_platform_instance": {"type": "string"}, "use_identifiers": {"default": false, "type": "boolean"}, "entities_enabled": {"default": {"models": "YES", "sources": "YES", "seeds": "YES", "snapshots": "YES", "test_definitions": "YES", "test_results": "YES"}, "allOf": [{}]}, "tag_prefix": {"default": "dbt:", "type": "string"}, "node_name_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "meta_mapping": {"default": {}, "type": "object"}, "column_meta_mapping": {"default": {}, "type": "object"}, "query_tag_mapping": {"default": {}, "type": "object"}, "write_semantics": {"default": "PATCH", "type": "string"}, "strip_user_ids_from_email": {"default": false, "type": "boolean"}, "enable_owner_extraction": {"default": true, "type": "boolean"}, "owner_extraction_pattern": {"type": "string"}, "include_env_in_assertion_guid": {"default": false, "type": "boolean"}, "convert_column_urns_to_lowercase": {"default": false, "type": "boolean"}, "enable_meta_mapping": {"default": true, "type": "boolean"}, "enable_query_tag_mapping": {"default": true, "type": "boolean"}, "manifest_path": {"type": "string"}, "catalog_path": {"type": "string"}, "sources_path": {"type": "string"}, "test_results_path": {"type": "string"}, "aws_connection": {"allOf": [{}]}, "git_info": {"allOf": [{}]}}, "required": ["target_platform", "manifest_path", "catalog_path"], "additionalProperties": false} |
datahub_ingestion_schema.json | incremental_lineage | When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | sql_parser_use_external_process | When enabled, the SQL parser will run in a separate, isolated process. This can affect processing time but can protect against the SQL parser's memory leak. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | env | Environment to use in namespace when constructing URNs. | {"default": "PROD", "type": "string"} |
datahub_ingestion_schema.json | platform_instance | The instance of the platform that all assets produced by this recipe belong to | {"type": "string"} |
datahub_ingestion_schema.json | stateful_ingestion | DBT Stateful Ingestion Config. | {"allOf": [{}]} |
datahub_ingestion_schema.json | target_platform | The platform that dbt is loading onto. (e.g. bigquery / redshift / postgres etc.) | {"type": "string"} |
datahub_ingestion_schema.json | target_platform_instance | The platform instance for the platform that dbt is operating on. Use this if you have multiple instances of the same platform (e.g. redshift) and need to distinguish between them. | {"type": "string"} |
datahub_ingestion_schema.json | use_identifiers | Use model identifier instead of model name if defined (if not, default to model name). | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | entities_enabled | Controls for enabling / disabling metadata emission for different dbt entities (models, test definitions, test results, etc.) | {"default": {"models": "YES", "sources": "YES", "seeds": "YES", "snapshots": "YES", "test_definitions": "YES", "test_results": "YES"}, "allOf": [{}]} |
datahub_ingestion_schema.json | tag_prefix | Prefix added to tags during ingestion. | {"default": "dbt:", "type": "string"} |
datahub_ingestion_schema.json | node_name_pattern | Regex patterns for dbt model names to filter in ingestion. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | meta_mapping | Mapping rules that will be executed against dbt meta properties. Refer to the section below on dbt meta automated mappings. | {"default": {}, "type": "object"} |
datahub_ingestion_schema.json | column_meta_mapping | Mapping rules that will be executed against dbt column meta properties. Refer to the section below on dbt meta automated mappings. | {"default": {}, "type": "object"} |
datahub_ingestion_schema.json | query_tag_mapping | Mapping rules that will be executed against dbt query_tag meta properties. Refer to the section below on dbt meta automated mappings. | {"default": {}, "type": "object"} |
datahub_ingestion_schema.json | write_semantics | Whether the new tags, terms, and owners to be added will override the existing ones added only by this source. Allowed values: "PATCH" or "OVERRIDE". | {"default": "PATCH", "type": "string"} |
datahub_ingestion_schema.json | strip_user_ids_from_email | Whether to strip the email domain from user IDs when adding owners via dbt meta actions. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | enable_owner_extraction | When enabled, ownership info will be extracted from the dbt meta | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | owner_extraction_pattern | Regex string to extract owner from the dbt node using the `(?P<name>...)` syntax of the [match object](https://docs.python.org/3/library/re.html#match-objects), where the group name must be `owner`. Examples: (1) `r"(?P<owner>(.*)): (\w+) (\w+)"` will extract `jdoe` as the owner from `"jdoe: John Doe"`; (2) `r"@(?P<owner>(.*))"` will extract `alice` as the owner from `"@alice"`. See the worked example after this table. | {"type": "string"} |
datahub_ingestion_schema.json | include_env_in_assertion_guid | Prior to version 0.9.4.2, the assertion GUIDs did not include the environment. If you're running multiple dbt ingestions that are distinguished only by env, you should set this flag to True. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | convert_column_urns_to_lowercase | When enabled, converts column URNs to lowercase to ensure cross-platform compatibility. If `target_platform` is Snowflake, the default is True. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | enable_meta_mapping | When enabled, applies the mappings that are defined through the meta_mapping directives. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | enable_query_tag_mapping | When enabled, applies the mappings that are defined through the `query_tag_mapping` directives. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | manifest_path | Path to dbt manifest JSON. See https://docs.getdbt.com/reference/artifacts/manifest-json Note this can be a local file or a URI. | {"type": "string"} |
datahub_ingestion_schema.json | catalog_path | Path to dbt catalog JSON. See https://docs.getdbt.com/reference/artifacts/catalog-json Note this can be a local file or a URI. | {"type": "string"} |
datahub_ingestion_schema.json | sources_path | Path to dbt sources JSON. See https://docs.getdbt.com/reference/artifacts/sources-json. If not specified, last-modified fields will not be populated. Note this can be a local file or a URI. | {"type": "string"} |
datahub_ingestion_schema.json | test_results_path | Path to output of dbt test run as run_results file in JSON format. See https://docs.getdbt.com/reference/artifacts/run-results-json. If not specified, test execution results will not be populated in DataHub. | {"type": "string"} |
datahub_ingestion_schema.json | aws_connection | When fetching manifest files from s3, configuration for aws connection details | {"allOf": [{}]} |
datahub_ingestion_schema.json | git_info | Reference to your git location to enable easy navigation from DataHub to your dbt files. | {"allOf": [{}]} |
datahub_ingestion_schema.json | presto-on-hive_config | Base configuration class for stateful ingestion for source configs to inherit from. | {"type": "object", "properties": {"env": {"default": "PROD", "type": "string"}, "platform_instance": {"type": "string"}, "stateful_ingestion": {}, "options": {"type": "object"}, "schema_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "table_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "view_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "profile_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "domain": {"default": {}, "type": "object", "additionalProperties": {}}, "include_views": {"default": true, "type": "boolean"}, "include_tables": {"default": true, "type": "boolean"}, "include_table_location_lineage": {"default": true, "type": "boolean"}, "profiling": {"default": {"enabled": false, "operation_config": {"lower_freq_profile_enabled": false, "profile_day_of_week": null, "profile_date_of_month": null}, "limit": null, "offset": null, "report_dropped_profiles": false, "turn_off_expensive_profiling_metrics": false, "profile_table_level_only": false, "include_field_null_count": true, "include_field_distinct_count": true, "include_field_min_value": true, "include_field_max_value": true, "include_field_mean_value": true, "include_field_median_value": true, "include_field_stddev_value": true, "include_field_quantiles": false, "include_field_distinct_value_frequencies": false, "include_field_histogram": false, "include_field_sample_values": true, "field_sample_values_limit": 20, "max_number_of_fields_to_profile": null, "profile_if_updated_since_days": null, "profile_table_size_limit": 5, "profile_table_row_limit": 5000000, "profile_table_row_count_estimate_only": false, "max_workers": 10, "query_combiner_enabled": true, "catch_exceptions": true, "partition_profiling_enabled": true, "partition_datetime": null}, "allOf": [{}]}, "username": {"type": "string"}, "password": {"type": "string", "writeOnly": true, "format": "password"}, "host_port": {"default": "localhost:3306", "type": "string"}, "database": {"type": "string"}, "database_alias": {"type": "string"}, "sqlalchemy_uri": {"type": "string"}, "views_where_clause_suffix": {"default": "", "type": "string"}, "tables_where_clause_suffix": {"default": "", "type": "string"}, "schemas_where_clause_suffix": {"default": "", "type": "string"}, "ingestion_job_id": {"default": "", "type": "string"}, "database_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "metastore_db_name": {"type": "string"}, "mode": {"default": "presto-on-hive", "allOf": [{}]}, "use_catalog_subtype": {"default": true, "type": "boolean"}, "use_dataset_pascalcase_subtype": {"default": false, "type": "boolean"}, "include_catalog_name_in_ids": {"default": false, "type": "boolean"}, "enable_properties_merge": {"default": false, "type": "boolean"}, "simplify_nested_field_paths": {"default": false, "type": "boolean"}}, "additionalProperties": false} |
datahub_ingestion_schema.json | env | The environment that all assets produced by this connector belong to | {"default": "PROD", "type": "string"} |
datahub_ingestion_schema.json | platform_instance | The instance of the platform that all assets produced by this recipe belong to | {"type": "string"} |
datahub_ingestion_schema.json | options | Any options specified here will be passed to [SQLAlchemy.create_engine](https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine) as kwargs. | {"type": "object"} |
datahub_ingestion_schema.json | schema_pattern | Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics' | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | table_pattern | Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | view_pattern | Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | profile_pattern | Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the `table_pattern` will be considered. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | domain | Attach domains to databases, schemas or tables during ingestion using regex patterns. Domain key can be a guid like *urn:li:domain:ec428203-ce86-4db3-985d-5a8ee6df32ba* or a string like "Marketing". If you provide strings, then DataHub will attempt to resolve this name to a guid, and will error out if this fails. There can be multiple domain keys specified. | {"default": {}, "type": "object", "additionalProperties": {}} |
datahub_ingestion_schema.json | include_views | Whether views should be ingested. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | include_tables | Whether tables should be ingested. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | include_table_location_lineage | If the source supports it, include table lineage to the underlying storage location. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | username | username | {"type": "string"} |
datahub_ingestion_schema.json | password | password | {"type": "string", "writeOnly": true, "format": "password"} |
datahub_ingestion_schema.json | host_port | Host URL and port to connect to. Example: localhost:3306 | {"default": "localhost:3306", "type": "string"} |
datahub_ingestion_schema.json | database | database (catalog) | {"type": "string"} |
datahub_ingestion_schema.json | database_alias | [Deprecated] Alias to apply to database when ingesting. | {"type": "string"} |
datahub_ingestion_schema.json | sqlalchemy_uri | URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters. | {"type": "string"} |
datahub_ingestion_schema.json | views_where_clause_suffix | Where clause to specify what Presto views should be ingested. | {"default": "", "type": "string"} |
datahub_ingestion_schema.json | tables_where_clause_suffix | Where clause to specify what Hive tables should be ingested. | {"default": "", "type": "string"} |
datahub_ingestion_schema.json | schemas_where_clause_suffix | Where clause to specify what Hive schemas should be ingested. | {"default": "", "type": "string"} |
datahub_ingestion_schema.json | database_pattern | Regex patterns for Hive/Presto databases to filter in ingestion. Specify regex to only match the database name, e.g. to match all tables in database analytics, use the regex 'analytics'. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | metastore_db_name | Name of the Hive metastore's database (usually: metastore). For backward compatibility, if this field is not provided, the database field will be used. If both the 'database' and 'metastore_db_name' fields are set, the 'database' field will be used to filter the Hive/Presto/Trino database. | {"type": "string"} |
datahub_ingestion_schema.json | mode | The ingested data will be stored under this platform. Valid options: ['hive', 'presto', 'presto-on-hive', 'trino'] | {"default": "presto-on-hive", "allOf": [{}]} |
datahub_ingestion_schema.json | use_catalog_subtype | Whether the container subtype name should be 'Catalog' (True) or 'Database' (False). | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | use_dataset_pascalcase_subtype | Whether the dataset subtype name should be PascalCase, i.e. 'Table'/'View' (True) rather than lowercase (False). | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | include_catalog_name_in_ids | Add the Presto catalog name (e.g. hive) to the generated dataset urns. `urn:li:dataset:(urn:li:dataPlatform:hive,hive.user.logging_events,PROD)` versus `urn:li:dataset:(urn:li:dataPlatform:hive,user.logging_events,PROD)` | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | enable_properties_merge | By default, the connector overwrites properties every time. Set this to True to enable merging of properties with what exists on the server. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | simplify_nested_field_paths | Simplify v2 field paths to v1 by default. If the schema has Union or Array types, still falls back to v2 | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | powerbi_config | Base configuration class for stateful ingestion for source configs to inherit from. | {"type": "object", "properties": {"env": {"default": "PROD", "type": "string"}, "platform_instance": {"type": "string"}, "stateful_ingestion": {"allOf": [{}]}, "tenant_id": {"type": "string"}, "workspace_id_pattern": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "server_to_platform_instance": {"default": {}, "type": "object", "additionalProperties": {}}, "client_id": {"type": "string"}, "client_secret": {"type": "string"}, "scan_timeout": {"default": 60, "type": "integer"}, "scan_batch_size": {"default": 1, "exclusiveMinimum": 0, "maximum": 100, "type": "integer"}, "workspace_id_as_urn_part": {"default": false, "type": "boolean"}, "extract_ownership": {"default": false, "type": "boolean"}, "extract_reports": {"default": true, "type": "boolean"}, "ownership": {"default": {"create_corp_user": true, "use_powerbi_email": false, "remove_email_suffix": false, "dataset_configured_by_as_owner": false, "owner_criteria": null}, "allOf": [{}]}, "modified_since": {"type": "string"}, "extract_dashboards": {"default": true, "type": "boolean"}, "extract_dataset_schema": {"default": false, "type": "boolean"}, "extract_lineage": {"default": true, "type": "boolean"}, "extract_endorsements_to_tags": {"default": false, "type": "boolean"}, "filter_dataset_endorsements": {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]}, "extract_workspaces_to_containers": {"default": true, "type": "boolean"}, "extract_datasets_to_containers": {"default": false, "type": "boolean"}, "native_query_parsing": {"default": true, "type": "boolean"}, "convert_urns_to_lowercase": {"default": false, "type": "boolean"}, "convert_lineage_urns_to_lowercase": {"default": true, "type": "boolean"}, "admin_apis_only": {"default": false, "type": "boolean"}, "extract_independent_datasets": {"default": false, "type": "boolean"}}, "required": ["tenant_id", "client_id", "client_secret"], "additionalProperties": false} |
datahub_ingestion_schema.json | env | The environment that all assets produced by this connector belong to | {"default": "PROD", "type": "string"} |
datahub_ingestion_schema.json | platform_instance | The instance of the platform that all assets produced by this recipe belong to | {"type": "string"} |
datahub_ingestion_schema.json | stateful_ingestion | PowerBI Stateful Ingestion Config. | {"allOf": [{}]} |
datahub_ingestion_schema.json | tenant_id | PowerBI tenant identifier | {"type": "string"} |
datahub_ingestion_schema.json | workspace_id_pattern | Regex patterns to filter PowerBI workspaces in ingestion | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | server_to_platform_instance | A mapping from a PowerBI datasource's server (i.e. host[:port]) to a data platform instance. The :port is optional and only needed if your datasource server runs on a non-standard port. For Google BigQuery, the datasource's server is the BigQuery project name. | {"default": {}, "type": "object", "additionalProperties": {}} |
datahub_ingestion_schema.json | client_id | Azure app client identifier | {"type": "string"} |
datahub_ingestion_schema.json | client_secret | Azure app client secret | {"type": "string"} |
datahub_ingestion_schema.json | scan_timeout | Timeout for PowerBI metadata scanning. | {"default": 60, "type": "integer"} |
datahub_ingestion_schema.json | scan_batch_size | Batch size for sending workspace IDs to PowerBI; 100 is the limit. | {"default": 1, "exclusiveMinimum": 0, "maximum": 100, "type": "integer"} |
datahub_ingestion_schema.json | workspace_id_as_urn_part | Highly recommended to change this to True, as multiple workspaces can share the same name. To maintain backward compatibility, this is set to False, which uses the workspace name. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | extract_ownership | Whether ownership should be ingested. Admin API access is required if this setting is enabled. Note that enabling this may overwrite owners that you've added inside DataHub's web application. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | extract_reports | Whether reports should be ingested | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | ownership | Configure how ownership is ingested. | {"default": {"create_corp_user": true, "use_powerbi_email": false, "remove_email_suffix": false, "dataset_configured_by_as_owner": false, "owner_criteria": null}, "allOf": [{}]} |
datahub_ingestion_schema.json | modified_since | Get only recently modified workspaces, based on a modified_since datetime such as '2023-02-10T00:00:00.0000000Z'; excludePersonalWorkspaces and excludeInActiveWorkspaces limit this to the last 30 days. | {"type": "string"} |
datahub_ingestion_schema.json | extract_dashboards | Whether to ingest PBI Dashboards and Tiles as DataHub Dashboards and Charts. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | extract_dataset_schema | Whether to ingest PBI Dataset Table columns and measures | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | extract_lineage | Whether lineage should be ingested. Admin API access is required if this setting is enabled. | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | extract_endorsements_to_tags | Whether to extract endorsements to tags; note that this may overwrite existing tags. Admin API access is required if this setting is enabled. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | filter_dataset_endorsements | Filter and ingest datasets that carry a 'Certified' or 'Promoted' endorsement. If both are allowed, datasets with either endorsement will be ingested. The default setting allows all datasets to be ingested. | {"default": {"allow": [".*"], "deny": [], "ignoreCase": true}, "allOf": [{}]} |
datahub_ingestion_schema.json | extract_workspaces_to_containers | Extract workspaces to DataHub containers | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | extract_datasets_to_containers | PBI tables will be grouped under a DataHub container; each container reflects a PBI dataset. | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | native_query_parsing | Whether PowerBI native query should be parsed to extract lineage | {"default": true, "type": "boolean"} |
datahub_ingestion_schema.json | convert_urns_to_lowercase | Whether to convert the PowerBI assets urns to lowercase | {"default": false, "type": "boolean"} |
datahub_ingestion_schema.json | convert_lineage_urns_to_lowercase | Whether to convert the urns of ingested lineage dataset to lowercase | {"default": true, "type": "boolean"} |
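
To make these entries concrete, here is a minimal sketch of how a config fragment can be checked against the schema rows above. It is not part of the DataHub CLI; it uses the generic `jsonschema` library against a hypothetical extract of `datahub_ingestion_schema.json` (only the three required `dbt_config` keys), with placeholder paths and platform, and then exercises the first documented `owner_extraction_pattern` example.

```python
import re
from jsonschema import validate  # pip install jsonschema

# Hypothetical extract of datahub_ingestion_schema.json: just the three
# required properties of dbt_config listed in the table above.
dbt_config_schema = {
    "type": "object",
    "properties": {
        "target_platform": {"type": "string"},
        "manifest_path": {"type": "string"},
        "catalog_path": {"type": "string"},
    },
    "required": ["target_platform", "manifest_path", "catalog_path"],
}

# Placeholder values; a real recipe would point at actual dbt artifacts.
candidate = {
    "target_platform": "postgres",
    "manifest_path": "./target/manifest.json",
    "catalog_path": "./target/catalog.json",
}
validate(instance=candidate, schema=dbt_config_schema)  # raises ValidationError on a missing key

# owner_extraction_pattern: the named group must be `owner`, per the
# schema description; this is its first documented example.
pattern = re.compile(r"(?P<owner>(.*)): (\w+) (\w+)")
match = pattern.match("jdoe: John Doe")
print(match.group("owner"))  # -> jdoe
```

Dropping `catalog_path` from `candidate` makes `validate` raise, which mirrors how the schema's `required` list would reject an incomplete dbt recipe.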