schema | key | description | object |
---|---|---|---|
comet.json | domain | Output domain in the output area (the database name in Hive or the dataset in BigQuery); see the task sketch after this table | {"type": "string"} |
comet.json | table | Dataset name in the output area (the table name in Hive and BigQuery) | {"type": "string"} |
comet.json | partition | List of columns used for partitioning the output. | {"type": "array", "items": {"type": "string"}} |
comet.json | presql | List of SQL requests to execute before the main SQL request is run | {"type": "array", "items": {"type": "string"}} |
comet.json | postsql | List of SQL requests to execute after the main SQL request is run | {"type": "array", "items": {"type": "string"}} |
comet.json | expectations | Expectations to check after Load / Transform has succeeded | {} |
comet.json | acl | Map of rolename -> List[Users]. | {"type": "array", "items": {}} |
comet.json | comment | Output table description | {"type": "string"} |
comet.json | freshness | Configure freshness checks on the output table | {} |
comet.json | attributesDesc | Attributes comments and access policies | {"type": "array", "items": {}} |
comet.json | python | Python script URI to execute instead of the SQL request | {"type": "string"} |
comet.json | tags | Set of string to attach to the output table | {"type": "array", "items": {"type": "string"}} |
comet.json | schedule | Cron expression to use for this task | {"type": "string"} |
comet.json | dagRef | Reference to the DAG configuration to use for this domain/table | {"type": "string"} |
comet.json | path | Name of the lock | {"type": "string"} |
comet.json | timeout | Lock timeout | {"type": "integer"} |
comet.json | pollTime | How often to poll when waiting for the lock | {"type": "string"} |
comet.json | refreshTime | How often to refresh the lock | {"type": "string"} |
comet.json | path | Main SQL request to execute (do not forget to prefix table names with the database name to avoid conflicts) | {"type": "string"} |
comet.json | sink | Sink configuration for the output | {} |
comet.json | maxErrors | Maximum number of errors tolerated before the run is rejected | {"type": "string"} |
comet.json | database | Output database (refers to a project id in BigQuery). Defaults to the SL_DATABASE env var if set. | {"type": "string"} |
comet.json | active | Whether this element is active | {"type": "boolean"} |
comet.json | name | Column name | {"type": "string"} |
comet.json | comment | Column description | {"type": "string"} |
comet.json | accessPolicy | Access policy to apply to this column | {"type": "string"} |
comet.json | Domain | A schema in a JDBC database, a folder in HDFS, or a dataset in BigQuery. | {"type": "object", "properties": {"name": {"type": "string"}, "rename": {"type": "string"}, "metadata": {}, "tables": {"type": "array", "items": {}}, "comment": {"type": "string"}, "database": {"type": "string"}, "tags": {"type": "array", "items": {"type": "string"}}}} |
comet.json | name | Domain name. Make sure the name is usable as a folder name on the target storage. With HDFS or Cloud Storage, ingested files are stored in a sub-directory named after the domain; with BigQuery, files are loaded into tables under a dataset named after the domain. | {"type": "string"} |
comet.json | rename | If present, the attribute is renamed with this name | {"type": "string"} |
comet.json | tables | List of schemas, one per dataset in this domain. A domain usually contains multiple schemas, each defining how the contents of an input file should be parsed. See Schema for more details. | {"type": "array", "items": {}} |
comet.json | comment | Domain Description (free text) | {"type": "string"} |
comet.json | database | Output database (refers to a project id in BigQuery). Defaults to the SL_DATABASE env var if set. | {"type": "string"} |
comet.json | tags | Set of string to attach to this domain | {"type": "array", "items": {"type": "string"}} |
comet.json | name | Optional name. If not specified, the name of the file without the extension is used. | {"type": "string"} |
comet.json | default | Default task properties to apply to all tasks defined in tasks section and in included files | {} |
comet.json | items | List of transform tasks to execute | {} |
comet.json | name | Table name. Set to '*' to extract all tables | {"type": "string"} |
comet.json | columns | List of columns to extract. All columns by default. | {"type": "array", "items": {"type": "string"}} |
comet.json | catalog | Optional catalog name in the source database | {"type": "string"} |
comet.json | schema | Database schema where source tables are located | {"type": "string"} |
comet.json | template | Metadata to use for the generated YAML file. | {"type": "string"} |
comet.json | tables | List of tables to extract | {"type": "array", "items": {}} |
comet.json | tableTypes | One or many of the predefined table types | {"type": "array", "items": {"type": "string"}} |
comet.json | jdbcSchemas | List of database connections to use to extract the data | {"type": "array", "items": {}} |
comet.json | connectionRef | Connection name as defined in the connections section of the application.conf file | {"type": "string"} |
comet.json | connection | JDBC connection options: url, user, password ... | {} |
comet.json | RefInput | Input for ref object | {"type": "object", "properties": {"database": {"type": "string"}, "domain": {"type": "string"}, "table": {"type": "string"}}, "required": ["table"]} |
comet.json | database | Database pattern to match, none if any database | {"type": "string"} |
comet.json | domain | Domain pattern to match, none if any domain | {"type": "string"} |
comet.json | table | Table pattern to match | {"type": "string"} |
comet.json | RefOutput | Output for ref object | {"type": "object", "properties": {"database": {"type": "string"}, "domain": {"type": "string"}, "table": {"type": "string"}}, "required": ["table", "domain", "database"]} |
comet.json | database | Resolved output database name | {"type": "string"} |
comet.json | domain | Resolved output domain name | {"type": "string"} |
comet.json | table | Resolved output table name | {"type": "string"} |
comet.json | Ref | Describe how to resolve a reference in a transform task; see the sketch after this table | {"type": "object", "properties": {"input": {}, "output": {}}, "required": ["input", "output"]} |
comet.json | input | The input table to resolve | {} |
comet.json | output | The output table resolved with the domain and database | {} |
comet.json | env | Default environment to use. May also be set using the SL_ENV environment variable | {"type": "string"} |
comet.json | datasets | When using filesystem storage, default path to store the datasets | {"type": "string"} |
comet.json | dags | Default dags folder name | {"type": "string"} |
comet.json | metadata | Default metadata folder name. May also be set using the SL_METADATA environment variable | {"type": "string"} |
comet.json | validateOnLoad | Validate the YAML file when loading it. If set to true fails on any error | {"type": "boolean"} |
comet.json | archive | Should ingested files be archived after ingestion? | {"type": "boolean"} |
comet.json | sinkReplayToFile | Should invalid records be stored in a replay file? | {"type": "boolean"} |
comet.json | defaultFormat | Default write format in Spark. parquet is the default | {"type": "string"} |
comet.json | defaultRejectedWriteFormat | Default write format in Spark for rejected records. parquet is the default | {"type": "string"} |
comet.json | defaultAuditWriteFormat | Default write format in Spark for audit records. parquet is the default | {"type": "string"} |
comet.json | csvOutput | Output files in CSV format? Default is false | {"type": "boolean"} |
comet.json | csvOutputExt | CSV file extension when csvOutput is true. Default is .csv | {"type": "string"} |
comet.json | privacyOnly | Only generate privacy tasks. Reserved for internal use | {"type": "boolean"} |
comet.json | emptyIsNull | Should empty strings be considered as null values? | {"type": "boolean"} |
comet.json | validator | Default validator to use when none is specified in the schema. Valid values are 'spark' or 'native'. Default is 'spark' | {"type": "string"} |
comet.json | rowValidatorClass | Class name of the row validator to use | {"type": "string"} |
comet.json | treeValidatorClass | Class name of the tree validator to use | {"type": "string"} |
comet.json | loadStrategyClass | In what order should files targeting the same table be loaded? By time (default) or by name? | {"type": "string", "enum": ["ai.starlake.job.load.IngestionNameStrategy", "ai.starlake.job.load.IngestionTimeStrategy"]} |
comet.json | analyze | Should we analyze the result and generate Hive statistics? (useful for Spark / Databricks) | {"type": "boolean"} |
comet.json | hive | Should we create the table in Hive? (useful for Spark / Databricks) | {"type": "boolean"} |
comet.json | grouped | Should files targeting the same table be loaded in a single task or one by one? | {"type": "boolean"} |
comet.json | groupedMax | Maximum number of files to be stored in the same table in a single task | {"type": "integer"} |
comet.json | mergeForceDistinct | Should we force a distinct on the merge? | {"type": "boolean"} |
comet.json | mergeOptimizePartitionWrite | Should we optimize the partition write on the merge? | {"type": "boolean"} |
comet.json | area | pending, accepted ... areas configuration | {} |
comet.json | airflow | Airflow endpoint configuration when using Airflow Launcher | {} |
comet.json | hadoop | Hadoop configuration if applicable | {} |
comet.json | connections | Connection configurations | {} |
comet.json | jdbcEngines | JDBC engine configurations | {} |
comet.json | privacy | Privacy algorithms | {} |
comet.json | root | Root folder for the application. May also be set using the SL_ROOT environment variable | {"type": "string"} |
comet.json | internal | Internal configuration | {} |
comet.json | accessPolicies | Access policies configuration | {} |
comet.json | scheduling | Spark Job scheduling configuration | {} |
comet.json | udfs | Comma-separated list of UDFs to register in Spark jobs. May also be set using the SL_UDFS environment variable | {"type": "string"} |
comet.json | expectations | Expectations configuration | {} |
comet.json | sqlParameterPattern | Pattern to use to replace parameters in SQL queries in addition to the jinja syntax {{param}}. Default is ${param} | {"type": "string"} |
comet.json | rejectAllOnError | Should we reject all records when an error occurs? Default is false | {"type": "string"} |
comet.json | rejectMaxRecords | Maximum number of records to reject when an error occurs. Default is 100 | {"type": "integer"} |
comet.json | maxParCopy | Maximum number of parallel copy operations | {"type": "integer"} |
comet.json | dsvOptions | DSV ingestion extra options | {} |
comet.json | forceViewPattern | reserved | {"type": "string"} |
comet.json | forceDomainPattern | reserved | {"type": "string"} |
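
The rows above are easier to read alongside a concrete file. Below is a minimal sketch of a transform job in YAML, exercising the task keys documented in the table (`default`, `name`, `domain`, `table`, `partition`, `presql`, `postsql`, `comment`, `tags`, `schedule`). The file name and exact nesting are assumptions and may differ across Starlake versions.

```yaml
# Hypothetical transform file (name and nesting are assumptions)
default:                       # default task properties applied to all tasks below
  schedule: "0 4 * * *"        # cron expression to use for these tasks
tasks:
  - name: kpi_daily            # optional; defaults to the file name without extension
    domain: sales              # database name in Hive / dataset in BigQuery
    table: kpi_daily           # table name in Hive and BigQuery
    partition: ["order_date"]  # columns used to partition the output
    presql:                    # executed before the main SQL request
      - "DELETE FROM sales.kpi_daily WHERE order_date = CURRENT_DATE()"
    postsql:                   # executed after the main SQL request
      - "SELECT COUNT(*) FROM sales.kpi_daily"
    comment: "Daily sales KPIs"
    tags: ["kpi", "daily"]
```

As the main SQL request row notes, prefix table names with the database name in your SQL to avoid conflicts.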
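
And a sketch of a refs entry built from the `Ref`, `RefInput` and `RefOutput` objects above: only `table` is required on the input side, while `database`, `domain` and `table` are all required on the output side. The enclosing `refs:` key and the file it lives in are assumptions.

```yaml
# Hypothetical refs entry (file placement is an assumption)
refs:
  - input:
      table: "customer.*"      # table pattern to match (required)
      domain: "crm"            # omit to match any domain
    output:                    # fully resolved reference
      database: "my-gcp-project"
      domain: "crm"
      table: "customers"
```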