code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def move(self, source_path, destination_path):
"""
Rename/move an object from one GCS location to another.
"""
self.copy(source_path, destination_path)
self.remove(source_path) | Rename/move an object from one GCS location to another. | move | python | spotify/luigi | luigi/contrib/gcs.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/gcs.py | Apache-2.0 |
def listdir(self, path):
"""
Get an iterable with GCS folder contents.
Iterable contains paths relative to queried path.
"""
bucket, obj = self._path_to_bucket_and_key(path)
obj_prefix = self._add_path_delimiter(obj)
if self._is_root(obj_prefix):
obj_prefix = ''
obj_prefix_len = len(obj_prefix)
for it in self._list_iter(bucket, obj_prefix):
yield self._add_path_delimiter(path) + it['name'][obj_prefix_len:] | Get an iterable with GCS folder contents.
Iterable contains paths relative to queried path. | listdir | python | spotify/luigi | luigi/contrib/gcs.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/gcs.py | Apache-2.0 |
def list_wildcard(self, wildcard_path):
"""Yields full object URIs matching the given wildcard.
Currently only the '*' wildcard after the last path delimiter is supported.
(If we need "full" wildcard functionality we should bring in gsutil dependency with its
https://github.com/GoogleCloudPlatform/gsutil/blob/master/gslib/wildcard_iterator.py...)
"""
path, wildcard_obj = wildcard_path.rsplit('/', 1)
assert '*' not in path, "The '*' wildcard character is only supported after the last '/'"
wildcard_parts = wildcard_obj.split('*')
assert len(wildcard_parts) == 2, "Only one '*' wildcard is supported"
for it in self.listdir(path):
if it.startswith(path + '/' + wildcard_parts[0]) and it.endswith(wildcard_parts[1]) and \
len(it) >= len(path + '/' + wildcard_parts[0]) + len(wildcard_parts[1]):
yield it | Yields full object URIs matching the given wildcard.
Currently only the '*' wildcard after the last path delimiter is supported.
(If we need "full" wildcard functionality we should bring in gsutil dependency with its
https://github.com/GoogleCloudPlatform/gsutil/blob/master/gslib/wildcard_iterator.py...) | list_wildcard | python | spotify/luigi | luigi/contrib/gcs.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/gcs.py | Apache-2.0 |
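A minimal usage sketch of the wildcard listing above; the bucket, prefix, and the assumption that application-default Google credentials are available are all hypothetical.

# Hypothetical example: enumerate Avro shards under a dated prefix.
from luigi.contrib.gcs import GCSClient

client = GCSClient()  # assumes application-default Google credentials and the Google API client libraries
for uri in client.list_wildcard('gs://my-bucket/logs/2024-01-01/part-*.avro'):
    print(uri)  # yields full gs:// URIs, one per matching object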
def download(self, path, chunksize=None, chunk_callback=lambda _: False):
"""Downloads the object contents to local file system.
Optionally stops after the first chunk for which chunk_callback returns True.
"""
chunksize = chunksize or self.chunksize
bucket, obj = self._path_to_bucket_and_key(path)
with tempfile.NamedTemporaryFile(delete=False) as fp:
# We can't return the tempfile reference because of a bug in python: http://bugs.python.org/issue18879
return_fp = _DeleteOnCloseFile(fp.name, 'r')
# Special case empty files because chunk-based downloading doesn't work.
result = self.client.objects().get(bucket=bucket, object=obj).execute()
if int(result['size']) == 0:
return return_fp
request = self.client.objects().get_media(bucket=bucket, object=obj)
downloader = http.MediaIoBaseDownload(fp, request, chunksize=chunksize)
done = False
while not done:
_, done = downloader.next_chunk()
if chunk_callback(fp):
done = True
return return_fp | Downloads the object contents to local file system.
Optionally stops after the first chunk for which chunk_callback returns True. | download | python | spotify/luigi | luigi/contrib/gcs.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/gcs.py | Apache-2.0 |
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
"""
Initializes a GCSFlagTarget.
:param path: the directory where the files are stored.
:type path: str
:param client: an optional GCSClient instance; a new GCSClient is created if None.
:type client: GCSClient
:param flag: name of the flag file that marks the directory as complete. Defaults to '_SUCCESS'.
:type flag: str
"""
if format is None:
format = luigi.format.get_default_format()
if path[-1] != "/":
raise ValueError("GCSFlagTarget requires the path to be to a "
"directory. It must end with a slash ( / ).")
super(GCSFlagTarget, self).__init__(path, format=format, client=client)
self.format = format
self.fs = client or GCSClient()
self.flag = flag | Initializes a GCSFlagTarget.
:param path: the directory where the files are stored.
:type path: str
:param client: an optional GCSClient instance; a new GCSClient is created if None.
:type client: GCSClient
:param flag: name of the flag file that marks the directory as complete. Defaults to '_SUCCESS'.
:type flag: str | __init__ | python | spotify/luigi | luigi/contrib/gcs.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/gcs.py | Apache-2.0 |
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u' | Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found. | _parse_qstat_state | python | spotify/luigi | luigi/contrib/sge.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/sge.py | Apache-2.0 |
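To make the header-skipping and column parsing concrete, here is an illustrative check against a fabricated `qstat` listing; the job id and fields are made up.

# Illustrative only: fabricated qstat output with a single running job.
from luigi.contrib.sge import _parse_qstat_state

sample = (
    "job-ID  prior    name     user   state submit/start at\n"
    "---------------------------------------------------------\n"
    "  4321  0.55500  my_task  alice  r     01/01/2024 12:00:00\n"
)
print(_parse_qstat_state(sample, 4321))  # -> 'r'
print(_parse_qstat_state('', 4321))      # -> 'u' (unknown: empty output)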
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2]) | Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted" | _parse_qsub_job_id | python | spotify/luigi | luigi/contrib/sge.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/sge.py | Apache-2.0 |
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu) | Submit shell command to SGE queue via `qsub` | _build_qsub_command | python | spotify/luigi | luigi/contrib/sge.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/sge.py | Apache-2.0 |
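The following sketch shows the shell line the template produces for hypothetical job parameters (paths, parallel environment, and CPU count are made up).

# Illustrative only: hypothetical output/error paths, parallel environment, and CPU count.
from luigi.contrib.sge import _build_qsub_command

print(_build_qsub_command(
    cmd='python sge_runner.py /tmp/run-dir',
    job_name='my_task',
    outfile='/tmp/run-dir/job.out',
    errfile='/tmp/run-dir/job.err',
    pe='orte',
    n_cpu=4))
# echo python sge_runner.py /tmp/run-dir | qsub -o ":/tmp/run-dir/job.out" -e ":/tmp/run-dir/job.err" -V -r y -pe orte 4 -N my_task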
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass | Override this method, rather than ``run()``, for your actual work. | work | python | spotify/luigi | luigi/contrib/sge.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/sge.py | Apache-2.0 |
def _dump(self, out_dir=''):
"""Dump instance to file."""
with self.no_unpicklable_properties():
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
with open(self.job_file, "w") as f:
f.write(d)
else:
with open(self.job_file, "wb") as f:
pickle.dump(self, f) | Dump instance to file. | _dump | python | spotify/luigi | luigi/contrib/sge.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/sge.py | Apache-2.0 |
def run_hive(args, check_return_code=True):
"""
Runs `hive` from the command line, passing in the given args, and
returns stdout.
With the Apache release of Hive, some of the table existence checks
(which are done using DESCRIBE) do not exit with a return code of 0,
so we need an option to ignore the return code and just return stdout for parsing.
"""
cmd = load_hive_cmd() + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
return stdout.decode('utf-8') | Runs `hive` from the command line, passing in the given args, and
returns stdout.
With the Apache release of Hive, some of the table existence checks
(which are done using DESCRIBE) do not exit with a return code of 0,
so we need an option to ignore the return code and just return stdout for parsing. | run_hive | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def run_hive_cmd(hivecmd, check_return_code=True):
"""
Runs the given hive query and returns stdout.
"""
return run_hive(['-e', hivecmd], check_return_code) | Runs the given hive query and returns stdout. | run_hive_cmd | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def run_hive_script(script):
"""
Runs the contents of the given script in hive and returns stdout.
"""
if not os.path.isfile(script):
raise RuntimeError("Hive script: {0} does not exist.".format(script))
return run_hive(['-f', script]) | Runs the contents of the given script in hive and returns stdout. | run_hive_script | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def _validate_partition(partition):
"""
If partition is set and its size is more than one and not ordered,
then we're unable to restore its path in the warehouse
"""
if (
partition
and len(partition) > 1
and not _is_ordered_dict(partition)
):
raise ValueError('Unable to restore table/partition location') | If partition is set and its size is more than one and not ordered,
then we're unable to restore its path in the warehouse | _validate_partition | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def table_location(self, table, database='default', partition=None):
"""
Returns location of db.table (or db.table.partition). partition is a dict of partition key to
value.
"""
pass | Returns location of db.table (or db.table.partition). partition is a dict of partition key to
value. | table_location | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def table_schema(self, table, database='default'):
"""
Returns list of [(name, type)] for each column in database.table.
"""
pass | Returns list of [(name, type)] for each column in database.table. | table_schema | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def table_exists(self, table, database='default', partition=None):
"""
Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to
value.
"""
pass | Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to
value. | table_exists | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def partition_spec(self, partition):
""" Turn a dict into a string partition specification """
pass | Turn a dict into a string partition specification | partition_spec | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def partition_spec(self, partition):
"""
Turns a dict into a Hive partition specification string.
"""
return ','.join(["`{0}`='{1}'".format(k, v) for (k, v) in
sorted(partition.items(), key=operator.itemgetter(0))]) | Turns a dict into a Hive partition specification string. | partition_spec | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
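For illustration, here is what the specification looks like for a hypothetical two-key partition when using the default HiveCommandClient; note that the keys are sorted.

# Illustrative only: hypothetical partition columns.
from luigi.contrib.hive import HiveCommandClient

client = HiveCommandClient()
print(client.partition_spec({'year': '2024', 'month': '01'}))
# -> `month`='01',`year`='2024'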
def table_exists(self, table, database='default', partition=None):
"""
The table/partition is considered to exist if the corresponding path in HDFS exists
and contains at least one file, excluding those that match the pattern set in `ignored_file_masks`
"""
path = self.table_location(table, database, partition)
if self.hdfs_client.exists(path):
ignored_files = get_ignored_file_masks()
if ignored_files is None:
return True
filenames = self.hdfs_client.listdir(path)
pattern = re.compile(ignored_files)
for filename in filenames:
if not pattern.match(filename):
return True
return False | The table/partition is considered to exist if the corresponding path in HDFS exists
and contains at least one file, excluding those that match the pattern set in `ignored_file_masks` | table_exists | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def query(self):
""" Text of query to run in hive """
raise RuntimeError("Must implement query!") | Text of query to run in hive | query | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def hiverc(self):
"""
Location of an rc file to run before the query
If the hiverc-location key is specified in luigi.cfg, this will default to the value there;
otherwise returns None.
Returning a list of rc files will load all of them in order.
"""
return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None) | Location of an rc file to run before the query
If the hiverc-location key is specified in luigi.cfg, this will default to the value there;
otherwise returns None.
Returning a list of rc files will load all of them in order. | hiverc | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def hivevars(self):
"""
Returns a dict of key=value settings to be passed along
to the hive command line via --hivevar.
This option can be used as a separate namespace for script-local variables.
See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution
"""
return {} | Returns a dict of key=value settings to be passed along
to the hive command line via --hivevar.
This option can be used as a separate namespace for script-local variables.
See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+VariableSubstitution | hivevars | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def hiveconfs(self):
"""
Returns a dict of key=value settings to be passed along
to the hive command line via --hiveconf. By default, sets
mapred.job.name to task_id and if not None, sets:
* mapred.reduce.tasks (n_reduce_tasks)
* mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
* hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
* hive.exec.reducers.max (reducers_max)
"""
jcs = {}
jcs['mapred.job.name'] = "'" + self.task_id + "'"
if self.n_reduce_tasks is not None:
jcs['mapred.reduce.tasks'] = self.n_reduce_tasks
if self.pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs['mapred.fairscheduler.pool'] = self.pool
elif scheduler_type == 'capacity':
jcs['mapred.job.queue.name'] = self.pool
if self.bytes_per_reducer is not None:
jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer
if self.reducers_max is not None:
jcs['hive.exec.reducers.max'] = self.reducers_max
return jcs | Returns a dict of key=value settings to be passed along
to the hive command line via --hiveconf. By default, sets
mapred.job.name to task_id and if not None, sets:
* mapred.reduce.tasks (n_reduce_tasks)
* mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
* hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
* hive.exec.reducers.max (reducers_max) | hiveconfs | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
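As a sketch, a HiveQueryTask subclass can set the attributes read by hiveconfs() above as plain class attributes; the table, query, and parameter values here are hypothetical.

# A minimal sketch of a HiveQueryTask subclass; the table and query are hypothetical.
import luigi
from luigi.contrib.hive import HiveQueryTask

class DailyUserCounts(HiveQueryTask):
    day = luigi.DateParameter()
    n_reduce_tasks = 16   # ends up as mapred.reduce.tasks via hiveconfs()
    pool = 'etl'          # routed to the fair or capacity scheduler setting

    def query(self):
        d = self.day.isoformat()
        return ("INSERT OVERWRITE TABLE daily_user_counts PARTITION (day='{d}') "
                "SELECT user_id, count(*) FROM events WHERE day='{d}' GROUP BY user_id;"
                ).format(d=d)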
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass | Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail | prepare_outputs | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):
"""
@param table: Table name
@type table: str
@param partition: partition specification in the form of
dict of {"partition_column_1": "partition_value_1", "partition_column_2": "partition_value_2", ... }
If `partition` is `None` or `{}` then target is Hive nonpartitioned table
@param database: Database name
@param fail_missing_table: if False, errors raised due to table nonexistence are ignored
@param client: `HiveCommandClient` instance. Defaults to `get_default_client()` if `client is None`
"""
self.database = database
self.table = table
self.partition = partition
self.client = client or get_default_client()
self.fail_missing_table = fail_missing_table | @param table: Table name
@type table: str
@param partition: partition specification in the form of
dict of {"partition_column_1": "partition_value_1", "partition_column_2": "partition_value_2", ... }
If `partition` is `None` or `{}` then target is Hive nonpartitioned table
@param database: Database name
@param fail_missing_table: if False, errors raised due to table nonexistence are ignored
@param client: `HiveCommandClient` instance. Defaults to `get_default_client()` if `client is None` | __init__ | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def exists(self):
"""
returns `True` if the partition/table exists
"""
try:
logger.debug(
"Checking Hive table '{d}.{t}' for partition {p}".format(
d=self.database,
t=self.table,
p=str(self.partition or {})
)
)
return self.client.table_exists(self.table, self.database, self.partition)
except HiveCommandError:
if self.fail_missing_table:
raise
else:
if self.client.table_exists(self.table, self.database):
# a real error occurred
raise
else:
# oh the table just doesn't exist
return False | returns `True` if the partition/table exists | exists | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def path(self):
"""
Returns the path for this HiveTablePartitionTarget's data.
"""
location = self.client.table_location(self.table, self.database, self.partition)
if not location:
raise Exception("Couldn't find location for table: {0}".format(str(self)))
return location | Returns the path for this HiveTablePartitionTarget's data. | path | python | spotify/luigi | luigi/contrib/hive.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/hive.py | Apache-2.0 |
def app_options(self):
"""
Subclass this method to map your task parameters to the app's arguments
"""
return [] | Subclass this method to map your task parameters to the app's arguments | app_options | python | spotify/luigi | luigi/contrib/spark.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/spark.py | Apache-2.0 |
def setup(self, conf):
"""
Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext
:param conf: SparkConf
""" | Called by the pyspark_runner with a SparkConf instance that will be used to instantiate the SparkContext
:param conf: SparkConf | setup | python | spotify/luigi | luigi/contrib/spark.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/spark.py | Apache-2.0 |
def main(self, sc, *args):
"""
Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()``
:param sc: SparkContext
:param args: arguments list
"""
raise NotImplementedError("subclass should define a main method") | Called by the pyspark_runner with a SparkContext and any arguments returned by ``app_options()``
:param sc: SparkContext
:param args: arguments list | main | python | spotify/luigi | luigi/contrib/spark.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/spark.py | Apache-2.0 |
def _setup_packages(self, sc):
"""
This method compresses and uploads packages to the cluster
"""
packages = self.py_packages
if not packages:
return
for package in packages:
mod = importlib.import_module(package)
try:
mod_path = mod.__path__[0]
except AttributeError:
mod_path = mod.__file__
os.makedirs(self.run_path, exist_ok=True)
tar_path = os.path.join(self.run_path, package + '.tar.gz')
tar = tarfile.open(tar_path, "w:gz")
tar.add(mod_path, os.path.basename(mod_path))
tar.close()
sc.addPyFile(tar_path) | This method compresses and uploads packages to the cluster | _setup_packages | python | spotify/luigi | luigi/contrib/spark.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/spark.py | Apache-2.0 |
def _get_input_schema(self):
"""Arbitrarily picks an object in input and reads the Avro schema from it."""
assert avro, 'avro module required'
input_target = flatten(self.input())[0]
input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
input_uri = self.source_uris()[0]
if '*' in input_uri:
file_uris = list(input_fs.list_wildcard(input_uri))
if file_uris:
input_uri = file_uris[0]
else:
raise RuntimeError('No match for ' + input_uri)
schema = []
exception_reading_schema = []
def read_schema(fp):
# fp contains the file part downloaded thus far. We rely on that the DataFileReader
# initializes itself fine as soon as the file header with schema is downloaded, without
# requiring the remainder of the file...
try:
reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
schema[:] = [BigQueryLoadAvro._get_writer_schema(reader.datum_reader)]
except Exception as e:
# Save but assume benign unless schema reading ultimately fails. The benign
# exception in case of insufficiently big downloaded file part seems to be:
# TypeError('ord() expected a character, but string of length 0 found',).
exception_reading_schema[:] = [e]
return False
return True
input_fs.download(input_uri, 64 * 1024, read_schema).close()
if not schema:
raise exception_reading_schema[0]
return schema[0] | Arbitrarily picks an object in input and reads the Avro schema from it. | _get_input_schema | python | spotify/luigi | luigi/contrib/bigquery_avro.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/bigquery_avro.py | Apache-2.0 |
def _get_writer_schema(datum_reader):
"""Python-version agnostic getter for datum_reader writer(s)_schema attribute
Parameters:
datum_reader (avro.io.DatumReader): DatumReader
Returns:
Returning correct attribute name depending on Python version.
"""
return datum_reader.writer_schema | Python-version agnostic getter for datum_reader writer(s)_schema attribute
Parameters:
datum_reader (avro.io.DatumReader): DatumReader
Returns:
The writer schema, accessed via the attribute name appropriate for the Python version. | _get_writer_schema | python | spotify/luigi | luigi/contrib/bigquery_avro.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/bigquery_avro.py | Apache-2.0 |
def configuration_section(self):
"""
Override to change the configuration section used
to obtain default credentials.
"""
return 'redshift' | Override to change the configuration section used
to obtain default credentials. | configuration_section | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def aws_access_key_id(self):
"""
Override to return the key id.
"""
return self._get_configuration_attribute('aws_access_key_id') | Override to return the key id. | aws_access_key_id | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def aws_secret_access_key(self):
"""
Override to return the secret access key.
"""
return self._get_configuration_attribute('aws_secret_access_key') | Override to return the secret access key. | aws_secret_access_key | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def aws_account_id(self):
"""
Override to return the account id.
"""
return self._get_configuration_attribute('aws_account_id') | Override to return the account id. | aws_account_id | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def aws_arn_role_name(self):
"""
Override to return the arn role name.
"""
return self._get_configuration_attribute('aws_arn_role_name') | Override to return the arn role name. | aws_arn_role_name | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def aws_session_token(self):
"""
Override to return the session token.
"""
return self._get_configuration_attribute('aws_session_token') | Override to return the session token. | aws_session_token | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def _credentials(self):
"""
Return a credential string for the provided task. If no valid
credentials are set, raise a NotImplementedError.
"""
if self.aws_account_id and self.aws_arn_role_name:
return 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format(
id=self.aws_account_id,
role=self.aws_arn_role_name
)
elif self.aws_access_key_id and self.aws_secret_access_key:
return 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format(
key=self.aws_access_key_id,
secret=self.aws_secret_access_key,
opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else ''
)
else:
raise NotImplementedError("Missing Credentials. "
"Ensure one of the pairs of auth args below are set "
"in a configuration file, environment variables or by "
"being overridden in the task: "
"'aws_access_key_id' AND 'aws_secret_access_key' OR "
"'aws_account_id' AND 'aws_arn_role_name'") | Return a credential string for the provided task. If no valid
credentials are set, raise a NotImplementedError. | _credentials | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def s3_load_path(self):
"""
Override to return the load path.
"""
return None | Override to return the load path. | s3_load_path | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def copy_options(self):
"""
Add extra copy options, for example:
* TIMEFORMAT 'auto'
* IGNOREHEADER 1
* TRUNCATECOLUMNS
* IGNOREBLANKLINES
* DELIMITER '\t'
"""
return '' | Add extra copy options, for example:
* TIMEFORMAT 'auto'
* IGNOREHEADER 1
* TRUNCATECOLUMNS
* IGNOREBLANKLINES
* DELIMITER '\t' | copy_options | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
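Pulling the copy knobs together, here is a hedged sketch of an S3CopyToTable subclass; the cluster endpoint, credential handling, table, columns, and bucket path are all hypothetical.

# A minimal sketch of an S3CopyToTable subclass; connection details and paths are hypothetical.
from luigi.contrib.redshift import S3CopyToTable

class LoadEvents(S3CopyToTable):
    host = 'examplecluster.abc123.us-east-1.redshift.amazonaws.com:5439'
    database = 'analytics'
    user = 'loader'
    password = 'change-me'  # in practice, read this from configuration, not source
    table = 'public.events'
    columns = [('event_time', 'timestamp'),
               ('user_id', 'varchar(64)'),
               ('payload', 'varchar(max)')]

    copy_options = "TIMEFORMAT 'auto' IGNOREHEADER 1 TRUNCATECOLUMNS"

    def s3_load_path(self):
        return 's3://my-bucket/events/2024-01-01/'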
def prune_table(self):
"""
Override to set equal to the name of the table which is to be pruned.
Intended to be used in conjunction with prune_column and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None | Override to set equal to the name of the table which is to be pruned.
Intended to be used in conjunction with prune_column and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table | prune_table | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def prune_column(self):
"""
Override to set equal to the column of the prune_table which is to be compared
Intended to be used in conjunction with prune_table and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None | Override to set equal to the column of the prune_table which is to be compared
Intended to be used in conjunction with prune_table and prune_date
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table | prune_column | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def prune_date(self):
"""
Override to set equal to the date by which prune_column is to be compared
Intended to be used in conjunction with prune_table and prune_column
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table
"""
return None | Override to set equal to the date by which prune_column is to be compared
Intended to be used in conjunction with prune_table and prune_column
i.e. copy to temp table, prune production table to prune_column with a date greater than prune_date, then insert into production table from temp table | prune_date | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def table_attributes(self):
"""
Add extra table attributes, for example:
DISTSTYLE KEY
DISTKEY (MY_FIELD)
SORTKEY (MY_FIELD_2, MY_FIELD_3)
"""
return '' | Add extra table attributes, for example:
DISTSTYLE KEY
DISTKEY (MY_FIELD)
SORTKEY (MY_FIELD_2, MY_FIELD_3) | table_attributes | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def table_constraints(self):
"""
Add extra table constraints, for example:
PRIMARY KEY (MY_FIELD, MY_FIELD_2)
UNIQUE KEY (MY_FIELD_3)
"""
return '' | Add extra table constraints, for example:
PRIMARY KEY (MY_FIELD, MY_FIELD_2)
UNIQUE KEY (MY_FIELD_3) | table_constraints | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def do_truncate_table(self):
"""
Return True if table should be truncated before copying new data in.
"""
return False | Return True if table should be truncated before copying new data in. | do_truncate_table | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def do_prune(self):
"""
Return True if prune_table, prune_column, and prune_date are implemented.
If only a subset of the prune variables are overridden, an exception is raised to remind the user to implement all or none.
Prune (data newer than prune_date deleted) before copying new data in.
"""
if self.prune_table and self.prune_column and self.prune_date:
return True
elif self.prune_table or self.prune_column or self.prune_date:
raise Exception('override zero or all prune variables')
else:
return False | Return True if prune_table, prune_column, and prune_date are implemented.
If only a subset of the prune variables are overridden, an exception is raised to remind the user to implement all or none.
Prune (data newer than prune_date deleted) before copying new data in. | do_prune | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
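A hedged sketch of the all-or-none rule above: override all three prune properties together so that do_prune() returns True. The table, column, and date values are hypothetical, and the connection attributes from the earlier sketch are omitted for brevity.

# A minimal sketch: override all three prune knobs together, per do_prune() above.
from luigi.contrib.redshift import S3CopyToTable

class PrunedLoad(S3CopyToTable):
    prune_table = 'public.events'
    prune_column = 'event_time'
    prune_date = '2024-01-01'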
def table_type(self):
"""
Return table type (e.g. 'temp').
"""
return '' | Return table type (e.g. 'temp'). | table_type | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def queries(self):
"""
Override to return a list of queries to be executed in order.
"""
return [] | Override to return a list of queries to be executed in order. | queries | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def create_schema(self, connection):
"""
Will create the schema in the database
"""
if '.' not in self.table:
return
query = 'CREATE SCHEMA IF NOT EXISTS {schema_name};'.format(schema_name=self.table.split('.')[0])
connection.cursor().execute(query) | Will create the schema in the database | create_schema | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def create_table(self, connection):
"""
Override to provide code for creating the target table.
By default it will be created using types (optionally)
specified in columns.
If overridden, use the provided connection object for
setting up the table in order to create the table and
insert data using the same transaction.
"""
if len(self.columns[0]) == 1:
# only names of columns specified, no types
raise NotImplementedError("create_table() not implemented "
"for %r and columns types not "
"specified" % self.table)
elif len(self.columns[0]) == 2:
# if columns is specified as (name, type) tuples
coldefs = ','.join(
'{name} {type}'.format(
name=name,
type=type) for name, type in self.columns
)
table_constraints = ''
if self.table_constraints != '':
table_constraints = ', ' + self.table_constraints
query = ("CREATE {type} TABLE "
"{table} ({coldefs} {table_constraints}) "
"{table_attributes}").format(
type=self.table_type,
table=self.table,
coldefs=coldefs,
table_constraints=table_constraints,
table_attributes=self.table_attributes)
connection.cursor().execute(query)
elif len(self.columns[0]) == 3:
# if columns is specified as (name, type, encoding) tuples
# possible column encodings: https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html
coldefs = ','.join(
'{name} {type} ENCODE {encoding}'.format(
name=name,
type=type,
encoding=encoding) for name, type, encoding in self.columns
)
table_constraints = ''
if self.table_constraints != '':
table_constraints = ',' + self.table_constraints
query = ("CREATE {type} TABLE "
"{table} ({coldefs} {table_constraints}) "
"{table_attributes}").format(
type=self.table_type,
table=self.table,
coldefs=coldefs,
table_constraints=table_constraints,
table_attributes=self.table_attributes)
connection.cursor().execute(query)
else:
raise ValueError("create_table() found no columns for %r"
% self.table) | Override to provide code for creating the target table.
By default it will be created using types (optionally)
specified in columns.
If overridden, use the provided connection object for
setting up the table in order to create the table and
insert data using the same transaction. | create_table | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
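For reference, the two `columns` shapes that create_table() above accepts look like this; the column names, types, and encodings are illustrative.

# Two-tuples: plain (name, type) column definitions.
columns = [('id', 'bigint'), ('name', 'varchar(255)')]
# Three-tuples: (name, type, encoding) adds a per-column compression encoding.
columns = [('id', 'bigint', 'zstd'), ('name', 'varchar(255)', 'lzo')]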
def run(self):
"""
If the target table doesn't exist, self.create_table
will be called to attempt to create the table.
"""
if not (self.table):
raise Exception("table need to be specified")
path = self.s3_load_path()
output = self.output()
connection = output.connect()
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, path)
self.post_copy(cursor)
if self.enable_metadata_columns:
self.post_copy_metacolumns(cursor)
# update marker table
output.touch(connection)
connection.commit()
# commit and clean up
connection.close() | If the target table doesn't exist, self.create_table
will be called to attempt to create the table. | run | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def copy(self, cursor, f):
"""
Defines copying from s3 into redshift.
If both key-based and role-based credentials are provided, role-based will be used.
"""
logger.info("Inserting file: %s", f)
colnames = ''
if self.columns and len(self.columns) > 0:
colnames = ",".join([x[0] for x in self.columns])
colnames = '({})'.format(colnames)
cursor.execute("""
COPY {table} {colnames} from '{source}'
CREDENTIALS '{creds}'
{options}
;""".format(
table=self.table,
colnames=colnames,
source=f,
creds=self._credentials(),
options=self.copy_options)
) | Defines copying from s3 into redshift.
If both key-based and role-based credentials are provided, role-based will be used. | copy | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def output(self):
"""
Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this.
"""
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id) | Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this. | output | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def does_schema_exist(self, connection):
"""
Determine whether the schema already exists.
"""
if '.' in self.table:
query = ("select 1 as schema_exists "
"from pg_namespace "
"where nspname = lower(%s) limit 1")
else:
return True
cursor = connection.cursor()
try:
schema = self.table.split('.')[0]
cursor.execute(query, [schema])
result = cursor.fetchone()
return bool(result)
finally:
cursor.close() | Determine whether the schema already exists. | does_schema_exist | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def does_table_exist(self, connection):
"""
Determine whether the table already exists.
"""
if '.' in self.table:
query = ("select 1 as table_exists "
"from information_schema.tables "
"where table_schema = lower(%s) and table_name = lower(%s) limit 1")
else:
query = ("select 1 as table_exists "
"from pg_table_def "
"where tablename = lower(%s) limit 1")
cursor = connection.cursor()
try:
cursor.execute(query, tuple(self.table.split('.')))
result = cursor.fetchone()
return bool(result)
finally:
cursor.close() | Determine whether the table already exists. | does_table_exist | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def init_copy(self, connection):
"""
Perform pre-copy sql - such as creating table, truncating, or removing data older than x.
"""
if not self.does_schema_exist(connection):
logger.info("Creating schema for %s", self.table)
self.create_schema(connection)
if not self.does_table_exist(connection):
logger.info("Creating table %s", self.table)
self.create_table(connection)
if self.enable_metadata_columns:
self._add_metadata_columns(connection)
if self.do_truncate_table:
logger.info("Truncating table %s", self.table)
self.truncate_table(connection)
if self.do_prune():
logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table)
self.prune(connection) | Perform pre-copy sql - such as creating table, truncating, or removing data older than x. | init_copy | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def post_copy(self, cursor):
"""
Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc.
"""
logger.info('Executing post copy queries')
for query in self.queries:
cursor.execute(query) | Performs post-copy sql - such as cleansing data, inserting into production table (if copied to temp table), etc. | post_copy | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def post_copy_metacolums(self, cursor):
"""
Performs post-copy to fill metadata columns.
"""
logger.info('Executing post copy metadata queries')
for query in self.metadata_queries:
cursor.execute(query) | Performs post-copy to fill metadata columns. | post_copy_metacolums | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def jsonpath(self):
"""
Override the jsonpath schema location for the table.
"""
return '' | Override the jsonpath schema location for the table. | jsonpath | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def copy_json_options(self):
"""
Add extra copy options, for example:
* GZIP
* LZOP
"""
return '' | Add extra copy options, for example:
* GZIP
* LZOP | copy_json_options | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def copy(self, cursor, f):
"""
Defines copying JSON from s3 into redshift.
"""
logger.info("Inserting file: %s", f)
cursor.execute("""
COPY %s from '%s'
CREDENTIALS '%s'
JSON AS '%s' %s
%s
;""" % (self.table, f, self._credentials(),
self.jsonpath, self.copy_json_options, self.copy_options)) | Defines copying JSON from s3 into redshift. | copy | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def update_id(self):
"""
This update id will be a unique identifier
for this insert on this table.
"""
return self.task_id | This update id will be a unique identifier
for this insert on this table. | update_id | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def output(self):
"""
Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this.
"""
# uses class name as a meta-table
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.__class__.__name__,
update_id=self.update_id) | Returns a RedshiftTarget representing the inserted dataset.
Normally you don't override this. | output | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def run(self):
"""
Kill any open Redshift sessions for the given database.
"""
connection = self.output().connect()
# kill any sessions other than ours and
# internal Redshift sessions (rdsdb)
query = ("select pg_terminate_backend(process) "
"from STV_SESSIONS "
"where db_name=%s "
"and user_name != 'rdsdb' "
"and process != pg_backend_pid()")
cursor = connection.cursor()
logger.info('Killing all open Redshift sessions for database: %s', self.database)
try:
cursor.execute(query, (self.database,))
cursor.close()
connection.commit()
except psycopg2.DatabaseError as e:
if e.message and 'EOF' in e.message:
# sometimes this operation kills the current session.
# rebuild the connection. Need to pause for 30-60 seconds
# before Redshift will allow us back in.
connection.close()
logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds)
time.sleep(self.connection_reset_wait_seconds)
logger.info('Reconnecting to Redshift')
connection = self.output().connect()
else:
raise
try:
self.output().touch(connection)
connection.commit()
finally:
connection.close()
logger.info('Done killing all open Redshift sessions for database: %s', self.database) | Kill any open Redshift sessions for the given database. | run | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def output(self):
"""
Returns a RedshiftTarget representing the executed query.
Normally you don't override this.
"""
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
) | Returns a RedshiftTarget representing the executed query.
Normally you don't override this. | output | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def s3_unload_path(self):
"""
Override to return the load path.
"""
return '' | Override to return the load path. | s3_unload_path | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def unload_options(self):
"""
Add extra or override default unload options:
"""
return "DELIMITER '|' ADDQUOTES GZIP ALLOWOVERWRITE PARALLEL ON" | Add extra or override default unload options: | unload_options | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def unload_query(self):
"""
Default UNLOAD command
"""
return ("UNLOAD ( '{query}' ) TO '{s3_unload_path}' "
"credentials '{credentials}' "
"{unload_options};") | Default UNLOAD command | unload_query | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def output(self):
"""
Returns a RedshiftTarget representing the executed query.
Normally you don't override this.
"""
return RedshiftTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
) | Returns a RedshiftTarget representing the executed query.
Normally you don't override this. | output | python | spotify/luigi | luigi/contrib/redshift.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/redshift.py | Apache-2.0 |
def track_job(job_id):
"""
Tracking is done by requesting each job and then searching for whether the job
has one of the following states:
- "RUN",
- "PEND",
- "SSUSP",
- "EXIT"
based on the LSF documentation
"""
cmd = ["bjobs", "-noheader", "-o", "stat", str(job_id)]
track_job_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=False)
status = track_job_proc.communicate()[0].strip('\n')
return status | Tracking is done by requesting each job and then searching for whether the job
has one of the following states:
- "RUN",
- "PEND",
- "SSUSP",
- "EXIT"
based on the LSF documentation | track_job | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def kill_job(job_id):
"""
Kill a running LSF job
"""
subprocess.call(['bkill', job_id]) | Kill a running LSF job | kill_job | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def fetch_task_failures(self):
"""
Read in the error file from bsub
"""
error_file = os.path.join(self.tmp_dir, "job.err")
if os.path.isfile(error_file):
with open(error_file, "r") as f_err:
errors = f_err.readlines()
else:
errors = ''
return errors | Read in the error file from bsub | fetch_task_failures | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def fetch_task_output(self):
"""
Read in the output file
"""
# Read in the output file
if os.path.isfile(os.path.join(self.tmp_dir, "job.out")):
with open(os.path.join(self.tmp_dir, "job.out"), "r") as f_out:
outputs = f_out.readlines()
else:
outputs = ''
return outputs | Read in the output file | fetch_task_output | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def init_local(self):
"""
Implement any work to setup any internal datastructure etc here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the compute nodes.
"""
pass | Implement any work to setup any internal datastructure etc here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the compute nodes. | init_local | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def run(self):
"""
The procedure:
- Pickle the class
- Tarball the dependencies
- Construct a bsub argument that runs a generic runner function with the path to the pickled class
- Runner function loads the class from pickle
- Runner class untars the dependencies
- Runner function hits the button on the class's work() method
"""
self._init_local()
self._run_job() | The procedure:
- Pickle the class
- Tarball the dependencies
- Construct a bsub argument that runs a generic runner function with the path to the pickled class
- Runner function loads the class from pickle
- Runner class untars the dependencies
- Runner function hits the button on the class's work() method | run | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def work(self):
"""
Subclass this for where you're doing your actual work.
Why not run(), like other tasks? Because we need run to always be
something that the Worker can call, and that's the real logical place to
do LSF scheduling.
So, the work will happen in work().
"""
pass | Subclass this for where you're doing your actual work.
Why not run(), like other tasks? Because we need run to always be
something that the Worker can call, and that's the real logical place to
do LSF scheduling.
So, the work will happen in work(). | work | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def _dump(self, out_dir=''):
"""
Dump instance to file.
"""
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
dump_inst = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
dump_inst = dump_inst.replace('(c__main__', "(c" + module_name)
open(self.job_file, "w").write(dump_inst)
else:
pickle.dump(self, open(self.job_file, "w")) | Dump instance to file. | _dump | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def _run_job(self):
"""
Build a bsub argument that will run lsf_runner.py on the directory we've specified.
"""
args = []
if isinstance(self.output(), list):
log_output = os.path.split(self.output()[0].path)
else:
log_output = os.path.split(self.output().path)
args += ["bsub", "-q", self.queue_flag]
args += ["-n", str(self.n_cpu_flag)]
args += ["-M", str(self.memory_flag)]
args += ["-R", "rusage[%s]" % self.resource_flag]
args += ["-W", str(self.runtime_flag)]
if self.job_name_flag:
args += ["-J", str(self.job_name_flag)]
args += ["-o", os.path.join(log_output[0], "job.out")]
args += ["-e", os.path.join(log_output[0], "job.err")]
if self.extra_bsub_args:
args += self.extra_bsub_args.split()
# Find where the runner file is
runner_path = os.path.abspath(lsf_runner.__file__)
args += [runner_path]
args += [self.tmp_dir]
# That should do it. Let the world know what we're doing.
LOGGER.info("### LSF SUBMISSION ARGS: %s",
" ".join([str(a) for a in args]))
# Submit the job
run_job_proc = subprocess.Popen(
[str(a) for a in args],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=self.tmp_dir)
output = run_job_proc.communicate()[0]
# ASSUMPTION
# The result will be of the format
# Job <123> is submitted to queue <myqueue>
# So get the number in those first brackets.
# I cannot think of a better workaround that leaves logic on the Task side of things.
LOGGER.info("### JOB SUBMISSION OUTPUT: %s", str(output))
self.job_id = int(output.split("<")[1].split(">")[0])
LOGGER.info(
"Job %ssubmitted as job %s",
self.job_name_flag + ' ',
str(self.job_id)
)
self._track_job()
# If we want to save the job temporaries, then do so
# We'll move them to be next to the job output
if self.save_job_info:
LOGGER.info("Saving up temporary bits")
# dest_dir = self.output().path
shutil.move(self.tmp_dir, "/".join(log_output[0:-1]))
# Now delete the temporaries, if they're there.
self._finish() | Build a bsub argument that will run lsf_runner.py on the directory we've specified. | _run_job | python | spotify/luigi | luigi/contrib/lsf.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf.py | Apache-2.0 |
def pig_env_vars(self):
"""
Dictionary of environment variables that should be set when running Pig.
Ex::
return { 'PIG_CLASSPATH': '/your/path' }
"""
return {} | Dictionary of environment variables that should be set when running Pig.
Ex::
return { 'PIG_CLASSPATH': '/your/path' } | pig_env_vars | python | spotify/luigi | luigi/contrib/pig.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/pig.py | Apache-2.0 |
def pig_properties(self):
"""
Dictionary of properties that should be set when running Pig.
Example::
return { 'pig.additional.jars':'/path/to/your/jar' }
"""
return {} | Dictionary of properties that should be set when running Pig.
Example::
return { 'pig.additional.jars':'/path/to/your/jar' } | pig_properties | python | spotify/luigi | luigi/contrib/pig.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/pig.py | Apache-2.0 |
def pig_parameters(self):
"""
Dictionary of parameters that should be set for the Pig job.
Example::
return { 'YOUR_PARAM_NAME':'Your param value' }
"""
return {} | Dictionary of parameters that should be set for the Pig job.
Example::
return { 'YOUR_PARAM_NAME':'Your param value' } | pig_parameters | python | spotify/luigi | luigi/contrib/pig.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/pig.py | Apache-2.0 |
def pig_options(self):
"""
List of options that will be appended to the Pig command.
Example::
return ['-x', 'local']
"""
return [] | List of options that will be appended to the Pig command.
Example::
return ['-x', 'local'] | pig_options | python | spotify/luigi | luigi/contrib/pig.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/pig.py | Apache-2.0 |
def pig_script_path(self):
"""
Return the path to the Pig script to be run.
"""
raise NotImplementedError("subclass should define pig_script_path") | Return the path to the Pig script to be run. | pig_script_path | python | spotify/luigi | luigi/contrib/pig.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/pig.py | Apache-2.0 |
def host_config_options(self):
'''
Override this to specify host_config options like gpu requests or shm
size e.g. `{"device_requests": [docker.types.DeviceRequest(count=1, capabilities=[["gpu"]])]}`
See https://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_host_config
'''
return {} | Override this to specify host_config options like gpu requests or shm
size e.g. `{"device_requests": [docker.types.DeviceRequest(count=1, capabilities=[["gpu"]])]}`
See https://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_host_config | host_config_options | python | spotify/luigi | luigi/contrib/docker_runner.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/docker_runner.py | Apache-2.0 |
def container_options(self):
'''
Override this to specify container options like user or ports e.g.
`{"user": f"{os.getuid()}:{os.getgid()}"}`
See https://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
'''
return {} | Override this to specify container options like user or ports e.g.
`{"user": f"{os.getuid()}:{os.getgid()}"}`
See https://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container | container_options | python | spotify/luigi | luigi/contrib/docker_runner.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/docker_runner.py | Apache-2.0 |
def binds(self):
'''
Override this to mount local volumes, in addition to the /tmp/luigi
which gets defined by default. This should return a list of strings.
e.g. ['/hostpath1:/containerpath1', '/hostpath2:/containerpath2']
'''
return None | Override this to mount local volumes, in addition to the /tmp/luigi
which gets defined by default. This should return a list of strings.
e.g. ['/hostpath1:/containerpath1', '/hostpath2:/containerpath2'] | binds | python | spotify/luigi | luigi/contrib/docker_runner.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/docker_runner.py | Apache-2.0 |
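A hedged sketch of a DockerTask subclass using the binds hook above; the image, host path, and command are hypothetical.

# A minimal sketch of a DockerTask subclass; image, mount paths, and command are hypothetical.
from luigi.contrib.docker_runner import DockerTask

class Resample(DockerTask):
    image = 'my-registry/resampler:1.4'
    command = 'resample --input /data/in.wav --output /data/out.wav'

    @property
    def binds(self):
        # mounted in addition to the default /tmp/luigi bind set up in __init__
        return ['/srv/audio:/data']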
def __init__(self, *args, **kwargs):
'''
When a new instance of the DockerTask class gets created:
- call the parent class __init__ method
- start the logger
- init an instance of the docker client
- create a tmp dir
- add the temp dir to the volume binds specified in the task
'''
super(DockerTask, self).__init__(*args, **kwargs)
self.__logger = logger
'''init docker client
using the low level API as the higher level API does not allow to mount single
files as volumes
'''
self._client = docker.APIClient(self.docker_url)
# add latest tag if nothing else is specified by task
if ':' not in self.image:
self._image = ':'.join([self.image, 'latest'])
else:
self._image = self.image
if self.mount_tmp:
# create a tmp_dir, NOTE: /tmp needs to be specified for it to work on
# macOS, despite what the python documentation says
self._host_tmp_dir = mkdtemp(suffix=self.task_id,
prefix='luigi-docker-tmp-dir-',
dir='/tmp')
self._binds = ['{0}:{1}'.format(self._host_tmp_dir, self.container_tmp_dir)]
else:
self._binds = []
# update environment property with the (internal) location of tmp_dir
self.environment['LUIGI_TMP_DIR'] = self.container_tmp_dir
# add additional volume binds specified by the user to the tmp_dir bind
if isinstance(self.binds, str):
self._binds.append(self.binds)
elif isinstance(self.binds, list):
self._binds.extend(self.binds)
# derive volumes (ie. list of container destination paths) from
# specified binds
self._volumes = [b.split(':')[1] for b in self._binds] | When a new instance of the DockerTask class gets created:
- call the parent class __init__ method
- start the logger
- init an instance of the docker client
- create a tmp dir
- add the temp dir to the volume binds specified in the task | __init__ | python | spotify/luigi | luigi/contrib/docker_runner.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/docker_runner.py | Apache-2.0 |
def get_active_queue(self):
"""Get name of first active job queue"""
# Get dict of active queues keyed by name
queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues']
if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
if not queues:
raise Exception('No job queues with state=ENABLED and status=VALID')
# Pick the first queue as default
return list(queues.keys())[0] | Get name of first active job queue | get_active_queue | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def get_job_id_from_name(self, job_name):
"""Retrieve the first job ID matching the given name"""
jobs = self._client.list_jobs(jobQueue=self._queue, jobStatus='RUNNING')['jobSummaryList']
matching_jobs = [job for job in jobs if job['jobName'] == job_name]
if matching_jobs:
return matching_jobs[0]['jobId'] | Retrieve the first job ID matching the given name | get_job_id_from_name | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def get_job_status(self, job_id):
"""Retrieve task statuses from ECS API
:param job_id (str): AWS Batch job uuid
Returns one of {SUBMITTED|PENDING|RUNNABLE|STARTING|RUNNING|SUCCEEDED|FAILED}
"""
response = self._client.describe_jobs(jobs=[job_id])
# Error checking
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Job status request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return response['jobs'][0]['status'] | Retrieve task statuses from ECS API
:param job_id (str): AWS Batch job uuid
Returns one of {SUBMITTED|PENDING|RUNNABLE|STARTING|RUNNING|SUCCEEDED|FAILED} | get_job_status | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def get_logs(self, log_stream_name, get_last=50):
"""Retrieve log stream from CloudWatch"""
response = self._log_client.get_log_events(
logGroupName='/aws/batch/job',
logStreamName=log_stream_name,
startFromHead=False)
events = response['events']
return '\n'.join(e['message'] for e in events[-get_last:]) | Retrieve log stream from CloudWatch | get_logs | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def submit_job(self, job_definition, parameters, job_name=None, queue=None):
"""Wrap submit_job with useful defaults"""
if job_name is None:
job_name = _random_id()
response = self._client.submit_job(
jobName=job_name,
jobQueue=queue or self.get_active_queue(),
jobDefinition=job_definition,
parameters=parameters
)
return response['jobId'] | Wrap submit_job with useful defaults | submit_job | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def wait_on_job(self, job_id):
"""Poll task status until STOPPED"""
while True:
status = self.get_job_status(job_id)
if status == 'SUCCEEDED':
logger.info('Batch job {} SUCCEEDED'.format(job_id))
return True
elif status == 'FAILED':
# Raise and notify if job failed
jobs = self._client.describe_jobs(jobs=[job_id])['jobs']
job_str = json.dumps(jobs, indent=4)
logger.debug('Job details:\n' + job_str)
log_stream_name = jobs[0]['attempts'][0]['container']['logStreamName']
logs = self.get_logs(log_stream_name)
raise BatchJobException('Job {} failed: {}'.format(
job_id, logs))
time.sleep(self.poll_time)
logger.debug('Batch job status for job {0}: {1}'.format(
job_id, status)) | Poll task status until STOPPED | wait_on_job | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def register_job_definition(self, json_fpath):
"""Register a job definition with AWS Batch, using a JSON"""
with open(json_fpath) as f:
job_def = json.load(f)
response = self._client.register_job_definition(**job_def)
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Register job definition request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return response | Register a job definition with AWS Batch, using a JSON | register_job_definition | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def parameters(self):
"""Override to return a dict of parameters for the Batch Task"""
return {} | Override to return a dict of parameters for the Batch Task | parameters | python | spotify/luigi | luigi/contrib/batch.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/batch.py | Apache-2.0 |
def __init__(self, token, user_agent="Luigi", root_namespace_id=None):
"""
:param str token: Dropbox Oauth2 Token. See :class:`DropboxTarget` for more information about generating a token
:param str root_namespace_id: Root namespace ID for interacting with Team Spaces
"""
if not token:
raise ValueError("The token parameter must contain a valid Dropbox Oauth2 Token")
try:
conn = dropbox.dropbox_client.Dropbox(oauth2_access_token=token, user_agent=user_agent)
except Exception as e:
raise Exception("Cannot connect to Dropbox. Check your Internet connection and the token. \n" + repr(e))
if root_namespace_id:
conn = conn.with_path_root(dropbox.common.PathRoot.root(root_namespace_id))
self.token = token
self.conn = conn | :param str token: Dropbox Oauth2 Token. See :class:`DropboxTarget` for more information about generating a token
:param str root_namespace_id: Root namespace ID for interacting with Team Spaces | __init__ | python | spotify/luigi | luigi/contrib/dropbox.py | https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py | Apache-2.0 |