Dataset columns: text_prompt (string, lengths 100 to 17.7k) and code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Get the contents of an object stored in S3 as string. <END_TASK> <USER_TASK:> Description: def get_as_string(self, s3_path, encoding='utf-8'): """ Get the contents of an object stored in S3 as string. :param s3_path: URL for target S3 location :param encoding: Encoding to decode bytes to string :return: File contents as a string """
content = self.get_as_bytes(s3_path)
return content.decode(encoding)
<SYSTEM_TASK:> Is the parameter S3 path a directory? <END_TASK> <USER_TASK:> Description: def isdir(self, path): """ Is the parameter S3 path a directory? """
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.Bucket(bucket)

# root is a directory
if self._is_root(key):
    return True

for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0, S3_DIRECTORY_MARKER_SUFFIX_1):
    try:
        self.s3.meta.client.get_object(
            Bucket=bucket, Key=key + suffix)
    except botocore.exceptions.ClientError as e:
        if not e.response['Error']['Code'] in ['NoSuchKey', '404']:
            raise
    else:
        return True

# files with this prefix
key_path = self._add_path_delimiter(key)
s3_bucket_list_result = list(itertools.islice(
    s3_bucket.objects.filter(Prefix=key_path), 1))
if s3_bucket_list_result:
    return True

return False
<SYSTEM_TASK:> Return `True` if the file or directory at `path` exists, False otherwise. <END_TASK> <USER_TASK:> Description: def exists(self, path, mtime=None): """ Return `True` if the file or directory at `path` exists, False otherwise. An additional modified-time check is performed when mtime is passed in: return False if the file's modified time is older than mtime. """
self._connect()

if self.sftp:
    exists = self._sftp_exists(path, mtime)
else:
    exists = self._ftp_exists(path, mtime)

self._close()

return exists
<SYSTEM_TASK:> Recursively delete a directory tree on a remote server. <END_TASK> <USER_TASK:> Description: def _rm_recursive(self, ftp, path): """ Recursively delete a directory tree on a remote server. Source: https://gist.github.com/artlogic/2632647 """
wd = ftp.pwd()

# check if it is a file first, because some FTP servers don't return
# correctly on ftp.nlst(file)
try:
    ftp.cwd(path)
except ftplib.all_errors:
    # this is a file, we will just delete the file
    ftp.delete(path)
    return

try:
    names = ftp.nlst()
except ftplib.all_errors:
    # some FTP servers complain when you try and list non-existent paths
    return

for name in names:
    if os.path.split(name)[1] in ('.', '..'):
        continue

    try:
        ftp.cwd(name)  # if we can cwd to it, it's a folder
        ftp.cwd(wd)  # don't try to nuke a folder we're in
        ftp.cwd(path)  # then go back to where we were
        self._rm_recursive(ftp, name)
    except ftplib.all_errors:
        ftp.delete(name)

try:
    ftp.cwd(wd)  # do not delete the folder that we are in
    ftp.rmd(path)
except ftplib.all_errors as e:
    print('_rm_recursive: Could not remove {0}: {1}'.format(path, e))
<SYSTEM_TASK:> Open the FileSystem target. <END_TASK> <USER_TASK:> Description: def open(self, mode): """ Open the FileSystem target. This method returns a file-like object which can either be read from or written to depending on the specified mode. :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will open the FileSystemTarget in write mode. Subclasses can implement additional options. :type mode: str """
if mode == 'w':
    return self.format.pipe_writer(AtomicFtpFile(self._fs, self.path))

elif mode == 'r':
    temp_dir = os.path.join(tempfile.gettempdir(), 'luigi-contrib-ftp')
    self.__tmp_path = temp_dir + '/' + self.path.lstrip('/') + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
    # download file to local
    self._fs.get(self.path, self.__tmp_path)

    return self.format.pipe_reader(
        FileWrapper(io.BufferedReader(io.FileIO(self.__tmp_path, 'r')))
    )
else:
    raise Exception("mode must be 'r' or 'w' (got: %s)" % mode)
<SYSTEM_TASK:> Write value to the target <END_TASK> <USER_TASK:> Description: def write(self, value): """ Write value to the target """
self.get_collection().update_one(
    {'_id': self._document_id},
    {'$set': {self._path: value}},
    upsert=True
)
<SYSTEM_TASK:> Read the target's value <END_TASK> <USER_TASK:> Description: def read(self): """ Read the target's value """
cursor = self.get_collection().find(
    {
        '_id': {'$in': self._document_ids},
        self._field: {'$exists': True}
    },
    {self._field: True}
)

return {doc['_id']: doc[self._field] for doc in cursor}
<SYSTEM_TASK:> Get the ids of documents that are missing the targeted field <END_TASK> <USER_TASK:> Description: def get_empty_ids(self): """ Get the ids of documents that are missing the targeted field """
cursor = self.get_collection().find(
    {
        '_id': {'$in': self._document_ids},
        self._field: {'$exists': True}
    },
    {'_id': True}
)

return set(self._document_ids) - {doc['_id'] for doc in cursor}
<SYSTEM_TASK:> Get logging settings from config file section "logging". <END_TASK> <USER_TASK:> Description: def _section(cls, opts): """Get logging settings from config file section "logging"."""
if isinstance(cls.config, LuigiConfigParser):
    return False
try:
    logging_config = cls.config['logging']
except (TypeError, KeyError, NoSectionError):
    return False
logging.config.dictConfig(logging_config)
return True
<SYSTEM_TASK:> Get an iterable with GCS folder contents. <END_TASK> <USER_TASK:> Description: def listdir(self, path): """ Get an iterable with GCS folder contents. Iterable contains paths relative to queried path. """
bucket, obj = self._path_to_bucket_and_key(path)

obj_prefix = self._add_path_delimiter(obj)
if self._is_root(obj_prefix):
    obj_prefix = ''

obj_prefix_len = len(obj_prefix)
for it in self._list_iter(bucket, obj_prefix):
    yield self._add_path_delimiter(path) + it['name'][obj_prefix_len:]
<SYSTEM_TASK:> Yields full object URIs matching the given wildcard. <END_TASK> <USER_TASK:> Description: def list_wildcard(self, wildcard_path): """Yields full object URIs matching the given wildcard. Currently only the '*' wildcard after the last path delimiter is supported. (If we need "full" wildcard functionality we should bring in gsutil dependency with its https://github.com/GoogleCloudPlatform/gsutil/blob/master/gslib/wildcard_iterator.py...) """
path, wildcard_obj = wildcard_path.rsplit('/', 1)
assert '*' not in path, "The '*' wildcard character is only supported after the last '/'"
wildcard_parts = wildcard_obj.split('*')
assert len(wildcard_parts) == 2, "Only one '*' wildcard is supported"

for it in self.listdir(path):
    if it.startswith(path + '/' + wildcard_parts[0]) and it.endswith(wildcard_parts[1]) and \
            len(it) >= len(path + '/' + wildcard_parts[0]) + len(wildcard_parts[1]):
        yield it
<SYSTEM_TASK:> Downloads the object contents to local file system. <END_TASK> <USER_TASK:> Description: def download(self, path, chunksize=None, chunk_callback=lambda _: False): """Downloads the object contents to local file system. Optionally stops after the first chunk for which chunk_callback returns True. """
chunksize = chunksize or self.chunksize
bucket, obj = self._path_to_bucket_and_key(path)

with tempfile.NamedTemporaryFile(delete=False) as fp:
    # We can't return the tempfile reference because of a bug in python: http://bugs.python.org/issue18879
    return_fp = _DeleteOnCloseFile(fp.name, 'r')

    # Special case empty files because chunk-based downloading doesn't work.
    result = self.client.objects().get(bucket=bucket, object=obj).execute()
    if int(result['size']) == 0:
        return return_fp

    request = self.client.objects().get_media(bucket=bucket, object=obj)
    downloader = http.MediaIoBaseDownload(fp, request, chunksize=chunksize)

    attempts = 0
    done = False
    while not done:
        error = None
        try:
            _, done = downloader.next_chunk()
            if chunk_callback(fp):
                done = True
        except errors.HttpError as err:
            error = err
            if err.resp.status < 500:
                raise
            logger.warning('Error downloading file, retrying', exc_info=True)
        except RETRYABLE_ERRORS as err:
            logger.warning('Error downloading file, retrying', exc_info=True)
            error = err

        if error:
            attempts += 1
            if attempts >= NUM_RETRIES:
                raise error
        else:
            attempts = 0

return return_fp
<SYSTEM_TASK:> Closes and waits for subprocess to exit. <END_TASK> <USER_TASK:> Description: def _finish(self): """ Closes and waits for subprocess to exit. """
if self._process.returncode is None:
    self._process.stdin.flush()
    self._process.stdin.close()
    self._process.wait()
    self.closed = True
<SYSTEM_TASK:> Checks if task is complete, puts the result to out_queue. <END_TASK> <USER_TASK:> Description: def check_complete(task, out_queue): """ Checks if task is complete, puts the result to out_queue. """
logger.debug("Checking if %s is complete", task)
try:
    is_complete = task.complete()
except Exception:
    is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
<SYSTEM_TASK:> Add a Task for the worker to check and possibly schedule and run. <END_TASK> <USER_TASK:> Description: def add(self, task, multiprocess=False, processes=0): """ Add a Task for the worker to check and possibly schedule and run. Returns True if task and its dependencies were successfully scheduled or completed before. """
if self._first_task is None and hasattr(task, 'task_id'):
    self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
    queue = multiprocessing.Manager().Queue()
    pool = multiprocessing.Pool(processes=processes if processes > 0 else None)
else:
    queue = DequeQueue()
    pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])

# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
    seen = {task.task_id}
    while queue_size:
        current = queue.get()
        queue_size -= 1
        item, is_complete = current
        for next in self._add(item, is_complete):
            if next.task_id not in seen:
                self._validate_task(next)
                seen.add(next.task_id)
                pool.apply_async(check_complete, [next, queue])
                queue_size += 1
except (KeyboardInterrupt, TaskException):
    raise
except Exception as ex:
    self.add_succeeded = False
    formatted_traceback = traceback.format_exc()
    self._log_unexpected_error(task)
    task.trigger_event(Event.BROKEN_TASK, task, ex)
    self._email_unexpected_error(task, formatted_traceback)
    raise
finally:
    pool.close()
    pool.join()
return self.add_succeeded
<SYSTEM_TASK:> Find dead children and put a response on the result queue. <END_TASK> <USER_TASK:> Description: def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """
for task_id, p in six.iteritems(self._running_tasks):
    if not p.is_alive() and p.exitcode:
        error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode)
        p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg)
    elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
        p.terminate()
        error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.worker_timeout)
        p.task.trigger_event(Event.TIMEOUT, p.task, error_msg)
    else:
        continue

    logger.info(error_msg)
    self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
<SYSTEM_TASK:> Returns true if a worker should stay alive given the get_work response. <END_TASK> <USER_TASK:> Description: def _keep_alive(self, get_work_response): """ Returns true if a worker should stay alive given the get_work response. If worker-keep-alive is not set, this will always return false. For an assistant, it will always return the value of worker-keep-alive. Otherwise, it will return true for nonzero n_pending_tasks. If worker-count-uniques is true, it will also require that one of the tasks is unique to this worker. """
if not self._config.keep_alive:
    return False
elif self._assistant:
    return True
elif self._config.count_last_scheduled:
    return get_work_response.n_pending_last_scheduled > 0
elif self._config.count_uniques:
    return get_work_response.n_unique_pending > 0
elif get_work_response.n_pending_tasks == 0:
    return False
elif not self._config.max_keep_alive_idle_duration:
    return True
elif not self._idle_since:
    return True
else:
    time_to_shutdown = self._idle_since + self._config.max_keep_alive_idle_duration - datetime.datetime.now()
    logger.debug("[%s] %s until shutdown", self._id, time_to_shutdown)
    return time_to_shutdown > datetime.timedelta(0)
<SYSTEM_TASK:> Returns True if all scheduled tasks were executed successfully. <END_TASK> <USER_TASK:> Description: def run(self): """ Returns True if all scheduled tasks were executed successfully. """
logger.info('Running Worker with %d processes', self.worker_processes)

sleeper = self._sleeper()
self.run_succeeded = True

self._add_worker()

while True:
    while len(self._running_tasks) >= self.worker_processes > 0:
        logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
        self._handle_next_task()

    get_work_response = self._get_work()

    if get_work_response.worker_state == WORKER_STATE_DISABLED:
        self._start_phasing_out()

    if get_work_response.task_id is None:
        if not self._stop_requesting_work:
            self._log_remote_tasks(get_work_response)
        if len(self._running_tasks) == 0:
            self._idle_since = self._idle_since or datetime.datetime.now()
            if self._keep_alive(get_work_response):
                six.next(sleeper)
                continue
            else:
                break
        else:
            self._handle_next_task()
            continue

    # task_id is not None:
    logger.debug("Pending tasks: %s", get_work_response.n_pending_tasks)
    self._run_task(get_work_response.task_id)

while len(self._running_tasks):
    logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
    self._handle_next_task()

return self.run_succeeded
<SYSTEM_TASK:> Ensure the database schema is up to date with the codebase. <END_TASK> <USER_TASK:> Description: def _upgrade_schema(engine): """ Ensure the database schema is up to date with the codebase. :param engine: SQLAlchemy engine of the underlying database. """
inspector = reflection.Inspector.from_engine(engine)
with engine.connect() as conn:

    # Upgrade 1. Add task_id column and index to tasks
    if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]:
        logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id')
        conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)')
        conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)')

    # Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard
    if 'mysql' in engine.dialect.name:
        conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT')
    elif 'oracle' in engine.dialect.name:
        conn.execute('ALTER TABLE task_parameters MODIFY value TEXT')
    elif 'mssql' in engine.dialect.name:
        conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')
    elif 'postgresql' in engine.dialect.name:
        if str([x for x in inspector.get_columns('task_parameters')
                if x['name'] == 'value'][0]['type']) != 'TEXT':
            conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')
    elif 'sqlite' in engine.dialect.name:
        # SQLite does not support changing column types. A database file will need
        # to be used to pick up this migration change.
        for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():
            if i['name'] == 'value' and i['type'] != 'TEXT':
                logger.warning(
                    'SQLite can not change column types. Please use a new database '
                    'to pickup column type changes.'
                )
    else:
        logger.warning(
            'SQLAlchemy dialect {} could not be migrated to the TEXT type'.format(
                engine.dialect
            )
        )
<SYSTEM_TASK:> Find tasks with the given task_name and the same parameters as the kwargs. <END_TASK> <USER_TASK:> Description: def find_all_by_parameters(self, task_name, session=None, **task_params): """ Find tasks with the given task_name and the same parameters as the kwargs. """
with self._session(session) as session:
    query = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name)
    for (k, v) in six.iteritems(task_params):
        alias = sqlalchemy.orm.aliased(TaskParameter)
        query = query.join(alias).filter(alias.name == k, alias.value == v)

    tasks = query.order_by(TaskEvent.ts)
    for task in tasks:
        # Sanity check
        assert all(k in task.parameters and v == str(task.parameters[k].value)
                   for (k, v) in six.iteritems(task_params))

        yield task
<SYSTEM_TASK:> Return all tasks that have been updated. <END_TASK> <USER_TASK:> Description: def find_all_runs(self, session=None): """ Return all tasks that have been updated. """
with self._session(session) as session:
    return session.query(TaskRecord).all()
<SYSTEM_TASK:> Find task with the given record ID. <END_TASK> <USER_TASK:> Description: def find_task_by_id(self, id, session=None): """ Find task with the given record ID. """
with self._session(session) as session:
    return session.query(TaskRecord).get(id)
<SYSTEM_TASK:> Returns a dictionary with keyword arguments for use with discovery <END_TASK> <USER_TASK:> Description: def get_authenticate_kwargs(oauth_credentials=None, http_=None): """Returns a dictionary with keyword arguments for use with discovery. Prioritizes oauth_credentials or an http client provided by the user. If neither is provided, falls back to default credentials provided by google's command line utilities. If that also fails, tries using httplib2.Http(). Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client. """
if oauth_credentials:
    authenticate_kwargs = {
        "credentials": oauth_credentials
    }
elif http_:
    authenticate_kwargs = {
        "http": http_
    }
else:
    # neither http_ or credentials provided
    try:
        # try default credentials
        credentials, _ = google.auth.default()
        authenticate_kwargs = {
            "credentials": credentials
        }
    except google.auth.exceptions.DefaultCredentialsError:
        # try http using httplib2
        authenticate_kwargs = {
            "http": httplib2.Http()
        }

return authenticate_kwargs
<SYSTEM_TASK:> Return a credential string for the provided task. If no valid <END_TASK> <USER_TASK:> Description: def _credentials(self): """ Return a credential string for the provided task. If no valid credentials are set, raise a NotImplementedError. """
if self.aws_account_id and self.aws_arn_role_name:
    return 'aws_iam_role=arn:aws:iam::{id}:role/{role}'.format(
        id=self.aws_account_id,
        role=self.aws_arn_role_name
    )
elif self.aws_access_key_id and self.aws_secret_access_key:
    return 'aws_access_key_id={key};aws_secret_access_key={secret}{opt}'.format(
        key=self.aws_access_key_id,
        secret=self.aws_secret_access_key,
        opt=';token={}'.format(self.aws_session_token) if self.aws_session_token else ''
    )
else:
    raise NotImplementedError("Missing Credentials. "
                              "Ensure one of the pairs of auth args below are set "
                              "in a configuration file, environment variables or by "
                              "being overridden in the task: "
                              "'aws_access_key_id' AND 'aws_secret_access_key' OR "
                              "'aws_account_id' AND 'aws_arn_role_name'")
<SYSTEM_TASK:> Will create the schema in the database <END_TASK> <USER_TASK:> Description: def create_schema(self, connection): """ Will create the schema in the database """
if '.' not in self.table:
    return

query = 'CREATE SCHEMA IF NOT EXISTS {schema_name};'.format(schema_name=self.table.split('.')[0])
connection.cursor().execute(query)
<SYSTEM_TASK:> If the target table doesn't exist, self.create_table <END_TASK> <USER_TASK:> Description: def run(self): """ If the target table doesn't exist, self.create_table will be called to attempt to create the table. """
if not (self.table):
    raise Exception("table needs to be specified")

path = self.s3_load_path()
output = self.output()
connection = output.connect()
cursor = connection.cursor()

self.init_copy(connection)
self.copy(cursor, path)
self.post_copy(cursor)

if self.enable_metadata_columns:
    self.post_copy_metacolumns(cursor)

# update marker table
output.touch(connection)
connection.commit()

# commit and clean up
connection.close()
<SYSTEM_TASK:> Determine whether the schema already exists. <END_TASK> <USER_TASK:> Description: def does_schema_exist(self, connection): """ Determine whether the schema already exists. """
if '.' in self.table:
    query = ("select 1 as schema_exists "
             "from pg_namespace "
             "where nspname = lower(%s) limit 1")
else:
    return True

cursor = connection.cursor()
try:
    schema = self.table.split('.')[0]
    cursor.execute(query, [schema])
    result = cursor.fetchone()
    return bool(result)
finally:
    cursor.close()
<SYSTEM_TASK:> Determine whether the table already exists. <END_TASK> <USER_TASK:> Description: def does_table_exist(self, connection): """ Determine whether the table already exists. """
if '.' in self.table:
    query = ("select 1 as table_exists "
             "from information_schema.tables "
             "where table_schema = lower(%s) and table_name = lower(%s) limit 1")
else:
    query = ("select 1 as table_exists "
             "from pg_table_def "
             "where tablename = lower(%s) limit 1")
cursor = connection.cursor()
try:
    cursor.execute(query, tuple(self.table.split('.')))
    result = cursor.fetchone()
    return bool(result)
finally:
    cursor.close()
<SYSTEM_TASK:> Perform pre-copy sql - such as creating table, truncating, or removing data older than x. <END_TASK> <USER_TASK:> Description: def init_copy(self, connection): """ Perform pre-copy sql - such as creating table, truncating, or removing data older than x. """
if not self.does_schema_exist(connection):
    logger.info("Creating schema for %s", self.table)
    self.create_schema(connection)

if not self.does_table_exist(connection):
    logger.info("Creating table %s", self.table)
    self.create_table(connection)

if self.enable_metadata_columns:
    self._add_metadata_columns(connection)

if self.do_truncate_table:
    logger.info("Truncating table %s", self.table)
    self.truncate_table(connection)

if self.do_prune():
    logger.info("Removing %s older than %s from %s", self.prune_column, self.prune_date, self.prune_table)
    self.prune(connection)
<SYSTEM_TASK:> Performs post-copy to fill metadata columns. <END_TASK> <USER_TASK:> Description: def post_copy_metacolums(self, cursor): """ Performs post-copy to fill metadata columns. """
logger.info('Executing post copy metadata queries')
for query in self.metadata_queries:
    cursor.execute(query)
<SYSTEM_TASK:> Returns a RedshiftTarget representing the inserted dataset. <END_TASK> <USER_TASK:> Description: def output(self): """ Returns a RedshiftTarget representing the inserted dataset. Normally you don't override this. """
# uses class name as a meta-table
return RedshiftTarget(
    host=self.host,
    database=self.database,
    user=self.user,
    password=self.password,
    table=self.__class__.__name__,
    update_id=self.update_id)
<SYSTEM_TASK:> Kill any open Redshift sessions for the given database. <END_TASK> <USER_TASK:> Description: def run(self): """ Kill any open Redshift sessions for the given database. """
connection = self.output().connect()

# kill any sessions other than ours and
# internal Redshift sessions (rdsdb)
query = ("select pg_terminate_backend(process) "
         "from STV_SESSIONS "
         "where db_name=%s "
         "and user_name != 'rdsdb' "
         "and process != pg_backend_pid()")
cursor = connection.cursor()

logger.info('Killing all open Redshift sessions for database: %s', self.database)
try:
    cursor.execute(query, (self.database,))
    cursor.close()
    connection.commit()
except psycopg2.DatabaseError as e:
    if e.message and 'EOF' in e.message:
        # sometimes this operation kills the current session.
        # rebuild the connection. Need to pause for 30-60 seconds
        # before Redshift will allow us back in.
        connection.close()
        logger.info('Pausing %s seconds for Redshift to reset connection', self.connection_reset_wait_seconds)
        time.sleep(self.connection_reset_wait_seconds)
        logger.info('Reconnecting to Redshift')
        connection = self.output().connect()
    else:
        raise

try:
    self.output().touch(connection)
    connection.commit()
finally:
    connection.close()

logger.info('Done killing all open Redshift sessions for database: %s', self.database)
<SYSTEM_TASK:> The execution of this task will write 4 lines of data on this task's target output. <END_TASK> <USER_TASK:> Description: def run(self): """ The execution of this task will write 4 lines of data on this task's target output. """
with self.output().open('w') as outfile:
    print("data 0 200 10 50 60", file=outfile)
    print("data 1 190 9 52 60", file=outfile)
    print("data 2 200 10 52 60", file=outfile)
    print("data 3 195 1 52 60", file=outfile)
<SYSTEM_TASK:> Copies the contents of a single file path to dest <END_TASK> <USER_TASK:> Description: def copy(self, path, dest, raise_if_exists=False): """ Copies the contents of a single file path to dest """
if raise_if_exists and dest in self.get_all_data():
    raise RuntimeError('Destination exists: %s' % path)
contents = self.get_all_data()[path]
self.get_all_data()[dest] = contents
<SYSTEM_TASK:> Removes the given mockfile. skip_trash doesn't have any meaning. <END_TASK> <USER_TASK:> Description: def remove(self, path, recursive=True, skip_trash=True): """ Removes the given mockfile. skip_trash doesn't have any meaning. """
if recursive:
    to_delete = []
    for s in self.get_all_data().keys():
        if s.startswith(path):
            to_delete.append(s)
    for s in to_delete:
        self.get_all_data().pop(s)
else:
    self.get_all_data().pop(path)
<SYSTEM_TASK:> Moves a single file from path to dest <END_TASK> <USER_TASK:> Description: def move(self, path, dest, raise_if_exists=False): """ Moves a single file from path to dest """
if raise_if_exists and dest in self.get_all_data():
    raise RuntimeError('Destination exists: %s' % path)
contents = self.get_all_data().pop(path)
self.get_all_data()[dest] = contents
<SYSTEM_TASK:> Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively. <END_TASK> <USER_TASK:> Description: def _recursively_freeze(value): """ Recursively walks ``Mapping``s and ``list``s and converts them to ``_FrozenOrderedDict`` and ``tuples``, respectively. """
if isinstance(value, Mapping):
    return _FrozenOrderedDict(((k, _recursively_freeze(v)) for k, v in value.items()))
elif isinstance(value, list) or isinstance(value, tuple):
    return tuple(_recursively_freeze(v) for v in value)
return value
<SYSTEM_TASK:> Loads the default from the config. Returns _no_value if it doesn't exist <END_TASK> <USER_TASK:> Description: def _get_value_from_config(self, section, name): """Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()

try:
    value = conf.get(section, name)
except (NoSectionError, NoOptionError, KeyError):
    return _no_value

return self.parse(value)
<SYSTEM_TASK:> Yield the parameter values, with optional deprecation warning as second tuple value. <END_TASK> <USER_TASK:> Description: def _value_iterator(self, task_name, param_name): """ Yield the parameter values, with optional deprecation warning as second tuple value. The parameter value will be whatever non-_no_value that is yielded first. """
cp_parser = CmdlineParser.get_instance()
if cp_parser:
    dest = self._parser_global_dest(param_name, task_name)
    found = getattr(cp_parser.known_args, dest, None)
    yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
if self._config_path:
    yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
           'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
               self._config_path['section'], self._config_path['name'], task_name, param_name))
yield (self._default, None)
<SYSTEM_TASK:> Parse a list of values from the scheduler. <END_TASK> <USER_TASK:> Description: def _parse_list(self, xs): """ Parse a list of values from the scheduler. Only possible if this is_batchable() is True. This will combine the list into a single parameter value using batch method. This should never need to be overridden. :param xs: list of values to parse and combine :return: the combined parsed values """
if not self._is_batchable():
    raise NotImplementedError('No batch method found')
elif not xs:
    raise ValueError('Empty parameter list passed to parse_list')
else:
    return self._batch_method(map(self.parse, xs))
<SYSTEM_TASK:> Parses a date string formatted like ``YYYY-MM-DD``. <END_TASK> <USER_TASK:> Description: def parse(self, s): """ Parses a date string formatted like ``YYYY-MM-DD``. """
return datetime.datetime.strptime(s, self.date_format).date()
<SYSTEM_TASK:> Add ``months`` months to ``date``. <END_TASK> <USER_TASK:> Description: def _add_months(self, date, months): """ Add ``months`` months to ``date``. Unfortunately we can't use timedeltas to add months because timedelta counts in days and there's no foolproof way to add N months in days without counting the number of days per month. """
year = date.year + (date.month + months - 1) // 12
month = (date.month + months - 1) % 12 + 1
return datetime.date(year=year, month=month, day=1)
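A minimal standalone sketch of the same month arithmetic; the helper name and the example dates below are illustrative, not part of Luigi's API:

import datetime

def add_months(date, months):
    # same floor-division/modulo trick as the body above
    year = date.year + (date.month + months - 1) // 12
    month = (date.month + months - 1) % 12 + 1
    return datetime.date(year=year, month=month, day=1)

add_months(datetime.date(2018, 11, 15), 3)    # -> datetime.date(2019, 2, 1)
add_months(datetime.date(2018, 11, 15), -11)  # -> datetime.date(2017, 12, 1)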
<SYSTEM_TASK:> Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. <END_TASK> <USER_TASK:> Description: def parse(self, val): """ Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case. """
s = str(val).lower()
if s == "true":
    return True
elif s == "false":
    return False
else:
    raise ValueError("cannot interpret '{}' as boolean".format(val))
<SYSTEM_TASK:> Parses a time delta from the input. <END_TASK> <USER_TASK:> Description: def parse(self, input): """ Parses a time delta from the input. See :py:class:`TimeDeltaParameter` for details on supported formats. """
result = self._parseIso8601(input)
if not result:
    result = self._parseSimple(input)
if result is not None:
    return result
else:
    raise ParameterException("Invalid time delta - could not parse %s" % input)
<SYSTEM_TASK:> Converts datetime.timedelta to a string <END_TASK> <USER_TASK:> Description: def serialize(self, x): """ Converts datetime.timedelta to a string :param x: the value to serialize. """
weeks = x.days // 7
days = x.days % 7
hours = x.seconds // 3600
minutes = (x.seconds % 3600) // 60
seconds = (x.seconds % 3600) % 60

result = "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds)
return result
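A standalone sketch of the same breakdown, to show the serialized format; the function name here is illustrative:

import datetime

def serialize_timedelta(x):
    # identical arithmetic to the serialize() body above
    weeks = x.days // 7
    days = x.days % 7
    hours = x.seconds // 3600
    minutes = (x.seconds % 3600) // 60
    seconds = (x.seconds % 3600) % 60
    return "{} w {} d {} h {} m {} s".format(weeks, days, hours, minutes, seconds)

serialize_timedelta(datetime.timedelta(days=10, hours=5, minutes=30))
# -> '1 w 3 d 5 h 30 m 0 s'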
<SYSTEM_TASK:> Parse an individual value from the input. <END_TASK> <USER_TASK:> Description: def parse(self, x): """ Parse an individual value from the input. :param str x: the value to parse. :return: the parsed value. """
# Since the result of json.dumps(tuple) differs from a tuple string, we must handle either case.
# A tuple string may come from a config file or from cli execution.

# t = ((1, 2), (3, 4))
# t_str = '((1,2),(3,4))'
# t_json_str = json.dumps(t)
# t_json_str == '[[1, 2], [3, 4]]'
# json.loads(t_json_str) == t
# json.loads(t_str) == ValueError: No JSON object could be decoded

# Therefore, if json.loads(x) returns a ValueError, try ast.literal_eval(x).
# ast.literal_eval(t_str) == t
try:
    # loop required to parse tuple of tuples
    return tuple(tuple(x) for x in json.loads(x, object_pairs_hook=_FrozenOrderedDict))
except (ValueError, TypeError):
    return tuple(literal_eval(x))
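A small standalone illustration of the two input shapes the body above handles (dropping object_pairs_hook, which only matters for nested JSON objects):

import json
from ast import literal_eval

t_json_str = '[[1, 2], [3, 4]]'  # what json.dumps(((1, 2), (3, 4))) produces
t_str = '((1,2),(3,4))'          # what a config file or the command line might contain

tuple(tuple(x) for x in json.loads(t_json_str))  # -> ((1, 2), (3, 4))
tuple(literal_eval(t_str))                       # -> ((1, 2), (3, 4))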
<SYSTEM_TASK:> Create a tar archive which will contain the files for the packages listed in packages. <END_TASK> <USER_TASK:> Description: def create_packages_archive(packages, filename): """ Create a tar archive which will contain the files for the packages listed in packages. """
import tarfile

tar = tarfile.open(filename, "w")

def add(src, dst):
    logger.debug('adding to tar: %s -> %s', src, dst)
    tar.add(src, dst)

def add_files_for_package(sub_package_path, root_package_path, root_package_name):
    for root, dirs, files in os.walk(sub_package_path):
        if '.svn' in dirs:
            dirs.remove('.svn')
        for f in files:
            if not f.endswith(".pyc") and not f.startswith("."):
                add(dereference(root + "/" + f),
                    root.replace(root_package_path, root_package_name) + "/" + f)

for package in packages:
    # Put a submodule's entire package in the archive. This is the
    # magic that usually packages everything you need without
    # having to attach packages/modules explicitly
    if not getattr(package, "__path__", None) and '.' in package.__name__:
        package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')

    n = package.__name__.replace(".", "/")

    if getattr(package, "__path__", None):
        # TODO: (BUG) picking only the first path does not
        # properly deal with namespaced packages in different
        # directories
        p = package.__path__[0]

        if p.endswith('.egg') and os.path.isfile(p):
            raise 'egg files not supported!!!'
            # Add the entire egg file
            # p = p[:p.find('.egg') + 4]
            # add(dereference(p), os.path.basename(p))

        else:
            # include __init__ files from parent projects
            root = []
            for parent in package.__name__.split('.')[0:-1]:
                root.append(parent)
                module_name = '.'.join(root)
                directory = '/'.join(root)

                add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
                    directory + "/__init__.py")

            add_files_for_package(p, p, n)

            # include egg-info directories that are parallel:
            for egg_info_path in glob.glob(p + '*.egg-info'):
                logger.debug(
                    'Adding package metadata to archive for "%s" found at "%s"',
                    package.__name__,
                    egg_info_path
                )
                add_files_for_package(egg_info_path, p, n)

    else:
        f = package.__file__
        if f.endswith("pyc"):
            f = f[:-3] + "py"
        if n.find(".") == -1:
            add(dereference(f), os.path.basename(f))
        else:
            add(dereference(f), n + ".py")
tar.close()
<SYSTEM_TASK:> A simple generator which flattens a sequence. <END_TASK> <USER_TASK:> Description: def flatten(sequence): """ A simple generator which flattens a sequence. Only one level is flattened. .. code-block:: python (1, (2, 3), 4) -> (1, 2, 3, 4) """
for item in sequence:
    if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
        for i in item:
            yield i
    else:
        yield item
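A quick usage sketch with the generator restated standalone, showing that only one level is flattened and strings are left intact:

def flatten(sequence):
    for item in sequence:
        if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
            for i in item:
                yield i
        else:
            yield item

list(flatten((1, (2, 3), 4)))        # -> [1, 2, 3, 4]
list(flatten(['ab', ['cd', 'ef']]))  # strings are not flattened -> ['ab', 'cd', 'ef']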
<SYSTEM_TASK:> Uses mechanize to fetch the actual task logs from the task tracker. <END_TASK> <USER_TASK:> Description: def fetch_task_failures(tracking_url): """ Uses mechanize to fetch the actual task logs from the task tracker. This is highly opportunistic, and we might not succeed. So we set a low timeout and hope it works. If it does not, it's not the end of the world. TODO: Yarn has a REST API that we should probably use instead: http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html """
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)

links = list(b.links(text_regex='Last 4KB'))  # For some reason text_regex='All' doesn't work... no idea why
links = random.sample(links, min(10, len(links)))  # Fetch a random subset of all failed tasks, so not to be biased towards the early fails

error_text = []
for link in links:
    task_url = link.url.replace('&start=-4097', '&start=-100000')  # Increase the offset
    logger.debug('Fetching data from %s', task_url)
    b2 = mechanize.Browser()
    try:
        r = b2.open(task_url, timeout=timeout)
        data = r.read()
    except Exception as e:
        logger.debug('Error fetching data from %s: %s', task_url, e)
        continue

    # Try to get the hex-encoded traceback back from the output
    for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
        error_text.append('---------- %s:' % task_url)
        error_text.append(exc.split('=')[-1].decode('hex'))

return '\n'.join(error_text)
<SYSTEM_TASK:> Get the MapReduce runner for this job. <END_TASK> <USER_TASK:> Description: def job_runner(self): # We recommend that you define a subclass, override this method and set up your own config """ Get the MapReduce runner for this job. If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used. Otherwise, the LocalJobRunner which streams all data through the local machine will be used (great for testing). """
outputs = luigi.task.flatten(self.output())
for output in outputs:
    if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
        warnings.warn("Job is using one or more non-HdfsTarget outputs" +
                      " so it will be run in local mode")
        return LocalJobRunner()
else:
    return DefaultHadoopJobRunner()
<SYSTEM_TASK:> Writer format is a method which iterates over the output records <END_TASK> <USER_TASK:> Description: def writer(self, outputs, stdout, stderr=sys.stderr): """ Writer format is a method which iterates over the output records from the reducer and formats them for output. The default implementation outputs tab separated items. """
for output in outputs:
    try:
        output = flatten(output)
        if self.data_interchange_format == "json":
            # Only dump one json string, and skip another one, maybe key or value.
            output = filter(lambda x: x, output)
        else:
            # JSON is already serialized, so we put `self.serialize` in an else statement.
            output = map(self.serialize, output)
        print("\t".join(output), file=stdout)
    except BaseException:
        print(output, file=stderr)
        raise
<SYSTEM_TASK:> Increments any unflushed counter values. <END_TASK> <USER_TASK:> Description: def _flush_batch_incr_counter(self): """ Increments any unflushed counter values. """
for key, count in six.iteritems(self._counter_dict):
    if count == 0:
        continue
    args = list(key) + [count]
    self._incr_counter(*args)
    self._counter_dict[key] = 0
<SYSTEM_TASK:> Iterate over input and call the mapper for each item. <END_TASK> <USER_TASK:> Description: def _map_input(self, input_stream): """ Iterate over input and call the mapper for each item. If the job has a parser defined, the return values from the parser will be passed as arguments to the mapper. If the input is coded output from a previous run, the arguments will be split into key and value. """
for record in self.reader(input_stream):
    for output in self.mapper(*record):
        yield output
if self.final_mapper != NotImplemented:
    for output in self.final_mapper():
        yield output
self._flush_batch_incr_counter()
<SYSTEM_TASK:> Iterate over input, collect values with the same key, and call the reducer for each unique key. <END_TASK> <USER_TASK:> Description: def _reduce_input(self, inputs, reducer, final=NotImplemented): """ Iterate over input, collect values with the same key, and call the reducer for each unique key. """
for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
    for output in reducer(self.deserialize(key), (v[1] for v in values)):
        yield output
if final != NotImplemented:
    for output in final():
        yield output
self._flush_batch_incr_counter()
<SYSTEM_TASK:> Run the mapper on the hadoop node. <END_TASK> <USER_TASK:> Description: def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout): """ Run the mapper on the hadoop node. """
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
    self.writer(outputs, stdout)
else:
    self.internal_writer(outputs, stdout)
<SYSTEM_TASK:> Run the reducer on the hadoop node. <END_TASK> <USER_TASK:> Description: def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout): """ Run the reducer on the hadoop node. """
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)),
                             self.reducer, self.final_reducer)
self.writer(outputs, stdout)
<SYSTEM_TASK:> Reader which uses python eval on each part of a tab separated string. <END_TASK> <USER_TASK:> Description: def internal_reader(self, input_stream): """ Reader which uses python eval on each part of a tab separated string. Yields a tuple of python objects. """
for input_line in input_stream:
    yield list(map(self.deserialize, input_line.split("\t")))
<SYSTEM_TASK:> Writer which outputs the python repr for each item. <END_TASK> <USER_TASK:> Description: def internal_writer(self, outputs, stdout): """ Writer which outputs the python repr for each item. """
for output in outputs:
    print("\t".join(map(self.internal_serialize, output)), file=stdout)
<SYSTEM_TASK:> Get a psycopg2 connection object to the database where the table is. <END_TASK> <USER_TASK:> Description: def connect(self): """ Get a psycopg2 connection object to the database where the table is. """
connection = psycopg2.connect(
    host=self.host,
    port=self.port,
    database=self.database,
    user=self.user,
    password=self.password)
connection.set_client_encoding('utf-8')
return connection
<SYSTEM_TASK:> Applied to each column of every row returned by `rows`. <END_TASK> <USER_TASK:> Description: def map_column(self, value): """ Applied to each column of every row returned by `rows`. Default behaviour is to escape special characters and identify any self.null_values. """
if value in self.null_values:
    return r'\\N'
else:
    return default_escape(six.text_type(value))
<SYSTEM_TASK:> Returns a PostgresTarget representing the inserted dataset. <END_TASK> <USER_TASK:> Description: def output(self): """ Returns a PostgresTarget representing the inserted dataset. Normally you don't override this. """
return PostgresTarget(
    host=self.host,
    database=self.database,
    user=self.user,
    password=self.password,
    table=self.table,
    update_id=self.update_id,
    port=self.port
)
<SYSTEM_TASK:> Get configs singleton for parser <END_TASK> <USER_TASK:> Description: def get_config(parser=PARSER): """Get configs singleton for parser """
parser_class = PARSERS[parser]
_check_parser(parser_class, parser)
return parser_class.instance()
<SYSTEM_TASK:> Select config parser by file extension and add path into parser. <END_TASK> <USER_TASK:> Description: def add_config_path(path): """Select config parser by file extension and add path into parser. """
if not os.path.isfile(path):
    warnings.warn("Config file does not exist: {path}".format(path=path))
    return False

# select parser by file extension
_base, ext = os.path.splitext(path)
if ext and ext[1:] in PARSERS:
    parser = ext[1:]
else:
    parser = PARSER
parser_class = PARSERS[parser]

_check_parser(parser_class, parser)
if parser != PARSER:
    msg = (
        "Config for {added} parser added, but used {used} parser. "
        "Set up right parser via env var: "
        "export LUIGI_CONFIG_PARSER={added}"
    )
    warnings.warn(msg.format(added=parser, used=PARSER))

# add config path to parser
parser_class.add_config_path(path)
return True
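A tiny sketch of the extension-based parser selection; the path is hypothetical:

import os

_base, ext = os.path.splitext('/etc/luigi/luigi.toml')  # hypothetical config path
ext[1:]  # -> 'toml'; used as the parser key if it is registered in PARSERS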
<SYSTEM_TASK:> This method compresses and uploads packages to the cluster <END_TASK> <USER_TASK:> Description: def _setup_packages(self, sc): """ This method compresses and uploads packages to the cluster """
packages = self.py_packages
if not packages:
    return
for package in packages:
    mod = importlib.import_module(package)
    try:
        mod_path = mod.__path__[0]
    except AttributeError:
        mod_path = mod.__file__
    tar_path = os.path.join(self.run_path, package + '.tar.gz')
    tar = tarfile.open(tar_path, "w:gz")
    tar.add(mod_path, os.path.basename(mod_path))
    tar.close()
    sc.addPyFile(tar_path)
<SYSTEM_TASK:> Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". <END_TASK> <USER_TASK:> Description: def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception): """ Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". Arguments: kind -- is either map, combiner, or reduce """
try:
    # Set up logging.
    logging.basicConfig(level=logging.WARN)

    kind = args is not None and args[1] or sys.argv[1]
    Runner().run(kind, stdin=stdin, stdout=stdout)
except Exception as exc:
    # Dump encoded data that we will try to fetch using mechanize
    print_exception(exc)
    raise
<SYSTEM_TASK:> Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code. <END_TASK> <USER_TASK:> Description: def run_with_retcodes(argv): """ Run luigi with command line parsing, but raise ``SystemExit`` with the configured exit code. Note: Usually you use the luigi binary directly and don't call this function yourself. :param argv: Should (conceptually) be ``sys.argv[1:]`` """
logger = logging.getLogger('luigi-interface')
with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
    retcodes = retcode()

worker = None
try:
    worker = luigi.interface._run(argv).worker
except luigi.interface.PidLockAlreadyTakenExit:
    sys.exit(retcodes.already_running)
except Exception:
    # Some errors occur before logging is set up, we set it up now
    env_params = luigi.interface.core()
    InterfaceLogging.setup(env_params)
    logger.exception("Uncaught exception in luigi")
    sys.exit(retcodes.unhandled_exception)

with luigi.cmdline_parser.CmdlineParser.global_instance(argv):
    task_sets = luigi.execution_summary._summary_dict(worker)
    root_task = luigi.execution_summary._root_task(worker)
    non_empty_categories = {k: v for k, v in task_sets.items() if v}.keys()

def has(status):
    assert status in luigi.execution_summary._ORDERED_STATUSES
    return status in non_empty_categories

codes_and_conds = (
    (retcodes.missing_data, has('still_pending_ext')),
    (retcodes.task_failed, has('failed')),
    (retcodes.already_running, has('run_by_other_worker')),
    (retcodes.scheduling_error, has('scheduling_error')),
    (retcodes.not_run, has('not_run')),
)
expected_ret_code = max(code * (1 if cond else 0) for code, cond in codes_and_conds)

if expected_ret_code == 0 and \
        root_task not in task_sets["completed"] and \
        root_task not in task_sets["already_done"]:
    sys.exit(retcodes.not_run)
else:
    sys.exit(expected_ret_code)
<SYSTEM_TASK:> Tweaks glob into a list of more specific globs that together still cover paths and not too much extra. <END_TASK> <USER_TASK:> Description: def _constrain_glob(glob, paths, limit=5): """ Tweaks glob into a list of more specific globs that together still cover paths and not too much extra. Saves us minutes long listings for long dataset histories. Specifically, in this implementation the leftmost occurrences of "[0-9]" give rise to a few separate globs that each specialize the expression to digits that actually occur in paths. """
def digit_set_wildcard(chars):
    """
    Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
    """
    chars = sorted(chars)
    if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
        return '[%s-%s]' % (chars[0], chars[-1])
    else:
        return '[%s]' % ''.join(chars)

current = {glob: paths}
while True:
    pos = list(current.keys())[0].find('[0-9]')
    if pos == -1:
        # no wildcard expressions left to specialize in the glob
        return list(current.keys())
    char_sets = {}
    for g, p in six.iteritems(current):
        char_sets[g] = sorted({path[pos] for path in p})
    if sum(len(s) for s in char_sets.values()) > limit:
        return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
    for g, s in six.iteritems(char_sets):
        for c in s:
            new_glob = g.replace('[0-9]', c, 1)
            new_paths = list(filter(lambda p: p[pos] == c, current[g]))
            current[new_glob] = new_paths
        del current[g]
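A standalone restatement of the inner helper above, to show how digit sets become compact wildcard expressions:

def digit_set_wildcard(chars):
    # copy of the inner helper for illustration
    chars = sorted(chars)
    if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
        return '[%s-%s]' % (chars[0], chars[-1])
    else:
        return '[%s]' % ''.join(chars)

digit_set_wildcard({'1', '2', '3'})  # contiguous digits -> '[1-3]'
digit_set_wildcard({'0', '5'})       # non-contiguous -> '[05]'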
<SYSTEM_TASK:> Builds a glob listing existing output paths. <END_TASK> <USER_TASK:> Description: def _get_per_location_glob(tasks, outputs, regexes): """ Builds a glob listing existing output paths. Esoteric reverse engineering, but worth it given that (compared to an equivalent contiguousness guarantee by naive complete() checks) requests to the filesystem are cut by orders of magnitude, and users don't even have to retrofit existing tasks anyhow. """
paths = [o.path for o in outputs]
# naive, because some matches could be confused by numbers earlier
# in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
matches = [r.search(p) for r, p in zip(regexes, paths)]

for m, p, t in zip(matches, paths, tasks):
    if m is None:
        raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))

n_groups = len(matches[0].groups())
# the most common position of every group is likely
# to be conclusive hit or miss
positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]

glob = list(paths[0])  # FIXME sanity check that it's the same for all paths
for start, end in positions:
    glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]

# chop off the last path item
# (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
return ''.join(glob).rsplit('/', 1)[0]
<SYSTEM_TASK:> Get all the paths that do in fact exist. Returns a set of all existing paths. <END_TASK> <USER_TASK:> Description: def _list_existing(filesystem, glob, paths): """ Get all the paths that do in fact exist. Returns a set of all existing paths. Takes a luigi.target.FileSystem object, a str which represents a glob and a list of strings representing paths. """
globs = _constrain_glob(glob, paths)
time_start = time.time()
listing = []
for g in sorted(globs):
    logger.debug('Listing %s', g)
    if filesystem.exists(g):
        listing.extend(filesystem.listdir(g))
logger.debug('%d %s listings took %f s to return %d items',
             len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
return set(listing)
<SYSTEM_TASK:> Efficiently determines missing datetimes by filesystem listing. <END_TASK> <USER_TASK:> Description: def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re): """ Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.) """
filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
listing = set()
for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)):
    # transposed, so here we're iterating over logical outputs, not datetimes
    listing |= _list_existing(f, g, p)

# quickly learn everything that's missing
missing_datetimes = []
for d, p in zip(datetimes, paths_by_datetime):
    if not set(p) <= listing:
        missing_datetimes.append(d)

return missing_datetimes
<SYSTEM_TASK:> Override in subclasses to do bulk checks. <END_TASK> <USER_TASK:> Description: def missing_datetimes(self, finite_datetimes): """ Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow. """
return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
<SYSTEM_TASK:> Given a dictionary of parameters, will extract the ranged task parameter value <END_TASK> <USER_TASK:> Description: def parameters_to_datetime(self, p): """ Given a dictionary of parameters, will extract the ranged task parameter value """
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day)
<SYSTEM_TASK:> Simply returns the points in time that correspond to turn of day. <END_TASK> <USER_TASK:> Description: def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to turn of day. """
date_start = datetime(finite_start.year, finite_start.month, finite_start.day)

dates = []
for i in itertools.count():
    t = date_start + timedelta(days=i)
    if t >= finite_stop:
        return dates
    if t >= finite_start:
        dates.append(t)
<SYSTEM_TASK:> Simply returns the points in time that correspond to whole hours. <END_TASK> <USER_TASK:> Description: def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to whole hours. """
datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)

datehours = []
for i in itertools.count():
    t = datehour_start + timedelta(hours=i)
    if t >= finite_stop:
        return datehours
    if t >= finite_start:
        datehours.append(t)
<SYSTEM_TASK:> Simply returns the points in time that correspond to a whole number of minutes intervals. <END_TASK> <USER_TASK:> Description: def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to a whole number of minutes intervals. """
# Validate that the minutes_interval divides 60 evenly and is greater than 0 and less than 60
if not (0 < self.minutes_interval < 60):
    raise ParameterException('minutes-interval must be within 0..60')
if (60 / self.minutes_interval) * self.minutes_interval != 60:
    raise ParameterException('minutes-interval does not evenly divide 60')

# start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
start_minute = int(finite_start.minute / self.minutes_interval) * self.minutes_interval
datehour_start = datetime(
    year=finite_start.year,
    month=finite_start.month,
    day=finite_start.day,
    hour=finite_start.hour,
    minute=start_minute)

datehours = []
for i in itertools.count():
    t = datehour_start + timedelta(minutes=i * self.minutes_interval)
    if t >= finite_stop:
        return datehours
    if t >= finite_start:
        datehours.append(t)
<SYSTEM_TASK:> Simply returns the points in time that correspond to turn of month. <END_TASK> <USER_TASK:> Description: def finite_datetimes(self, finite_start, finite_stop): """ Simply returns the points in time that correspond to turn of month. """
start_date = self._align(finite_start)
aligned_stop = self._align(finite_stop)

dates = []
for m in itertools.count():
    t = start_date + relativedelta(months=m)
    if t >= aligned_stop:
        return dates
    if t >= finite_start:
        dates.append(t)
<SYSTEM_TASK:> Create a SQL Server connection and return a connection object <END_TASK> <USER_TASK:> Description: def connect(self): """ Create a SQL Server connection and return a connection object """
connection = _mssql.connect(user=self.user,
                            password=self.password,
                            server=self.host,
                            port=self.port,
                            database=self.database)
return connection
<SYSTEM_TASK:> Retrieve an opener for the given protocol <END_TASK> <USER_TASK:> Description: def get_opener(self, name): """Retrieve an opener for the given protocol :param name: name of the opener to open :type name: string :raises NoOpenerError: if no opener has been registered of that name """
if name not in self.registry:
    raise NoOpenerError("No opener for %s" % name)
index = self.registry[name]
return self.openers[index]
<SYSTEM_TASK:> Adds an opener to the registry <END_TASK> <USER_TASK:> Description: def add(self, opener): """Adds an opener to the registry :param opener: Opener object :type opener: Opener inherited object """
index = len(self.openers)
self.openers[index] = opener
for name in opener.names:
    self.registry[name] = index
<SYSTEM_TASK:> Converts the query string from a target uri, uses <END_TASK> <USER_TASK:> Description: def conform_query(cls, query): """Converts the query string from a target uri, uses cls.allowed_kwargs, and cls.filter_kwargs to drive logic. :param query: Unparsed query string :type query: urllib.parse.unsplit(uri).query :returns: Dictionary of parsed values, everything in cls.allowed_kwargs with values set to True will be parsed as json strings. """
query = parse_qs(query, keep_blank_values=True)

# Remove any unexpected keywords from the query string.
if cls.filter_kwargs:
    query = {x: y for x, y in query.items() if x in cls.allowed_kwargs}

for key, vals in query.items():
    # Multiple values of the same name could be passed use first
    # Also params without strings will be treated as true values
    if cls.allowed_kwargs.get(key, False):
        val = json.loads(vals[0] or 'true')
    else:
        val = vals[0] or 'true'

    query[key] = val

return query
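A small standalone illustration of the parse_qs step the body above starts from; the query string is hypothetical:

from urllib.parse import parse_qs

# 'cache' has no value and would later be treated as a true flag by conform_query
parse_qs('sheet=Sheet1&cache', keep_blank_values=True)
# -> {'sheet': ['Sheet1'], 'cache': ['']}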
<SYSTEM_TASK:> Please don't use. Instead use the `luigi` binary. <END_TASK> <USER_TASK:> Description: def run(*args, **kwargs): """ Please don't use. Instead use the `luigi` binary. Run from cmdline using argparse. :param use_dynamic_argparse: Deprecated and ignored """
luigi_run_result = _run(*args, **kwargs)
return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded
<SYSTEM_TASK:> Run internally, bypassing the cmdline parsing. <END_TASK> <USER_TASK:> Description: def build(tasks, worker_scheduler_factory=None, detailed_summary=False, **env_params): """ Run internally, bypassing the cmdline parsing. Useful if you have some luigi code that you want to run internally. Example: .. code-block:: python luigi.build([MyTask1(), MyTask2()], local_scheduler=True) One notable difference is that `build` defaults to not using the identical process lock. Otherwise, `build` would only be callable once from each process. :param tasks: :param worker_scheduler_factory: :param env_params: :return: True if there were no scheduling errors, even if tasks may fail. """
if "no_lock" not in env_params:
    env_params["no_lock"] = True

luigi_run_result = _schedule_and_run(tasks, worker_scheduler_factory, override_defaults=env_params)
return luigi_run_result if detailed_summary else luigi_run_result.scheduling_succeeded
<SYSTEM_TASK:> Coerce input arguments to use temporary files when used for output. <END_TASK> <USER_TASK:> Description: def fix_paths(job): """ Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path. """
tmp_files = []
args = []

for x in job.args():
    if isinstance(x, luigi.contrib.hdfs.HdfsTarget):  # input/output
        if x.exists() or not job.atomic_output():  # input
            args.append(x.path)
        else:  # output
            x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path
            y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))
            tmp_files.append((y, x_path_no_slash))
            logger.info('Using temp path: %s for path %s', y.path, x.path)
            args.append(y.path)
    else:
        try:
            # hopefully the target has a path to use
            args.append(x.path)
        except AttributeError:
            # if there's no path then hope converting it to a string will work
            args.append(str(x))

return (tmp_files, args)
<SYSTEM_TASK:> Get name of first active job queue <END_TASK> <USER_TASK:> Description: def get_active_queue(self): """Get name of first active job queue"""
# Get dict of active queues keyed by name
        queues = {q['jobQueueName']: q
                  for q in self._client.describe_job_queues()['jobQueues']
                  if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
        if not queues:
            raise Exception('No job queues with state=ENABLED and status=VALID')

        # Pick the first queue as default
        return list(queues.keys())[0]
<SYSTEM_TASK:> Wrap submit_job with useful defaults <END_TASK> <USER_TASK:> Description: def submit_job(self, job_definition, parameters, job_name=None, queue=None): """Wrap submit_job with useful defaults"""
if job_name is None:
            job_name = _random_id()
        response = self._client.submit_job(
            jobName=job_name,
            jobQueue=queue or self.get_active_queue(),
            jobDefinition=job_definition,
            parameters=parameters
        )
        return response['jobId']
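A hypothetical call; the job definition name, parameter keys, and job name are placeholders, and the client class name is assumed from this module. Leaving out the queue falls back to get_active_queue() above.

client = BatchClient()  # assumed wrapper exposing the two methods above
job_id = client.submit_job(
    job_definition='my-job-definition:3',        # placeholder AWS Batch job definition
    parameters={'input': 's3://bucket/in.csv'},  # substituted into the job's command
    job_name='nightly-report',                   # optional; a random id is used otherwise
)
print('Submitted AWS Batch job', job_id)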
<SYSTEM_TASK:> Arbitrarily picks an object in input and reads the Avro schema from it. <END_TASK> <USER_TASK:> Description: def _get_input_schema(self): """Arbitrarily picks an object in input and reads the Avro schema from it."""
assert avro, 'avro module required'
        input_target = flatten(self.input())[0]
        input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
        input_uri = self.source_uris()[0]
        if '*' in input_uri:
            file_uris = list(input_fs.list_wildcard(input_uri))
            if file_uris:
                input_uri = file_uris[0]
            else:
                raise RuntimeError('No match for ' + input_uri)

        schema = []
        exception_reading_schema = []

        def read_schema(fp):
            # fp contains the file part downloaded thus far. We rely on the DataFileReader
            # initializing itself as soon as the file header with the schema has been
            # downloaded, without requiring the remainder of the file...
            try:
                reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
                schema[:] = [reader.datum_reader.writers_schema]
            except Exception as e:
                # Save but assume benign unless schema reading ultimately fails. The benign
                # exception in case of insufficiently big downloaded file part seems to be:
                # TypeError('ord() expected a character, but string of length 0 found',).
                exception_reading_schema[:] = [e]
                return False
            return True

        input_fs.download(input_uri, 64 * 1024, read_schema).close()
        if not schema:
            raise exception_reading_schema[0]
        return schema[0]
<SYSTEM_TASK:> Returns whether the given table exists. <END_TASK> <USER_TASK:> Description: def table_exists(self, table): """Returns whether the given table exists. :param table: :type table: BQTable """
if not self.dataset_exists(table.dataset):
            return False

        try:
            self.client.tables().get(projectId=table.project_id,
                                     datasetId=table.dataset_id,
                                     tableId=table.table_id).execute()
        except http.HttpError as ex:
            if ex.resp.status == 404:
                return False
            raise

        return True
<SYSTEM_TASK:> Creates a new dataset with the default permissions. <END_TASK> <USER_TASK:> Description: def make_dataset(self, dataset, raise_if_exists=False, body=None): """Creates a new dataset with the default permissions. :param dataset: :type dataset: BQDataset :param raise_if_exists: whether to raise an exception if the dataset already exists. :raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists """
if body is None:
            body = {}

        try:
            # Construct a message body in the format required by
            # https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
            body['datasetReference'] = {
                'projectId': dataset.project_id,
                'datasetId': dataset.dataset_id
            }
            if dataset.location is not None:
                body['location'] = dataset.location
            self.client.datasets().insert(projectId=dataset.project_id, body=body).execute()
        except http.HttpError as ex:
            if ex.resp.status == 409:
                if raise_if_exists:
                    raise luigi.target.FileAlreadyExists()
            else:
                raise
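A hypothetical call; the project, dataset, and location values are placeholders, and BQDataset is assumed to be the lightweight container (project_id, dataset_id, location) used by this client.

from luigi.contrib.bigquery import BigQueryClient, BQDataset

client = BigQueryClient()  # assumed to pick up application-default credentials
dataset = BQDataset(project_id='my-project', dataset_id='reporting', location='EU')
client.make_dataset(dataset, raise_if_exists=False)  # no-op if it already exists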
<SYSTEM_TASK:> Returns the list of datasets in a given project. <END_TASK> <USER_TASK:> Description: def list_datasets(self, project_id): """Returns the list of datasets in a given project. :param project_id: :type project_id: str """
request = self.client.datasets().list(projectId=project_id, maxResults=1000)
        response = request.execute()

        while response is not None:
            for ds in response.get('datasets', []):
                yield ds['datasetReference']['datasetId']

            request = self.client.datasets().list_next(request, response)
            if request is None:
                break

            response = request.execute()
<SYSTEM_TASK:> Returns the list of tables in a given dataset. <END_TASK> <USER_TASK:> Description: def list_tables(self, dataset): """Returns the list of tables in a given dataset. :param dataset: :type dataset: BQDataset """
request = self.client.tables().list(projectId=dataset.project_id, datasetId=dataset.dataset_id, maxResults=1000)
        response = request.execute()

        while response is not None:
            for t in response.get('tables', []):
                yield t['tableReference']['tableId']

            request = self.client.tables().list_next(request, response)
            if request is None:
                break

            response = request.execute()
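Both listing helpers are generators, so they can be iterated lazily. Reusing the client and BQDataset from the sketch above (project and dataset names remain placeholders):

for dataset_id in client.list_datasets('my-project'):
    print(dataset_id)

for table_id in client.list_tables(BQDataset(project_id='my-project',
                                             dataset_id='reporting',
                                             location=None)):
    print(table_id)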
<SYSTEM_TASK:> Returns the SQL query for a view, or None if it doesn't exist or is not a view. <END_TASK> <USER_TASK:> Description: def get_view(self, table): """Returns the SQL query for a view, or None if it doesn't exist or is not a view. :param table: The table containing the view. :type table: BQTable """
request = self.client.tables().get(projectId=table.project_id,
                                           datasetId=table.dataset_id,
                                           tableId=table.table_id)
        try:
            response = request.execute()
        except http.HttpError as ex:
            if ex.resp.status == 404:
                return None
            raise

        return response['view']['query'] if 'view' in response else None
<SYSTEM_TASK:> Updates the SQL query for a view. <END_TASK> <USER_TASK:> Description: def update_view(self, table, view): """Updates the SQL query for a view. If the output table exists, it is replaced with the supplied view query. Otherwise a new table is created with this view. :param table: The table to contain the view. :type table: BQTable :param view: The SQL query for the view. :type view: str """
body = {
            'tableReference': {
                'projectId': table.project_id,
                'datasetId': table.dataset_id,
                'tableId': table.table_id
            },
            'view': {
                'query': view
            }
        }

        if self.table_exists(table):
            self.client.tables().update(projectId=table.project_id,
                                        datasetId=table.dataset_id,
                                        tableId=table.table_id,
                                        body=body).execute()
        else:
            self.client.tables().insert(projectId=table.project_id,
                                        datasetId=table.dataset_id,
                                        body=body).execute()
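A hypothetical round trip through the two view helpers, reusing the client from the earlier sketch; table names and SQL are placeholders, and BQTable is assumed to take the four fields used above.

from luigi.contrib.bigquery import BQTable

table = BQTable(project_id='my-project', dataset_id='reporting',
                table_id='daily_totals_view', location=None)
view_sql = 'SELECT day, SUM(amount) AS total FROM `my-project.raw.events` GROUP BY day'

if client.get_view(table) != view_sql:   # None if the table is missing or not a view
    client.update_view(table, view_sql)  # creates or replaces the view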
<SYSTEM_TASK:> Runs a BigQuery "job". See the documentation for the format of body. <END_TASK> <USER_TASK:> Description: def run_job(self, project_id, body, dataset=None): """Runs a BigQuery "job". See the documentation for the format of body. .. note:: You probably don't need to use this directly. Use the tasks defined below. :param dataset: :type dataset: BQDataset """
if dataset and not self.dataset_exists(dataset):
            self.make_dataset(dataset)

        new_job = self.client.jobs().insert(projectId=project_id, body=body).execute()
        job_id = new_job['jobReference']['jobId']
        logger.info('Started import job %s:%s', project_id, job_id)
        while True:
            status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10)
            if status['status']['state'] == 'DONE':
                if status['status'].get('errorResult'):
                    raise Exception('BigQuery job failed: {}'.format(status['status']['errorResult']))
                return

            logger.info('Waiting for job %s:%s to complete...', project_id, job_id)
            time.sleep(5)
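A sketch of a body for a simple query job in the format accepted by the BigQuery v2 jobs.insert API, run with the client from the earlier sketch; the project and SQL are placeholders.

body = {
    'configuration': {
        'query': {
            'query': 'SELECT COUNT(*) AS n FROM `my-project.reporting.daily_totals`',
            'useLegacySql': False,
        }
    }
}
client.run_job('my-project', body)  # blocks, polling every 5 seconds until the job is DONE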
<SYSTEM_TASK:> The fully-qualified URIs that point to your data in Google Cloud Storage. <END_TASK> <USER_TASK:> Description: def source_uris(self): """The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name."""
return [x.path for x in luigi.task.flatten(self.input())]
<SYSTEM_TASK:> Execute a shell command remotely and return the output. <END_TASK> <USER_TASK:> Description: def check_output(self, cmd): """ Execute a shell command remotely and return the output. Simplified version of Popen when you only want the output as a string and detect any errors. """
p = self.Popen(cmd, stdout=subprocess.PIPE)
        output, _ = p.communicate()
        if p.returncode != 0:
            raise RemoteCalledProcessError(p.returncode, cmd, self.host, output=output)
        return output
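A hypothetical call; the host name is a placeholder, key-based authentication is assumed, and RemoteContext is assumed to be the surrounding class these ssh helpers belong to.

from luigi.contrib.ssh import RemoteContext

ctx = RemoteContext('example.com')
uptime = ctx.check_output(['uptime']).decode().strip()  # check_output returns bytes
print(uptime)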
<SYSTEM_TASK:> Return `True` if the directory at `path` exists, False otherwise. <END_TASK> <USER_TASK:> Description: def isdir(self, path): """ Return `True` if the directory at `path` exists, False otherwise. """
try:
            self.remote_context.check_output(["test", "-d", path])
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                return False
            else:
                raise

        return True
<SYSTEM_TASK:> Returns the command line of a process. <END_TASK> <USER_TASK:> Description: def getpcmd(pid): """ Returns the command line of a process. :param pid: Process id to look up. """
if os.name == "nt":
        # Use wmic command instead of ps on Windows.
        cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid, )
        with os.popen(cmd, 'r') as p:
            lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
            if lines:
                _, val = lines
                return val
    elif sys.platform == "darwin":
        # Use pgrep instead of /proc on macOS.
        pidfile = ".%d.pid" % (pid, )
        with open(pidfile, 'w') as f:
            f.write(str(pid))
        try:
            p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
            stdout, _ = p.communicate()
            line = stdout.decode('utf8').strip()
            if line:
                _, scmd = line.split(' ', 1)
                return scmd
        finally:
            os.unlink(pidfile)
    else:
        # Use the /proc filesystem
        # At least on android there have been some issues with not all
        # process infos being readable. In these cases using the `ps` command
        # worked. See the pull request at
        # https://github.com/spotify/luigi/pull/1876
        try:
            with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
                if six.PY3:
                    return fh.read().replace('\0', ' ').rstrip()
                else:
                    return fh.read().replace('\0', ' ').decode('utf8').rstrip()
        except IOError:
            # the system may not allow reading the command line
            # of a process owned by another user
            pass

    # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
    return '[PROCESS_WITH_PID={}]'.format(pid)
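For example, asking about the current interpreter (output varies by platform):

import os

print(getpcmd(os.getpid()))  # e.g. 'python my_script.py', or the bracketed fallback string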
<SYSTEM_TASK:> Makes sure the process is only run once at a time with the same name. <END_TASK> <USER_TASK:> Description: def acquire_for(pid_dir, num_available=1, kill_signal=None): """ Makes sure the process is only run once at a time with the same name. Notice that since we check the process name, different parameters to the same command can spawn multiple processes at the same time, i.e. running "/usr/bin/my_process" does not prevent anyone from launching "/usr/bin/my_process --foo bar". """
my_pid, my_cmd, pid_file = get_info(pid_dir)

    # Create a pid file if it does not exist
    try:
        os.mkdir(pid_dir)
        os.chmod(pid_dir, 0o777)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
        pass

    # Let variable "pids" be all pids that exist in the .pid-file and are still
    # running the same command.
    pids = {pid for pid in _read_pids_file(pid_file) if getpcmd(pid) == my_cmd}

    if kill_signal is not None:
        for pid in pids:
            os.kill(pid, kill_signal)
        print('Sent kill signal to Pids: {}'.format(pids))
        # We allow for the killer to progress, yet we don't want these to stack
        # up! So we only allow it once.
        num_available += 1

    if len(pids) >= num_available:
        # We are already running under a different pid
        print('Pid(s) {} already running'.format(pids))
        if kill_signal is not None:
            print('Note: There has (probably) been 1 other "--take-lock"'
                  ' process which continued to run! Probably no need to run'
                  ' this one as well.')
        return False

    _write_pids_file(pid_file, pids | {my_pid})

    return True
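A hypothetical guard at the top of a script; the pid directory is a placeholder.

import sys

if not acquire_for('/tmp/my_process_lock', num_available=1):
    sys.exit('an identical command is already running')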
<SYSTEM_TASK:> Add a failure event with the current timestamp. <END_TASK> <USER_TASK:> Description: def add_failure(self): """ Add a failure event with the current timestamp. """
failure_time = time.time()

        if not self.first_failure_time:
            self.first_failure_time = failure_time

        self.failures.append(failure_time)
<SYSTEM_TASK:> Return the number of failures in the window. <END_TASK> <USER_TASK:> Description: def num_failures(self): """ Return the number of failures in the window. """
min_time = time.time() - self.window

        while self.failures and self.failures[0] < min_time:
            self.failures.popleft()

        return len(self.failures)
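These two methods assume a surrounding object carrying a `window` length in seconds, a `failures` deque, and a `first_failure_time` attribute. A minimal self-contained sketch; the class name and default window length are assumptions.

import collections
import time

class FailureWindow:
    # Stand-in for the scheduler's failure tracker used by the two methods above.
    def __init__(self, window=60):
        self.window = window                 # seconds of history to keep
        self.failures = collections.deque()  # timestamps of recent failures
        self.first_failure_time = None

    def add_failure(self):
        failure_time = time.time()
        if not self.first_failure_time:
            self.first_failure_time = failure_time
        self.failures.append(failure_time)

    def num_failures(self):
        min_time = time.time() - self.window
        while self.failures and self.failures[0] < min_time:
            self.failures.popleft()
        return len(self.failures)

fw = FailureWindow(window=60)
fw.add_failure()
print(fw.num_failures())  # 1, until the failure ages out of the window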