Dataset columns:
- code: string, 26 to 870k characters
- docstring: string, 1 to 65.6k characters
- func_name: string, 1 to 194 characters
- language: string, 1 distinct value
- repo: string, 8 to 68 characters
- path: string, 5 to 194 characters
- url: string, 46 to 254 characters
- license: string, 4 distinct values
def add_config_path(path):
    """Select config parser by file extension and add path into parser.
    """
    if not os.path.isfile(path):
        warnings.warn("Config file does not exist: {path}".format(path=path))
        return False

    # select parser by file extension
    default_parser = _get_default_parser()
    _base, ext = os.path.splitext(path)
    if ext and ext[1:] in PARSERS:
        parser = ext[1:]
    else:
        parser = default_parser
    parser_class = PARSERS[parser]

    _check_parser(parser_class, parser)
    if parser != default_parser:
        msg = (
            "Config for {added} parser added, but used {used} parser. "
            "Set up right parser via env var: "
            "export LUIGI_CONFIG_PARSER={added}"
        )
        warnings.warn(msg.format(added=parser, used=default_parser))

    # add config path to parser
    parser_class.add_config_path(path)
    return True
Select config parser by file extension and add path into parser.
add_config_path
python
spotify/luigi
luigi/configuration/core.py
https://github.com/spotify/luigi/blob/master/luigi/configuration/core.py
Apache-2.0
def _get_with_default(self, method, section, option, default, expected_type=None, **kwargs):
    """
    Gets the value of the section/option using method.

    Returns default if value is not found.

    Raises an exception if the default value is not None and doesn't match the expected_type.
    """
    try:
        try:
            # Underscore-style is the recommended configuration style
            option = option.replace('-', '_')
            return method(self, section, option, **kwargs)
        except (NoOptionError, NoSectionError):
            # Support dash-style option names (with deprecation warning).
            option_alias = option.replace('_', '-')
            value = method(self, section, option_alias, **kwargs)
            warn = 'Configuration [{s}] {o} (with dashes) should be avoided. Please use underscores: {u}.'.format(
                s=section, o=option_alias, u=option)
            warnings.warn(warn, DeprecationWarning)
            return value
    except (NoOptionError, NoSectionError):
        if default is LuigiConfigParser.NO_DEFAULT:
            raise
        if expected_type is not None and default is not None and \
                not isinstance(default, expected_type):
            raise
        return default
Gets the value of the section/option using method. Returns default if value is not found. Raises an exception if the default value is not None and doesn't match the expected_type.
_get_with_default
python
spotify/luigi
luigi/configuration/cfg_parser.py
https://github.com/spotify/luigi/blob/master/luigi/configuration/cfg_parser.py
Apache-2.0
def has_option(self, section, option):
    """modified has_option
    Check for the existence of a given option in a given section. If the
    specified 'section' is None or an empty string, DEFAULT is assumed. If
    the specified 'section' does not exist, returns False.
    """
    # Underscore-style is the recommended configuration style
    option = option.replace('-', '_')
    if ConfigParser.has_option(self, section, option):
        return True

    # Support dash-style option names (with deprecation warning).
    option_alias = option.replace('_', '-')
    if ConfigParser.has_option(self, section, option_alias):
        warn = 'Configuration [{s}] {o} (with dashes) should be avoided. Please use underscores: {u}.'.format(
            s=section, o=option_alias, u=option)
        warnings.warn(warn, DeprecationWarning)
        return True

    return False
modified has_option Check for the existence of a given option in a given section. If the specified 'section' is None or an empty string, DEFAULT is assumed. If the specified 'section' does not exist, returns False.
has_option
python
spotify/luigi
luigi/configuration/cfg_parser.py
https://github.com/spotify/luigi/blob/master/luigi/configuration/cfg_parser.py
Apache-2.0
def _fetch_json(self):
    """Returns the json representation of the dep graph"""
    print("Fetching from url: " + self.graph_url)
    resp = urlopen(self.graph_url).read()
    return json.loads(resp.decode('utf-8'))
Returns the json representation of the dep graph
_fetch_json
python
spotify/luigi
luigi/tools/luigi_grep.py
https://github.com/spotify/luigi/blob/master/luigi/tools/luigi_grep.py
Apache-2.0
def prefix_search(self, job_name_prefix):
    """Searches for jobs matching the given ``job_name_prefix``."""
    json = self._fetch_json()
    jobs = json['response']
    for job in jobs:
        if job.startswith(job_name_prefix):
            yield self._build_results(jobs, job)
Searches for jobs matching the given ``job_name_prefix``.
prefix_search
python
spotify/luigi
luigi/tools/luigi_grep.py
https://github.com/spotify/luigi/blob/master/luigi/tools/luigi_grep.py
Apache-2.0
def status_search(self, status):
    """Searches for jobs matching the given ``status``."""
    json = self._fetch_json()
    jobs = json['response']
    for job in jobs:
        job_info = jobs[job]
        if job_info['status'].lower() == status.lower():
            yield self._build_results(jobs, job)
Searches for jobs matching the given ``status``.
status_search
python
spotify/luigi
luigi/tools/luigi_grep.py
https://github.com/spotify/luigi/blob/master/luigi/tools/luigi_grep.py
Apache-2.0
def of_cls(self):
    """
    DONT USE. Will be deleted soon. Use ``self.of``!
    """
    if isinstance(self.of, str):
        warnings.warn('When using Range programatically, dont pass "of" param as string!')
        return Register.get_task_cls(self.of)
    return self.of
DONT USE. Will be deleted soon. Use ``self.of``!
of_cls
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def datetime_to_parameters(self, dt):
    """
    Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
    """
    raise NotImplementedError
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
datetime_to_parameters
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    raise NotImplementedError
Given a dictionary of parameters, will extract the ranged task parameter value
parameters_to_datetime
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def moving_start(self, now):
    """
    Returns a datetime from which to ensure contiguousness in the case when
    start is None or unfeasibly far back.
    """
    raise NotImplementedError
Returns a datetime from which to ensure contiguousness in the case when start is None or unfeasibly far back.
moving_start
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def moving_stop(self, now):
    """
    Returns a datetime till which to ensure contiguousness in the case when
    stop is None or unfeasibly far forward.
    """
    raise NotImplementedError
Returns a datetime till which to ensure contiguousness in the case when stop is None or unfeasibly far forward.
moving_stop
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def finite_datetimes(self, finite_start, finite_stop):
    """
    Returns the individual datetimes in interval [finite_start, finite_stop)
    for which task completeness should be required, as a sorted list.
    """
    raise NotImplementedError
Returns the individual datetimes in interval [finite_start, finite_stop) for which task completeness should be required, as a sorted list.
finite_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def _emit_metrics(self, missing_datetimes, finite_start, finite_stop):
    """
    For consistent metrics one should consider the entire range, but
    it is open (infinite) if stop or start is None.

    Hence make do with metrics respective to the finite simplification.
    """
    datetimes = self.finite_datetimes(
        finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)),
        finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop)))

    delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0
    self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs)

    expected_count = len(datetimes)
    complete_count = expected_count - len(missing_datetimes)
    self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count)
    self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family,
                       float(complete_count) / expected_count if expected_count else 1)
For consistent metrics one should consider the entire range, but it is open (infinite) if stop or start is None. Hence make do with metrics respective to the finite simplification.
_emit_metrics
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def missing_datetimes(self, finite_datetimes):
    """
    Override in subclasses to do bulk checks.

    Returns a sorted list.

    This is a conservative base implementation that brutally checks completeness,
    instance by instance.

    Inadvisable as it may be slow.
    """
    return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
Override in subclasses to do bulk checks. Returns a sorted list. This is a conservative base implementation that brutally checks completeness, instance by instance. Inadvisable as it may be slow.
missing_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def _missing_datetimes(self, finite_datetimes):
    """
    Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
    """
    try:
        return self.missing_datetimes(finite_datetimes)
    except TypeError as ex:
        if 'missing_datetimes()' in repr(ex):
            warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
            return self.missing_datetimes(self.of_cls, finite_datetimes)
        else:
            raise
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
_missing_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def datetime_to_parameters(self, dt):
    """
    Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
    """
    return self._task_parameters(dt.date())
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
datetime_to_parameters
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    dt = p[self._param_name]
    return datetime(dt.year, dt.month, dt.day)
Given a dictionary of parameters, will extract the ranged task parameter value
parameters_to_datetime
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of day.
    """
    date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
    dates = []
    for i in itertools.count():
        t = date_start + timedelta(days=i)
        if t >= finite_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
Simply returns the points in time that correspond to turn of day.
finite_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
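For illustration, the following standalone sketch replicates the turn-of-day enumeration performed by the method above on made-up start and stop datetimes (it is not the bound method itself):

import itertools
from datetime import datetime, timedelta

finite_start = datetime(2024, 1, 1, 6, 30)   # hypothetical window start
finite_stop = datetime(2024, 1, 4)           # hypothetical window stop (exclusive)
date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
dates = []
for i in itertools.count():
    t = date_start + timedelta(days=i)
    if t >= finite_stop:
        break
    if t >= finite_start:
        dates.append(t)
print(dates)  # [datetime(2024, 1, 2, 0, 0), datetime(2024, 1, 3, 0, 0)]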
def datetime_to_parameters(self, dt):
    """
    Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
    """
    return self._task_parameters(dt)
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
datetime_to_parameters
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    return p[self._param_name]
Given a dictionary of parameters, will extract the ranged task parameter value
parameters_to_datetime
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to whole hours.
    """
    datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(hours=i)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
Simply returns the points in time that correspond to whole hours.
finite_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def datetime_to_parameters(self, dt):
    """
    Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
    """
    return self._task_parameters(dt)
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
datetime_to_parameters
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    dt = p[self._param_name]
    return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)
Given a dictionary of parameters, will extract the ranged task parameter value
parameters_to_datetime
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to a whole number of minutes intervals.
    """
    # Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
    if not (0 < self.minutes_interval < 60):
        raise ParameterException('minutes-interval must be within 0..60')
    if 60 % self.minutes_interval != 0:
        raise ParameterException('minutes-interval does not evenly divide 60')
    # start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
    start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
    datehour_start = datetime(
        year=finite_start.year,
        month=finite_start.month,
        day=finite_start.day,
        hour=finite_start.hour,
        minute=start_minute)
    datehours = []
    for i in itertools.count():
        t = datehour_start + timedelta(minutes=i*self.minutes_interval)
        if t >= finite_stop:
            return datehours
        if t >= finite_start:
            datehours.append(t)
Simply returns the points in time that correspond to a whole number of minutes intervals.
finite_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def digit_set_wildcard(chars):
    """
    Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
    """
    chars = sorted(chars)
    if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
        return '[%s-%s]' % (chars[0], chars[-1])
    else:
        return '[%s]' % ''.join(chars)
Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
_constrain_glob.digit_set_wildcard
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
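As an illustrative sketch with made-up inputs (calling the helper above directly): a contiguous run of digits collapses to a range, while a set with gaps is listed explicitly.

# Hypothetical usage of digit_set_wildcard.
print(digit_set_wildcard('123'))  # contiguous digits -> '[1-3]'
print(digit_set_wildcard('135'))  # gaps in the set   -> '[135]'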
def _constrain_glob(glob, paths, limit=5):
    """
    Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.

    Saves us minutes long listings for long dataset histories.

    Specifically, in this implementation the leftmost occurrences of "[0-9]"
    give rise to a few separate globs that each specialize the expression to
    digits that actually occur in paths.
    """

    def digit_set_wildcard(chars):
        """
        Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
        """
        chars = sorted(chars)
        if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
            return '[%s-%s]' % (chars[0], chars[-1])
        else:
            return '[%s]' % ''.join(chars)

    current = {glob: paths}
    while True:
        pos = list(current.keys())[0].find('[0-9]')
        if pos == -1:
            # no wildcard expressions left to specialize in the glob
            return list(current.keys())
        char_sets = {}
        for g, p in current.items():
            char_sets[g] = sorted({path[pos] for path in p})
        if sum(len(s) for s in char_sets.values()) > limit:
            return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
        for g, s in char_sets.items():
            for c in s:
                new_glob = g.replace('[0-9]', c, 1)
                new_paths = list(filter(lambda p: p[pos] == c, current[g]))
                current[new_glob] = new_paths
            del current[g]
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra. Saves us minutes long listings for long dataset histories. Specifically, in this implementation the leftmost occurrences of "[0-9]" give rise to a few separate globs that each specialize the expression to digits that actually occur in paths.
_constrain_glob
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
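A hypothetical illustration of _constrain_glob (the paths are made up): with only three distinct digits occurring at the wildcard position, which is within the default limit of 5, the glob is specialized all the way down to the exact paths.

paths = ['data/2000-01-01', 'data/2000-01-02', 'data/2000-01-03']
print(sorted(_constrain_glob('data/2000-01-0[0-9]', paths)))
# -> ['data/2000-01-01', 'data/2000-01-02', 'data/2000-01-03']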
def _get_per_location_glob(tasks, outputs, regexes):
    """
    Builds a glob listing existing output paths.

    Esoteric reverse engineering, but worth it given that (compared to an
    equivalent contiguousness guarantee by naive complete() checks) requests
    to the filesystem are cut by orders of magnitude, and users don't even
    have to retrofit existing tasks anyhow.
    """
    paths = [o.path for o in outputs]
    # naive, because some matches could be confused by numbers earlier
    # in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
    matches = [r.search(p) for r, p in zip(regexes, paths)]

    for m, p, t in zip(matches, paths, tasks):
        if m is None:
            raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))

    n_groups = len(matches[0].groups())
    # the most common position of every group is likely
    # to be conclusive hit or miss
    positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]

    glob = list(paths[0])  # FIXME sanity check that it's the same for all paths
    for start, end in positions:
        glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
    # chop off the last path item
    # (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
    return ''.join(glob).rsplit('/', 1)[0]
Builds a glob listing existing output paths. Esoteric reverse engineering, but worth it given that (compared to an equivalent contiguousness guarantee by naive complete() checks) requests to the filesystem are cut by orders of magnitude, and users don't even have to retrofit existing tasks anyhow.
_get_per_location_glob
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def _get_filesystems_and_globs(datetime_to_task, datetime_to_re):
    """
    Yields a (filesystem, glob) tuple per every output location of task.

    The task can have one or several FileSystemTarget outputs.

    For convenience, the task can be a luigi.WrapperTask,
    in which case outputs of all its dependencies are considered.
    """
    # probe some scattered datetimes unlikely to all occur in paths, other than by being sincere datetime parameter's representations
    # TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness
    sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)]
    regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes]
    sample_tasks = [datetime_to_task(d) for d in sample_datetimes]
    sample_outputs = [flatten_output(t) for t in sample_tasks]

    for o, t in zip(sample_outputs, sample_tasks):
        if len(o) != len(sample_outputs[0]):
            raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0]))
            # TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed
            # erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME?
        for target in o:
            if not isinstance(target, FileSystemTarget):
                raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t))

    for o in zip(*sample_outputs):  # transposed, so here we're iterating over logical outputs, not datetimes
        glob = _get_per_location_glob(sample_tasks, o, regexes)
        yield o[0].fs, glob
Yields a (filesystem, glob) tuple per every output location of task. The task can have one or several FileSystemTarget outputs. For convenience, the task can be a luigi.WrapperTask, in which case outputs of all its dependencies are considered.
_get_filesystems_and_globs
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def _list_existing(filesystem, glob, paths):
    """
    Get all the paths that do in fact exist. Returns a set of all existing paths.

    Takes a luigi.target.FileSystem object, a str which represents a glob and
    a list of strings representing paths.
    """
    globs = _constrain_glob(glob, paths)
    time_start = time.time()
    listing = []
    for g in sorted(globs):
        logger.debug('Listing %s', g)
        if filesystem.exists(g):
            listing.extend(filesystem.listdir(g))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
    return set(listing)
Get all the paths that do in fact exist. Returns a set of all existing paths. Takes a luigi.target.FileSystem object, a str which represents a glob and a list of strings representing paths.
_list_existing
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
    """
    Efficiently determines missing datetimes by filesystem listing.

    The current implementation works for the common case of a task writing
    output to a ``FileSystemTarget`` whose path is built using strftime with
    format like '...%Y...%m...%d...%H...', without custom ``complete()`` or
    ``exists()``.

    (Eventually Luigi could have ranges of completion as first-class citizens.
    Then this listing business could be factored away/be provided for explicitly
    in target API or some kind of a history server.)
    """
    filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
    paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
    listing = set()
    for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)):  # transposed, so here we're iterating over logical outputs, not datetimes
        listing |= _list_existing(f, g, p)

    # quickly learn everything that's missing
    missing_datetimes = []
    for d, p in zip(datetimes, paths_by_datetime):
        if not set(p) <= listing:
            missing_datetimes.append(d)

    return missing_datetimes
Efficiently determines missing datetimes by filesystem listing. The current implementation works for the common case of a task writing output to a ``FileSystemTarget`` whose path is built using strftime with format like '...%Y...%m...%d...%H...', without custom ``complete()`` or ``exists()``. (Eventually Luigi could have ranges of completion as first-class citizens. Then this listing business could be factored away/be provided for explicitly in target API or some kind of a history server.)
infer_bulk_complete_from_fs
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def datetime_to_parameters(self, dt):
    """
    Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
    """
    return self._task_parameters(dt.date())
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
datetime_to_parameters
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def parameters_to_datetime(self, p):
    """
    Given a dictionary of parameters, will extract the ranged task parameter value
    """
    dt = p[self._param_name]
    return datetime(dt.year, dt.month, 1)
Given a dictionary of parameters, will extract the ranged task parameter value
parameters_to_datetime
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def finite_datetimes(self, finite_start, finite_stop):
    """
    Simply returns the points in time that correspond to turn of month.
    """
    start_date = self._align(finite_start)
    aligned_stop = self._align(finite_stop)

    dates = []
    for m in itertools.count():
        t = start_date + relativedelta(months=m)
        if t >= aligned_stop:
            return dates
        if t >= finite_start:
            dates.append(t)
Simply returns the points in time that correspond to turn of month.
finite_datetimes
python
spotify/luigi
luigi/tools/range.py
https://github.com/spotify/luigi/blob/master/luigi/tools/range.py
Apache-2.0
def print_tree(task, indent='', last=True):
    '''
    Return a string representation of the tasks, their statuses/parameters in a dependency tree format
    '''
    # dont bother printing out warnings about tasks with no output
    with warnings.catch_warnings():
        warnings.filterwarnings(action='ignore', message='Task .* without outputs has no custom complete\\(\\) method')
        is_task_complete = task.complete()
    is_complete = (bcolors.OKGREEN + 'COMPLETE' if is_task_complete else bcolors.OKBLUE + 'PENDING') + bcolors.ENDC
    name = task.__class__.__name__
    params = task.to_str_params(only_significant=True)
    result = '\n' + indent
    if (last):
        result += '└─--'
        indent += '   '
    else:
        result += '|---'
        indent += '|  '
    result += '[{0}-{1} ({2})]'.format(name, params, is_complete)
    children = flatten(task.requires())
    for index, child in enumerate(children):
        result += print_tree(child, indent, (index+1) == len(children))
    return result
Return a string representation of the tasks, their statuses/parameters in a dependency tree format
print_tree
python
spotify/luigi
luigi/tools/deps_tree.py
https://github.com/spotify/luigi/blob/master/luigi/tools/deps_tree.py
Apache-2.0
def find_deps(task, upstream_task_family):
    '''
    Finds all dependencies that start with the given task and have a path to upstream_task_family

    Returns all deps on all paths between task and upstream
    '''
    return {t for t in dfs_paths(task, upstream_task_family)}
Finds all dependencies that start with the given task and have a path to upstream_task_family Returns all deps on all paths between task and upstream
find_deps
python
spotify/luigi
luigi/tools/deps.py
https://github.com/spotify/luigi/blob/master/luigi/tools/deps.py
Apache-2.0
def find_deps_cli():
    '''
    Finds all tasks on all paths from provided CLI task
    '''
    cmdline_args = sys.argv[1:]
    with CmdlineParser.global_instance(cmdline_args) as cp:
        return find_deps(cp.get_task_obj(), upstream().family)
Finds all tasks on all paths from provided CLI task
find_deps_cli
python
spotify/luigi
luigi/tools/deps.py
https://github.com/spotify/luigi/blob/master/luigi/tools/deps.py
Apache-2.0
def get_task_output_description(task_output):
    '''
    Returns a task's output as a string
    '''
    output_description = "n/a"

    if isinstance(task_output, RemoteTarget):
        output_description = "[SSH] {0}:{1}".format(task_output._fs.remote_context.host, task_output.path)
    elif isinstance(task_output, S3Target):
        output_description = "[S3] {0}".format(task_output.path)
    elif isinstance(task_output, FileSystemTarget):
        output_description = "[FileSystem] {0}".format(task_output.path)
    elif isinstance(task_output, PostgresTarget):
        output_description = "[DB] {0}:{1}".format(task_output.host, task_output.table)
    else:
        output_description = "to be determined"

    return output_description
Returns a task's output as a string
get_task_output_description
python
spotify/luigi
luigi/tools/deps.py
https://github.com/spotify/luigi/blob/master/luigi/tools/deps.py
Apache-2.0
def Popen(self, cmd, **kwargs):
    """
    Remote Popen.
    """
    prefixed_cmd = self._prepare_cmd(cmd)
    return subprocess.Popen(prefixed_cmd, **kwargs)
Remote Popen.
Popen
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def check_output(self, cmd):
    """
    Execute a shell command remotely and return the output.

    Simplified version of Popen when you only want the output as a string and detect any errors.
    """
    p = self.Popen(cmd, stdout=subprocess.PIPE)
    output, _ = p.communicate()
    if p.returncode != 0:
        raise RemoteCalledProcessError(p.returncode, cmd, self.host, output=output)
    return output
Execute a shell command remotely and return the output. Simplified version of Popen when you only want the output as a string and detect any errors.
check_output
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def tunnel(self, local_port, remote_port=None, remote_host="localhost"):
    """
    Open a tunnel between localhost:local_port and remote_host:remote_port
    via the host specified by this context.

    Remember to close() the returned "tunnel" object in order to clean up
    after yourself when you are done with the tunnel.
    """
    tunnel_host = "{0}:{1}:{2}".format(local_port, remote_host, remote_port)
    proc = self.Popen(
        # cat so we can shut down gracefully by closing stdin
        ["-L", tunnel_host, "echo -n ready && cat"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    # make sure to get the data so we know the connection is established
    ready = proc.stdout.read(5)
    assert ready == b"ready", "Didn't get ready from remote echo"
    yield  # user code executed here
    proc.communicate()
    assert proc.returncode == 0, "Tunnel process did an unclean exit (returncode %s)" % (proc.returncode,)
Open a tunnel between localhost:local_port and remote_host:remote_port via the host specified by this context. Remember to close() the returned "tunnel" object in order to clean up after yourself when you are done with the tunnel.
tunnel
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def exists(self, path):
    """
    Return `True` if file or directory at `path` exist, False otherwise.
    """
    try:
        self.remote_context.check_output(["test", "-e", path])
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:
            return False
        else:
            raise
    return True
Return `True` if file or directory at `path` exist, False otherwise.
exists
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def isdir(self, path):
    """
    Return `True` if directory at `path` exist, False otherwise.
    """
    try:
        self.remote_context.check_output(["test", "-d", path])
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:
            return False
        else:
            raise
    return True
Return `True` if directory at `path` exist, False otherwise.
isdir
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def remove(self, path, recursive=True):
    """
    Remove file or directory at location `path`.
    """
    if recursive:
        cmd = ["rm", "-r", path]
    else:
        cmd = ["rm", path]

    self.remote_context.check_output(cmd)
Remove file or directory at location `path`.
remove
python
spotify/luigi
luigi/contrib/ssh.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ssh.py
Apache-2.0
def __init__(self, host, database, user, password, table, update_id, **cnx_kwargs):
    """
    Initializes a MySqlTarget instance.

    :param host: MySql server address. Possibly a host:port string.
    :type host: str
    :param database: database name.
    :type database: str
    :param user: database user
    :type user: str
    :param password: password for specified user.
    :type password: str
    :param update_id: an identifier for this data set.
    :type update_id: str
    :param cnx_kwargs: optional params for mysql connector constructor.
        See https://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html.
    """
    if ':' in host:
        self.host, self.port = host.split(':')
        self.port = int(self.port)
    else:
        self.host = host
        self.port = 3306
    self.database = database
    self.user = user
    self.password = password
    self.table = table
    self.update_id = update_id
    self.cnx_kwargs = cnx_kwargs
Initializes a MySqlTarget instance. :param host: MySql server address. Possibly a host:port string. :type host: str :param database: database name. :type database: str :param user: database user :type user: str :param password: password for specified user. :type password: str :param update_id: an identifier for this data set. :type update_id: str :param cnx_kwargs: optional params for mysql connector constructor. See https://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html.
__init__
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
def touch(self, connection=None):
    """
    Mark this update as complete.

    IMPORTANT, If the marker table doesn't exist,
    the connection transaction will be aborted and the connection reset.
    Then the marker table will be created.
    """
    self.create_marker_table()

    if connection is None:
        connection = self.connect()
        connection.autocommit = True  # if connection created here, we commit it here

    connection.cursor().execute(
        """INSERT INTO {marker_table} (update_id, target_table)
           VALUES (%s, %s)
           ON DUPLICATE KEY UPDATE
           update_id = VALUES(update_id)
        """.format(marker_table=self.marker_table),
        (self.update_id, self.table)
    )

    # make sure update is properly marked
    assert self.exists(connection)
Mark this update as complete. IMPORTANT, If the marker table doesn't exist, the connection transaction will be aborted and the connection reset. Then the marker table will be created.
touch
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
def create_marker_table(self):
    """
    Create marker table if it doesn't exist.

    Using a separate connection since the transaction might have to be reset.
    """
    connection = self.connect(autocommit=True)
    cursor = connection.cursor()
    try:
        cursor.execute(
            """ CREATE TABLE {marker_table} (
                    id            BIGINT(20)    NOT NULL AUTO_INCREMENT,
                    update_id     VARCHAR(128)  NOT NULL,
                    target_table  VARCHAR(128),
                    inserted      TIMESTAMP DEFAULT NOW(),
                    PRIMARY KEY (update_id),
                    KEY id (id)
                )
            """
            .format(marker_table=self.marker_table)
        )
    except mysql.connector.Error as e:
        if e.errno == errorcode.ER_TABLE_EXISTS_ERROR:
            pass
        else:
            raise
    connection.close()
Create marker table if it doesn't exist. Using a separate connection since the transaction might have to be reset.
create_marker_table
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
def rows(self):
    """
    Return/yield tuples or lists corresponding to each row to be inserted.
    """
    with self.input().open('r') as fobj:
        for line in fobj:
            yield line.strip('\n').split('\t')
Return/yield tuples or lists corresponding to each row to be inserted.
rows
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
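A sketch of the line-to-row transformation performed by rows() above, on a hypothetical tab-separated input line:

line = 'alice\t30\tSE\n'                 # made-up TSV record
print(line.strip('\n').split('\t'))      # -> ['alice', '30', 'SE']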
def output(self):
    """
    Returns a MySqlTarget representing the inserted dataset.

    Normally you don't override this.
    """
    return MySqlTarget(
        host=self.host,
        database=self.database,
        user=self.user,
        password=self.password,
        table=self.table,
        update_id=self.update_id
    )
Returns a MySqlTarget representing the inserted dataset. Normally you don't override this.
output
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
def run(self):
    """
    Inserts data generated by rows() into target table.

    If the target table doesn't exist, self.create_table will be called to attempt to create the table.

    Normally you don't want to override this.
    """
    if not (self.table and self.columns):
        raise Exception("table and columns need to be specified")

    connection = self.output().connect()

    # attempt to copy the data into mysql
    # if it fails because the target table doesn't exist
    # try to create it by running self.create_table
    for attempt in range(2):
        try:
            cursor = connection.cursor()
            print("caling init copy...")
            self.init_copy(connection)
            self.copy(cursor)
            self.post_copy(connection)
            if self.enable_metadata_columns:
                self.post_copy_metacolumns(cursor)
        except Error as err:
            if err.errno == errorcode.ER_NO_SUCH_TABLE and attempt == 0:
                # if first attempt fails with "relation not found", try creating table
                # logger.info("Creating table %s", self.table)
                connection.reconnect()
                self.create_table(connection)
            else:
                raise
        else:
            break

    # mark as complete in same transaction
    self.output().touch(connection)

    connection.commit()
    connection.close()
Inserts data generated by rows() into target table. If the target table doesn't exist, self.create_table will be called to attempt to create the table. Normally you don't want to override this.
run
python
spotify/luigi
luigi/contrib/mysqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mysqldb.py
Apache-2.0
def attach(*packages):
    """
    Attach a python package to hadoop map reduce tarballs to make those packages available
    on the hadoop cluster.
    """
    _attached_packages.extend(packages)
Attach a python package to hadoop map reduce tarballs to make those packages available on the hadoop cluster.
attach
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def create_packages_archive(packages, filename):
    """
    Create a tar archive which will contain the files for the packages listed in packages.
    """
    import tarfile
    tar = tarfile.open(filename, "w")

    def add(src, dst):
        logger.debug('adding to tar: %s -> %s', src, dst)
        tar.add(src, dst)

    def add_files_for_package(sub_package_path, root_package_path, root_package_name):
        for root, dirs, files in os.walk(sub_package_path):
            if '.svn' in dirs:
                dirs.remove('.svn')
            for f in files:
                if not f.endswith(".pyc") and not f.startswith("."):
                    add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)

    for package in packages:
        # Put a submodule's entire package in the archive. This is the
        # magic that usually packages everything you need without
        # having to attach packages/modules explicitly
        if not getattr(package, "__path__", None) and '.' in package.__name__:
            package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')

        n = package.__name__.replace(".", "/")

        if getattr(package, "__path__", None):
            # TODO: (BUG) picking only the first path does not
            # properly deal with namespaced packages in different
            # directories
            p = package.__path__[0]

            if p.endswith('.egg') and os.path.isfile(p):
                raise 'egg files not supported!!!'
                # Add the entire egg file
                # p = p[:p.find('.egg') + 4]
                # add(dereference(p), os.path.basename(p))

            else:
                # include __init__ files from parent projects
                root = []
                for parent in package.__name__.split('.')[0:-1]:
                    root.append(parent)
                    module_name = '.'.join(root)
                    directory = '/'.join(root)

                    add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
                        directory + "/__init__.py")

                add_files_for_package(p, p, n)

                # include egg-info directories that are parallel:
                for egg_info_path in glob.glob(p + '*.egg-info'):
                    logger.debug(
                        'Adding package metadata to archive for "%s" found at "%s"',
                        package.__name__,
                        egg_info_path
                    )
                    add_files_for_package(egg_info_path, p, n)

        else:
            f = package.__file__
            if f.endswith("pyc"):
                f = f[:-3] + "py"
            if n.find(".") == -1:
                add(dereference(f), os.path.basename(f))
            else:
                add(dereference(f), n + ".py")
    tar.close()
Create a tar archive which will contain the files for the packages listed in packages.
create_packages_archive
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def flatten(sequence):
    """
    A simple generator which flattens a sequence.

    Only one level is flattened.

    .. code-block:: python

        (1, (2, 3), 4) -> (1, 2, 3, 4)

    """
    for item in sequence:
        if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
            for i in item:
                yield i
        else:
            yield item
A simple generator which flattens a sequence. Only one level is flattened. .. code-block:: python (1, (2, 3), 4) -> (1, 2, 3, 4)
flatten
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
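A quick usage sketch for the flatten generator above (inputs are made up); note that strings and bytes are yielded whole rather than split into characters:

print(list(flatten((1, (2, 3), 4))))        # -> [1, 2, 3, 4]
print(list(flatten(['ab', ['cd', 'ef']])))  # -> ['ab', 'cd', 'ef']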
def write_luigi_history(arglist, history):
    """
    Writes history to a file in the job's output directory in JSON format.
    Currently just for tracking the job ID in a configuration where
    no history is stored in the output directory by Hadoop.
    """
    history_filename = configuration.get_config().get('core', 'history-filename', '')
    if history_filename and '-output' in arglist:
        output_dir = arglist[arglist.index('-output') + 1]
        f = luigi.contrib.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
        f.write(json.dumps(history))
        f.close()
Writes history to a file in the job's output directory in JSON format. Currently just for tracking the job ID in a configuration where no history is stored in the output directory by Hadoop.
run_and_track_hadoop_job.write_luigi_history
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def fetch_task_failures(tracking_url):
    """
    Uses mechanize to fetch the actual task logs from the task tracker.

    This is highly opportunistic, and we might not succeed.
    So we set a low timeout and hope it works.

    If it does not, it's not the end of the world.

    TODO: Yarn has a REST API that we should probably use instead:
    http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
    """
    import mechanize
    timeout = 3.0
    failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
    logger.debug('Fetching data from %s', failures_url)
    b = mechanize.Browser()
    b.open(failures_url, timeout=timeout)
    links = list(b.links(text_regex='Last 4KB'))  # For some reason text_regex='All' doesn't work... no idea why
    links = random.sample(links, min(10, len(links)))  # Fetch a random subset of all failed tasks, so not to be biased towards the early fails
    error_text = []
    for link in links:
        task_url = link.url.replace('&start=-4097', '&start=-100000')  # Increase the offset
        logger.debug('Fetching data from %s', task_url)
        b2 = mechanize.Browser()
        try:
            r = b2.open(task_url, timeout=timeout)
            data = r.read()
        except Exception as e:
            logger.debug('Error fetching data from %s: %s', task_url, e)
            continue
        # Try to get the hex-encoded traceback back from the output
        for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
            error_text.append('---------- %s:' % task_url)
            error_text.append(exc.split('=')[-1].decode('hex'))

    return '\n'.join(error_text)
Uses mechanize to fetch the actual task logs from the task tracker. This is highly opportunistic, and we might not succeed. So we set a low timeout and hope it works. If it does not, it's not the end of the world. TODO: Yarn has a REST API that we should probably use instead: http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
fetch_task_failures
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def _get_pool(self):
    """ Protected method """
    if self.pool:
        return self.pool
    if hadoop().pool:
        return hadoop().pool
Protected method
_get_pool
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def init_local(self):
    """
    Implement any work to setup any internal datastructure etc here.

    You can add extra input using the requires_local/input_local methods.

    Anything you set on the object will be pickled and available on the Hadoop nodes.
    """
    pass
Implement any work to setup any internal datastructure etc here. You can add extra input using the requires_local/input_local methods. Anything you set on the object will be pickled and available on the Hadoop nodes.
init_local
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def requires_local(self):
    """
    Default impl - override this method if you need any local input to be accessible in init().
    """
    return []
Default impl - override this method if you need any local input to be accessible in init().
requires_local
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def job_runner(self):
    # We recommend that you define a subclass, override this method and set up your own config
    """
    Get the MapReduce runner for this job.

    If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
    Otherwise, the LocalJobRunner which streams all data through the local machine
    will be used (great for testing).
    """
    outputs = luigi.task.flatten(self.output())
    for output in outputs:
        if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
            warnings.warn("Job is using one or more non-HdfsTarget outputs" +
                          " so it will be run in local mode")
            return LocalJobRunner()
    else:
        return DefaultHadoopJobRunner()
Get the MapReduce runner for this job. If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used. Otherwise, the LocalJobRunner which streams all data through the local machine will be used (great for testing).
job_runner
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def reader(self, input_stream):
    """
    Reader is a method which iterates over input lines and outputs records.

    The default implementation yields one argument containing the line for each line in the input."""
    for line in input_stream:
        yield line,
Reader is a method which iterates over input lines and outputs records. The default implementation yields one argument containing the line for each line in the input.
reader
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def writer(self, outputs, stdout, stderr=sys.stderr):
    """
    Writer format is a method which iterates over the output records
    from the reducer and formats them for output.

    The default implementation outputs tab separated items.
    """
    for output in outputs:
        try:
            output = flatten(output)
            if self.data_interchange_format == "json":
                # Only dump one json string, and skip another one, maybe key or value.
                output = filter(lambda x: x, output)
            else:
                # JSON is already serialized, so we put `self.serialize` in a else statement.
                output = map(self.serialize, output)
            print("\t".join(output), file=stdout)
        except BaseException:
            print(output, file=stderr)
            raise
Writer format is a method which iterates over the output records from the reducer and formats them for output. The default implementation outputs tab separated items.
writer
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def mapper(self, item):
    """
    Re-define to process an input item (usually a line of input data).

    Defaults to identity mapper that sends all lines to the same reducer.
    """
    yield None, item
Re-define to process an input item (usually a line of input data). Defaults to identity mapper that sends all lines to the same reducer.
mapper
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def incr_counter(self, *args, **kwargs):
    """
    Increments a Hadoop counter.

    Since counters can be a bit slow to update, this batches the updates.
    """
    threshold = kwargs.get("threshold", self.batch_counter_default)
    if len(args) == 2:
        # backwards compatibility with existing hadoop jobs
        group_name, count = args
        key = (group_name,)
    else:
        group, name, count = args
        key = (group, name)
    ct = self._counter_dict.get(key, 0)
    ct += count
    if ct >= threshold:
        new_arg = list(key) + [ct]
        self._incr_counter(*new_arg)
        ct = 0
    self._counter_dict[key] = ct
Increments a Hadoop counter. Since counters can be a bit slow to update, this batches the updates.
incr_counter
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def _flush_batch_incr_counter(self):
    """
    Increments any unflushed counter values.
    """
    for key, count in self._counter_dict.items():
        if count == 0:
            continue
        args = list(key) + [count]
        self._incr_counter(*args)
        self._counter_dict[key] = 0
Increments any unflushed counter values.
_flush_batch_incr_counter
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def _incr_counter(self, *args):
    """
    Increments a Hadoop counter.

    Note that this seems to be a bit slow, ~1 ms

    Don't overuse this function by updating very frequently.
    """
    if len(args) == 2:
        # backwards compatibility with existing hadoop jobs
        group_name, count = args
        print('reporter:counter:%s,%s' % (group_name, count), file=sys.stderr)
    else:
        group, name, count = args
        print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
Increments a Hadoop counter. Note that this seems to be a bit slow, ~1 ms Don't overuse this function by updating very frequently.
_incr_counter
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
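A small sketch of the Hadoop Streaming counter line the method above emits on stderr (the group and counter names are hypothetical):

import sys

group, name, count = 'my-group', 'rows-written', 42  # made-up counter
print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
# stderr receives: reporter:counter:my-group,rows-written,42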
def extra_files(self):
    """
    Can be overridden in subclass.

    Each element is either a string, or a pair of two strings (src, dst).

    * `src` can be a directory (in which case everything will be copied recursively).
    * `dst` can include subdirectories (foo/bar/baz.txt etc)

    Uses Hadoop's -files option so that the same file is reused across tasks.
    """
    return []
Can be overridden in subclass. Each element is either a string, or a pair of two strings (src, dst). * `src` can be a directory (in which case everything will be copied recursively). * `dst` can include subdirectories (foo/bar/baz.txt etc) Uses Hadoop's -files option so that the same file is reused across tasks.
extra_files
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def extra_streaming_arguments(self):
    """
    Extra arguments to Hadoop command line.

    Return here a list of (parameter, value) tuples.
    """
    return []
Extra arguments to Hadoop command line. Return here a list of (parameter, value) tuples.
extra_streaming_arguments
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def extra_archives(self):
    """List of paths to archives """
    return []
List of paths to archives
extra_archives
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def dump(self, directory=''):
    """
    Dump instance to file.
    """
    with self.no_unpicklable_properties():
        file_name = os.path.join(directory, 'job-instance.pickle')
        if self.__module__ == '__main__':
            d = pickle.dumps(self)
            module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
            d = d.replace(b'(c__main__', "(c" + module_name)
            open(file_name, "wb").write(d)
        else:
            pickle.dump(self, open(file_name, "wb"))
Dump instance to file.
dump
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def _map_input(self, input_stream):
    """
    Iterate over input and call the mapper for each item.
    If the job has a parser defined, the return values from the parser will
    be passed as arguments to the mapper.

    If the input is coded output from a previous run,
    the arguments will be splitted in key and value.
    """
    for record in self.reader(input_stream):
        for output in self.mapper(*record):
            yield output
    if self.final_mapper != NotImplemented:
        for output in self.final_mapper():
            yield output
    self._flush_batch_incr_counter()
Iterate over input and call the mapper for each item. If the job has a parser defined, the return values from the parser will be passed as arguments to the mapper. If the input is coded output from a previous run, the arguments will be splitted in key and value.
_map_input
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def _reduce_input(self, inputs, reducer, final=NotImplemented):
    """
    Iterate over input, collect values with the same key, and call the reducer for each unique key.
    """
    for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
        for output in reducer(self.deserialize(key), (v[1] for v in values)):
            yield output
    if final != NotImplemented:
        for output in final():
            yield output
    self._flush_batch_incr_counter()
Iterate over input, collect values with the same key, and call the reducer for each unique key.
_reduce_input
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
    """
    Run the mapper on the hadoop node.
    """
    self.init_hadoop()
    self.init_mapper()
    outputs = self._map_input((line[:-1] for line in stdin))
    if self.reducer == NotImplemented:
        self.writer(outputs, stdout)
    else:
        self.internal_writer(outputs, stdout)
Run the mapper on the hadoop node.
run_mapper
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
    """
    Run the reducer on the hadoop node.
    """
    self.init_hadoop()
    self.init_reducer()
    outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
    self.writer(outputs, stdout)
Run the reducer on the hadoop node.
run_reducer
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def internal_reader(self, input_stream):
    """
    Reader which uses python eval on each part of a tab separated string.
    Yields a tuple of python objects.
    """
    for input_line in input_stream:
        yield list(map(self.deserialize, input_line.split("\t")))
Reader which uses python eval on each part of a tab separated string. Yields a tuple of python objects.
internal_reader
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def internal_writer(self, outputs, stdout):
    """
    Writer which outputs the python repr for each item.
    """
    for output in outputs:
        print("\t".join(map(self.internal_serialize, output)), file=stdout)
Writer which outputs the python repr for each item.
internal_writer
python
spotify/luigi
luigi/contrib/hadoop.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop.py
Apache-2.0
def __init__(self, host, port, db, update_id, password=None,
             socket_timeout=None, expire=None):
    """
    :param host: Redis server host
    :type host: str
    :param port: Redis server port
    :type port: int
    :param db: database index
    :type db: int
    :param update_id: an identifier for this data hash
    :type update_id: str
    :param password: a password to connect to the redis server
    :type password: str
    :param socket_timeout: client socket timeout
    :type socket_timeout: int
    :param expire: timeout before the target is deleted
    :type expire: int
    """
    self.host = host
    self.port = port
    self.db = db
    self.password = password
    self.socket_timeout = socket_timeout
    self.update_id = update_id
    self.expire = expire
    self.redis_client = redis.StrictRedis(
        host=self.host,
        port=self.port,
        password=self.password,
        db=self.db,
        socket_timeout=self.socket_timeout,
    )
:param host: Redis server host :type host: str :param port: Redis server port :type port: int :param db: database index :type db: int :param update_id: an identifier for this data hash :type update_id: str :param password: a password to connect to the redis server :type password: str :param socket_timeout: client socket timeout :type socket_timeout: int :param expire: timeout before the target is deleted :type expire: int
__init__
python
spotify/luigi
luigi/contrib/redis_store.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/redis_store.py
Apache-2.0
def marker_key(self):
    """
    Generate a key for the indicator hash.
    """
    return '%s:%s' % (self.marker_prefix, self.update_id)
Generate a key for the indicator hash.
marker_key
python
spotify/luigi
luigi/contrib/redis_store.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/redis_store.py
Apache-2.0
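For illustration only, with a made-up prefix and update id (marker_prefix normally comes from the target's configuration), the indicator hash key is simply the two values joined by a colon:

marker_prefix = 'luigi_targets'        # hypothetical prefix
update_id = 'copy_2024_01_01'          # hypothetical update id
print('%s:%s' % (marker_prefix, update_id))  # -> luigi_targets:copy_2024_01_01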
def touch(self):
    """
    Mark this update as complete.

    We index the parameters `update_id` and `date`.
    """
    marker_key = self.marker_key()
    self.redis_client.hset(marker_key, 'update_id', self.update_id)
    self.redis_client.hset(marker_key, 'date', datetime.datetime.now().isoformat())

    if self.expire is not None:
        self.redis_client.expire(marker_key, self.expire)
Mark this update as complete. We index the parameters `update_id` and `date`.
touch
python
spotify/luigi
luigi/contrib/redis_store.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/redis_store.py
Apache-2.0
def exists(self):
    """
    Test if this task has been run.
    """
    return self.redis_client.exists(self.marker_key()) == 1
Test if this task has been run.
exists
python
spotify/luigi
luigi/contrib/redis_store.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/redis_store.py
Apache-2.0
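A minimal sketch of how RedisTarget is typically used as a completion marker, assuming a hypothetical LoadUserCounts task and a local Redis instance; the host/port and the loading logic are placeholders.

import datetime

import luigi
from luigi.contrib.redis_store import RedisTarget


class LoadUserCounts(luigi.Task):
    """Hypothetical task: the target only marks completion via touch()/exists()."""
    date = luigi.DateParameter(default=datetime.date.today())

    def output(self):
        # expire=86400 lets the marker (and thus task completeness) lapse after a day.
        return RedisTarget(host='localhost', port=6379, db=0,
                           update_id=self.task_id, expire=86400)

    def run(self):
        # ... write the actual payload to Redis (or elsewhere) here ...
        self.output().touch()  # stores update_id and the current date in the marker hash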
def dataflow_executable(self):
    """
    Command representing the Dataflow executable to be run.

    For example:
        return ['java', 'com.spotify.luigi.MyClass', '-Xmx256m']
    """
    pass
Command representing the Dataflow executable to be run. For example: return ['java', 'com.spotify.luigi.MyClass', '-Xmx256m']
dataflow_executable
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
def args(self):
    """
    Extra String arguments that will be passed to your Dataflow job.

    For example:
        return ['--setup_file=setup.py']
    """
    return []
Extra String arguments that will be passed to your Dataflow job. For example: return ['--setup_file=setup.py']
args
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
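The two hooks above are usually the minimum a concrete job overrides. A sketch of such a subclass follows, assuming a hypothetical MyDataflowJob; the jar, main class, and flags are placeholders, and any other required configuration of BeamDataflowJobTask (e.g. its Dataflow parameters) is omitted for brevity.

from luigi.contrib.beam_dataflow import BeamDataflowJobTask


class MyDataflowJob(BeamDataflowJobTask):
    """Hypothetical subclass; only the executable and extra args are shown."""

    def dataflow_executable(self):
        # Command prefix that launches the Beam pipeline on Dataflow.
        return ['java', '-cp', 'my-pipeline.jar', 'com.example.MyPipeline']

    def args(self):
        # Extra flags appended after the generated --input/--output arguments.
        return ['--maxNumWorkers=10']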
def before_run(self):
    """
    Hook that gets called right before the Dataflow job is launched.

    Can be used to setup any temporary files/tables, validate input, etc.
    """
    pass
Hook that gets called right before the Dataflow job is launched. Can be used to setup any temporary files/tables, validate input, etc.
before_run
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
def on_successful_run(self):
    """
    Callback that gets called right after the Dataflow job has finished
    successfully but before validate_output is run.
    """
    pass
Callback that gets called right after the Dataflow job has finished successfully but before validate_output is run.
on_successful_run
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
def validate_output(self):
    """
    Callback that can be used to validate your output before it is moved to
    its final location.

    Returning false here will cause the job to fail, and output to be removed
    instead of published.
    """
    return True
Callback that can be used to validate your output before it is moved to its final location. Returning false here will cause the job to fail, and output to be removed instead of published.
validate_output
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
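Building on the hypothetical MyDataflowJob sketched earlier, the lifecycle hooks above might be used like this; the row-count check is a placeholder for whatever validation suits the output.

class ValidatedDataflowJob(MyDataflowJob):
    """Hypothetical subclass wiring up the pre-run and validation hooks."""

    def before_run(self):
        # E.g. stage a temporary lookup table or sanity-check the inputs.
        pass

    def on_successful_run(self):
        # Runs after the Dataflow job succeeds, before validate_output().
        pass

    def validate_output(self):
        # Returning False fails the task and discards the output instead of publishing it.
        return self._count_output_rows() > 0

    def _count_output_rows(self):
        # Placeholder for an output-format-specific check.
        return 1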
def file_pattern(self):
    """
    If one/some of the input target files are not in the pattern of part-*,
    we can add the key of the required target and the correct file pattern
    that should be appended in the command line here. If the input target key
    is not found in this dict, the file pattern will be assumed to be part-*
    for that target.

    :return: A dictionary of overridden file patterns that are not part-* for the inputs
    """
    return {}
If one/some of the input target files are not in the pattern of part-*, we can add the key of the required target and the correct file pattern that should be appended in the command line here. If the input target key is not found in this dict, the file pattern will be assumed to be part-* for that target. :return: A dictionary of overridden file patterns that are not part-* for the inputs
file_pattern
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
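As a hedged illustration of file_pattern(), a subclass might override it like this when one input is not written as part-* files; the input name and glob are hypothetical.

class AvroInputDataflowJob(MyDataflowJob):
    """Hypothetical: the "events" input is matched as *.avro, the rest stay part-*."""

    def file_pattern(self):
        return {'events': '*.avro'}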
def on_successful_output_validation(self):
    """
    Callback that gets called after the Dataflow job has finished
    successfully if validate_output returns True.
    """
    pass
Callback that gets called after the Dataflow job has finished successfully if validate_output returns True.
on_successful_output_validation
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
def cleanup_on_error(self, error):
    """
    Callback that gets called after the Dataflow job has finished
    unsuccessfully, or validate_output returns False.
    """
    pass
Callback that gets called after the Dataflow job has finished unsuccessfully, or validate_output returns False.
cleanup_on_error
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
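The remaining two callbacks round out the lifecycle; a sketch, continuing the hypothetical subclasses above, with placeholder logging in place of real cleanup or notification logic.

import logging

logger = logging.getLogger('luigi-interface')


class PublishingDataflowJob(ValidatedDataflowJob):
    """Hypothetical subclass illustrating the post-validation and error callbacks."""

    def on_successful_output_validation(self):
        # Runs only after validate_output() returned True, e.g. notify consumers.
        logger.info('Dataflow output validated and published')

    def cleanup_on_error(self, error):
        # Runs when the job fails or validation rejects the output, e.g. drop temp tables.
        logger.error('Cleaning up after failed Dataflow job: %s', error)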
def _format_input_args(self):
    """
    Parses the result(s) of self.input() into a string-serialized key-value
    list passed to the Dataflow job. Valid inputs include:

        return FooTarget()

        return {"input1": FooTarget(), "input2": FooTarget2()}

        return ("input", FooTarget())

        return [("input1", FooTarget()), ("input2", FooTarget2())]

        return [FooTarget(), FooTarget2()]

    Unlabeled inputs are passed in under the default key "input".
    """
    job_input = self.input()
    if isinstance(job_input, luigi.Target):
        job_input = {"input": job_input}
    elif isinstance(job_input, tuple):
        job_input = {job_input[0]: job_input[1]}
    elif isinstance(job_input, list):
        if all(isinstance(item, tuple) for item in job_input):
            job_input = dict(job_input)
        else:
            job_input = {"input": job_input}
    elif not isinstance(job_input, dict):
        raise ValueError("Invalid job input requires(). Supported types: ["
                         "Target, tuple of (name, Target), "
                         "dict of (name: Target), list of Targets]")
    if not isinstance(self.file_pattern(), dict):
        raise ValueError('file_pattern() must return a dict type')

    input_args = []
    for (name, targets) in job_input.items():
        uris = [
            self.get_target_path(uri_target)
            for uri_target in luigi.task.flatten(targets)
        ]
        if isinstance(targets, dict):
            """
            If targets is a dict that means it had multiple outputs.
            Make the input args in that case "<input key>-<task output key>"
            """
            names = ["%s-%s" % (name, key) for key in targets.keys()]
        else:
            names = [name] * len(uris)

        input_dict = {}
        for (arg_name, uri) in zip(names, uris):
            pattern = self.file_pattern().get(name, 'part-*')
            input_value = input_dict.get(arg_name, [])
            input_value.append(uri.rstrip('/') + '/' + pattern)
            input_dict[arg_name] = input_value

        for (key, paths) in input_dict.items():
            input_args.append("--%s=%s" % (key, ','.join(paths)))

    return input_args
Parses the result(s) of self.input() into a string-serialized key-value list passed to the Dataflow job. Valid inputs include: return FooTarget() return {"input1": FooTarget(), "input2": FooTarget2()} return ("input", FooTarget()) return [("input1", FooTarget()), ("input2", FooTarget2())] return [FooTarget(), FooTarget2()] Unlabeled inputs are passed in under the default key "input".
_format_input_args
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
def _format_output_args(self):
    """
    Parses the result(s) of self.output() into a string-serialized key-value
    list passed to the Dataflow job. Valid outputs include:

        return FooTarget()

        return {"output1": FooTarget(), "output2": FooTarget2()}

    Unlabeled outputs are passed in under the default key "output".
    """
    job_output = self.output()
    if isinstance(job_output, luigi.Target):
        job_output = {"output": job_output}
    elif not isinstance(job_output, dict):
        raise ValueError(
            "Task output must be a Target or a dict from String to Target")

    output_args = []
    for (name, target) in job_output.items():
        uri = self.get_target_path(target)
        output_args.append("--%s=%s" % (name, uri))

    return output_args
Parses the result(s) of self.output() into a string-serialized key-value list passed to the Dataflow job. Valid outputs include: return FooTarget() return {"output1": FooTarget(), "output2": FooTarget2()} Unlabeled outputs are passed in under the default key "output".
_format_output_args
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
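To make the mapping concrete, here is a hedged illustration (as comments) of what these two helpers produce for a hypothetical task; the target names and bucket paths are made up.

# Hypothetical task definition:
#
#     def requires(self):
#         return {"events": EventsTask(), "users": UsersTask()}
#
#     def output(self):
#         return {"output": gcs.GCSTarget("gs://bucket/out")}
#
# Assuming the event/user tasks write to gs://bucket/events/ and
# gs://bucket/users/, _format_input_args() would yield roughly:
#
#     ["--events=gs://bucket/events/part-*", "--users=gs://bucket/users/part-*"]
#
# and _format_output_args():
#
#     ["--output=gs://bucket/out"]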
def get_target_path(target):
    """
    Given a luigi Target, determine a stringly typed path to pass as a
    Dataflow job argument.
    """
    if isinstance(target, luigi.LocalTarget) or isinstance(target, gcs.GCSTarget):
        return target.path
    elif isinstance(target, bigquery.BigQueryTarget):
        return "{}:{}.{}".format(target.table.project_id,
                                 target.table.dataset_id,
                                 target.table.table_id)
    else:
        raise ValueError("Target %s not supported" % target)
Given a luigi Target, determine a stringly typed path to pass as a Dataflow job argument.
get_target_path
python
spotify/luigi
luigi/contrib/beam_dataflow.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/beam_dataflow.py
Apache-2.0
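A small usage sketch of the path helper, assuming it is exposed as a static helper on BeamDataflowJobTask (as the signature without self suggests); the bucket, project, dataset and table names are placeholders.

from luigi.contrib import bigquery, gcs
from luigi.contrib.beam_dataflow import BeamDataflowJobTask

# A GCS target maps straight to its path:
BeamDataflowJobTask.get_target_path(gcs.GCSTarget('gs://bucket/data'))
# -> 'gs://bucket/data'

# A BigQuery target maps to the project:dataset.table form Dataflow expects:
BeamDataflowJobTask.get_target_path(
    bigquery.BigQueryTarget('my-project', 'my_dataset', 'my_table'))
# -> 'my-project:my_dataset.my_table'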
def __init__(self, jobName, image, tasks):
    """
    Initialize a Job with required fields.

    :param jobName: Name for the job, needs to be unique
    :param image: URL pointing to the Docker image for all tasks in the job
    :param tasks: List of taskRole, at least one task role
    """
    self.jobName = jobName
    self.image = image
    if isinstance(tasks, list) and len(tasks) != 0:
        self.taskRoles = tasks
    else:
        raise TypeError('you must specify at least one task.')
Initialize a Job with required fields. :param jobName: Name for the job, needs to be unique :param image: URL pointing to the Docker image for all tasks in the job :param tasks: List of taskRole, at least one task role
__init__
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def __init__(self, label, begin_at=0, port_number=1):
    """
    The Port definition for TaskRole

    :param label: Label name for the port type, required
    :param begin_at: The port to begin with in the port type, 0 for random selection, required
    :param port_number: Number of ports for the specific type, required
    """
    self.label = label
    self.beginAt = begin_at
    self.portNumber = port_number
The Port definition for TaskRole :param label: Label name for the port type, required :param begin_at: The port to begin with in the port type, 0 for random selection, required :param port_number: Number of ports for the specific type, required
__init__
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def __init__(self, name, command, taskNumber=1, cpuNumber=1, memoryMB=2048,
             shmMB=64, gpuNumber=0, portList=[]):
    """
    The TaskRole of PAI

    :param name: Name for the task role, needs to be unique among the other roles, required
    :param command: Executable command for tasks in the task role, cannot be empty, required
    :param taskNumber: Number of tasks for the task role, no less than 1, required
    :param cpuNumber: CPU number for one task in the task role, no less than 1, required
    :param shmMB: Shared memory for one task in the task role, no more than memory size, required
    :param memoryMB: Memory for one task in the task role, no less than 100, required
    :param gpuNumber: GPU number for one task in the task role, no less than 0, required
    :param portList: List of portType to use, optional
    """
    self.name = name
    self.command = command
    self.taskNumber = taskNumber
    self.cpuNumber = cpuNumber
    self.memoryMB = memoryMB
    self.shmMB = shmMB
    self.gpuNumber = gpuNumber
    self.portList = portList
The TaskRole of PAI :param name: Name for the task role, needs to be unique among the other roles, required :param command: Executable command for tasks in the task role, cannot be empty, required :param taskNumber: Number of tasks for the task role, no less than 1, required :param cpuNumber: CPU number for one task in the task role, no less than 1, required :param shmMB: Shared memory for one task in the task role, no more than memory size, required :param memoryMB: Memory for one task in the task role, no less than 100, required :param gpuNumber: GPU number for one task in the task role, no less than 0, required :param portList: List of portType to use, optional
__init__
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
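Taken together, the three constructors above describe a complete job specification. A sketch of building one follows; the class names TaskRole, Port and Job are inferred from the docstrings (the module may expose them under slightly different names), and the command, image and resource numbers are placeholders.

from luigi.contrib.pai import Job, Port, TaskRole  # names assumed from the constructors above

# One worker role running a placeholder training command on 2 CPUs / 4 GB.
worker = TaskRole(
    name='worker',
    command='python train.py',
    taskNumber=1,
    cpuNumber=2,
    memoryMB=4096,
    gpuNumber=0,
    portList=[Port(label='http', begin_at=0, port_number=1)],
)

job = Job(jobName='example-train-job',
          image='openpai/pai.example.sklearn',
          tasks=[worker])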
def name(self):
    """Name for the job, needs to be unique, required"""
    return 'SklearnExample'
Name for the job, needs to be unique, required
name
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def image(self):
    """URL pointing to the Docker image for all tasks in the job, required"""
    return 'openpai/pai.example.sklearn'
URL pointing to the Docker image for all tasks in the job, required
image
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def tasks(self):
    """List of taskRole, at least one task role, required"""
    return []
List of taskRole, at least one task role, required
tasks
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def auth_file_path(self):
    """Docker registry authentication file existing on HDFS, optional"""
    return None
Docker registry authentication file existing on HDFS, optional
auth_file_path
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def data_dir(self):
    """Data directory existing on HDFS, optional"""
    return None
Data directory existing on HDFS, optional
data_dir
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def code_dir(self):
    """Code directory existing on HDFS, should not contain any data and should be less than 200MB, optional"""
    return None
Code directory existing on HDFS, should not contain any data and should be less than 200MB, optional
code_dir
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def output_dir(self):
    """Output directory on HDFS, $PAI_DEFAULT_FS_URI/$jobName/output will be used if not specified, optional"""
    return '$PAI_DEFAULT_FS_URI/{0}/output'.format(self.name)
Output directory on HDFS, $PAI_DEFAULT_FS_URI/$jobName/output will be used if not specified, optional
output_dir
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
def virtual_cluster(self):
    """The virtual cluster the job runs on. If omitted, the job will run on the default virtual cluster, optional"""
    return 'default'
The virtual cluster the job runs on. If omitted, the job will run on the default virtual cluster, optional
virtual_cluster
python
spotify/luigi
luigi/contrib/pai.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/pai.py
Apache-2.0
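The snippets from name through virtual_cluster are the per-job attributes of a PAI task. A sketch of a task overriding a few of them follows, assuming they are exposed as properties on a PaiTask base class (the base-class name is inferred from the module) and reusing the hypothetical TaskRole construction from above.

import luigi
from luigi.contrib.pai import PaiTask, TaskRole  # names assumed


class SklearnJob(PaiTask):
    """Hypothetical task mirroring the SklearnExample values shown above."""

    @property
    def name(self):
        return 'SklearnExample'

    @property
    def image(self):
        return 'openpai/pai.example.sklearn'

    @property
    def tasks(self):
        return [TaskRole(name='main', command='python svm.py')]

    @property
    def virtual_cluster(self):
        return 'default'

    @property
    def output_dir(self):
        return '$PAI_DEFAULT_FS_URI/{0}/output'.format(self.name)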