Dataset columns:

code: string (lengths 26 to 870k)
docstring: string (lengths 1 to 65.6k)
func_name: string (lengths 1 to 194)
language: string (1 class)
repo: string (lengths 8 to 68)
path: string (lengths 5 to 194)
url: string (lengths 46 to 254)
license: string (4 classes)
def command(self):
    """
    Command passed to the containers

    Override to return list of dicts with keys 'name' and 'command',
    describing the container names and commands to pass to the container.
    These values will be specified in the `containerOverrides` property of
    the `overrides` parameter passed to the runTask API.

    Example::

        [
            {
                'name': 'myContainer',
                'command': ['/bin/sleep', '60']
            }
        ]

    """
    pass
Command passed to the containers Override to return list of dicts with keys 'name' and 'command', describing the container names and commands to pass to the container. These values will be specified in the `containerOverrides` property of the `overrides` parameter passed to the runTask API. Example:: [ { 'name': 'myContainer', 'command': ['/bin/sleep', '60'] } ]
command
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
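For illustration only, a minimal sketch of how a task might override this property (the ARN and container name below are placeholders, not values from the source):

from luigi.contrib.ecs import ECSTask

class SleepTask(ECSTask):
    @property
    def command(self):
        # one dict per container defined in the task definition
        return [{'name': 'myContainer', 'command': ['/bin/sleep', '60']}]

# task_def_arn is an ECSTask parameter; this ARN is a placeholder
task = SleepTask(task_def_arn='arn:aws:ecs:us-east-1:123456789012:task-definition/sleep360:1')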
def update_container_overrides_command(container_overrides, command):
    """
    Update a list of container overrides with the specified command.

    The specified command will take precedence over any existing commands
    in `container_overrides` for the same container name. If no override
    yet exists in `container_overrides` for the container named in
    `command`, it will be added.
    """
    for colliding_override in filter(lambda x: x['name'] == command['name'], container_overrides):
        colliding_override['command'] = command['command']
        break
    else:
        # no matching container name found, so append a new override
        container_overrides.append(command)
Update a list of container overrides with the specified command. The specified command will take precedence over any existing commands in `container_overrides` for the same container name. If no override yet exists in `container_overrides` for the container named in `command`, it will be added.
update_container_overrides_command
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
def combined_overrides(self):
    """
    Return single dict combining any provided `overrides` parameters.

    This is used to allow custom `overrides` parameters to be specified in
    `self.run_task_kwargs` while ensuring that the values specified in
    `self.command` are honored in `containerOverrides`.
    """
    overrides = copy.deepcopy(self.run_task_kwargs.get('overrides', {}))
    if self.command:
        if 'containerOverrides' in overrides:
            for command in self.command:
                self.update_container_overrides_command(overrides['containerOverrides'], command)
        else:
            overrides['containerOverrides'] = self.command
    return overrides
Return single dict combining any provided `overrides` parameters. This is used to allow custom `overrides` parameters to be specified in `self.run_task_kwargs` while ensuring that the values specified in `self.command` are honored in `containerOverrides`.
combined_overrides
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
def run_task_kwargs(self):
    """
    Additional keyword arguments to be provided to ECS runTask API.

    Override this property in a subclass to provide additional parameters
    such as `network_configuration`, `launchType`, etc.

    If the returned `dict` includes an `overrides` value with a nested
    `containerOverrides` array defining one or more container `command`
    values, prior to calling `run_task` they will be combined with and
    superseded by any colliding values specified separately in the
    `command` property.

    Example::

        {
            'launchType': 'FARGATE',
            'platformVersion': '1.4.0',
            'networkConfiguration': {
                'awsvpcConfiguration': {
                    'subnets': [
                        'subnet-01234567890abcdef',
                        'subnet-abcdef01234567890'
                    ],
                    'securityGroups': [
                        'sg-abcdef01234567890',
                    ],
                    'assignPublicIp': 'ENABLED'
                }
            },
            'overrides': {
                'ephemeralStorage': {
                    'sizeInGiB': 30
                }
            }
        }

    """
    return {}
Additional keyword arguments to be provided to ECS runTask API. Override this property in a subclass to provide additional parameters such as `network_configuration`, `launchType`, etc. If the returned `dict` includes an `overrides` value with a nested `containerOverrides` array defining one or more container `command` values, prior to calling `run_task` they will be combined with and superseded by any colliding values specified separately in the `command` property. Example:: { 'launchType': 'FARGATE', 'platformVersion': '1.4.0', 'networkConfiguration': { 'awsvpcConfiguration': { 'subnets': [ 'subnet-01234567890abcdef', 'subnet-abcdef01234567890' ], 'securityGroups': [ 'sg-abcdef01234567890', ], 'assignPublicIp': 'ENABLED' } }, 'overrides': { 'ephemeralStorage': { 'sizeInGiB': 30 } } }
run_task_kwargs
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
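As a sketch of how these properties compose (the Fargate values here are illustrative), a subclass can return extra runTask arguments from this property, and combined_overrides() above merges self.command into any containerOverrides supplied this way:

class FargateSleepTask(SleepTask):
    @property
    def run_task_kwargs(self):
        # placeholder settings; see the docstring's Example for the full shape
        return {
            'launchType': 'FARGATE',
            'platformVersion': '1.4.0',
            'overrides': {'ephemeralStorage': {'sizeInGiB': 30}},
        }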
def exists(self, path):
    """
    Does provided path exist on S3?
    """
    (bucket, key) = self._path_to_bucket_and_key(path)

    # root always exists
    if self._is_root(key):
        return True

    # file
    if self._exists(bucket, key):
        return True

    if self.isdir(path):
        return True

    logger.debug('Path %s does not exist', path)
    return False
Does provided path exist on S3?
exists
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def remove(self, path, recursive=True):
    """
    Remove a file or directory from S3.

    :param path: File or directory to remove
    :param recursive: Boolean indicator to remove object and children
    :return: Boolean indicator denoting success of the removal of 1 or more files
    """
    if not self.exists(path):
        logger.debug('Could not delete %s; path does not exist', path)
        return False

    (bucket, key) = self._path_to_bucket_and_key(path)
    s3_bucket = self.s3.Bucket(bucket)

    # root
    if self._is_root(key):
        raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)

    # file
    if self._exists(bucket, key):
        self.s3.meta.client.delete_object(Bucket=bucket, Key=key)
        logger.debug('Deleting %s from bucket %s', key, bucket)
        return True

    if self.isdir(path) and not recursive:
        raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)

    delete_key_list = [{'Key': obj.key} for obj in
                       s3_bucket.objects.filter(Prefix=self._add_path_delimiter(key))]

    # delete the directory marker file if it exists
    if self._exists(bucket, '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)):
        delete_key_list.append({'Key': '{}{}'.format(key, S3_DIRECTORY_MARKER_SUFFIX_0)})

    if len(delete_key_list) > 0:
        n = 1000
        for i in range(0, len(delete_key_list), n):
            self.s3.meta.client.delete_objects(Bucket=bucket,
                                               Delete={'Objects': delete_key_list[i: i + n]})
        return True

    return False
Remove a file or directory from S3. :param path: File or directory to remove :param recursive: Boolean indicator to remove object and children :return: Boolean indicator denoting success of the removal of 1 or more files
remove
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def move(self, source_path, destination_path, **kwargs):
    """
    Rename/move an object from one S3 location to another.

    :param source_path: The `s3://` path of the directory or key to copy from
    :param destination_path: The `s3://` path of the directory or key to copy to
    :param kwargs: Keyword arguments are passed to the boto3 function `copy`
    """
    self.copy(source_path, destination_path, **kwargs)
    self.remove(source_path)
Rename/move an object from one S3 location to another. :param source_path: The `s3://` path of the directory or key to copy from :param destination_path: The `s3://` path of the directory or key to copy to :param kwargs: Keyword arguments are passed to the boto3 function `copy`
move
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def get_key(self, path):
    """
    Returns the object summary at the path
    """
    (bucket, key) = self._path_to_bucket_and_key(path)
    if self._exists(bucket, key):
        return self.s3.ObjectSummary(bucket, key)
Returns the object summary at the path
get_key
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def put(self, local_path, destination_s3_path, **kwargs):
    """
    Put an object stored locally to an S3 path.

    :param local_path: Path to source local file
    :param destination_s3_path: URL for target S3 location
    :param kwargs: Keyword arguments are passed to the boto function `put_object`
    """
    self._check_deprecated_argument(**kwargs)

    # put the file
    self.put_multipart(local_path, destination_s3_path, **kwargs)
Put an object stored locally to an S3 path. :param local_path: Path to source local file :param destination_s3_path: URL for target S3 location :param kwargs: Keyword arguments are passed to the boto function `put_object`
put
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def put_string(self, content, destination_s3_path, **kwargs):
    """
    Put a string to an S3 path.

    :param content: Data str
    :param destination_s3_path: URL for target S3 location
    :param kwargs: Keyword arguments are passed to the boto3 function `put_object`
    """
    self._check_deprecated_argument(**kwargs)
    (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)

    # put the file
    self.s3.meta.client.put_object(
        Key=key, Bucket=bucket, Body=content, **kwargs)
Put a string to an S3 path. :param content: Data str :param destination_s3_path: URL for target S3 location :param kwargs: Keyword arguments are passed to the boto3 function `put_object`
put_string
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def put_multipart(self, local_path, destination_s3_path, part_size=DEFAULT_PART_SIZE, **kwargs):
    """
    Put an object stored locally to an S3 path
    using S3 multi-part upload (for files > 8MB).

    :param local_path: Path to source local file
    :param destination_s3_path: URL for target S3 location
    :param part_size: Part size in bytes. Default: 8388608 (8MB)
    :param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
    """
    self._check_deprecated_argument(**kwargs)

    from boto3.s3.transfer import TransferConfig
    # default part size for boto3 is 8MB, changing it to fit part_size
    # provided as a parameter
    transfer_config = TransferConfig(multipart_chunksize=part_size)

    (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)

    self.s3.meta.client.upload_fileobj(
        Fileobj=open(local_path, 'rb'), Bucket=bucket, Key=key,
        Config=transfer_config, ExtraArgs=kwargs)
Put an object stored locally to an S3 path using S3 multi-part upload (for files > 8MB). :param local_path: Path to source local file :param destination_s3_path: URL for target S3 location :param part_size: Part size in bytes. Default: 8388608 (8MB) :param kwargs: Keyword arguments are passed to the boto function `upload_fileobj` as ExtraArgs
put_multipart
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
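A hedged usage sketch (bucket and file names are placeholders): part_size feeds boto3's TransferConfig, and any remaining keyword arguments travel to upload_fileobj as ExtraArgs:

from luigi.contrib.s3 import S3Client

client = S3Client()  # credentials resolved through boto3's default chain
client.put_multipart(
    'report.csv',                        # local source file
    's3://my-bucket/reports/report.csv',
    part_size=16 * 1024 * 1024,          # 16 MB parts instead of the 8 MB default
    ServerSideEncryption='AES256',       # forwarded as ExtraArgs
)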
def copy(self, source_path, destination_path, threads=DEFAULT_THREADS,
         start_time=None, end_time=None, part_size=DEFAULT_PART_SIZE, **kwargs):
    """
    Copy object(s) from one S3 location to another. Works for individual keys or entire directories.
    When files are larger than `part_size`, multipart uploading will be used.

    :param source_path: The `s3://` path of the directory or key to copy from
    :param destination_path: The `s3://` path of the directory or key to copy to
    :param threads: Optional argument to define the number of threads to use when copying (min: 3 threads)
    :param start_time: Optional argument to copy files with modified dates after start_time
    :param end_time: Optional argument to copy files with modified dates before end_time
    :param part_size: Part size in bytes
    :param kwargs: Keyword arguments are passed to the boto function `copy` as ExtraArgs
    :returns: tuple (number_of_files_copied, total_size_copied_in_bytes)
    """
    # don't allow threads to be less than 3
    threads = 3 if threads < 3 else threads

    if self.isdir(source_path):
        return self._copy_dir(source_path, destination_path, threads=threads,
                              start_time=start_time, end_time=end_time,
                              part_size=part_size, **kwargs)
    # If the file isn't a directory just perform a simple copy
    else:
        return self._copy_file(source_path, destination_path, threads=threads,
                               part_size=part_size, **kwargs)
Copy object(s) from one S3 location to another. Works for individual keys or entire directories. When files are larger than `part_size`, multipart uploading will be used. :param source_path: The `s3://` path of the directory or key to copy from :param destination_path: The `s3://` path of the directory or key to copy to :param threads: Optional argument to define the number of threads to use when copying (min: 3 threads) :param start_time: Optional argument to copy files with modified dates after start_time :param end_time: Optional argument to copy files with modified dates before end_time :param part_size: Part size in bytes :param kwargs: Keyword arguments are passed to the boto function `copy` as ExtraArgs :returns tuple (number_of_files_copied, total_size_copied_in_bytes)
copy
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
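Continuing the client sketch above (paths are placeholders), a directory copy returns the file count and byte total:

# trailing slash marks a directory copy; threads below 3 are raised to 3
n_files, n_bytes = client.copy('s3://my-bucket/raw/', 's3://my-bucket/archive/', threads=10)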
def get(self, s3_path, destination_local_path):
    """
    Get an object stored in S3 and write it to a local path.
    """
    (bucket, key) = self._path_to_bucket_and_key(s3_path)
    # download the file
    self.s3.meta.client.download_file(bucket, key, destination_local_path)
Get an object stored in S3 and write it to a local path.
get
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def get_as_bytes(self, s3_path):
    """
    Get the contents of an object stored in S3 as bytes

    :param s3_path: URL for target S3 location
    :return: File contents as pure bytes
    """
    (bucket, key) = self._path_to_bucket_and_key(s3_path)
    obj = self.s3.Object(bucket, key)
    contents = obj.get()['Body'].read()
    return contents
Get the contents of an object stored in S3 as bytes :param s3_path: URL for target S3 location :return: File contents as pure bytes
get_as_bytes
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def get_as_string(self, s3_path, encoding='utf-8'):
    """
    Get the contents of an object stored in S3 as string.

    :param s3_path: URL for target S3 location
    :param encoding: Encoding to decode bytes to string
    :return: File contents as a string
    """
    content = self.get_as_bytes(s3_path)
    return content.decode(encoding)
Get the contents of an object stored in S3 as string. :param s3_path: URL for target S3 location :param encoding: Encoding to decode bytes to string :return: File contents as a string
get_as_string
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def isdir(self, path):
    """
    Is the parameter S3 path a directory?
    """
    (bucket, key) = self._path_to_bucket_and_key(path)
    s3_bucket = self.s3.Bucket(bucket)

    # root is a directory
    if self._is_root(key):
        return True

    for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0, S3_DIRECTORY_MARKER_SUFFIX_1):
        try:
            self.s3.meta.client.get_object(
                Bucket=bucket, Key=key + suffix)
        except botocore.exceptions.ClientError as e:
            if not e.response['Error']['Code'] in ['NoSuchKey', '404']:
                raise
        else:
            return True

    # files with this prefix
    key_path = self._add_path_delimiter(key)
    s3_bucket_list_result = list(itertools.islice(
        s3_bucket.objects.filter(Prefix=key_path), 1))
    if s3_bucket_list_result:
        return True

    return False
Is the parameter S3 path a directory?
isdir
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def listdir(self, path, start_time=None, end_time=None, return_key=False):
    """
    Get an iterable with S3 folder contents.
    Iterable contains absolute paths for which queried path is a prefix.

    :param path: URL for target S3 location
    :param start_time: Optional argument to list files with modified (offset aware) datetime after start_time
    :param end_time: Optional argument to list files with modified (offset aware) datetime before end_time
    :param return_key: Optional argument, when set to True will return boto3's ObjectSummary (instead of the filename)
    """
    (bucket, key) = self._path_to_bucket_and_key(path)

    # grab and validate the bucket
    s3_bucket = self.s3.Bucket(bucket)

    key_path = self._add_path_delimiter(key)
    key_path_len = len(key_path)
    for item in s3_bucket.objects.filter(Prefix=key_path):
        last_modified_date = item.last_modified
        if (
            # neither are defined, list all
            (not start_time and not end_time) or
            # start defined, after start
            (start_time and not end_time and start_time < last_modified_date) or
            # end defined, prior to end
            (not start_time and end_time and last_modified_date < end_time) or
            # both defined, between
            (start_time and end_time and start_time < last_modified_date < end_time)
        ):
            if return_key:
                yield item
            else:
                yield self._add_path_delimiter(path) + item.key[key_path_len:]
Get an iterable with S3 folder contents. Iterable contains absolute paths for which queried path is a prefix. :param path: URL for target S3 location :param start_time: Optional argument to list files with modified (offset aware) datetime after start_time :param end_time: Optional argument to list files with modified (offset aware) datetime before end_time :param return_key: Optional argument, when set to True will return boto3's ObjectSummary (instead of the filename)
listdir
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
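A sketch of time-filtered listing (paths are placeholders); note the datetimes must be offset-aware to compare against S3's last_modified values:

from datetime import datetime, timedelta, timezone

cutoff = datetime.now(timezone.utc) - timedelta(days=1)
for key in client.listdir('s3://my-bucket/logs/', start_time=cutoff):
    print(key)  # absolute s3:// paths modified within the last day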
def list(self, path, start_time=None, end_time=None, return_key=False):  # backwards compat
    """
    Get an iterable with S3 folder contents.
    Iterable contains paths relative to queried path.

    :param path: URL for target S3 location
    :param start_time: Optional argument to list files with modified (offset aware) datetime after start_time
    :param end_time: Optional argument to list files with modified (offset aware) datetime before end_time
    :param return_key: Optional argument, when set to True will return boto3's ObjectSummary (instead of the filename)
    """
    key_path_len = len(self._add_path_delimiter(path))
    for item in self.listdir(path, start_time=start_time, end_time=end_time, return_key=return_key):
        if return_key:
            yield item
        else:
            yield item[key_path_len:]
Get an iterable with S3 folder contents. Iterable contains paths relative to queried path. :param path: URL for target S3 location :param start_time: Optional argument to list files with modified (offset aware) datetime after start_time :param end_time: Optional argument to list files with modified (offset aware) datetime before end_time :param return_key: Optional argument, when set to True will return boto3's ObjectSummary (instead of the filename)
list
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def _check_deprecated_argument(**kwargs):
    """
    If `encrypt_key` or `host` is part of the arguments raise an exception

    :return: None
    """
    if 'encrypt_key' in kwargs:
        raise DeprecatedBotoClientException(
            'encrypt_key deprecated in boto3. Please refer to boto3 documentation for encryption details.')
    if 'host' in kwargs:
        raise DeprecatedBotoClientException(
            'host keyword deprecated and is replaced by region_name in boto3.\n'
            'example: region_name=us-west-1\n'
            'For region names, refer to the amazon S3 region documentation\n'
            'https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region')
If `encrypt_key` or `host` is part of the arguments raise an exception :return: None
_check_deprecated_argument
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
    """
    Initializes a S3FlagTarget.

    :param path: the directory where the files are stored.
    :type path: str
    :param client:
    :type client:
    :param flag:
    :type flag: str
    """
    if format is None:
        format = get_default_format()

    if path[-1] != "/":
        raise ValueError("S3FlagTarget requires the path to be to a "
                         "directory. It must end with a slash ( / ).")
    super(S3FlagTarget, self).__init__(path, format, client)
    self.flag = flag
Initializes a S3FlagTarget. :param path: the directory where the files are stored. :type path: str :param client: :type client: :param flag: :type flag: str
__init__
python
spotify/luigi
luigi/contrib/s3.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/s3.py
Apache-2.0
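A sketch of the flag target in use (path is a placeholder): because the target represents a directory of files, its existence check is satisfied by the flag file alone:

from luigi.contrib.s3 import S3FlagTarget

target = S3FlagTarget('s3://my-bucket/output/2024-01-01/')  # must end with '/'
if target.exists():  # True once .../2024-01-01/_SUCCESS is present
    print('partition complete')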
def get_soql_fields(soql):
    """
    Gets queried column names.
    """
    soql_fields = re.search('(?<=select)(?s)(.*)(?=from)', soql, re.IGNORECASE)  # get fields

    soql_fields = re.sub(' ', '', soql_fields.group())  # remove extra spaces
    soql_fields = re.sub('\t', '', soql_fields)  # remove tabs

    fields = re.split(',|\n|\r|', soql_fields)  # split on commas and newlines

    fields = [field for field in fields if field != '']  # remove empty strings

    return fields
Gets queried column names.
get_soql_fields
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def parse_results(fields, data):
    """
    Traverses ordered dictionary, calls _traverse_results() to recursively read into the dictionary depth of data
    """
    master = []

    for record in data['records']:  # for each 'record' in response
        row = [None] * len(fields)  # create null list the length of number of columns

        for obj, value in record.items():  # for each obj in record
            if not isinstance(value, (dict, list, tuple)):  # if not data structure
                if obj in fields:
                    row[fields.index(obj)] = ensure_utf(value)

            elif isinstance(value, dict) and obj != 'attributes':  # traverse down into object
                path = obj
                _traverse_results(value, fields, row, path)

        master.append(row)
    return master
Traverses ordered dictionary, calls _traverse_results() to recursively read into the dictionary depth of data
parse_results
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def _traverse_results(value, fields, row, path):
    """
    Helper method for parse_results().
    Traverses through ordered dict and recursively calls itself when encountering a dictionary
    """
    for f, v in value.items():  # for each item in obj
        field_name = '{path}.{name}'.format(path=path, name=f) if path else f

        if not isinstance(v, (dict, list, tuple)):  # if not data structure
            if field_name in fields:
                row[fields.index(field_name)] = ensure_utf(v)

        elif isinstance(v, dict) and f != 'attributes':  # it is a dict
            _traverse_results(v, fields, row, field_name)
Helper method for parse_results(). Traverses through ordered dict and recursively calls itself when encountering a dictionary
_traverse_results
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def object_name(self):
    """
    Override to return the SF object we are querying.
    Must have the SF "__c" suffix if it is a custom object.
    """
    return None
Override to return the SF object we are querying. Must have the SF "__c" suffix if it is a custom object.
object_name
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def use_sandbox(self):
    """
    Override to specify use of SF sandbox.
    True iff we should be uploading to a sandbox environment instead
    of the production organization.
    """
    return False
Override to specify use of SF sandbox. True iff we should be uploading to a sandbox environment instead of the production organization.
use_sandbox
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def sandbox_name(self):
    """Override to specify the sandbox name if it is intended to be used."""
    return None
Override to specify the sandbox name if it is intended to be used.
sandbox_name
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def soql(self):
    """Override to return the raw string SOQL or the path to it."""
    return None
Override to return the raw string SOQL or the path to it.
soql
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def is_soql_file(self):
    """Override to True if soql property is a file path."""
    return False
Override to True if soql property is a file path.
is_soql_file
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def content_type(self):
    """
    Override to use a different content type.
    Salesforce allows XML, CSV, ZIP_CSV, or ZIP_XML. Defaults to CSV.
    """
    return "CSV"
Override to use a different content type. Salesforce allows XML, CSV, ZIP_CSV, or ZIP_XML. Defaults to CSV.
content_type
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def merge_batch_results(self, result_ids):
    """
    Merges the resulting files of a multi-result batch bulk query.
    """
    outfile = open(self.output().path, 'w')

    if self.content_type.lower() == 'csv':
        for i, result_id in enumerate(result_ids):
            with open("%s.%d" % (self.output().path, i), 'r') as f:
                header = f.readline()
                if i == 0:
                    outfile.write(header)
                for line in f:
                    outfile.write(line)
    else:
        raise Exception("Batch result merging not implemented for %s" % self.content_type)

    outfile.close()
Merges the resulting files of a multi-result batch bulk query.
merge_batch_results
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def start_session(self):
    """
    Starts a Salesforce session and determines which SF instance to use for future requests.
    """
    if self.has_active_session():
        raise Exception("Session already in progress.")

    response = requests.post(self._get_login_url(),
                             headers=self._get_login_headers(),
                             data=self._get_login_xml())
    response.raise_for_status()

    root = ET.fromstring(response.text)
    for e in root.iter("%ssessionId" % self.SOAP_NS):
        if self.session_id:
            raise Exception("Invalid login attempt. Multiple session ids found.")
        self.session_id = e.text

    for e in root.iter("%sserverUrl" % self.SOAP_NS):
        if self.server_url:
            raise Exception("Invalid login attempt. Multiple server urls found.")
        self.server_url = e.text

    if not self.has_active_session():
        raise Exception("Invalid login attempt resulted in null sessionId [%s] and/or serverUrl [%s]." %
                        (self.session_id, self.server_url))

    self.hostname = urlsplit(self.server_url).hostname
Starts a Salesforce session and determines which SF instance to use for future requests.
start_session
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def query(self, query, **kwargs):
    """
    Return the result of a Salesforce SOQL query as a dict decoded from
    the Salesforce response JSON payload.

    :param query: the SOQL query to send to Salesforce, e.g.
                  "SELECT id from Lead WHERE email = 'user@example.com'"
    """
    params = {'q': query}
    response = requests.get(self._get_norm_query_url(),
                            headers=self._get_rest_headers(),
                            params=params,
                            **kwargs)
    if response.status_code != requests.codes.ok:
        raise Exception(response.content)

    return response.json()
Return the result of a Salesforce SOQL query as a dict decoded from the Salesforce response JSON payload. :param query: the SOQL query to send to Salesforce, e.g. "SELECT id from Lead WHERE email = 'user@example.com'"
query
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
    """
    Retrieves more results from a query that returned more results
    than the batch maximum. Returns a dict decoded from the Salesforce
    response JSON payload.

    :param next_records_identifier: either the Id of the next Salesforce
                                    object in the result, or a URL to the
                                    next record in the result.
    :param identifier_is_url: True if `next_records_identifier` should be
                              treated as a URL, False if
                              `next_records_identifier` should be treated
                              as an Id.
    """
    if identifier_is_url:
        # Don't use `self.base_url` here because the full URI is provided
        url = (u'https://{instance}{next_record_url}'
               .format(instance=self.hostname,
                       next_record_url=next_records_identifier))
    else:
        url = self._get_norm_query_url() + '{next_record_id}'
        url = url.format(next_record_id=next_records_identifier)

    response = requests.get(url,
                            headers=self._get_rest_headers(),
                            **kwargs)
    response.raise_for_status()

    return response.json()
Retrieves more results from a query that returned more results than the batch maximum. Returns a dict decoded from the Salesforce response JSON payload. :param next_records_identifier: either the Id of the next Salesforce object in the result, or a URL to the next record in the result. :param identifier_is_url: True if `next_records_identifier` should be treated as a URL, False if `next_records_identifier` should be treated as an Id.
query_more
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def query_all(self, query, **kwargs):
    """
    Returns the full set of results for the `query`. This is a
    convenience wrapper around `query(...)` and `query_more(...)`.

    The results are returned as a seekable temporary file of CSV rows:
    a header row of the queried field names followed by every record
    retrieved, including those fetched via `query_more(...)` when the
    result set exceeds the Salesforce batch limit.

    :param query: the SOQL query to send to Salesforce, e.g.
                  `SELECT Id FROM Lead WHERE Email = "user@example.com"`
    """
    # Make the initial query to Salesforce
    response = self.query(query, **kwargs)

    # get fields
    fields = get_soql_fields(query)

    # put fields and first page of results into a temp list to be written to TempFile
    tmp_list = [fields]
    tmp_list.extend(parse_results(fields, response))

    tmp_dir = luigi.configuration.get_config().get('salesforce', 'local-tmp-dir', None)
    tmp_file = tempfile.TemporaryFile(mode='a+b', dir=tmp_dir)

    writer = csv.writer(tmp_file)
    writer.writerows(tmp_list)

    # The number of results might have exceeded the Salesforce batch limit
    # so check whether there are more results and retrieve them if so.
    length = len(response['records'])
    while not response['done']:
        response = self.query_more(response['nextRecordsUrl'], identifier_is_url=True, **kwargs)
        writer.writerows(parse_results(fields, response))
        length += len(response['records'])
        if not length % 10000:
            logger.info('Requested {0} lines...'.format(length))

    logger.info('Requested a total of {0} lines.'.format(length))

    tmp_file.seek(0)
    return tmp_file
Returns the full set of results for the `query`. This is a convenience wrapper around `query(...)` and `query_more(...)`. The results are returned as a seekable temporary file of CSV rows: a header row of the queried field names followed by every record retrieved, including those fetched via `query_more(...)` when the result set exceeds the Salesforce batch limit. :param query: the SOQL query to send to Salesforce, e.g. `SELECT Id FROM Lead WHERE Email = "user@example.com"`
query_all
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
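A usage sketch (credentials are placeholders, and the three positional constructor arguments are assumed, not shown in the source): the call returns a seekable temporary file of CSV rows, not a decoded dict:

from luigi.contrib.salesforce import SalesforceAPI

sf = SalesforceAPI('user@example.com', 'password', 'security-token')
sf.start_session()
result_file = sf.query_all('SELECT Id, Email FROM Lead')
print(result_file.readline())  # CSV header row derived from the SOQL fields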
def restful(self, path, params):
    """
    Allows you to make a direct REST call if you know the path

    Arguments:

    :param path: The path of the request. Example: sobjects/User/ABC123/password
    :param params: dict of parameters to pass to the path
    """
    url = self._get_norm_base_url() + path
    response = requests.get(url, headers=self._get_rest_headers(), params=params)

    if response.status_code != 200:
        raise Exception(response)
    json_result = response.json(object_pairs_hook=OrderedDict)
    if len(json_result) == 0:
        return None
    else:
        return json_result
Allows you to make a direct REST call if you know the path Arguments: :param path: The path of the request. Example: sobjects/User/ABC123/password :param params: dict of parameters to pass to the path
restful
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def create_operation_job(self, operation, obj, external_id_field_name=None, content_type=None):
    """
    Creates a new SF job for doing any operation (insert, upsert, update, delete, query)

    :param operation: delete, insert, query, upsert, update, hardDelete. Must be lowercase.
    :param obj: Parent SF object
    :param external_id_field_name: Optional.
    """
    if not self.has_active_session():
        self.start_session()

    response = requests.post(self._get_create_job_url(),
                             headers=self._get_create_job_headers(),
                             data=self._get_create_job_xml(operation, obj,
                                                           external_id_field_name, content_type))
    response.raise_for_status()

    root = ET.fromstring(response.text)
    job_id = root.find('%sid' % self.API_NS).text
    return job_id
Creates a new SF job for doing any operation (insert, upsert, update, delete, query) :param operation: delete, insert, query, upsert, update, hardDelete. Must be lowercase. :param obj: Parent SF object :param external_id_field_name: Optional.
create_operation_job
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def get_job_details(self, job_id):
    """
    Gets all details for existing job

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: job info as xml
    """
    response = requests.get(self._get_job_details_url(job_id))
    response.raise_for_status()
    return response
Gets all details for existing job :param job_id: job_id as returned by 'create_operation_job(...)' :return: job info as xml
get_job_details
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def abort_job(self, job_id):
    """
    Abort an existing job. When a job is aborted, no more records are processed.
    Changes to data may already have been committed and aren't rolled back.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: abort response as xml
    """
    response = requests.post(self._get_abort_job_url(job_id),
                             headers=self._get_abort_job_headers(),
                             data=self._get_abort_job_xml())
    response.raise_for_status()
    return response
Abort an existing job. When a job is aborted, no more records are processed. Changes to data may already have been committed and aren't rolled back. :param job_id: job_id as returned by 'create_operation_job(...)' :return: abort response as xml
abort_job
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def close_job(self, job_id):
    """
    Closes job

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: close response as xml
    """
    if not job_id or not self.has_active_session():
        raise Exception("Can not close job without valid job_id and an active session.")

    response = requests.post(self._get_close_job_url(job_id),
                             headers=self._get_close_job_headers(),
                             data=self._get_close_job_xml())
    response.raise_for_status()
    return response
Closes job :param job_id: job_id as returned by 'create_operation_job(...)' :return: close response as xml
close_job
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def create_batch(self, job_id, data, file_type):
    """
    Creates a batch with either a string of data or a file containing data.

    If a file is provided, this will pull the contents of the file_target into memory when running.
    That shouldn't be a problem for any files that meet the Salesforce single batch upload
    size limit (10MB) and is done to ensure compressed files can be uploaded properly.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :param data:
    :return: Returns batch_id
    """
    if not job_id or not self.has_active_session():
        raise Exception("Can not create a batch without a valid job_id and an active session.")

    headers = self._get_create_batch_content_headers(file_type)
    headers['Content-Length'] = str(len(data))

    response = requests.post(self._get_create_batch_url(job_id),
                             headers=headers,
                             data=data)
    response.raise_for_status()

    root = ET.fromstring(response.text)
    batch_id = root.find('%sid' % self.API_NS).text
    return batch_id
Creates a batch with either a string of data or a file containing data. If a file is provided, this will pull the contents of the file_target into memory when running. That shouldn't be a problem for any files that meet the Salesforce single batch upload size limit (10MB) and is done to ensure compressed files can be uploaded properly. :param job_id: job_id as returned by 'create_operation_job(...)' :param data: :return: Returns batch_id
create_batch
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def block_on_batch(self, job_id, batch_id, sleep_time_seconds=5, max_wait_time_seconds=-1):
    """
    Blocks until @batch_id is completed or failed.

    :param job_id:
    :param batch_id:
    :param sleep_time_seconds:
    :param max_wait_time_seconds:
    """
    if not job_id or not batch_id or not self.has_active_session():
        raise Exception("Can not block on a batch without a valid batch_id, job_id and an active session.")

    start_time = time.time()
    status = {}
    while max_wait_time_seconds < 0 or time.time() - start_time < max_wait_time_seconds:
        status = self._get_batch_info(job_id, batch_id)
        logger.info("Batch %s Job %s in state %s. %s records processed. %s records failed." %
                    (batch_id, job_id, status['state'], status['num_processed'], status['num_failed']))
        if status['state'].lower() in ["completed", "failed"]:
            return status
        time.sleep(sleep_time_seconds)

    # report the wait limit that was actually exceeded, not the polling interval
    raise Exception("Batch did not complete in %s seconds. Final status was: %s" %
                    (max_wait_time_seconds, status))
Blocks until @batch_id is completed or failed. :param job_id: :param batch_id: :param sleep_time_seconds: :param max_wait_time_seconds:
block_on_batch
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def get_batch_results(self, job_id, batch_id):
    """
    DEPRECATED: Use `get_batch_result_ids`
    """
    warnings.warn("get_batch_results is deprecated and only returns one batch result. "
                  "Please use get_batch_result_ids")
    return self.get_batch_result_ids(job_id, batch_id)[0]
DEPRECATED: Use `get_batch_result_ids`
get_batch_results
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def get_batch_result_ids(self, job_id, batch_id):
    """
    Get result IDs of a batch that has completed processing.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :param batch_id: batch_id as returned by 'create_batch(...)'
    :return: list of batch result IDs to be used in 'get_batch_result(...)'
    """
    response = requests.get(self._get_batch_results_url(job_id, batch_id),
                            headers=self._get_batch_info_headers())
    response.raise_for_status()

    root = ET.fromstring(response.text)
    result_ids = [r.text for r in root.findall('%sresult' % self.API_NS)]
    return result_ids
Get result IDs of a batch that has completed processing. :param job_id: job_id as returned by 'create_operation_job(...)' :param batch_id: batch_id as returned by 'create_batch(...)' :return: list of batch result IDs to be used in 'get_batch_result(...)'
get_batch_result_ids
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
def get_batch_result(self, job_id, batch_id, result_id):
    """
    Gets result back from Salesforce as whatever type was originally sent in create_batch (xml, or csv).

    :param job_id:
    :param batch_id:
    :param result_id:
    """
    response = requests.get(self._get_batch_result_url(job_id, batch_id, result_id),
                            headers=self._get_session_headers())
    response.raise_for_status()
    return response.content
Gets result back from Salesforce as whatever type was originally sent in create_batch (xml, or csv). :param job_id: :param batch_id: :param result_id:
get_batch_result
python
spotify/luigi
luigi/contrib/salesforce.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/salesforce.py
Apache-2.0
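Taken together, the bulk-API methods above compose into a flow like this sketch (the object name, query, and 'csv' file_type value are assumptions, not confirmed by the source):

sf = SalesforceAPI('user@example.com', 'password', 'security-token')
job_id = sf.create_operation_job('query', 'Contact')  # starts a session if needed
batch_id = sf.create_batch(job_id, 'SELECT Id FROM Contact', 'csv')
sf.block_on_batch(job_id, batch_id)
for result_id in sf.get_batch_result_ids(job_id, batch_id):
    data = sf.get_batch_result(job_id, batch_id, result_id)  # raw bytes, CSV here
sf.close_job(job_id)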
def latest(cls, *args, **kwargs):
    """This is cached so that requires() is deterministic."""
    date = kwargs.pop("date", datetime.date.today())
    lookback = kwargs.pop("lookback", 14)
    # hashing kwargs deterministically would be hard. Let's just lookup by equality
    key = (cls, args, kwargs, lookback, date)
    for k, v in ExternalDailySnapshot.__cache:
        if k == key:
            return v
    val = cls.__latest(date, lookback, args, kwargs)
    ExternalDailySnapshot.__cache.append((key, val))
    return val
This is cached so that requires() is deterministic.
latest
python
spotify/luigi
luigi/contrib/external_daily_snapshot.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_daily_snapshot.py
Apache-2.0
def get_authenticate_kwargs(oauth_credentials=None, http_=None):
    """Returns a dictionary with keyword arguments for use with discovery

    Prioritizes oauth_credentials or an HTTP client provided by the user.
    If neither is provided, falls back to default credentials provided by
    google's command line utilities. If that also fails, tries using
    httplib2.Http()

    Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client
    """
    if oauth_credentials:
        authenticate_kwargs = {
            "credentials": oauth_credentials
        }
    elif http_:
        authenticate_kwargs = {
            "http": http_
        }
    else:
        # neither http_ nor credentials provided
        try:
            # try default credentials
            credentials, _ = google.auth.default()
            authenticate_kwargs = {
                "credentials": credentials
            }
        except google.auth.exceptions.DefaultCredentialsError:
            # try http using httplib2
            authenticate_kwargs = {
                "http": httplib2.Http()
            }

    return authenticate_kwargs
Returns a dictionary with keyword arguments for use with discovery. Prioritizes oauth_credentials or an HTTP client provided by the user. If neither is provided, falls back to default credentials provided by google's command line utilities. If that also fails, tries using httplib2.Http(). Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client
get_authenticate_kwargs
python
spotify/luigi
luigi/contrib/gcp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/gcp.py
Apache-2.0
def rename(self, path, raise_if_exists=False):
    """
    Does not change self.path.

    Unlike ``move_dir()``, ``rename()`` might cause nested directories.
    See spotify/luigi#522
    """
    if isinstance(path, HdfsTarget):
        path = path.path
    if raise_if_exists and self.fs.exists(path):
        raise RuntimeError('Destination exists: %s' % path)
    self.fs.rename(self.path, path)
Does not change self.path. Unlike ``move_dir()``, ``rename()`` might cause nested directories. See spotify/luigi#522
rename
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def move(self, path, raise_if_exists=False):
    """
    Alias for ``rename()``
    """
    self.rename(path, raise_if_exists=raise_if_exists)
Alias for ``rename()``
move
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def move_dir(self, path):
    """
    Move using :py:class:`~luigi.contrib.hdfs.abstract_client.HdfsFileSystem.rename_dont_move`

    New since after luigi v2.1: Does not change self.path

    One could argue that the implementation should use the
    mkdir+raise_if_exists approach, but we at Spotify have had more trouble
    with that over just using plain mv. See spotify/luigi#557
    """
    self.fs.rename_dont_move(self.path, path)
Move using :py:class:`~luigi.contrib.hdfs.abstract_client.HdfsFileSystem.rename_dont_move` New since after luigi v2.1: Does not change self.path One could argue that the implementation should use the mkdir+raise_if_exists approach, but we at Spotify have had more trouble with that over just using plain mv. See spotify/luigi#557
move_dir
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def copy(self, dst_dir):
    """
    Copy to destination directory.
    """
    self.fs.copy(self.path, dst_dir)
Copy to destination directory.
copy
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def is_writable(self):
    """
    Currently only works with hadoopcli
    """
    if "/" in self.path:
        # example path: /log/ap/2013-01-17/00
        parts = self.path.split("/")
        # start with the full path and then up the tree until we can check
        length = len(parts)
        for part in range(length):
            path = "/".join(parts[0:length - part]) + "/"
            if self.fs.exists(path):
                # if the path exists and we can write there, great!
                if self._is_writable(path):
                    return True
                # if it exists and we can't =( sad panda
                else:
                    return False
        # We went through all parts of the path and we still couldn't find
        # one that exists.
        return False
Currently only works with hadoopcli
is_writable
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
    """
    Initializes a HdfsFlagTarget.

    :param path: the directory where the files are stored.
    :type path: str
    :param client:
    :type client:
    :param flag:
    :type flag: str
    """
    if path[-1] != "/":
        raise ValueError("HdfsFlagTarget requires the path to be to a "
                         "directory. It must end with a slash ( / ).")
    super(HdfsFlagTarget, self).__init__(path, format, client)
    self.flag = flag
Initializes a HdfsFlagTarget. :param path: the directory where the files are stored. :type path: str :param client: :type client: :param flag: :type flag: str
__init__
python
spotify/luigi
luigi/contrib/hdfs/target.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/target.py
Apache-2.0
def exists(self, path):
    """
    Returns true if the path exists and false otherwise.
    """
    import hdfs
    try:
        self.client.status(path)
        return True
    except hdfs.util.HdfsError as e:
        if str(e).startswith('File does not exist: '):
            return False
        else:
            raise e
Returns true if the path exists and false otherwise.
exists
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
    """
    Has no return value (just like WebHDFS)
    """
    if not parents or raise_if_exists:
        warnings.warn('webhdfs mkdir: parents/raise_if_exists not implemented')
    permission = int(oct(mode)[2:])  # Convert from int(decimal) to int(octal)
    self.client.makedirs(path, permission=permission)
Has no return value (just like WebHDFS)
mkdir
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def chmod(self, path, permissions, recursive=False):
    """
    Raise a NotImplementedError exception.
    """
    raise NotImplementedError("Webhdfs in luigi doesn't implement chmod")
Raise a NotImplementedError exception.
chmod
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def chown(self, path, owner, group, recursive=False):
    """
    Raise a NotImplementedError exception.
    """
    raise NotImplementedError("Webhdfs in luigi doesn't implement chown")
Raise a NotImplementedError exception.
chown
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def count(self, path):
    """
    Raise a NotImplementedError exception.
    """
    raise NotImplementedError("Webhdfs in luigi doesn't implement count")
Raise a NotImplementedError exception.
count
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def copy(self, path, destination):
    """
    Raise a NotImplementedError exception.
    """
    raise NotImplementedError("Webhdfs in luigi doesn't implement copy")
Raise a NotImplementedError exception.
copy
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def put(self, local_path, destination):
    """
    Restricted version of upload
    """
    self.upload(local_path, destination)
Restricted version of upload
put
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def get(self, path, local_destination):
    """
    Restricted version of download
    """
    self.download(path, local_destination)
Restricted version of download
get
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def touchz(self, path):
    """
    To touchz using the web hdfs "write" cmd.
    """
    self.client.write(path, data='', overwrite=False)
To touchz using the web hdfs "write" cmd.
touchz
python
spotify/luigi
luigi/contrib/hdfs/webhdfs_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/webhdfs_client.py
Apache-2.0
def get_configured_hadoop_version():
    """
    CDH4 (hadoop 2+) has a slightly different syntax for interacting with hdfs
    via the command line.

    The default version is CDH4, but one can override this setting with
    "cdh3" or "apache1" in the hadoop section of the config in order to
    use the old syntax.
    """
    return hadoopcli().version.lower()
CDH4 (hadoop 2+) has a slightly different syntax for interacting with hdfs via the command line. The default version is CDH4, but one can override this setting with "cdh3" or "apache1" in the hadoop section of the config in order to use the old syntax.
get_configured_hadoop_version
python
spotify/luigi
luigi/contrib/hdfs/config.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/config.py
Apache-2.0
def get_configured_hdfs_client():
    """
    This is a helper that fetches the configuration value for 'client' in
    the [hdfs] section. It will return the client that retains backwards
    compatibility when 'client' isn't configured.
    """
    return hdfs().client
This is a helper that fetches the configuration value for 'client' in the [hdfs] section. It will return the client that retains backwards compatibility when 'client' isn't configured.
get_configured_hdfs_client
python
spotify/luigi
luigi/contrib/hdfs/config.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/config.py
Apache-2.0
def tmppath(path=None, include_unix_username=True):
    """
    @param path: target path for which it is needed to generate temporary location
    @type path: str
    @type include_unix_username: bool
    @rtype: str

    Note that include_unix_username might work on windows too.
    """
    addon = "luigitemp-%09d" % random.randrange(0, 10_000_000_000)
    temp_dir = '/tmp'  # default tmp dir if none is specified in config

    # 1. Figure out to which temporary directory to place
    configured_hdfs_tmp_dir = hdfs().tmp_dir
    if configured_hdfs_tmp_dir is not None:
        # config is superior
        base_dir = configured_hdfs_tmp_dir
    elif path is not None:
        # need to copy correct schema and network location
        parsed = urlparse(path)
        base_dir = urlunparse((parsed.scheme, parsed.netloc, temp_dir, '', '', ''))
    else:
        # just system temporary directory
        base_dir = temp_dir

    # 2. Figure out what to place
    if path is not None:
        if path.startswith(temp_dir + '/'):
            # Not 100%, but some protection from directories like /tmp/tmp/file
            subdir = path[len(temp_dir):]
        else:
            # Protection from /tmp/hdfs:/dir/file
            parsed = urlparse(path)
            subdir = parsed.path
        subdir = subdir.lstrip('/') + '-'
    else:
        # just return any random temporary location
        subdir = ''

    if include_unix_username:
        subdir = os.path.join(getpass.getuser(), subdir)

    return os.path.join(base_dir, subdir + addon)
@param path: target path for which it is needed to generate temporary location @type path: str @type include_unix_username: bool @rtype: str Note that include_unix_username might work on windows too.
tmppath
python
spotify/luigi
luigi/contrib/hdfs/config.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/config.py
Apache-2.0
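Tracing the branches above, for example (user name and random suffix vary; no [hdfs] tmp_dir configured):

from luigi.contrib.hdfs.config import tmppath

tmppath('/user/alice/output')  # -> '/tmp/alice/user/alice/output-luigitemp-004267195'
tmppath()                      # -> '/tmp/alice/luigitemp-004267195'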
def rename(self, path, dest):
    """
    Rename or move a file.

    In hdfs land, "mv" is often called rename. So we add an alias for
    ``move()`` called ``rename()``. This is also to keep backward
    compatibility since ``move()`` became standardized in luigi's
    filesystem interface.
    """
    return self.move(path, dest)
Rename or move a file. In hdfs land, "mv" is often called rename. So we add an alias for ``move()`` called ``rename()``. This is also to keep backward compatibility since ``move()`` became standardized in luigi's filesystem interface.
rename
python
spotify/luigi
luigi/contrib/hdfs/abstract_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/abstract_client.py
Apache-2.0
def rename_dont_move(self, path, dest):
    """
    Override this method with an implementation that uses rename2,
    which is a rename operation that never moves.

    rename2 -
    https://github.com/apache/hadoop/blob/ae91b13/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    (lines 483-523)
    """
    # We only override this method to be able to provide a more specific
    # docstring.
    return super(HdfsFileSystem, self).rename_dont_move(path, dest)
Override this method with an implementation that uses rename2, which is a rename operation that never moves. rename2 - https://github.com/apache/hadoop/blob/ae91b13/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (lines 483-523)
rename_dont_move
python
spotify/luigi
luigi/contrib/hdfs/abstract_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/abstract_client.py
Apache-2.0
def count(self, path):
    """
    Count contents in a directory
    """
    pass
Count contents in a directory
count
python
spotify/luigi
luigi/contrib/hdfs/abstract_client.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/abstract_client.py
Apache-2.0
def create_hadoopcli_client():
    """
    Given that we want one of the hadoop cli clients, this one will return
    the right one.
    """
    version = hdfs_config.get_configured_hadoop_version()
    if version == "cdh4":
        return HdfsClient()
    elif version == "cdh3":
        return HdfsClientCdh3()
    elif version == "apache1":
        return HdfsClientApache1()
    else:
        raise ValueError("Error: Unknown version specified in Hadoop version "
                         "configuration parameter")
Given that we want one of the hadoop cli clients, this one will return the right one.
create_hadoopcli_client
python
spotify/luigi
luigi/contrib/hdfs/hadoopcli_clients.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/hadoopcli_clients.py
Apache-2.0
def exists(self, path):
    """
    Use ``hadoop fs -stat`` to check file existence.
    """
    cmd = load_hadoop_cmd() + ['fs', '-stat', path]
    logger.debug('Running file existence check: %s', subprocess.list2cmdline(cmd))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         close_fds=True, universal_newlines=True)
    stdout, stderr = p.communicate()
    if p.returncode == 0:
        return True
    else:
        not_found_pattern = "^.*No such file or directory$"
        not_found_re = re.compile(not_found_pattern)
        for line in stderr.split('\n'):
            if not_found_re.match(line):
                return False
        raise hdfs_error.HDFSCliError(cmd, p.returncode, stdout, stderr)
Use ``hadoop fs -stat`` to check file existence.
exists
python
spotify/luigi
luigi/contrib/hdfs/hadoopcli_clients.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/hadoopcli_clients.py
Apache-2.0
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    No explicit -p switch, this version of Hadoop always creates parent directories.
    """
    try:
        self.call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
    except hdfs_error.HDFSCliError as ex:
        if "File exists" in ex.stderr:
            if raise_if_exists:
                raise FileAlreadyExists(ex.stderr)
        else:
            raise
No explicit -p switch, this version of Hadoop always creates parent directories.
mkdir
python
spotify/luigi
luigi/contrib/hdfs/hadoopcli_clients.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/hadoopcli_clients.py
Apache-2.0
def get_autoconfig_client(client_cache=_AUTOCONFIG_CLIENT):
    """
    Creates the client as specified in the `luigi.cfg` configuration.
    """
    try:
        return client_cache.client
    except AttributeError:
        configured_client = hdfs_config.get_configured_hdfs_client()
        if configured_client == "webhdfs":
            client_cache.client = hdfs_webhdfs_client.WebHdfsClient()
        elif configured_client == "hadoopcli":
            client_cache.client = hdfs_hadoopcli_clients.create_hadoopcli_client()
        else:
            raise Exception("Unknown hdfs client " + configured_client)
        return client_cache.client
Creates the client as specified in the `luigi.cfg` configuration.
get_autoconfig_client
python
spotify/luigi
luigi/contrib/hdfs/clients.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hdfs/clients.py
Apache-2.0
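In client code the cached instance is normally obtained through this helper rather than constructed directly; because the default client_cache is a shared module-level object, repeated calls reuse one client. A usage sketch (the path is a placeholder):

from luigi.contrib.hdfs.clients import get_autoconfig_client

client = get_autoconfig_client()           # honors the configured hdfs client
client.mkdir('/tmp/luigi-demo')            # placeholder path
assert client is get_autoconfig_client()   # second call hits the cache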
def import_test(self):
    """Test that all modules can be imported
    """
    luigidir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..'
    )
    packagedir = os.path.join(luigidir, 'luigi')

    for root, subdirs, files in os.walk(packagedir):
        package = os.path.relpath(root, luigidir).replace('/', '.')

        if '__init__.py' in files:
            __import__(package)

        for f in files:
            if f.endswith('.py') and not f.startswith('_'):
                __import__(package + '.' + f[:-3])
Test that all modules can be imported
import_test
python
spotify/luigi
test/import_test.py
https://github.com/spotify/luigi/blob/master/test/import_test.py
Apache-2.0
def import_luigi_test(self):
    """
    Test that the top luigi package can be imported and contains the usual suspects.
    """
    import luigi

    # These should exist (if not, this will cause AttributeErrors)
    expected = [
        luigi.Event,
        luigi.Config,
        luigi.Task, luigi.ExternalTask, luigi.WrapperTask,
        luigi.Target, luigi.LocalTarget, luigi.namespace,
        luigi.RemoteScheduler, luigi.RPCError,
        luigi.run, luigi.build,
        luigi.Parameter,
        luigi.DateHourParameter, luigi.DateMinuteParameter,
        luigi.DateSecondParameter, luigi.DateParameter,
        luigi.MonthParameter, luigi.YearParameter,
        luigi.DateIntervalParameter, luigi.TimeDeltaParameter,
        luigi.IntParameter, luigi.FloatParameter,
        luigi.BoolParameter,
    ]
    self.assertGreater(len(expected), 0)
Test that the top luigi package can be imported and contains the usual suspects.
import_luigi_test
python
spotify/luigi
test/import_test.py
https://github.com/spotify/luigi/blob/master/test/import_test.py
Apache-2.0
def _is_running_from_main_thread():
    """
    Return true if we're the same thread as the one that created the Tornado
    IOLoop. In practice, the problem is that we get annoying intermittent
    failures because sometimes the KeepAliveThread jumps in and "disturbs"
    the intended flow of the test case. Worse, it fails in the terrible way
    that the KeepAliveThread is kept alive, bugging the execution of
    subsequent test cases.

    Oh, I so wish Tornado would explicitly say that you're accessing it from
    different threads and things will just not work.
    """
    return tornado.ioloop.IOLoop.current(instance=False)
Return true if we're the same thread as the one that created the Tornado IOLoop. In practice, the problem is that we get annoying intermittent failures because sometimes the KeepAliveThread jumps in and "disturbs" the intended flow of the test case. Worse, it fails in the terrible way that the KeepAliveThread is kept alive, bugging the execution of subsequent test cases. Oh, I so wish Tornado would explicitly say that you're accessing it from different threads and things will just not work.
_is_running_from_main_thread
python
spotify/luigi
test/server_test.py
https://github.com/spotify/luigi/blob/master/test/server_test.py
Apache-2.0
def test_with_cmdline(self):
    """
    Test to run against the server as a normal luigi invocation does
    """
    params = ['Task', '--scheduler-port', str(self.server_client.port), '--no-lock']
    self.assertTrue(luigi.interface.run(params))
Test to run against the server as a normal luigi invocation does
test_with_cmdline
python
spotify/luigi
test/server_test.py
https://github.com/spotify/luigi/blob/master/test/server_test.py
Apache-2.0
def patching_test(self):
    """
    Check that HAS_REQUESTS patching is meaningful
    """
    fetcher1 = luigi.rpc.RemoteScheduler()._fetcher
    with mock.patch.object(luigi.rpc, 'HAS_REQUESTS', False):
        fetcher2 = luigi.rpc.RemoteScheduler()._fetcher
    self.assertNotEqual(fetcher1.__class__, fetcher2.__class__)
Check that HAS_REQUESTS patching is meaningful
patching_test
python
spotify/luigi
test/server_test.py
https://github.com/spotify/luigi/blob/master/test/server_test.py
Apache-2.0
def run_locally(self, args):
    """
    Helper for running tests that exercise more of the stack, the command
    line parsing and task-from-name instantiation parts in particular.
    """
    temp = CmdlineParser._instance
    try:
        CmdlineParser._instance = None
        run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
    finally:
        CmdlineParser._instance = temp
    return run_exit_status
Helper for running tests that exercise more of the stack, the command line parsing and task-from-name instantiation parts in particular.
run_locally
python
spotify/luigi
test/helpers.py
https://github.com/spotify/luigi/blob/master/test/helpers.py
Apache-2.0
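A test built on this helper typically asserts only on the boolean exit status; a short sketch, assuming a test base class that exposes run_locally (the task name and parameter are placeholders):

class MyCmdlineTest(LuigiTestCase):
    def test_runs_clean(self):
        # 'SomeTask' stands in for any registered task name.
        self.assertTrue(self.run_locally(['SomeTask', '--x', '42']))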
def run_locally_split(self, space_seperated_args):
    """
    Helper for running tests that exercise more of the stack, the command
    line parsing and task-from-name instantiation parts in particular.
    """
    return self.run_locally(space_seperated_args.split(' '))
Helper for running tests that exercise more of the stack, the command line parsing and task-from-name instantiation parts in particular.
run_locally_split
python
spotify/luigi
test/helpers.py
https://github.com/spotify/luigi/blob/master/test/helpers.py
Apache-2.0
def temporary_unloaded_module(python_file_contents):
    """
    Create an importable module

    Return the name of the importable module given its file contents
    (source code)
    """
    with tempfile.NamedTemporaryFile(
            dir='test/',
            prefix="_test_time_generated_module",
            suffix='.py') as temp_module_file:
        temp_module_file.file.write(python_file_contents)
        temp_module_file.file.flush()
        temp_module_path = temp_module_file.name
        temp_module_name = re.search(r'/(_test_time_generated_module.*).py', temp_module_path).group(1)
        yield temp_module_name
Create an importable module. Return the name of the importable module given its file contents (source code)
temporary_unloaded_module
python
spotify/luigi
test/helpers.py
https://github.com/spotify/luigi/blob/master/test/helpers.py
Apache-2.0
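The single yield suggests this is used through contextlib.contextmanager where it is defined, and contents must be bytes because the temp file is opened in binary mode. A usage sketch, with the task name hypothetical:

import luigi

MODULE_CONTENTS = b'''
import luigi

class DynTask(luigi.Task):
    pass
'''

with temporary_unloaded_module(MODULE_CONTENTS) as module_name:
    # The generated module can now be loaded by name, e.g. via luigi's
    # --module flag.
    luigi.run(['--module', module_name, 'DynTask',
               '--local-scheduler', '--no-lock'])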
def test_pathlib(self):
    """Test working with pathlib.Path"""
    import pathlib
    path = pathlib.Path(self.path)
    self.assertFalse(path.exists())
    target = LocalTarget(path)
    self.assertFalse(target.exists())
    with path.open('w') as stream:
        stream.write('test me')
    self.assertTrue(target.exists())
Test working with pathlib.Path
test_pathlib
python
spotify/luigi
test/local_target_test.py
https://github.com/spotify/luigi/blob/master/test/local_target_test.py
Apache-2.0
def test_date(self):
    '''
    Adding unit test because we had a problem with this
    '''
    class DummyTask(luigi.Task):
        x = luigi.DateIntervalParameter()

    dummy_1 = DummyTask(luigi.date_interval.Year(2012))
    dummy_2 = DummyTask(luigi.date_interval.Year(2013))
    dummy_1b = DummyTask(luigi.date_interval.Year(2012))
    self.assertNotEqual(dummy_1, dummy_2)
    self.assertEqual(dummy_1, dummy_1b)
Adding unit test because we had a problem with this
test_date
python
spotify/luigi
test/instance_test.py
https://github.com/spotify/luigi/blob/master/test/instance_test.py
Apache-2.0
def _value(parameter):
    """
    A hackish way to get the "value" of a parameter.

    Previously Parameter exposed ``param_obj._value``. This is a replacement
    for that so I don't need to rewrite all test cases.
    """
    class DummyLuigiTask(luigi.Task):
        param = parameter

    return DummyLuigiTask().param
A hackish way to get the "value" of a parameter. Previously Parameter exposed ``param_obj._value``. This is a replacement for that so I don't need to rewrite all test cases.
_value
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
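So a parameter's resolved value (default, config overrides, and so on) can be read in tests without hand-building a task; a short sketch:

# The helper resolves whatever the parameter would evaluate to on a task
# instance, defaults included.
assert _value(luigi.Parameter(default='banana')) == 'banana'
assert _value(luigi.IntParameter(default=3)) == 3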
def test_local_significant_param(self):
    """ Obviously, if anything should be positional, so should local
    significant parameters """
    class MyTask(luigi.Task):
        # This could typically be "--label-company=disney"
        x = luigi.Parameter(significant=True)

    MyTask('arg')
    self.assertRaises(luigi.parameter.MissingParameterException,
                      lambda: MyTask())
Obviously, if anything should be positional, so should local significant parameters
test_local_significant_param
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def test_local_insignificant_param(self):
    """
    Ensure we have the same behavior as before a78338c
    """
    class MyTask(luigi.Task):
        # This could typically be "--num-threads=True"
        x = luigi.Parameter(significant=False)

    MyTask('arg')
    self.assertRaises(luigi.parameter.MissingParameterException,
                      lambda: MyTask())
Ensure we have the same behavior as before a78338c
test_local_insignificant_param
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def test_nonpositional_param(self):
    """
    Ensure we have the same behavior as before a78338c
    """
    class MyTask(luigi.Task):
        # This could typically be "--num-threads=10"
        x = luigi.Parameter(significant=False, positional=False)

    MyTask(x='arg')
    self.assertRaises(luigi.parameter.UnknownParameterException,
                      lambda: MyTask('arg'))
Ensure we have the same behavior as before a78338c
test_nonpositional_param
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def test_global_insignificant_param_warning(self):
    """ We don't want any kind of global param to be positional """
    with self.assertWarnsRegex(DeprecationWarning, 'is_global support is removed. Assuming positional=False'):
        class MyTask(luigi.Task):
            # This could typically be "--yarn-pool=development"
            x_g2 = luigi.Parameter(default='y', is_global=True, significant=False)

    self.assertRaises(luigi.parameter.UnknownParameterException,
                      lambda: MyTask('arg'))
We don't want any kind of global param to be positional
test_global_significant_param_warning.test_global_insignificant_param_warning
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def test_global_significant_param_warning(self):
    """ We don't want any kind of global param to be positional """
    with self.assertWarnsRegex(DeprecationWarning, 'is_global support is removed. Assuming positional=False'):
        class MyTask(luigi.Task):
            # This could typically be called "--test-dry-run"
            x_g1 = luigi.Parameter(default='y', is_global=True, significant=True)

    self.assertRaises(luigi.parameter.UnknownParameterException,
                      lambda: MyTask('arg'))
We don't want any kind of global param to be positional
test_global_significant_param_warning
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def testCommandLineWithDefault(self):
    """
    Verify that we also read from the config when we build tasks from the
    command line parsers.
    """
    class MyClass(luigi.Task):
        p_not_global = luigi.Parameter(default='banana')

        def complete(self):
            import sys
            luigi.configuration.get_config().write(sys.stdout)
            if self.p_not_global != "123":
                raise ValueError("The parameter didn't get set!!")
            return True

        def run(self):
            pass

    self.assertTrue(self.run_locally(['MyClass']))
    self.assertFalse(self.run_locally(['MyClass', '--p-not-global', '124']))
    self.assertFalse(self.run_locally(['MyClass', '--MyClass-p-not-global', '124']))
Verify that we also read from the config when we build tasks from the command line parsers.
testCommandLineWithDefault
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def testCommandLineNoDefault(self):
    """
    Verify that we also read from the config when we build tasks from the
    command line parsers.
    """
    class MyClass2(luigi.Task):
        """ TODO: Make luigi clean its register for tests. Hate this 2 dance. """
        p_not_global_no_default = luigi.Parameter()

        def complete(self):
            import sys
            luigi.configuration.get_config().write(sys.stdout)
            if self.p_not_global_no_default != "123":
                raise ValueError("The parameter didn't get set!!")
            return True

        def run(self):
            pass

    self.assertTrue(self.run_locally(['MyClass2']))
    self.assertFalse(self.run_locally(['MyClass2', '--p-not-global-no-default', '124']))
    self.assertFalse(self.run_locally(['MyClass2', '--MyClass2-p-not-global-no-default', '124']))
Verify that we also read from the config when we build tasks from the command line parsers.
testCommandLineNoDefault
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def test_range_doesnt_propagate_args(self):
    """
    Ensure that ``--task Range --of Blah --blah-arg 123`` doesn't work.

    This will of course not work unless support is explicitly added for it.
    But being a bit paranoid here and adding this test case so that if
    somebody decides to add it in the future, they'll be redirected to the
    discussion in #1304
    """
    class Blah(RunOnceTask):
        date = luigi.DateParameter()
        blah_arg = luigi.IntParameter()

    # The SystemExit is assumed to be thrown by argparse
    self.assertRaises(SystemExit, self.run_locally_split,
                      'RangeDailyBase --of Blah --start 2015-01-01 --task-limit 1 --blah-arg 123')
    self.assertTrue(self.run_locally_split(
        'RangeDailyBase --of Blah --start 2015-01-01 --task-limit 1 --Blah-blah-arg 123'))
Ensure that ``--task Range --of Blah --blah-arg 123`` doesn't work. This will of course not work unless support is explicitly added for it. But being a bit paranoid here and adding this test case so that if somebody decides to add it in the future, they'll be redirected to the discussion in #1304
test_range_doesnt_propagate_args
python
spotify/luigi
test/parameter_test.py
https://github.com/spotify/luigi/blob/master/test/parameter_test.py
Apache-2.0
def setUp(self):
    """
    Creates structure

    /test
    /test/file1
    /test/hola/
    /test/hola/file2
    /test/hola/singlefile
    /test/hola/file3
    """
    # create structure
    ftp = ftplib.FTP(HOST, USER, PWD)
    ftp.cwd('/')
    ftp.mkd('test')
    ftp.cwd('test')
    ftp.mkd('hola')
    ftp.cwd('hola')
    f2 = StringIO(FILE2)
    ftp.storbinary('STOR file2', f2)  # send the file
    f3 = StringIO(FILE3)
    ftp.storbinary('STOR file3', f3)  # send the file
    ftp.cwd('..')
    f1 = StringIO(FILE1)
    ftp.storbinary('STOR file1', f1)  # send the file
    ftp.close()
Creates structure /test /test/file1 /test/hola/ /test/hola/file2 /test/hola/singlefile /test/hola/file3
setUp
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
def test_file_remove(self):
    """
    Delete with recursive deactivated
    """
    rfs = RemoteFileSystem(HOST, USER, PWD)
    rfs.remove('/test/hola/file3', recursive=False)
    rfs.remove('/test/hola/file2', recursive=False)
    rfs.remove('/test/hola', recursive=False)
    rfs.remove('/test/file1', recursive=False)
    rfs.remove('/test', recursive=False)

    ftp = ftplib.FTP(HOST, USER, PWD)
    list_dir = ftp.nlst()
    self.assertFalse("test" in list_dir)
Delete with recursive deactivated
test_file_remove
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
def test_recursive_remove(self):
    """
    Test FTP filesystem removing files recursive
    """
    rfs = RemoteFileSystem(HOST, USER, PWD)
    rfs.remove('/test')

    ftp = ftplib.FTP(HOST, USER, PWD)
    list_dir = ftp.nlst()
    self.assertFalse("test" in list_dir)
Test FTP filesystem removing files recursive
test_recursive_remove
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
def test_single(self):
    """
    Test uploading a file with creation of intermediate folders
    """
    ftp_path = "/test/nest/luigi-test"
    local_filepath = "/tmp/luigi-test-ftp"

    # create local temp file
    with open(local_filepath, 'w') as outfile:
        outfile.write("something to fill")

    rfs = RemoteFileSystem(HOST, USER, PWD)
    rfs.put(local_filepath, ftp_path)

    # manually connect to ftp
    ftp = ftplib.FTP(HOST, USER, PWD)
    ftp.cwd("/test/nest")
    list_dir = ftp.nlst()

    # file is successfully created
    self.assertTrue("luigi-test" in list_dir)

    # delete tmp files
    ftp.delete("luigi-test")
    ftp.cwd("/")
    ftp.rmd("/test/nest")
    ftp.rmd("test")
    os.remove(local_filepath)
    ftp.close()
Test uploading a file with creation of intermediate folders
test_single
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
def test_put(self):
    """
    Test RemoteTarget put method with uploading to an FTP
    """
    local_filepath = "/tmp/luigi-remotetarget-write-test"
    remote_file = "/test/example.put.file"

    # create local temp file
    with open(local_filepath, 'w') as outfile:
        outfile.write("something to fill")

    remotetarget = RemoteTarget(remote_file, HOST, username=USER, password=PWD)
    remotetarget.put(local_filepath)

    # manually connect to ftp
    ftp = ftplib.FTP(HOST, USER, PWD)
    ftp.cwd("/test")
    list_dir = ftp.nlst()

    # file is successfully created
    self.assertTrue(remote_file.split("/")[-1] in list_dir)

    # clean
    os.remove(local_filepath)
    ftp.delete(remote_file)
    ftp.cwd("/")
    ftp.rmd("test")
    ftp.close()
Test RemoteTarget put method with uploading to an FTP
test_put
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
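Outside the test suite, the same target usually serves as a task's output; a minimal sketch, with host, credentials, and paths as placeholders:

import luigi
from luigi.contrib.ftp import RemoteTarget

class ExportReport(luigi.Task):
    """Hypothetical task; 'ftp.example.com' and the credentials are placeholders."""

    def output(self):
        return RemoteTarget('/reports/report.csv', 'ftp.example.com',
                            username='user', password='secret')

    def run(self):
        # Writing through open('w') uploads the file when the stream closes.
        with self.output().open('w') as out:
            out.write('col1,col2\n1,2\n')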
def test_get(self):
    """
    Test Remote target get method downloading a file from ftp
    """
    local_filepath = "/tmp/luigi-remotetarget-read-test"
    tmp_filepath = "/tmp/tmp-luigi-remotetarget-read-test"
    remote_file = "/test/example.get.file"

    # create local temp file
    with open(tmp_filepath, 'w') as outfile:
        outfile.write("something to fill")

    # manually upload to ftp
    ftp = ftplib.FTP(HOST, USER, PWD)
    ftp.mkd("test")
    ftp.storbinary('STOR %s' % remote_file, open(tmp_filepath, 'rb'))
    ftp.close()

    # execute command
    remotetarget = RemoteTarget(remote_file, HOST, username=USER, password=PWD)
    remotetarget.get(local_filepath)

    # make sure that it can open the file
    with remotetarget.open('r') as fin:
        self.assertEqual(fin.read(), "something to fill")

    # check for cleaning temporary files
    if sys.version_info >= (3, 2):
        # cleanup uses tempfile.TemporaryDirectory only available in 3.2+
        temppath = remotetarget._RemoteTarget__tmp_path
        self.assertTrue(os.path.exists(temppath))
        remotetarget = None  # garbage collect remotetarget
        self.assertFalse(os.path.exists(temppath))

    # file is successfully created
    self.assertTrue(os.path.exists(local_filepath))

    # test RemoteTarget with mtime
    ts = datetime.datetime.now() - datetime.timedelta(days=2)
    delayed_remotetarget = RemoteTarget(remote_file, HOST, username=USER, password=PWD, mtime=ts)
    self.assertTrue(delayed_remotetarget.exists())

    ts = datetime.datetime.now() + datetime.timedelta(days=2)  # who knows what timezone it is in
    delayed_remotetarget = RemoteTarget(remote_file, HOST, username=USER, password=PWD, mtime=ts)
    self.assertFalse(delayed_remotetarget.exists())

    # clean
    os.remove(local_filepath)
    os.remove(tmp_filepath)
    ftp = ftplib.FTP(HOST, USER, PWD)
    ftp.delete(remote_file)
    ftp.cwd("/")
    ftp.rmd("test")
    ftp.close()
Test Remote target get method downloading a file from ftp
test_get
python
spotify/luigi
test/_test_ftp.py
https://github.com/spotify/luigi/blob/master/test/_test_ftp.py
Apache-2.0
def test_allow_reschedule_with_many_missing_deps(self):
    class A(Task):
        """ Task that must run twice to succeed """
        i = luigi.IntParameter()

        runs = 0

        def complete(self):
            return self.runs >= 2

        def run(self):
            self.runs += 1

    class B(Task):
        done = False

        def requires(self):
            return map(A, range(20))

        def complete(self):
            return self.done

        def run(self):
            self.done = True

    b = B()
    w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
    self.assertTrue(w.add(b))
    self.assertFalse(w.run())

    # For b to be done, we must have rescheduled its dependencies to run them twice
    self.assertTrue(b.complete())
    self.assertTrue(all(a.complete() for a in b.deps()))
Task that must run twice to succeed
test_allow_reschedule_with_many_missing_deps
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def _test_stop_getting_new_work_build(self, sch, worker):
    """
    I got motivated to create this test case when I saw that the
    execution_summary crashed after my first attempted solution.
    """
    class KillWorkerTask(luigi.Task):
        did_actually_run = False

        def run(self):
            sch.disable_worker('my_worker_id')
            KillWorkerTask.did_actually_run = True

    class Factory:
        def create_local_scheduler(self, *args, **kwargs):
            return sch

        def create_worker(self, *args, **kwargs):
            return worker

    luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
    self.assertTrue(KillWorkerTask.did_actually_run)
I got motivated to create this test case when I saw that the execution_summary crashed after my first attempted solution.
_test_stop_getting_new_work_build
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_ping_retry(self):
    """
    Worker ping fails once. Ping continues to try to connect to scheduler

    Kind of ugly since it uses actual timing with sleep to test the thread
    """
    sch = Scheduler(
        retry_delay=100,
        remove_delay=1000,
        worker_disconnect_delay=10,
    )

    self._total_pings = 0  # class var so it can be accessed from fail_ping

    def fail_ping(worker):
        # this will be called from within keep-alive thread...
        self._total_pings += 1
        raise Exception("Some random exception")

    sch.ping = fail_ping

    with Worker(
            scheduler=sch,
            worker_id="foo",
            ping_interval=0.01  # very short between pings to make test fast
            ):
        # let the keep-alive thread run for a bit...
        time.sleep(0.1)  # yes, this is ugly but it's exactly what we need to test

    self.assertTrue(
        self._total_pings > 1,
        msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
    )
Worker ping fails once. Ping continues to try to connect to scheduler Kind of ugly since it uses actual timing with sleep to test the thread
test_ping_retry
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_external_task_retries(self, emails):
    """
    Test that we do not send error emails on the failures of external tasks
    """
    class A(luigi.ExternalTask):
        pass

    a = A()
    luigi.build([a], workers=2, local_scheduler=True)
    self.assertEqual(emails, [])
Test that we do not send error emails on the failures of external tasks
test_external_task_retries
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0