Dataset columns:
- code: string, lengths 26 to 870k
- docstring: string, lengths 1 to 65.6k
- func_name: string, lengths 1 to 194
- language: string, 1 class
- repo: string, lengths 8 to 68
- path: string, lengths 5 to 194
- url: string, lengths 46 to 254
- license: string, 4 classes
def _exists_and_is_dir(self, path): """ Auxiliary method, used by the 'accept_trailing_slash' and 'accept_trailing_slash_in_existing_dirpaths' decorators :param path: a Dropbox path that does NOT end with a '/' (even if it is a directory) """ if path == '/': return True try: md = self.conn.files_get_metadata(path) is_dir = isinstance(md, dropbox.files.FolderMetadata) return is_dir except dropbox.exceptions.ApiError: return False
Auxiliary method, used by the 'accept_trailing_slash' and 'accept_trailing_slash_in_existing_dirpaths' decorators :param path: a Dropbox path that does NOT end with a '/' (even if it is a directory)
_exists_and_is_dir
python
spotify/luigi
luigi/contrib/dropbox.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py
Apache-2.0
def __init__(self, path, client): """ Represents a file inside the Dropbox cloud which will be read :param str path: Dropbox path of the file to be read (always starting with /) :param DropboxClient client: a DropboxClient object (initialized with a valid token) """ self.path = path self.client = client self.download_file_location = os.path.join(tempfile.mkdtemp(prefix=str(time.time())), ntpath.basename(path)) self.fid = None self.closed = False
Represents a file inside the Dropbox cloud which will be read :param str path: Dropbox path of the file to be read (always starting with /) :param DropboxClient client: a DropboxClient object (initialized with a valid token)
__init__
python
spotify/luigi
luigi/contrib/dropbox.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py
Apache-2.0
def __init__(self, path, client): """ Represents a file that will be created inside the Dropbox cloud :param str path: Destination path inside Dropbox :param DropboxClient client: a DropboxClient object (initialized with a valid token, for the desired account) """ super(AtomicWritableDropboxFile, self).__init__(path) self.path = path self.client = client
Represents a file that will be created inside the Dropbox cloud :param str path: Destination path inside Dropbox :param DropboxClient client: a DropboxClient object (initialized with a valid token, for the desired account)
__init__
python
spotify/luigi
luigi/contrib/dropbox.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py
Apache-2.0
def move_to_final_destination(self): """ After editing the file locally, this function uploads it to the Dropbox cloud """ self.client.upload(self.tmp_path, self.path)
After editing the file locally, this function uploads it to the Dropbox cloud
move_to_final_destination
python
spotify/luigi
luigi/contrib/dropbox.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py
Apache-2.0
def __init__(self, path, token, format=None, user_agent="Luigi", root_namespace_id=None): """ Create a Dropbox Target for storing data in a dropbox.com account **About the path parameter** The path must start with '/' and should not end with '/' (even if it is a directory). The path must not contain adjacent slashes ('/files//img.jpg' is an invalid path) If the app has 'App folder' access, then / will refer to this app folder (which means that there is no need to prepend the name of the app to the path) Otherwise, if the app has 'full access', then / will refer to the root of the Dropbox folder **About the token parameter:** The Dropbox target requires a valid OAuth2 token as a parameter (which means that a `Dropbox API app <https://www.dropbox.com/developers/apps>`_ must be created. This app can have 'App folder' access or 'Full Dropbox', as desired). Information about generating the token can be read here: - https://dropbox-sdk-python.readthedocs.io/en/latest/api/oauth.html#dropbox.oauth.DropboxOAuth2Flow - https://blogs.dropbox.com/developers/2014/05/generate-an-access-token-for-your-own-account/ :param str path: Remote path in Dropbox (starting with '/'). :param str token: a valid OAuth2 Dropbox token. :param luigi.Format format: the luigi format to use (e.g. `luigi.format.Nop`) :param str root_namespace_id: Root namespace ID for interacting with Team Spaces """ super(DropboxTarget, self).__init__(path) if not token: raise ValueError("The token parameter must contain a valid Dropbox Oauth2 Token") self.path = path self.token = token self.client = DropboxClient(token, user_agent, root_namespace_id) self.format = format or luigi.format.get_default_format()
Create a Dropbox Target for storing data in a dropbox.com account **About the path parameter** The path must start with '/' and should not end with '/' (even if it is a directory). The path must not contain adjacent slashes ('/files//img.jpg' is an invalid path) If the app has 'App folder' access, then / will refer to this app folder (which means that there is no need to prepend the name of the app to the path) Otherwise, if the app has 'full access', then / will refer to the root of the Dropbox folder **About the token parameter:** The Dropbox target requires a valid OAuth2 token as a parameter (which means that a `Dropbox API app <https://www.dropbox.com/developers/apps>`_ must be created. This app can have 'App folder' access or 'Full Dropbox', as desired). Information about generating the token can be read here: - https://dropbox-sdk-python.readthedocs.io/en/latest/api/oauth.html#dropbox.oauth.DropboxOAuth2Flow - https://blogs.dropbox.com/developers/2014/05/generate-an-access-token-for-your-own-account/ :param str path: Remote path in Dropbox (starting with '/'). :param str token: a valid OAuth2 Dropbox token. :param luigi.Format format: the luigi format to use (e.g. `luigi.format.Nop`) :param str root_namespace_id: Root namespace ID for interacting with Team Spaces
__init__
python
spotify/luigi
luigi/contrib/dropbox.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/dropbox.py
Apache-2.0
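A minimal usage sketch for the target described above, assuming the usual luigi Task API and that DropboxTarget supports the standard FileSystemTarget open('w') protocol; the task name, path and token handling are illustrative placeholders, not part of the source.

```python
import luigi
from luigi.contrib.dropbox import DropboxTarget


class ExportReport(luigi.Task):
    """Hypothetical task that writes a small report into a Dropbox account."""
    dropbox_token = luigi.Parameter(significant=False)  # OAuth2 token, kept out of the task id

    def output(self):
        # Path must start with '/' and must not end with '/' (see the docstring above)
        return DropboxTarget('/reports/2020-01-01.csv', self.dropbox_token)

    def run(self):
        with self.output().open('w') as out:
            out.write('col_a,col_b\n1,2\n')
```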
def get_path(self): """ Returns a temporary file path based on an MD5 hash generated with the task's name and its arguments """ md5_hash = hashlib.new('md5', self.task_id.encode(), usedforsecurity=False).hexdigest() logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id) return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)
Returns a temporary file path based on an MD5 hash generated with the task's name and its arguments
get_path
python
spotify/luigi
luigi/contrib/simulate.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/simulate.py
Apache-2.0
def exists(self): """ Checks if the file exists """ return os.path.isfile(self.get_path())
Checks if the file exists
exists
python
spotify/luigi
luigi/contrib/simulate.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/simulate.py
Apache-2.0
def done(self): """ Creates a temporary file to mark the task as `done` """ logger.info('Marking %s as done', self) fn = self.get_path() try: os.makedirs(os.path.dirname(fn)) except OSError: pass open(fn, 'w').close()
Creates a temporary file to mark the task as `done`
done
python
spotify/luigi
luigi/contrib/simulate.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/simulate.py
Apache-2.0
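The three methods above (get_path, exists, done) implement a marker-file pattern: a task counts as complete once a temporary file derived from its task_id exists. A hedged sketch of how that might be driven, assuming the methods belong to luigi.contrib.simulate's RunAnywayTarget, which is constructed with the task instance; the task and helper names are invented for illustration.

```python
import luigi
from luigi.contrib.simulate import RunAnywayTarget


def do_side_effect():
    pass  # placeholder for the real work


class SideEffectTask(luigi.Task):
    """Hypothetical task whose completion is tracked only by a marker file."""

    def output(self):
        # exists() checks for the marker file that done() creates
        return RunAnywayTarget(self)

    def run(self):
        do_side_effect()
        self.output().done()  # write the marker file described above
```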
def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception): """ Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". Arguments: kind -- is either map, combiner, or reduce """ try: # Set up logging. logging.basicConfig(level=logging.WARN) kind = args is not None and args[1] or sys.argv[1] Runner().run(kind, stdin=stdin, stdout=stdout) except Exception as exc: # Dump encoded data that we will try to fetch using mechanize print_exception(exc) raise
Run either the mapper, combiner, or reducer from the class instance in the file "job-instance.pickle". Arguments: kind -- is either map, combiner, or reduce
main
python
spotify/luigi
luigi/contrib/mrrunner.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mrrunner.py
Apache-2.0
def __init__(self, mongo_client, index, collection): """ :param mongo_client: MongoClient instance :type mongo_client: MongoClient :param index: database index :type index: str :param collection: index collection :type collection: str """ self._mongo_client = mongo_client self._index = index self._collection = collection
:param mongo_client: MongoClient instance :type mongo_client: MongoClient :param index: database index :type index: str :param collection: index collection :type collection: str
__init__
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def get_collection(self): """ Return targeted mongo collection to query on """ db_mongo = self._mongo_client[self._index] return db_mongo[self._collection]
Return targeted mongo collection to query on
get_collection
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def get_index(self): """ Return targeted mongo index to query on """ return self._mongo_client[self._index]
Return targeted mongo index to query on
get_index
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def __init__(self, mongo_client, index, collection, document_id, path): """ :param document_id: targeted mongo document :type document_id: str :param path: full path to the targeted field in the mongo document :type path: str """ super(MongoCellTarget, self).__init__(mongo_client, index, collection) self._document_id = document_id self._path = path
:param document_id: targeted mongo document :type document_id: str :param path: full path to the targeted field in the mongo document :type path: str
__init__
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def exists(self): """ Test if target has been run Target is considered run if the targeted field exists """ return self.read() is not None
Test if target has been run Target is considered run if the targeted field exists
exists
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def read(self): """ Read the target value Use $project aggregate operator in order to support nested objects """ result = self.get_collection().aggregate([ {'$match': {'_id': self._document_id}}, {'$project': {'_value': '$' + self._path, '_id': False}} ]) for doc in result: if '_value' not in doc: break return doc['_value']
Read the target value Use $project aggregate operator in order to support nested objects
read
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def write(self, value): """ Write value to the target """ self.get_collection().update_one( {'_id': self._document_id}, {'$set': {self._path: value}}, upsert=True )
Write value to the target
write
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
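A short sketch tying the MongoCellTarget constructor, read() and write() above together; the connection string, database, collection and document id are placeholders, and pymongo is assumed to be installed.

```python
from pymongo import MongoClient
from luigi.contrib.mongodb import MongoCellTarget

client = MongoClient('mongodb://localhost:27017')
target = MongoCellTarget(client, 'reports_db', 'daily',
                         document_id='2020-01-01', path='stats.row_count')

if not target.exists():     # True once the nested field is present
    target.write(12345)     # $set with dot notation upserts {'stats': {'row_count': 12345}}

print(target.read())        # -> 12345
```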
def __init__(self, mongo_client, index, collection, document_ids, field): """ :param document_ids: targeted mongo documents :type document_ids: list of str :param field: targeted field in documents :type field: str """ super(MongoRangeTarget, self).__init__(mongo_client, index, collection) self._document_ids = document_ids self._field = field
:param document_ids: targeted mongo documents :type document_ids: list of str :param field: targeted field in documents :type field: str
__init__
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def exists(self): """ Test if target has been run Target is considered run if the targeted field exists in ALL documents """ return not self.get_empty_ids()
Test if target has been run Target is considered run if the targeted field exists in ALL documents
exists
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def read(self): """ Read the targets' values """ cursor = self.get_collection().find( { '_id': {'$in': self._document_ids}, self._field: {'$exists': True} }, {self._field: True} ) return {doc['_id']: doc[self._field] for doc in cursor}
Read the targets' values
read
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def write(self, values): """ Write values to the targeted documents Values need to be a dict of the form {document_id: value} """ # Insert only for docs targeted by the target filtered = {_id: value for _id, value in values.items() if _id in self._document_ids} if not filtered: return bulk = self.get_collection().initialize_ordered_bulk_op() for _id, value in filtered.items(): bulk.find({'_id': _id}).upsert() \ .update_one({'$set': {self._field: value}}) bulk.execute()
Write values to the targeted documents Values need to be a dict of the form {document_id: value}
write
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def get_empty_ids(self): """ Get ids of documents missing the targeted field """ cursor = self.get_collection().find( { '_id': {'$in': self._document_ids}, self._field: {'$exists': True} }, {'_id': True} ) return set(self._document_ids) - {doc['_id'] for doc in cursor}
Get ids of documents missing the targeted field
get_empty_ids
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def exists(self): """ Test if target has been run Target is considered run if the targeted collection exists in the database """ return self.read()
Test if target has been run Target is considered run if the targeted collection exists in the database
exists
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def read(self): """ Return if the target collection exists in the database """ return self._collection in self.get_index().collection_names()
Return if the target collection exists in the database
read
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def __init__(self, mongo_client, index, collection, target_count): """ :param target_count: Value of the desired item count in the target :type target_count: int """ super(MongoCountTarget, self).__init__(mongo_client, index, collection) self._target_count = target_count
:param target_count: Value of the desired item count in the target :type target_count: int
__init__
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def exists(self): """ Test if the target has been run Target is considered run if the number of items in the target matches value of self._target_count """ return self.read() == self._target_count
Test if the target has been run Target is considered run if the number of items in the target matches value of self._target_count
exists
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def read(self): """ Using the aggregate method to avoid inaccurate count if using a sharded cluster https://docs.mongodb.com/manual/reference/method/db.collection.count/#behavior """ for res in self.get_collection().aggregate([{'$group': {'_id': None, 'count': {'$sum': 1}}}]): return res.get('count', None) return None
Using the aggregate method to avoid inaccurate count if using a sharded cluster https://docs.mongodb.com/manual/reference/method/db.collection.count/#behavior
read
python
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mongodb.py
Apache-2.0
def main(args=sys.argv): """Run the work() method from the class instance in the file "job-instance.pickle". """ try: # Set up logging. logging.basicConfig(level=logging.WARN) work_dir = args[1] assert os.path.exists(work_dir), "First argument to lsf_runner.py must be a directory that exists" do_work_on_compute_node(work_dir) except Exception as exc: # Dump encoded data that we will try to fetch using mechanize print(exc) raise
Run the work() method from the class instance in the file "job-instance.pickle".
main
python
spotify/luigi
luigi/contrib/lsf_runner.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/lsf_runner.py
Apache-2.0
def _connect(self): """ Log in to ftp. """ if self.sftp: self._sftp_connect() else: self._ftp_connect()
Log in to ftp.
_connect
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def _close(self): """ Close ftp connection. """ if self.sftp: self._sftp_close() else: self._ftp_close()
Close ftp connection.
_close
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def exists(self, path, mtime=None): """ Return `True` if file or directory at `path` exists, False otherwise. Additional check on modified time when mtime is passed in. Return False if the file's modified time is older than mtime. """ self._connect() if self.sftp: exists = self._sftp_exists(path, mtime) else: exists = self._ftp_exists(path, mtime) self._close() return exists
Return `True` if file or directory at `path` exists, False otherwise. Additional check on modified time when mtime is passed in. Return False if the file's modified time is older than mtime.
exists
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def remove(self, path, recursive=True): """ Remove file or directory at location ``path``. :param path: a path within the FileSystem to remove. :type path: str :param recursive: if the path is a directory, recursively remove the directory and all of its descendants. Defaults to ``True``. :type recursive: bool """ self._connect() if self.sftp: self._sftp_remove(path, recursive) else: self._ftp_remove(path, recursive) self._close()
Remove file or directory at location ``path``. :param path: a path within the FileSystem to remove. :type path: str :param recursive: if the path is a directory, recursively remove the directory and all of its descendants. Defaults to ``True``. :type recursive: bool
remove
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def _rm_recursive(self, ftp, path): """ Recursively delete a directory tree on a remote server. Source: https://gist.github.com/artlogic/2632647 """ wd = ftp.pwd() # check if it is a file first, because some FTP servers don't return # correctly on ftp.nlst(file) try: ftp.cwd(path) except ftplib.all_errors: # this is a file, we will just delete the file ftp.delete(path) return try: names = ftp.nlst() except ftplib.all_errors: # some FTP servers complain when you try to list non-existent paths return for name in names: if os.path.split(name)[1] in ('.', '..'): continue try: ftp.cwd(name) # if we can cwd to it, it's a folder ftp.cwd(wd) # don't try to nuke a folder we're in ftp.cwd(path) # then go back to where we were self._rm_recursive(ftp, name) except ftplib.all_errors: ftp.delete(name) try: ftp.cwd(wd) # do not delete the folder that we are in ftp.rmd(path) except ftplib.all_errors as e: print('_rm_recursive: Could not remove {0}: {1}'.format(path, e))
Recursively delete a directory tree on a remote server. Source: https://gist.github.com/artlogic/2632647
_rm_recursive
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def put(self, local_path, path, atomic=True): """ Put file from local filesystem to (s)FTP. """ self._connect() if self.sftp: self._sftp_put(local_path, path, atomic) else: self._ftp_put(local_path, path, atomic) self._close()
Put file from local filesystem to (s)FTP.
put
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def get(self, path, local_path): """ Download file from (s)FTP to local filesystem. """ normpath = os.path.normpath(local_path) folder = os.path.dirname(normpath) if folder and not os.path.exists(folder): os.makedirs(folder) tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 10_000_000_000) # download file self._connect() if self.sftp: self._sftp_get(path, tmp_local_path) else: self._ftp_get(path, tmp_local_path) self._close() os.replace(tmp_local_path, local_path)
Download file from (s)FTP to local filesystem.
get
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def listdir(self, path='.'): """ Gets a list of the contents of path in (s)FTP """ self._connect() if self.sftp: contents = self._sftp_listdir(path) else: contents = self._ftp_listdir(path) self._close() return contents
Gets a list of the contents of path in (s)FTP
listdir
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def __init__(self, fs, path): """ Initializes an AtomicFtpFile instance. :param fs: :param path: :type path: str """ self._fs = fs super(AtomicFtpFile, self).__init__(path)
Initializes an AtomicFtpFile instance. :param fs: :param path: :type path: str
__init__
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
def open(self, mode): """ Open the FileSystem target. This method returns a file-like object which can either be read from or written to depending on the specified mode. :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will open the FileSystemTarget in write mode. Subclasses can implement additional options. :type mode: str """ if mode == 'w': return self.format.pipe_writer(AtomicFtpFile(self._fs, self.path)) elif mode == 'r': temppath = '{}-luigi-tmp-{:09d}'.format( self.path.lstrip('/'), random.randrange(0, 10_000_000_000) ) try: # store reference to the TemporaryDirectory because it will be removed on GC self.__temp_dir = tempfile.TemporaryDirectory( prefix="luigi-contrib-ftp_" ) except AttributeError: # TemporaryDirectory only available in Python3, use old behaviour in Python2 # this file will not be cleaned up automatically self.__tmp_path = os.path.join( tempfile.gettempdir(), 'luigi-contrib-ftp', temppath ) else: self.__tmp_path = os.path.join(self.__temp_dir.name, temppath) # download file to local self._fs.get(self.path, self.__tmp_path) return self.format.pipe_reader( FileWrapper(io.BufferedReader(io.FileIO(self.__tmp_path, 'r'))) ) else: raise Exception("mode must be 'r' or 'w' (got: %s)" % mode)
Open the FileSystem target. This method returns a file-like object which can either be read from or written to depending on the specified mode. :param mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will open the FileSystemTarget in write mode. Subclasses can implement additional options. :type mode: str
open
python
spotify/luigi
luigi/contrib/ftp.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ftp.py
Apache-2.0
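The (s)FTP helpers above dispatch each operation to an SFTP or plain-FTP backend behind a common interface. A hedged sketch of how they might be driven, assuming they live on luigi.contrib.ftp's RemoteFileSystem class; the constructor arguments shown are illustrative, not the verified signature, and host/credentials/paths are placeholders.

```python
from luigi.contrib.ftp import RemoteFileSystem

# Hypothetical connection parameters
fs = RemoteFileSystem('ftp.example.com', username='user', password='secret')

fs.put('local/report.csv', '/incoming/report.csv')   # upload (atomic by default)
print(fs.exists('/incoming/report.csv'))             # -> True
print(fs.listdir('/incoming'))                       # directory contents
fs.get('/incoming/report.csv', 'local/copy.csv')     # download via a temp file
fs.remove('/incoming/report.csv', recursive=False)   # delete a single file
```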
def __init__(self, host, database, user, password, table, update_id): """ Initializes a MsSqlTarget instance. :param host: MsSql server address. Possibly a host:port string. :type host: str :param database: database name. :type database: str :param user: database user :type user: str :param password: password for specified user. :type password: str :param update_id: an identifier for this data set. :type update_id: str """ if ':' in host: self.host, self.port = host.split(':') self.port = int(self.port) else: self.host = host self.port = 1433 self.database = database self.user = user self.password = password self.table = table self.update_id = update_id
Initializes a MsSqlTarget instance. :param host: MsSql server address. Possibly a host:port string. :type host: str :param database: database name. :type database: str :param user: database user :type user: str :param password: password for specified user. :type password: str :param update_id: an identifier for this data set. :type update_id: str
__init__
python
spotify/luigi
luigi/contrib/mssqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mssqldb.py
Apache-2.0
def touch(self, connection=None): """ Mark this update as complete. IMPORTANT, If the marker table doesn't exist, the connection transaction will be aborted and the connection reset. Then the marker table will be created. """ self.create_marker_table() if connection is None: connection = self.connect() connection.execute_non_query( """IF NOT EXISTS(SELECT 1 FROM {marker_table} WHERE update_id = %(update_id)s) INSERT INTO {marker_table} (update_id, target_table) VALUES (%(update_id)s, %(table)s) ELSE UPDATE t SET target_table = %(table)s , inserted = GETDATE() FROM {marker_table} t WHERE update_id = %(update_id)s """.format(marker_table=self.marker_table), {"update_id": self.update_id, "table": self.table}) # make sure update is properly marked assert self.exists(connection)
Mark this update as complete. IMPORTANT, If the marker table doesn't exist, the connection transaction will be aborted and the connection reset. Then the marker table will be created.
touch
python
spotify/luigi
luigi/contrib/mssqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mssqldb.py
Apache-2.0
def connect(self): """ Create a SQL Server connection and return a connection object """ connection = _mssql.connect(user=self.user, password=self.password, server=self.host, port=self.port, database=self.database) return connection
Create a SQL Server connection and return a connection object
connect
python
spotify/luigi
luigi/contrib/mssqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mssqldb.py
Apache-2.0
def create_marker_table(self): """ Create marker table if it doesn't exist. Use a separate connection since the transaction might have to be reset. """ connection = self.connect() try: connection.execute_non_query( """ CREATE TABLE {marker_table} ( id BIGINT NOT NULL IDENTITY(1,1), update_id VARCHAR(128) NOT NULL, target_table VARCHAR(128), inserted DATETIME DEFAULT(GETDATE()), PRIMARY KEY (update_id) ) """ .format(marker_table=self.marker_table) ) except _mssql.MssqlDatabaseException as e: # Table already exists code if e.number == 2714: pass else: raise connection.close()
Create marker table if it doesn't exist. Use a separate connection since the transaction might have to be reset.
create_marker_table
python
spotify/luigi
luigi/contrib/mssqldb.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/mssqldb.py
Apache-2.0
def metadata_columns(self): """Returns the default metadata columns. Those are columns that we want each table to have by default. """ return []
Returns the default metadata columns. Those are columns that we want each table to have by default.
metadata_columns
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def create_table(self, connection): """ Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. """ if len(self.columns[0]) == 1: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented for %r and columns types not specified" % self.table) elif len(self.columns[0]) == 2: # if columns is specified as (name, type) tuples coldefs = ','.join( '{name} {type}'.format(name=name, type=type) for name, type in self.columns ) query = "CREATE TABLE {table} ({coldefs})".format(table=self.table, coldefs=coldefs) connection.cursor().execute(query)
Override to provide code for creating the target table. By default it will be created using types (optionally) specified in columns. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction.
create_table
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
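The `columns` convention used by create_table() above (bare column names, or (name, type) pairs from which the DDL is built) is easiest to see in a concrete subclass. A hedged sketch assuming luigi.contrib.postgres.CopyToTable as that subclass; connection settings and data are placeholders.

```python
from luigi.contrib.postgres import CopyToTable


class LoadUsers(CopyToTable):
    """Hypothetical load task; connection settings are placeholders."""
    host = 'localhost'
    database = 'analytics'
    user = 'etl'
    password = 'secret'
    table = 'users'

    # (name, type) tuples let create_table() build the table automatically;
    # bare names would require overriding create_table() instead.
    columns = [
        ('id', 'INT'),
        ('name', 'TEXT'),
    ]

    def rows(self):
        yield (1, 'alice')
        yield (2, 'bob')
```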
def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id
This update id will be a unique identifier for this insert on this table.
update_id
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def init_copy(self, connection): """ Override to perform custom queries. Any code here will be run in the same transaction as the main copy, just prior to copying data. Example use cases include truncating the table or removing all data older than X in the database to keep a rolling window of data available in the table. """ # TODO: remove this after sufficient time so most people using the # clear_table attribute will have noticed it doesn't work anymore if hasattr(self, "clear_table"): raise Exception("The clear_table attribute has been removed. Override init_copy instead!") if self.enable_metadata_columns: self._add_metadata_columns(connection)
Override to perform custom queries. Any code here will be run in the same transaction as the main copy, just prior to copying data. Example use cases include truncating the table or removing all data older than X in the database to keep a rolling window of data available in the table.
init_copy
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def post_copy(self, connection): """ Override to perform custom queries. Any code here will be run in the same transaction as the main copy, just after copying data. Example use cases include cleansing data in the temp table prior to insertion into the real table. """ pass
Override to perform custom queries. Any code here will be run in the same transaction as the main copy, just after copying data. Example use cases include cleansing data in the temp table prior to insertion into the real table.
post_copy
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def host(self): """ Host of the RDBMS. Implementation should support `hostname:port` to encode port. """ return None
Host of the RDBMS. Implementation should support `hostname:port` to encode port.
host
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def port(self): """ Override to specify port separately from host. """ return None
Override to specify port separately from host.
port
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def update_id(self): """ Override to create a custom marker table 'update_id' signature for Query subclass task instances """ return self.task_id
Override to create a custom marker table 'update_id' signature for Query subclass task instances
update_id
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def output(self): """ Override with an RDBMS Target (e.g. PostgresTarget or RedshiftTarget) to record execution in a marker table """ raise NotImplementedError("This method must be overridden")
Override with an RDBMS Target (e.g. PostgresTarget or RedshiftTarget) to record execution in a marker table
output
python
spotify/luigi
luigi/contrib/rdbms.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/rdbms.py
Apache-2.0
def percentage_progress(self): """ :return: percentage of query overall progress """ return self._status.get('stats', {}).get('progressPercentage', 0.1)
:return: percentage of query overall progress
percentage_progress
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def info_uri(self): """ :return: query UI link """ return self._status.get('infoUri')
:return: query UI link
info_uri
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def execute(self, query, parameters=None, mode=None): """ :param query: query to run :param parameters: parameters to be injected into the query :param mode: "fetch" - yields rows, "watch" - yields log entries :return: """ class Mode(Enum): watch = 'watch' fetch = 'fetch' _mode = Mode(mode) if mode else Mode.watch with closing(self._connection.cursor()) as cursor: cursor.execute(query, parameters) status = self._status while status: sleep(self.sleep_time) status = cursor.poll() if status: if _mode == Mode.watch: yield status self._status = status if _mode == Mode.fetch: for row in cursor.fetchall(): yield row
:param query: query to run :param parameters: parameters to be injected into the query :param mode: "fetch" - yields rows, "watch" - yields log entries :return:
execute
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def _kwargs(): """ replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation """ args = inspect.getfullargspec(Cursor.__init__)[0][1:] for parameter in args: val = getattr(self, parameter) if val: yield parameter, val
replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation
__new__.__new__._client._kwargs
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def _client(self): def _kwargs(): """ replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation """ args = inspect.getfullargspec(Cursor.__init__)[0][1:] for parameter in args: val = getattr(self, parameter) if val: yield parameter, val connection = Connection(**dict(_kwargs())) return PrestoClient(connection=connection)
replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation
__new__._client
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def __new__(cls, name, bases, attrs): def _client(self): def _kwargs(): """ replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation """ args = inspect.getfullargspec(Cursor.__init__)[0][1:] for parameter in args: val = getattr(self, parameter) if val: yield parameter, val connection = Connection(**dict(_kwargs())) return PrestoClient(connection=connection) attrs.update({ '_client': property(_client) }) return super(cls, WithPrestoClient).__new__(cls, name, bases, attrs)
replace to ``` (_self, *args), *_ = inspect.getfullargspec(Cursor.__init__) ``` after py2-deprecation
__new__
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def count(self): if not self._count: ''' replace to self._count, *_ = next(self._client.execute(*self.count_query, 'fetch')) after py2 deprecation ''' self._count = next(self._client.execute(*self._count_query, mode='fetch'))[0] return self._count
replace to self._count, *_ = next(self._client.execute(*self.count_query, 'fetch')) after py2 deprecation
count
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def exists(self): """ :return: `True` if the given table exists and there are any rows in the given partition, `False` if the partition contains no rows or the table is absent """ try: return self.count() > 0 except DatabaseError as exception: if self._table_doesnot_exist(exception): return False except Exception: raise
:return: `True` if the given table exists and there are any rows in the given partition, `False` if the partition contains no rows or the table is absent
exists
python
spotify/luigi
luigi/contrib/presto.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/presto.py
Apache-2.0
def fix_paths(job): """ Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path. """ tmp_files = [] args = [] for x in job.args(): if isinstance(x, luigi.contrib.hdfs.HdfsTarget): # input/output if x.exists() or not job.atomic_output(): # input args.append(x.path) else: # output x_path_no_slash = x.path[:-1] if x.path[-1] == '/' else x.path y = luigi.contrib.hdfs.HdfsTarget(x_path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 10_000_000_000)) tmp_files.append((y, x_path_no_slash)) logger.info('Using temp path: %s for path %s', y.path, x.path) args.append(y.path) else: try: # hopefully the target has a path to use args.append(x.path) except AttributeError: # if there's no path then hope converting it to a string will work args.append(str(x)) return (tmp_files, args)
Coerce input arguments to use temporary files when used for output. Return a list of temporary file pairs (tmpfile, destination path) and a list of arguments. Converts each HdfsTarget to a string for the path.
fix_paths
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def jar(self): """ Path to the jar for this Hadoop Job. """ return None
Path to the jar for this Hadoop Job.
jar
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def main(self): """ optional main method for this Hadoop Job. """ return None
optional main method for this Hadoop Job.
main
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def atomic_output(self): """ If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes. """ return True
If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes.
atomic_output
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def ssh(self): """ Set this to run hadoop command remotely via ssh. It needs to be a dict that looks like {"host": "myhost", "key_file": None, "username": None, ["no_host_key_check": False]} """ return None
Set this to run hadoop command remotely via ssh. It needs to be a dict that looks like {"host": "myhost", "key_file": None, "username": None, ["no_host_key_check": False]}
ssh
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def args(self): """ Returns an array of args to pass to the job (after hadoop jar <jar> <main>). """ return []
Returns an array of args to pass to the job (after hadoop jar <jar> <main>).
args
python
spotify/luigi
luigi/contrib/hadoop_jar.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/hadoop_jar.py
Apache-2.0
def __init__(self, openers=None): """An opener registry that stores a number of opener objects used to parse Target URIs :param openers: A list of objects inherited from the Opener class. :type openers: list """ if openers is None: openers = [] self.registry = {} self.openers = {} self.default_opener = 'file' for opener in openers: self.add(opener)
An opener registry that stores a number of opener objects used to parse Target URIs :param openers: A list of objects inherited from the Opener class. :type openers: list
__init__
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
def get_opener(self, name): """Retrieve an opener for the given protocol :param name: name of the opener to open :type name: string :raises NoOpenerError: if no opener has been registered of that name """ if name not in self.registry: raise NoOpenerError("No opener for %s" % name) index = self.registry[name] return self.openers[index]
Retrieve an opener for the given protocol :param name: name of the opener to open :type name: string :raises NoOpenerError: if no opener has been registered of that name
get_opener
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
def add(self, opener): """Adds an opener to the registry :param opener: Opener object :type opener: Opener inherited object """ index = len(self.openers) self.openers[index] = opener for name in opener.names: self.registry[name] = index
Adds an opener to the registry :param opener: Opener object :type opener: Opener inherited object
add
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
def open(self, target_uri, **kwargs): """Open target uri. :param target_uri: Uri to open :type target_uri: string :returns: Target object """ target = urlsplit(target_uri, scheme=self.default_opener) opener = self.get_opener(target.scheme) query = opener.conform_query(target.query) target = opener.get_target( target.scheme, target.path, target.fragment, target.username, target.password, target.hostname, target.port, query, **kwargs ) target.opener_path = target_uri return target
Open target uri. :param target_uri: Uri to open :type target_uri: string :returns: Target object
open
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
def conform_query(cls, query): """Converts the query string from a target uri, uses cls.allowed_kwargs, and cls.filter_kwargs to drive logic. :param query: Unparsed query string :type query: urllib.parse.urlsplit(uri).query :returns: Dictionary of parsed values, everything in cls.allowed_kwargs with values set to True will be parsed as json strings. """ query = parse_qs(query, keep_blank_values=True) # Remove any unexpected keywords from the query string. if cls.filter_kwargs: query = {x: y for x, y in query.items() if x in cls.allowed_kwargs} for key, vals in query.items(): # Multiple values of the same name could be passed; use the first # Also params without values will be treated as true values if cls.allowed_kwargs.get(key, False): val = json.loads(vals[0] or 'true') else: val = vals[0] or 'true' query[key] = val return query
Converts the query string from a target uri, uses cls.allowed_kwargs, and cls.filter_kwargs to drive logic. :param query: Unparsed query string :type query: urllib.parse.urlsplit(uri).query :returns: Dictionary of parsed values, everything in cls.allowed_kwargs with values set to True will be parsed as json strings.
conform_query
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
def get_target(cls, scheme, path, fragment, username, password, hostname, port, query, **kwargs): """Override this method to use values from the parsed uri to initialize the expected target. """ raise NotImplementedError("get_target must be overridden")
Override this method to use values from the parsed uri to initialize the expected target.
get_target
python
spotify/luigi
luigi/contrib/opener.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/opener.py
Apache-2.0
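A hedged sketch of how the opener pieces above compose: a custom Opener whose get_target() consumes the parsed URI, registered and used through OpenerRegistry. The 'mem' scheme, MemOpener name and use of luigi.mock.MockTarget are invented for illustration; the Opener base class and its names/allowed_kwargs/filter_kwargs attributes are assumed to be the ones defined in this module.

```python
from luigi.contrib.opener import Opener, OpenerRegistry
from luigi.mock import MockTarget


class MemOpener(Opener):
    """Illustrative opener that maps mem: URIs onto in-memory MockTargets."""
    names = ['mem']
    allowed_kwargs = {'mirror_on_stderr': True}  # True -> conform_query() parses the value as JSON
    filter_kwargs = True

    @classmethod
    def get_target(cls, scheme, path, fragment, username,
                   password, hostname, port, query, **kwargs):
        # The parsed query dict is forwarded to the target constructor
        return MockTarget(path, **query)


registry = OpenerRegistry([MemOpener()])
target = registry.open('mem:results/part-0?mirror_on_stderr=true')
```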
def relpath(self, current_file, rel_path): """ Compute path given current file and relative path. """ script_dir = os.path.dirname(os.path.abspath(current_file)) rel_path = os.path.abspath(os.path.join(script_dir, rel_path)) return rel_path
Compute path given current file and relative path.
relpath
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def source(self): """ Path to the scala source for this Scalding Job Either one of source() or jar() must be specified. """ return None
Path to the scala source for this Scalding Job Either one of source() or jar() must be specified.
source
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def jar(self): """ Path to the jar file for this Scalding Job Either one of source() or jar() must be specified. """ return None
Path to the jar file for this Scalding Job Either one of source() or jar() must be specified.
jar
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def extra_jars(self): """ Extra jars for building and running this Scalding Job. """ return []
Extra jars for building and running this Scalding Job.
extra_jars
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def job_class(self): """ optional main job class for this Scalding Job. """ return None
optional main job class for this Scalding Job.
job_class
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def atomic_output(self): """ If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes. """ return True
If True, then rewrite output arguments to be temp locations and atomically move them into place after the job finishes.
atomic_output
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def job_args(self): """ Extra arguments to pass to the Scalding job. """ return []
Extra arguments to pass to the Scalding job.
job_args
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def args(self): """ Returns an array of args to pass to the job. """ arglist = [] for k, v in self.requires_hadoop().items(): arglist.append('--' + k) arglist.extend([t.output().path for t in flatten(v)]) arglist.extend(['--output', self.output()]) arglist.extend(self.job_args()) return arglist
Returns an array of args to pass to the job.
args
python
spotify/luigi
luigi/contrib/scalding.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/scalding.py
Apache-2.0
def __init__(self, account_name=None, account_key=None, sas_token=None, **kwargs): """ :param str account_name: The storage account name. This is used to authenticate requests signed with an account key\ and to construct the storage endpoint. It is required unless a connection string is given,\ or if a custom domain is used with anonymous authentication. :param str account_key: The storage account key. This is used for shared key authentication. :param str sas_token: A shared access signature token to use to authenticate requests instead of the account key. :param dict kwargs: A key-value pair to provide additional connection options. * `protocol` - The protocol to use for requests. Defaults to https. * `connection_string` - If specified, this will override all other parameters besides request session.\ See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format * `endpoint_suffix` - The host base component of the url, minus the account name. Defaults to Azure\ (core.windows.net). Override this to use the China cloud (core.chinacloudapi.cn). * `custom_domain` - The custom domain to use. This can be set in the Azure Portal. For example, ‘www.mydomain.com’. * `token_credential` - A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration. """ if kwargs.get("custom_domain"): account_url = "{protocol}://{custom_domain}/{account_name}".format(protocol=kwargs.get("protocol", "https"), custom_domain=kwargs.get("custom_domain"), account_name=account_name) else: account_url = "{protocol}://{account_name}.blob.{endpoint_suffix}".format(protocol=kwargs.get("protocol", "https"), account_name=account_name, endpoint_suffix=kwargs.get( "endpoint_suffix", "core.windows.net")) self.options = { "account_name": account_name, "account_key": account_key, "account_url": account_url, "sas_token": sas_token} self.kwargs = kwargs
:param str account_name: The storage account name. This is used to authenticate requests signed with an account key\ and to construct the storage endpoint. It is required unless a connection string is given,\ or if a custom domain is used with anonymous authentication. :param str account_key: The storage account key. This is used for shared key authentication. :param str sas_token: A shared access signature token to use to authenticate requests instead of the account key. :param dict kwargs: A key-value pair to provide additional connection options. * `protocol` - The protocol to use for requests. Defaults to https. * `connection_string` - If specified, this will override all other parameters besides request session.\ See http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ for the connection string format * `endpoint_suffix` - The host base component of the url, minus the account name. Defaults to Azure\ (core.windows.net). Override this to use the China cloud (core.chinacloudapi.cn). * `custom_domain` - The custom domain to use. This can be set in the Azure Portal. For example, ‘www.mydomain.com’. * `token_credential` - A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration.
__init__
python
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/azureblob.py
Apache-2.0
def isdir(self, path): """ Azure Blob Storage has no concept of directories. It always returns False :param str path: Path of the Azure blob storage :return: False """ return False
Azure Blob Storage has no concept of directories. It always returns False :param str path: Path of the Azure blob storage :return: False
isdir
python
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/azureblob.py
Apache-2.0
def __init__(self, container, blob, client=None, format=None, download_when_reading=True, **kwargs): """ :param str account_name: The storage account name. This is used to authenticate requests signed with an account key and to construct the storage endpoint. It is required unless a connection string is given, or if a custom domain is used with anonymous authentication. :param str container: The azure container in which the blob needs to be stored :param str blob: The name of the blob under container specified :param str client: An instance of :class:`.AzureBlobClient`. If none is specified, anonymous access would be used :param str format: An instance of :class:`luigi.format`. :param bool download_when_reading: Determines whether the file has to be downloaded to temporary location on disk. Defaults to `True`. Pass the argument **progress_callback** with signature *(func(current, total))* to get real time progress of upload """ super(AzureBlobTarget, self).__init__(os.path.join(container, blob)) if format is None: format = get_default_format() self.container = container self.blob = blob self.client = client or AzureBlobClient() self.format = format self.download_when_reading = download_when_reading self.azure_blob_options = kwargs
:param str account_name: The storage account name. This is used to authenticate requests signed with an account key and to construct the storage endpoint. It is required unless a connection string is given, or if a custom domain is used with anonymous authentication. :param str container: The azure container in which the blob needs to be stored :param str blob: The name of the blob under container specified :param str client: An instance of :class:`.AzureBlobClient`. If none is specified, anonymous access would be used :param str format: An instance of :class:`luigi.format`. :param bool download_when_reading: Determines whether the file has to be downloaded to temporary location on disk. Defaults to `True`. Pass the argument **progress_callback** with signature *(func(current, total))* to get real time progress of upload
__init__
python
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/azureblob.py
Apache-2.0
def fs(self): """ The :py:class:`FileSystem` associated with :class:`.AzureBlobTarget` """ return self.client
The :py:class:`FileSystem` associated with :class:`.AzureBlobTarget`
fs
python
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/azureblob.py
Apache-2.0
def open(self, mode): """ Open the target for reading or writing :param char mode: 'r' for reading and 'w' for writing. 'b' is not supported and will be stripped if used. For binary mode, use `format` :return: * :class:`.ReadableAzureBlobFile` if 'r' * :class:`.AtomicAzureBlobFile` if 'w' """ if mode not in ('r', 'w'): raise ValueError("Unsupported open mode '%s'" % mode) if mode == 'r': return self.format.pipe_reader(ReadableAzureBlobFile(self.container, self.blob, self.client, self.download_when_reading, **self.azure_blob_options)) else: return self.format.pipe_writer(AtomicAzureBlobFile(self.container, self.blob, self.client, **self.azure_blob_options))
Open the target for reading or writing :param char mode: 'r' for reading and 'w' for writing. 'b' is not supported and will be stripped if used. For binary mode, use `format` :return: * :class:`.ReadableAzureBlobFile` if 'r' * :class:`.AtomicAzureBlobFile` if 'w'
open
python
spotify/luigi
luigi/contrib/azureblob.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/azureblob.py
Apache-2.0
def program_args(self): """ Override this method to map your task parameters to the program arguments :return: list to pass as ``args`` to :py:class:`subprocess.Popen` """ raise NotImplementedError
Override this method to map your task parameters to the program arguments :return: list to pass as ``args`` to :py:class:`subprocess.Popen`
program_args
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
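A hedged sketch of the program_args() override described above, assuming the surrounding class is luigi.contrib.external_program.ExternalProgramTask; the task name, command and paths are placeholders.

```python
import luigi
from luigi.contrib.external_program import ExternalProgramTask


class CompressLogs(ExternalProgramTask):
    """Hypothetical task that shells out to gzip via program_args()."""
    input_path = luigi.Parameter()

    def program_args(self):
        # Returned list is passed directly as ``args`` to subprocess.Popen
        return ['gzip', '-k', self.input_path]

    def output(self):
        return luigi.LocalTarget(self.input_path + '.gz')
```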
def program_environment(self): """ Override this method to control environment variables for the program :return: dict mapping environment variable names to values """ env = os.environ.copy() return env
Override this method to control environment variables for the program :return: dict mapping environment variable names to values
program_environment
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
def always_log_stderr(self): """ When True, stderr will be logged even if program execution succeeded Override to False to log stderr only when program execution fails. """ return True
When True, stderr will be logged even if program execution succeeded Override to False to log stderr only when program execution fails.
always_log_stderr
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
def build_tracking_url(self, logs_output): """ This method is intended for transforming a pattern match in the logs into a URL :param logs_output: Found match of `self.tracking_url_pattern` :return: a tracking URL for the task """ return logs_output
This method is intended for transforming a pattern match in the logs into a URL :param logs_output: Found match of `self.tracking_url_pattern` :return: a tracking URL for the task
build_tracking_url
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
def _track_url_by_pattern(): """ Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent. If tmp_stdout is passed, also appends lines to this file. """ pattern = re.compile(self.tracking_url_pattern) for new_line in iter(pipe_to_read.readline, ''): if new_line: if file_to_write: file_to_write.write(new_line) match = re.search(pattern, new_line.decode('utf-8')) if match: self.set_tracking_url( self.build_tracking_url(match.group(1)) ) else: file_to_write.flush() sleep(time_to_sleep)
Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent. If tmp_stdout is passed, also appends lines to this file.
_proc_with_tracking_url_context._track_url_by_pattern
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
def _proc_with_tracking_url_context(self, proc_args, proc_kwargs): time_to_sleep = 0.5 file_to_write = proc_kwargs.get(self.stream_for_searching_tracking_url) proc_kwargs.update({self.stream_for_searching_tracking_url: subprocess.PIPE}) main_proc = subprocess.Popen(proc_args, **proc_kwargs) pipe_to_read = main_proc.stderr if self.stream_for_searching_tracking_url == 'stderr' else main_proc.stdout def _track_url_by_pattern(): """ Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent. If tmp_stdout is passed, also appends lines to this file. """ pattern = re.compile(self.tracking_url_pattern) for new_line in iter(pipe_to_read.readline, ''): if new_line: if file_to_write: file_to_write.write(new_line) match = re.search(pattern, new_line.decode('utf-8')) if match: self.set_tracking_url( self.build_tracking_url(match.group(1)) ) else: file_to_write.flush() sleep(time_to_sleep) track_proc = Process(target=_track_url_by_pattern) try: track_proc.start() with ExternalProgramRunContext(main_proc): yield main_proc finally: # need to wait a bit to let the subprocess read the last lines track_proc.join(time_to_sleep * 2) if track_proc.is_alive(): track_proc.terminate() pipe_to_read.close()
Scans the pipe looking for a passed pattern, if the pattern is found, `set_tracking_url` callback is sent. If tmp_stdout is passed, also appends lines to this file.
_proc_with_tracking_url_context
python
spotify/luigi
luigi/contrib/external_program.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/external_program.py
Apache-2.0
def __init__(self, connection_string, target_table, update_id, echo=False, connect_args=None): """ Constructor for the SQLAlchemyTarget. :param connection_string: SQLAlchemy connection string :type connection_string: str :param target_table: The table name for the data :type target_table: str :param update_id: An identifier for this data set :type update_id: str :param echo: Flag to setup SQLAlchemy logging :type echo: bool :param connect_args: A dictionary of connection arguments :type connect_args: dict :return: """ if connect_args is None: connect_args = {} self.target_table = target_table self.update_id = update_id self.connection_string = connection_string self.echo = echo self.connect_args = connect_args self.marker_table_bound = None
Constructor for the SQLAlchemyTarget. :param connection_string: SQLAlchemy connection string :type connection_string: str :param target_table: The table name for the data :type target_table: str :param update_id: An identifier for this data set :type update_id: str :param echo: Flag to setup SQLAlchemy logging :type echo: bool :param connect_args: A dictionary of connection arguments :type connect_args: dict :return:
__init__
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
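A minimal sketch of constructing the target directly; the connection string, table name and update id are placeholders.

from luigi.contrib.sqla import SQLAlchemyTarget

target = SQLAlchemyTarget(
    connection_string='sqlite:///example.db',      # placeholder database
    target_table='daily_metrics',                  # placeholder table name
    update_id='daily_metrics_2024-01-01',          # placeholder identifier for this load
)
if not target.exists():
    # ... write the data for this update, then record completion
    target.touch()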
def engine(self): """ Return an engine instance, creating it if it doesn't exist. Recreate the engine connection if it wasn't originally created by the current process. """ pid = os.getpid() conn = SQLAlchemyTarget._engine_dict.get(self.connection_string) if not conn or conn.pid != pid: # create and reset connection engine = sqlalchemy.create_engine( self.connection_string, connect_args=self.connect_args, echo=self.echo ) SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid) return SQLAlchemyTarget._engine_dict[self.connection_string].engine
Return an engine instance, creating it if it doesn't exist. Recreate the engine connection if it wasn't originally created by the current process.
engine
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
def touch(self): """ Mark this update as complete. """ if self.marker_table_bound is None: self.create_marker_table() table = self.marker_table_bound id_exists = self.exists() with self.engine.begin() as conn: if not id_exists: ins = table.insert().values(update_id=self.update_id, target_table=self.target_table, inserted=datetime.datetime.now()) else: ins = table.update().where(sqlalchemy.and_(table.c.update_id == self.update_id, table.c.target_table == self.target_table)).\ values(update_id=self.update_id, target_table=self.target_table, inserted=datetime.datetime.now()) conn.execute(ins) assert self.exists()
Mark this update as complete.
touch
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
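A sketch of the usual pattern around touch(): a task whose output() is a SQLAlchemyTarget and whose run() marks the update complete after loading; names and connection string are placeholders.

import luigi
from luigi.contrib.sqla import SQLAlchemyTarget

class LoadMetrics(luigi.Task):
    date = luigi.DateParameter()

    def output(self):
        return SQLAlchemyTarget(
            connection_string='sqlite:///example.db',   # placeholder database
            target_table='metrics',
            update_id=self.task_id,                     # unique per task instance
        )

    def run(self):
        # ... insert the rows for self.date here ...
        self.output().touch()   # after this, complete() is satisfied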
def create_marker_table(self): """ Create marker table if it doesn't exist. Using a separate connection since the transaction might have to be reset. """ if self.marker_table is None: self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates') engine = self.engine with engine.begin() as con: metadata = sqlalchemy.MetaData() if not con.dialect.has_table(con, self.marker_table): self.marker_table_bound = sqlalchemy.Table( self.marker_table, metadata, sqlalchemy.Column("update_id", sqlalchemy.String(128), primary_key=True), sqlalchemy.Column("target_table", sqlalchemy.String(128)), sqlalchemy.Column("inserted", sqlalchemy.DateTime, default=datetime.datetime.now())) metadata.create_all(engine) else: metadata.reflect(only=[self.marker_table], bind=engine) self.marker_table_bound = metadata.tables[self.marker_table]
Create marker table if it doesn't exist. Using a separate connection since the transaction might have to be reset.
create_marker_table
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
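The marker table name is read from the [sqlalchemy] section of the luigi configuration; a sketch of the corresponding luigi.cfg entry, using the default value referenced above.

[sqlalchemy]
marker-table = table_updates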
def create_table(self, engine): """ Override to provide code for creating the target table. By default it will be created using types specified in columns. If the table exists, then it binds to the existing table. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. :param engine: The sqlalchemy engine instance :type engine: object """ def construct_sqla_columns(columns): retval = [sqlalchemy.Column(*c[0], **c[1]) for c in columns] return retval needs_setup = (len(self.columns) == 0) or (False in [len(c) == 2 for c in self.columns]) if not self.reflect else False if needs_setup: # only names of columns specified, no types raise NotImplementedError("create_table() not implemented for %r and columns types not specified" % self.table) else: # if columns is specified as (name, type) tuples with engine.begin() as con: if self.schema: metadata = sqlalchemy.MetaData(schema=self.schema) else: metadata = sqlalchemy.MetaData() try: if not con.dialect.has_table(con, self.table, self.schema or None): sqla_columns = construct_sqla_columns(self.columns) self.table_bound = sqlalchemy.Table(self.table, metadata, *sqla_columns) metadata.create_all(engine) else: full_table = '.'.join([self.schema, self.table]) if self.schema else self.table metadata.reflect(only=[self.table], bind=engine) self.table_bound = metadata.tables[full_table] except Exception as e: self._logger.exception(self.table + str(e))
Override to provide code for creating the target table. By default it will be created using types specified in columns. If the table exists, then it binds to the existing table. If overridden, use the provided connection object for setting up the table in order to create the table and insert data using the same transaction. :param engine: The sqlalchemy engine instance :type engine: object
create_table
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
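A sketch of the (args, kwargs) column specification that the default create_table() consumes; the table name and connection string are placeholders, and rows() is overridden with inline data only to keep the example self-contained.

import sqlalchemy
from luigi.contrib.sqla import CopyToTable

class WriteItems(CopyToTable):
    # each entry is (positional args, keyword args) for sqlalchemy.Column
    columns = [
        (["id", sqlalchemy.Integer], {"primary_key": True}),
        (["name", sqlalchemy.String(64)], {}),
    ]
    connection_string = 'sqlite:///example.db'   # placeholder
    table = 'items'                              # placeholder

    def rows(self):
        # inline rows instead of reading an upstream file
        yield (1, "alpha")
        yield (2, "beta")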
def update_id(self): """ This update id will be a unique identifier for this insert on this table. """ return self.task_id
This update id will be a unique identifier for this insert on this table.
update_id
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
def rows(self): """ Return/yield tuples or lists corresponding to each row to be inserted. This method can be overridden for custom file types or formats. """ with self.input().open('r') as fobj: for line in fobj: yield line.strip("\n").split(self.column_separator)
Return/yield tuples or lists corresponding to each row to be inserted. This method can be overridden for custom file types or formats.
rows
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
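A sketch of overriding rows() to parse the upstream file as CSV; the upstream task, file path and table details are hypothetical.

import csv

import luigi
import sqlalchemy
from luigi.contrib.sqla import CopyToTable

class InputCsv(luigi.ExternalTask):
    def output(self):
        return luigi.LocalTarget('items.csv')   # hypothetical existing file

class CopyCsv(CopyToTable):
    columns = [
        (["id", sqlalchemy.Integer], {"primary_key": True}),
        (["name", sqlalchemy.String(64)], {}),
    ]
    connection_string = 'sqlite:///example.db'   # placeholder
    table = 'items'                              # placeholder

    def requires(self):
        return InputCsv()

    def rows(self):
        # parse with the csv module instead of splitting on column_separator
        with self.input().open('r') as fobj:
            for row in csv.reader(fobj):
                yield row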
def copy(self, conn, ins_rows, table_bound): """ This method does the actual insertion of the rows of data given by ins_rows into the database. A task that needs row updates instead of insertions should overload this method. :param conn: The sqlalchemy connection object :param ins_rows: The dictionary of rows with the keys in the format _<column_name>. For example if you have a table with a column name "property", then the key in the dictionary would be "_property". This format is consistent with the bindparam usage in sqlalchemy. :param table_bound: The object referring to the table :return: """ bound_cols = dict((c, sqlalchemy.bindparam("_" + c.key)) for c in table_bound.columns) ins = table_bound.insert().values(bound_cols) conn.execute(ins, ins_rows)
This method does the actual insertion of the rows of data given by ins_rows into the database. A task that needs row updates instead of insertions should overload this method. :param conn: The sqlalchemy connection object :param ins_rows: The dictionary of rows with the keys in the format _<column_name>. For example if you have a table with a column name "property", then the key in the dictionary would be "_property". This format is consistent with the bindparam usage in sqlalchemy. :param table_bound: The object referring to the table :return:
copy
python
spotify/luigi
luigi/contrib/sqla.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/sqla.py
Apache-2.0
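A sketch of overriding copy() to update existing rows instead of inserting them; it assumes the target table has an "id" column and reuses the "_<column_name>" bindparam convention described above.

import sqlalchemy
from luigi.contrib.sqla import CopyToTable

class UpdateItems(CopyToTable):
    # rows() / requires() omitted for brevity; see the earlier sketches
    columns = [
        (["id", sqlalchemy.Integer], {"primary_key": True}),
        (["name", sqlalchemy.String(64)], {}),
    ]
    connection_string = 'sqlite:///example.db'   # placeholder
    table = 'items'                              # placeholder

    def copy(self, conn, ins_rows, table_bound):
        # bind every column except the key, and match rows on "_id"
        bound_cols = dict(
            (c, sqlalchemy.bindparam("_" + c.key))
            for c in table_bound.columns if c.key != "id"
        )
        stmt = (table_bound.update()
                .where(table_bound.c.id == sqlalchemy.bindparam("_id"))
                .values(bound_cols))
        conn.execute(stmt, ins_rows)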
def _get_task_statuses(task_ids, cluster): """ Retrieve task statuses from ECS API Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids """ response = client.describe_tasks(tasks=task_ids, cluster=cluster) # Error checking if response['failures'] != []: raise Exception('There were some failures:\n{0}'.format( response['failures'])) status_code = response['ResponseMetadata']['HTTPStatusCode'] if status_code != 200: msg = 'Task status request received status code {0}:\n{1}' raise Exception(msg.format(status_code, response)) return [t['lastStatus'] for t in response['tasks']]
Retrieve task statuses from ECS API Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
_get_task_statuses
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
def _track_tasks(task_ids, cluster): """Poll task status until STOPPED""" while True: statuses = _get_task_statuses(task_ids, cluster) if all([status == 'STOPPED' for status in statuses]): logger.info('ECS tasks {0} STOPPED'.format(','.join(task_ids))) break time.sleep(POLL_TIME) logger.debug('ECS task status for tasks {0}: {1}'.format(task_ids, statuses))
Poll task status until STOPPED
_track_tasks
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
def ecs_task_ids(self):
    """Expose the ECS task IDs"""
    if hasattr(self, '_task_ids'):
        return self._task_ids
Expose the ECS task IDs
ecs_task_ids
python
spotify/luigi
luigi/contrib/ecs.py
https://github.com/spotify/luigi/blob/master/luigi/contrib/ecs.py
Apache-2.0
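A sketch of an ECSTask subclass and reading back the started task IDs; the ARN, container name and command are placeholders, and the task_def_arn parameter and the overridable command property are assumed from the rest of this module.

import luigi
from luigi.contrib.ecs import ECSTask

class SleepTask(ECSTask):
    @property
    def command(self):
        # container overrides handed to the ECS run_task call; names are illustrative
        return [{'name': 'hello', 'command': ['/bin/sleep', '10']}]

# the ARN below is a placeholder for a registered ECS task definition
task = SleepTask(task_def_arn='arn:aws:ecs:us-east-1:123456789012:task-definition/hello:1')
# luigi.build([task], local_scheduler=True)   # uncomment to actually submit the task
print(task.ecs_task_ids)   # populated once run() has started the ECS tasks, otherwise None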