Dataset schema (column, type, observed range):

    id                int32     0 to 252k
    repo              string    length 7 to 55
    path              string    length 4 to 127
    func_name         string    length 1 to 88
    original_string   string    length 75 to 19.8k
    language          string    1 distinct value
    code              string    length 51 to 19.8k
    code_tokens       sequence
    docstring         string    length 3 to 17.3k
    docstring_tokens  sequence
    sha               string    length 40 (fixed)
    url               string    length 87 to 242
247,000
turicas/rows
rows/utils.py
csv_to_sqlite
def csv_to_sqlite(
    input_filename,
    output_filename,
    samples=None,
    dialect=None,
    batch_size=10000,
    encoding="utf-8",
    callback=None,
    force_types=None,
    chunk_size=8388608,
    table_name="table1",
    schema=None,
):
    "Export a CSV file to SQLite, based on field type detection from samples"
    # TODO: automatically detect encoding if encoding == `None`
    # TODO: should be able to specify fields
    # TODO: if table_name is "2019" the final name will be "field_2019" - must
    # be "table_2019"
    # TODO: if schema is provided and the names are in uppercase, this function
    # will fail

    if dialect is None:
        # Get a sample to detect dialect
        fobj = open_compressed(input_filename, mode="rb")
        sample = fobj.read(chunk_size)
        dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding)
    elif isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if schema is None:
        # Identify data types
        fobj = open_compressed(input_filename, encoding=encoding)
        data = list(islice(csv.DictReader(fobj, dialect=dialect), samples))
        schema = rows.import_from_dicts(data).fields
        if force_types is not None:
            schema.update(force_types)

    # Create lazy table object to be converted
    # TODO: this laziness feature will be incorporated into the library soon so
    # we can call here `rows.import_from_csv` instead of `csv.reader`.
    reader = csv.reader(
        open_compressed(input_filename, encoding=encoding), dialect=dialect
    )
    header = make_header(next(reader))  # skip header
    table = rows.Table(
        fields=OrderedDict([(field, schema[field]) for field in header])
    )
    table._rows = reader

    # Export to SQLite
    return rows.export_to_sqlite(
        table,
        output_filename,
        table_name=table_name,
        batch_size=batch_size,
        callback=callback,
    )
python
Export a CSV file to SQLite, based on field type detection from samples
[ "Export", "a", "CSV", "file", "to", "SQLite", "based", "on", "field", "type", "detection", "from", "samples" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L560-L613
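A minimal usage sketch for csv_to_sqlite; the file names and the force_types override are hypothetical, and only parameters visible in the signature above are used:

    table = csv_to_sqlite(
        "customers.csv.gz",      # hypothetical input; open_compressed handles compression
        "customers.sqlite",
        samples=5000,            # rows inspected for field type detection
        table_name="customers",  # instead of the default "table1"
        force_types={"id": rows.fields.IntegerField},  # assumed field class; overrides detection
    )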
247,001
turicas/rows
rows/utils.py
sqlite_to_csv
def sqlite_to_csv(
    input_filename,
    table_name,
    output_filename,
    dialect=csv.excel,
    batch_size=10000,
    encoding="utf-8",
    callback=None,
    query=None,
):
    """Export a table inside a SQLite database to CSV"""
    # TODO: should be able to specify fields
    # TODO: should be able to specify custom query

    if isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if query is None:
        query = "SELECT * FROM {}".format(table_name)

    connection = sqlite3.Connection(input_filename)
    cursor = connection.cursor()
    result = cursor.execute(query)
    header = [item[0] for item in cursor.description]

    fobj = open_compressed(output_filename, mode="w", encoding=encoding)
    writer = csv.writer(fobj, dialect=dialect)
    writer.writerow(header)

    total_written = 0
    for batch in rows.plugins.utils.ipartition(result, batch_size):
        writer.writerows(batch)
        written = len(batch)
        total_written += written
        if callback:
            callback(written, total_written)

    fobj.close()
python
Export a table inside a SQLite database to CSV
[ "Export", "a", "table", "inside", "a", "SQLite", "database", "to", "CSV" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L616-L650
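A short sketch of sqlite_to_csv with a progress callback; the database and table names are illustrative:

    def progress(written, total_written):
        # Receives rows written in this batch and the running total, per the loop above.
        print("{} rows written ({} so far)".format(written, total_written))

    sqlite_to_csv(
        "customers.sqlite",
        "customers",
        "customers.csv",
        batch_size=5000,
        callback=progress,
    )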
247,002
turicas/rows
rows/utils.py
execute_command
def execute_command(command):
    """Execute a command and return its output"""
    command = shlex.split(command)
    try:
        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except FileNotFoundError:
        raise RuntimeError("Command not found: {}".format(repr(command)))
    process.wait()
    # TODO: may use another codec to decode
    if process.returncode > 0:
        stderr = process.stderr.read().decode("utf-8")
        raise ValueError("Error executing command: {}".format(repr(stderr)))
    return process.stdout.read().decode("utf-8")
python
Execute a command and return its output
[ "Execute", "a", "command", "and", "return", "its", "output" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L706-L724
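Because execute_command shell-splits the string itself and decodes stdout as UTF-8, a call is a single command line (the command shown is only an example):

    output = execute_command("gzip --list data.csv.gz")
    # RuntimeError if the binary is missing; ValueError on a non-zero exit status.
    print(output)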
247,003
turicas/rows
rows/utils.py
uncompressed_size
def uncompressed_size(filename):
    """Return the uncompressed size for a file by executing commands

    Note: due to a limitation in gzip format, uncompressed files greater
    than 4GiB will have a wrong value.
    """
    quoted_filename = shlex.quote(filename)

    # TODO: get filetype from file-magic, if available
    if str(filename).lower().endswith(".xz"):
        output = execute_command('xz --list "{}"'.format(quoted_filename))
        compressed, uncompressed = regexp_sizes.findall(output)
        value, unit = uncompressed.split()
        value = float(value.replace(",", ""))
        return int(value * MULTIPLIERS[unit])

    elif str(filename).lower().endswith(".gz"):
        # XXX: gzip only uses 32 bits to store uncompressed size, so if the
        # uncompressed size is greater than 4GiB, the value returned will be
        # incorrect.
        output = execute_command('gzip --list "{}"'.format(quoted_filename))
        lines = [line.split() for line in output.splitlines()]
        header, data = lines[0], lines[1]
        gzip_data = dict(zip(header, data))
        return int(gzip_data["uncompressed"])

    else:
        raise ValueError('Unrecognized file type for "{}".'.format(filename))
python
Return the uncompressed size for a file by executing commands

Note: due to a limitation in gzip format, uncompressed files greater than 4GiB will have a wrong value.
[ "Return", "the", "uncompressed", "size", "for", "a", "file", "by", "executing", "commands" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L727-L755
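A usage sketch; the filename is hypothetical, and only .xz and .gz extensions are recognized by the code above:

    try:
        size = uncompressed_size("data.csv.gz")  # bytes, parsed from `gzip --list` output
    except ValueError:
        size = None  # extension is neither .xz nor .gz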
247,004
turicas/rows
rows/utils.py
pgimport
def pgimport(
    filename,
    database_uri,
    table_name,
    encoding="utf-8",
    dialect=None,
    create_table=True,
    schema=None,
    callback=None,
    timeout=0.1,
    chunk_size=8388608,
    max_samples=10000,
):
    """Import data from CSV into PostgreSQL using the fastest method

    Required: psql command
    """
    fobj = open_compressed(filename, mode="r", encoding=encoding)
    sample = fobj.read(chunk_size)

    if dialect is None:
        # Detect dialect
        dialect = rows.plugins.csv.discover_dialect(
            sample.encode(encoding), encoding=encoding
        )
    elif isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    if schema is None:
        # Detect field names
        reader = csv.reader(io.StringIO(sample), dialect=dialect)
        field_names = [slug(field_name) for field_name in next(reader)]
    else:
        field_names = list(schema.keys())

    if create_table:
        if schema is None:
            data = [
                dict(zip(field_names, row))
                for row in itertools.islice(reader, max_samples)
            ]
            table = rows.import_from_dicts(data)
            field_types = [table.fields[field_name] for field_name in field_names]
        else:
            field_types = list(schema.values())
        columns = [
            "{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE))
            for name, type_ in zip(field_names, field_types)
        ]
        create_table = SQL_CREATE_TABLE.format(
            table_name=table_name, field_types=", ".join(columns)
        )
        execute_command(get_psql_command(create_table, database_uri=database_uri))

    # Prepare the `psql` command to be executed based on collected metadata
    command = get_psql_copy_command(
        database_uri=database_uri,
        dialect=dialect,
        direction="FROM",
        encoding=encoding,
        header=field_names,
        table_name=table_name,
    )
    rows_imported, error = 0, None
    fobj = open_compressed(filename, mode="rb")
    try:
        process = subprocess.Popen(
            shlex.split(command),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data = fobj.read(chunk_size)
        total_written = 0
        while data != b"":
            written = process.stdin.write(data)
            total_written += written
            if callback:
                callback(written, total_written)
            data = fobj.read(chunk_size)
        stdout, stderr = process.communicate()
        if stderr != b"":
            raise RuntimeError(stderr.decode("utf-8"))
        rows_imported = int(stdout.replace(b"COPY ", b"").strip())

    except FileNotFoundError:
        raise RuntimeError("Command `psql` not found")

    except BrokenPipeError:
        raise RuntimeError(process.stderr.read().decode("utf-8"))

    return {"bytes_written": total_written, "rows_imported": rows_imported}
python
Import data from CSV into PostgreSQL using the fastest method

Required: psql command
[ "Import", "data", "from", "CSV", "into", "PostgreSQL", "using", "the", "fastest", "method" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L831-L924
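A hedged sketch of pgimport; the database URI is a placeholder, and the returned keys come straight from the function above:

    stats = pgimport(
        "customers.csv.gz",
        "postgres://user:password@localhost/mydb",  # placeholder URI
        "customers",
        callback=lambda written, total: print("{} bytes piped".format(total)),
    )
    print(stats["rows_imported"], stats["bytes_written"])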
247,005
turicas/rows
rows/utils.py
pgexport
def pgexport(
    database_uri,
    table_name,
    filename,
    encoding="utf-8",
    dialect=csv.excel,
    callback=None,
    timeout=0.1,
    chunk_size=8388608,
):
    """Export data from PostgreSQL into a CSV file using the fastest method

    Required: psql command
    """
    if isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)

    # Prepare the `psql` command to be executed to export data
    command = get_psql_copy_command(
        database_uri=database_uri,
        direction="TO",
        encoding=encoding,
        header=None,  # Needed when direction = 'TO'
        table_name=table_name,
        dialect=dialect,
    )
    fobj = open_compressed(filename, mode="wb")
    try:
        process = subprocess.Popen(
            shlex.split(command),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        total_written = 0
        data = process.stdout.read(chunk_size)
        while data != b"":
            written = fobj.write(data)
            total_written += written
            if callback:
                callback(written, total_written)
            data = process.stdout.read(chunk_size)
        stdout, stderr = process.communicate()
        if stderr != b"":
            raise RuntimeError(stderr.decode("utf-8"))

    except FileNotFoundError:
        raise RuntimeError("Command `psql` not found")

    except BrokenPipeError:
        raise RuntimeError(process.stderr.read().decode("utf-8"))

    return {"bytes_written": total_written}
python
Export data from PostgreSQL into a CSV file using the fastest method

Required: psql command
[ "Export", "data", "from", "PostgreSQL", "into", "a", "CSV", "file", "using", "the", "fastest", "method" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L927-L980
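The mirror-image call for pgexport, again with a placeholder URI; per the return statement above, the result dict has only bytes_written:

    stats = pgexport(
        "postgres://user:password@localhost/mydb",  # placeholder URI
        "customers",
        "customers.csv.gz",
    )
    print(stats["bytes_written"])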
247,006
turicas/rows
rows/utils.py
load_schema
def load_schema(filename, context=None):
    """Load schema from file in any of the supported formats

    The table must have at least the fields `field_name` and `field_type`.
    `context` is a `dict` with field_type as key pointing to field class, like:
        {"text": rows.fields.TextField, "value": MyCustomField}
    """
    table = import_from_uri(filename)
    field_names = table.field_names
    assert "field_name" in field_names
    assert "field_type" in field_names

    context = context or {
        key.replace("Field", "").lower(): getattr(rows.fields, key)
        for key in dir(rows.fields)
        if "Field" in key and key != "Field"
    }
    return OrderedDict(
        [(row.field_name, context[row.field_type]) for row in table]
    )
python
Load schema from file in any of the supported formats

The table must have at least the fields `field_name` and `field_type`.
`context` is a `dict` with field_type as key pointing to field class, like:
{"text": rows.fields.TextField, "value": MyCustomField}
[ "Load", "schema", "from", "file", "in", "any", "of", "the", "supported", "formats" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L1082-L1104
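A sketch of a schema file and the resulting mapping; the file name is hypothetical, and the type names follow the default context built above (class name minus "Field", lowercased):

    # schema.csv -- needs at least these two columns:
    #   field_name,field_type
    #   id,integer
    #   name,text
    schema = load_schema("schema.csv")
    # -> OrderedDict([('id', rows.fields.IntegerField),
    #                 ('name', rows.fields.TextField)])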
247,007
turicas/rows
rows/fields.py
slug
def slug(text, separator="_", permitted_chars=SLUG_CHARS):
    """Generate a slug for the `text`.

    >>> slug(' ÁLVARO justen% ')
    'alvaro_justen'
    >>> slug(' ÁLVARO justen% ', separator='-')
    'alvaro-justen'
    """
    text = six.text_type(text or "")

    # Strip non-ASCII characters
    # Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
    text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")

    # Replace word boundaries with separator
    text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text)

    # Remove non-permitted characters and put everything to lowercase
    # Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
    allowed_chars = list(permitted_chars) + [separator]
    text = "".join(char for char in text if char in allowed_chars).lower()

    # Remove double occurrences of separator
    # Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
    text = (
        REGEXP_SEPARATOR
        if separator == "_"
        else re.compile("(" + re.escape(separator) + "+)")
    ).sub(separator, text)

    # Strip separators
    # Example: u'_alvaro_justen_' -> u'alvaro_justen'
    return text.strip(separator)
python
Generate a slug for the `text`.

>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
[ "Generate", "a", "slug", "for", "the", "text", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L520-L553
247,008
turicas/rows
rows/fields.py
make_unique_name
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
    """Return a unique name based on `name_format` and `name`."""
    index = start
    new_name = name
    while new_name in existing_names:
        new_name = name_format.format(name=name, index=index)
        index += 1
    return new_name
python
Return a unique name based on `name_format` and `name`.
[ "Return", "a", "unique", "name", "based", "on", "name_format", "and", "name", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L556-L564
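For instance, with the default start=2 the first free suffix is taken:

    make_unique_name("name", {"name", "name_2"})  # -> 'name_3'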
247,009
turicas/rows
rows/fields.py
make_header
def make_header(field_names, permit_not=False):
    """Return unique and slugged field names."""
    slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"

    header = [
        slug(field_name, permitted_chars=slug_chars) for field_name in field_names
    ]
    result = []
    for index, field_name in enumerate(header):
        if not field_name:
            field_name = "field_{}".format(index)
        elif field_name[0].isdigit():
            field_name = "field_{}".format(field_name)

        if field_name in result:
            field_name = make_unique_name(
                name=field_name, existing_names=result, start=2
            )
        result.append(field_name)

    return result
python
Return unique and slugged field names.
[ "Return", "unique", "and", "slugged", "field", "names", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L567-L587
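Tracing make_header on a small input exercises all three rules (duplicates, leading digits, empty names):

    make_header(["Name", "name", "2019", ""])
    # -> ['name', 'name_2', 'field_2019', 'field_3']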
247,010
turicas/rows
rows/fields.py
Field.deserialize
def deserialize(cls, value, *args, **kwargs):
    """Deserialize a value just after importing it

    `cls.deserialize` should always return a value of type `cls.TYPE` or
    `None`.
    """
    if isinstance(value, cls.TYPE):
        return value
    elif is_null(value):
        return None
    else:
        return value
python
Deserialize a value just after importing it

`cls.deserialize` should always return a value of type `cls.TYPE` or `None`.
[ "Deserialize", "a", "value", "just", "after", "importing", "it" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L91-L103
247,011
turicas/rows
rows/plugins/plugin_pdf.py
ExtractionAlgorithm.selected_objects
def selected_objects(self):
    """Filter out objects outside table boundaries"""
    return [
        obj
        for obj in self.text_objects
        if contains_or_overlap(self.table_bbox, obj.bbox)
    ]
python
Filter out objects outside table boundaries
[ "Filter", "out", "objects", "outside", "table", "boundaries" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_pdf.py#L446-L453
247,012
turicas/rows
examples/library/extract_links.py
transform
def transform(row, table):
    'Extract links from "project" field and remove HTML from all'
    data = row._asdict()
    data["links"] = " ".join(extract_links(row.project))
    for key, value in data.items():
        if isinstance(value, six.text_type):
            data[key] = extract_text(value)
    return data
python
Extract links from "project" field and remove HTML from all
[ "Extract", "links", "from", "project", "field", "and", "remove", "HTML", "from", "all" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/extract_links.py#L24-L32
247,013
turicas/rows
examples/library/brazilian_cities_wikipedia.py
transform
def transform(row, table):
    'Transform row "link" into full URL and add "state" based on "name"'
    data = row._asdict()
    data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
    data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
    return data
python
Transform row "link" into full URL and add "state" based on "name"
[ "Transform", "row", "link", "into", "full", "URL", "and", "add", "state", "based", "on", "name" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/brazilian_cities_wikipedia.py#L34-L40
247,014
turicas/rows
rows/plugins/plugin_parquet.py
import_from_parquet
def import_from_parquet(filename_or_fobj, *args, **kwargs):
    """Import data from a Parquet file and return with rows.Table."""
    source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb")

    # TODO: should look into `schema.converted_type` also
    types = OrderedDict(
        [
            (schema.name, PARQUET_TO_ROWS[schema.type])
            for schema in parquet._read_footer(source.fobj).schema
            if schema.type is not None
        ]
    )
    header = list(types.keys())
    table_rows = list(parquet.reader(source.fobj))  # TODO: be lazy

    meta = {"imported_from": "parquet", "source": source}
    return create_table(
        [header] + table_rows, meta=meta, force_types=types, *args, **kwargs
    )
python
Import data from a Parquet file and return with rows.Table.
[ "Import", "data", "from", "a", "Parquet", "file", "and", "return", "with", "rows", ".", "Table", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_parquet.py#L47-L65
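A minimal call, assuming a readable Parquet file and the optional parquet dependency installed; the file name is hypothetical:

    table = import_from_parquet("data.parquet")
    print(table.field_names)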
247,015
turicas/rows
rows/plugins/dicts.py
import_from_dicts
def import_from_dicts(data, samples=None, *args, **kwargs):
    """Import data from an iterable of dicts

    The algorithm will use the `samples` first `dict`s to determine the
    field names (if `samples` is `None` all `dict`s will be used).
    """
    data = iter(data)

    cached_rows, headers = [], []
    for index, row in enumerate(data, start=1):
        cached_rows.append(row)
        for key in row.keys():
            if key not in headers:
                headers.append(key)
        if samples and index == samples:
            break

    data_rows = (
        [row.get(header, None) for header in headers]
        for row in chain(cached_rows, data)
    )
    kwargs["samples"] = samples
    meta = {"imported_from": "dicts"}
    return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs)
python
Import data from an iterable of dicts

The algorithm will use the `samples` first `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used).
[ "Import", "data", "from", "a", "iterable", "of", "dicts" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/dicts.py#L25-L52
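A small sketch of the header-union behavior; keys missing from a dict come back as None, per row.get(header, None) above:

    table = import_from_dicts(
        [{"name": "Álvaro", "city": "Rio"}, {"name": "Someone else"}],
        samples=None,  # scan every dict when collecting field names
    )
    # The second row gets city=None.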
247,016
turicas/rows
rows/plugins/dicts.py
export_to_dicts
def export_to_dicts(table, *args, **kwargs):
    """Export a `rows.Table` to a list of dicts"""
    field_names = table.field_names
    return [{key: getattr(row, key) for key in field_names} for row in table]
python
Export a `rows.Table` to a list of dicts
[ "Export", "a", "rows", ".", "Table", "to", "a", "list", "of", "dicts" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/dicts.py#L55-L58
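Continuing the import_from_dicts sketch above, the round trip back to dicts:

    export_to_dicts(table)
    # -> [{'name': 'Álvaro', 'city': 'Rio'},
    #     {'name': 'Someone else', 'city': None}]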
247,017
turicas/rows
rows/plugins/xls.py
cell_value
def cell_value(sheet, row, col):
    """Return the cell value of the table passed by argument, based on row and column."""
    cell = sheet.cell(row, col)
    field_type = CELL_TYPES[cell.ctype]

    # TODO: this approach will not work if using locale
    value = cell.value

    if field_type is None:
        return None

    elif field_type is fields.TextField:
        if cell.ctype != xlrd.XL_CELL_BLANK:
            return value
        else:
            return ""

    elif field_type is fields.DatetimeField:
        if value == 0.0:
            return None
        try:
            time_tuple = xlrd.xldate_as_tuple(value, sheet.book.datemode)
        except xlrd.xldate.XLDateTooLarge:
            return None
        value = field_type.serialize(datetime.datetime(*time_tuple))
        return value.split("T00:00:00")[0]

    elif field_type is fields.BoolField:
        if value == 0:
            return False
        elif value == 1:
            return True

    elif cell.xf_index is None:
        return value  # TODO: test

    else:
        book = sheet.book
        xf = book.xf_list[cell.xf_index]
        fmt = book.format_map[xf.format_key]

        if fmt.format_str.endswith("%"):
            # TODO: we may optimize this approach: we're converting to string
            # and the library is detecting the type when we could just say to
            # the library this value is PercentField
            if value is not None:
                try:
                    decimal_places = len(fmt.format_str[:-1].split(".")[-1])
                except IndexError:
                    decimal_places = 2
                return "{}%".format(str(round(value * 100, decimal_places)))
            else:
                return None

        elif type(value) == float and int(value) == value:
            return int(value)

        else:
            return value
python
Return the cell value of the table passed by argument, based on row and column.
[ "Return", "the", "cell", "value", "of", "the", "table", "passed", "by", "argument", "based", "in", "row", "and", "column", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L83-L143
247,018
turicas/rows
rows/plugins/xls.py
import_from_xls
def import_from_xls(
    filename_or_fobj,
    sheet_name=None,
    sheet_index=0,
    start_row=None,
    start_column=None,
    end_row=None,
    end_column=None,
    *args,
    **kwargs
):
    """Return a rows.Table created from imported XLS file."""
    source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="xls")
    source.fobj.close()

    book = xlrd.open_workbook(
        source.uri, formatting_info=True, logfile=open(os.devnull, mode="w")
    )
    if sheet_name is not None:
        sheet = book.sheet_by_name(sheet_name)
    else:
        sheet = book.sheet_by_index(sheet_index)
    # TODO: may re-use Excel data types

    # Get header and rows
    # The xlrd library reads rows and columns starting from 0 and ending on
    # sheet.nrows/ncols - 1. rows also uses 0-based indexes, so no
    # transformation is needed
    min_row, min_column = get_table_start(sheet)
    max_row, max_column = sheet.nrows - 1, sheet.ncols - 1
    # TODO: consider adding a parameter `ignore_padding=True` and when it's
    # True, consider `start_row` starting from `min_row` and `start_column`
    # starting from `min_col`.
    start_row = max(start_row if start_row is not None else min_row, min_row)
    end_row = min(end_row if end_row is not None else max_row, max_row)
    start_column = max(
        start_column if start_column is not None else min_column, min_column
    )
    end_column = min(end_column if end_column is not None else max_column, max_column)
    table_rows = [
        [
            cell_value(sheet, row_index, column_index)
            for column_index in range(start_column, end_column + 1)
        ]
        for row_index in range(start_row, end_row + 1)
    ]

    meta = {"imported_from": "xls", "source": source, "name": sheet.name}
    return create_table(table_rows, meta=meta, *args, **kwargs)
python
def import_from_xls( filename_or_fobj, sheet_name=None, sheet_index=0, start_row=None, start_column=None, end_row=None, end_column=None, *args, **kwargs ): source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="xls") source.fobj.close() book = xlrd.open_workbook( source.uri, formatting_info=True, logfile=open(os.devnull, mode="w") ) if sheet_name is not None: sheet = book.sheet_by_name(sheet_name) else: sheet = book.sheet_by_index(sheet_index) # TODO: may re-use Excel data types # Get header and rows # The xlrd library reads rows and columns starting from 0 and ending on # sheet.nrows/ncols - 1. rows also uses 0-based indexes, so no # transformation is needed min_row, min_column = get_table_start(sheet) max_row, max_column = sheet.nrows - 1, sheet.ncols - 1 # TODO: consider adding a parameter `ignore_padding=True` and when it's # True, consider `start_row` starting from `min_row` and `start_column` # starting from `min_col`. start_row = max(start_row if start_row is not None else min_row, min_row) end_row = min(end_row if end_row is not None else max_row, max_row) start_column = max( start_column if start_column is not None else min_column, min_column ) end_column = min(end_column if end_column is not None else max_column, max_column) table_rows = [ [ cell_value(sheet, row_index, column_index) for column_index in range(start_column, end_column + 1) ] for row_index in range(start_row, end_row + 1) ] meta = {"imported_from": "xls", "source": source, "name": sheet.name} return create_table(table_rows, meta=meta, *args, **kwargs)
[ "def", "import_from_xls", "(", "filename_or_fobj", ",", "sheet_name", "=", "None", ",", "sheet_index", "=", "0", ",", "start_row", "=", "None", ",", "start_column", "=", "None", ",", "end_row", "=", "None", ",", "end_column", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "\"rb\"", ",", "plugin_name", "=", "\"xls\"", ")", "source", ".", "fobj", ".", "close", "(", ")", "book", "=", "xlrd", ".", "open_workbook", "(", "source", ".", "uri", ",", "formatting_info", "=", "True", ",", "logfile", "=", "open", "(", "os", ".", "devnull", ",", "mode", "=", "\"w\"", ")", ")", "if", "sheet_name", "is", "not", "None", ":", "sheet", "=", "book", ".", "sheet_by_name", "(", "sheet_name", ")", "else", ":", "sheet", "=", "book", ".", "sheet_by_index", "(", "sheet_index", ")", "# TODO: may re-use Excel data types", "# Get header and rows", "# xlrd library reads rows and columns starting from 0 and ending on", "# sheet.nrows/ncols - 1. rows accepts the same pattern", "# The xlrd library reads rows and columns starting from 0 and ending on", "# sheet.nrows/ncols - 1. rows also uses 0-based indexes, so no", "# transformation is needed", "min_row", ",", "min_column", "=", "get_table_start", "(", "sheet", ")", "max_row", ",", "max_column", "=", "sheet", ".", "nrows", "-", "1", ",", "sheet", ".", "ncols", "-", "1", "# TODO: consider adding a parameter `ignore_padding=True` and when it's", "# True, consider `start_row` starting from `min_row` and `start_column`", "# starting from `min_col`.", "start_row", "=", "max", "(", "start_row", "if", "start_row", "is", "not", "None", "else", "min_row", ",", "min_row", ")", "end_row", "=", "min", "(", "end_row", "if", "end_row", "is", "not", "None", "else", "max_row", ",", "max_row", ")", "start_column", "=", "max", "(", "start_column", "if", "start_column", "is", "not", "None", "else", "min_column", ",", "min_column", ")", "end_column", "=", "min", "(", "end_column", "if", "end_column", "is", "not", "None", "else", "max_column", ",", "max_column", ")", "table_rows", "=", "[", "[", "cell_value", "(", "sheet", ",", "row_index", ",", "column_index", ")", "for", "column_index", "in", "range", "(", "start_column", ",", "end_column", "+", "1", ")", "]", "for", "row_index", "in", "range", "(", "start_row", ",", "end_row", "+", "1", ")", "]", "meta", "=", "{", "\"imported_from\"", ":", "\"xls\"", ",", "\"source\"", ":", "source", ",", "\"name\"", ":", "sheet", ".", "name", "}", "return", "create_table", "(", "table_rows", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a rows.Table created from imported XLS file.
[ "Return", "a", "rows", ".", "Table", "created", "from", "imported", "XLS", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L160-L212
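A minimal usage sketch for the function above; the file name and bounds are assumptions:

import rows

# "legacy.xls" is hypothetical; indexes are 0-based, and bounds outside the
# detected table area are clamped by the function, per the code above.
table = rows.import_from_xls("legacy.xls", sheet_index=0, end_row=100)
for row in table:
    print(row)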
247,019
turicas/rows
rows/plugins/xls.py
export_to_xls
def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs): """Export the rows.Table to XLS file and return the saved file.""" workbook = xlwt.Workbook() sheet = workbook.add_sheet(sheet_name) prepared_table = prepare_to_export(table, *args, **kwargs) field_names = next(prepared_table) for column_index, field_name in enumerate(field_names): sheet.write(0, column_index, field_name) _convert_row = _python_to_xls([table.fields.get(field) for field in field_names]) for row_index, row in enumerate(prepared_table, start=1): for column_index, (value, data) in enumerate(_convert_row(row)): sheet.write(row_index, column_index, value, **data) return_result = False if filename_or_fobj is None: filename_or_fobj = BytesIO() return_result = True source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls") workbook.save(source.fobj) source.fobj.flush() if return_result: source.fobj.seek(0) result = source.fobj.read() else: result = source.fobj if source.should_close: source.fobj.close() return result
python
def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs): workbook = xlwt.Workbook() sheet = workbook.add_sheet(sheet_name) prepared_table = prepare_to_export(table, *args, **kwargs) field_names = next(prepared_table) for column_index, field_name in enumerate(field_names): sheet.write(0, column_index, field_name) _convert_row = _python_to_xls([table.fields.get(field) for field in field_names]) for row_index, row in enumerate(prepared_table, start=1): for column_index, (value, data) in enumerate(_convert_row(row)): sheet.write(row_index, column_index, value, **data) return_result = False if filename_or_fobj is None: filename_or_fobj = BytesIO() return_result = True source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls") workbook.save(source.fobj) source.fobj.flush() if return_result: source.fobj.seek(0) result = source.fobj.read() else: result = source.fobj if source.should_close: source.fobj.close() return result
[ "def", "export_to_xls", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "sheet_name", "=", "\"Sheet1\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "workbook", "=", "xlwt", ".", "Workbook", "(", ")", "sheet", "=", "workbook", ".", "add_sheet", "(", "sheet_name", ")", "prepared_table", "=", "prepare_to_export", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "field_names", "=", "next", "(", "prepared_table", ")", "for", "column_index", ",", "field_name", "in", "enumerate", "(", "field_names", ")", ":", "sheet", ".", "write", "(", "0", ",", "column_index", ",", "field_name", ")", "_convert_row", "=", "_python_to_xls", "(", "[", "table", ".", "fields", ".", "get", "(", "field", ")", "for", "field", "in", "field_names", "]", ")", "for", "row_index", ",", "row", "in", "enumerate", "(", "prepared_table", ",", "start", "=", "1", ")", ":", "for", "column_index", ",", "(", "value", ",", "data", ")", "in", "enumerate", "(", "_convert_row", "(", "row", ")", ")", ":", "sheet", ".", "write", "(", "row_index", ",", "column_index", ",", "value", ",", "*", "*", "data", ")", "return_result", "=", "False", "if", "filename_or_fobj", "is", "None", ":", "filename_or_fobj", "=", "BytesIO", "(", ")", "return_result", "=", "True", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "\"wb\"", ",", "plugin_name", "=", "\"xls\"", ")", "workbook", ".", "save", "(", "source", ".", "fobj", ")", "source", ".", "fobj", ".", "flush", "(", ")", "if", "return_result", ":", "source", ".", "fobj", ".", "seek", "(", "0", ")", "result", "=", "source", ".", "fobj", ".", "read", "(", ")", "else", ":", "result", "=", "source", ".", "fobj", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "return", "result" ]
Export the rows.Table to XLS file and return the saved file.
[ "Export", "the", "rows", ".", "Table", "to", "XLS", "file", "and", "return", "the", "saved", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L215-L250
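A usage sketch for export_to_xls; the input table is built in memory and the output file name is an assumption:

import rows

table = rows.import_from_dicts([{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}])
# Writes "people.xls"; calling export_to_xls(table) with no file argument
# instead returns the XLS contents as bytes, per the BytesIO branch above.
rows.export_to_xls(table, "people.xls")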
247,020
turicas/rows
rows/plugins/postgresql.py
_valid_table_name
def _valid_table_name(name): """Verify if a given table name is valid for `rows` Rules: - Should start with a letter or '_' - Letters can be capitalized or not - Accepts letters, numbers and _ """ if name[0] not in "_" + string.ascii_letters or not set(name).issubset( "_" + string.ascii_letters + string.digits ): return False else: return True
python
def _valid_table_name(name): if name[0] not in "_" + string.ascii_letters or not set(name).issubset( "_" + string.ascii_letters + string.digits ): return False else: return True
[ "def", "_valid_table_name", "(", "name", ")", ":", "if", "name", "[", "0", "]", "not", "in", "\"_\"", "+", "string", ".", "ascii_letters", "or", "not", "set", "(", "name", ")", ".", "issubset", "(", "\"_\"", "+", "string", ".", "ascii_letters", "+", "string", ".", "digits", ")", ":", "return", "False", "else", ":", "return", "True" ]
Verify if a given table name is valid for `rows` Rules: - Should start with a letter or '_' - Letters can be capitalized or not - Accepts letters, numbers and _
[ "Verify", "if", "a", "given", "table", "name", "is", "valid", "for", "rows" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/postgresql.py#L104-L119
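A quick sketch of the rule; the function is private, so importing it here is only for illustration and assumes the postgresql plugin's dependencies are installed:

from rows.plugins.postgresql import _valid_table_name

assert _valid_table_name("_staging_2019")   # "_" start, then letters/digits/_
assert not _valid_table_name("2019_table")  # cannot start with a digit
assert not _valid_table_name("my-table")    # "-" is not an accepted character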
247,021
turicas/rows
rows/plugins/txt.py
_parse_col_positions
def _parse_col_positions(frame_style, header_line): """Find the position for each column separator in the given line. If frame_style is 'None', this won't work for column names that _start_ with whitespace (which includes non-left-aligned column titles) """ separator = re.escape(FRAMES[frame_style.lower()]["VERTICAL"]) if frame_style == "None": separator = r"[\s]{2}[^\s]" # Matches two whitespaces followed by a non-whitespace. # Our column headers are separated by 3 spaces by default. col_positions = [] # Abuse the regexp engine to annotate vertical-separator positions: re.sub(separator, lambda group: col_positions.append(group.start()), header_line) if frame_style == "None": col_positions.append(len(header_line) - 1) return col_positions
python
def _parse_col_positions(frame_style, header_line): separator = re.escape(FRAMES[frame_style.lower()]["VERTICAL"]) if frame_style == "None": separator = r"[\s]{2}[^\s]" # Matches two whitespaces followed by a non-whitespace. # Our column headers are separated by 3 spaces by default. col_positions = [] # Abuse the regexp engine to annotate vertical-separator positions: re.sub(separator, lambda group: col_positions.append(group.start()), header_line) if frame_style == "None": col_positions.append(len(header_line) - 1) return col_positions
[ "def", "_parse_col_positions", "(", "frame_style", ",", "header_line", ")", ":", "separator", "=", "re", ".", "escape", "(", "FRAMES", "[", "frame_style", ".", "lower", "(", ")", "]", "[", "\"VERTICAL\"", "]", ")", "if", "frame_style", "==", "\"None\"", ":", "separator", "=", "r\"[\\s]{2}[^\\s]\"", "# Matches two whitespaces followed by a non-whitespace.", "# Our column headers are serated by 3 spaces by default.", "col_positions", "=", "[", "]", "# Abuse regexp engine to anotate vertical-separator positions:", "re", ".", "sub", "(", "separator", ",", "lambda", "group", ":", "col_positions", ".", "append", "(", "group", ".", "start", "(", ")", ")", ",", "header_line", ")", "if", "frame_style", "==", "\"None\"", ":", "col_positions", ".", "append", "(", "len", "(", "header_line", ")", "-", "1", ")", "return", "col_positions" ]
Find the position for each column separator in the given line. If frame_style is 'None', this won't work for column names that _start_ with whitespace (which includes non-left-aligned column titles)
[ "Find", "the", "position", "for", "each", "column", "separator", "in", "the", "given", "line" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L99-L119
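A sketch of what the parser returns for an ASCII-framed header line, assuming the plugin's ASCII frame uses "|" as its vertical separator (the printed values are illustrative):

from rows.plugins.txt import _parse_col_positions  # private helper shown above

# The "|" separators sit at offsets 0, 5 and 12 in this header line.
print(_parse_col_positions("ASCII", "| id | name |"))  # [0, 5, 12]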
247,022
turicas/rows
rows/plugins/txt.py
import_from_txt
def import_from_txt( filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs ): """Return a rows.Table created from imported TXT file.""" # TODO: (maybe) # enable parsing of non-fixed-width-columns # with old algorithm - that would just split columns # at the vertical separator character for the frame. # (if doing so, include an optional parameter) # Also, this fixes an outstanding unreported issue: # trying to parse tables whose field values # included a pipe char - "|" - would silently # yield bad results. source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding) raw_contents = source.fobj.read().decode(encoding).rstrip("\n") if frame_style is FRAME_SENTINEL: frame_style = _guess_frame_style(raw_contents) else: frame_style = _parse_frame_style(frame_style) contents = raw_contents.splitlines() del raw_contents if frame_style != "None": contents = contents[1:-1] del contents[1] else: # the table was possibly generated from another source. # check if the line we reserve as a separator is really empty. if not contents[1].strip(): del contents[1] col_positions = _parse_col_positions(frame_style, contents[0]) table_rows = [ [ row[start + 1 : end].strip() for start, end in zip(col_positions, col_positions[1:]) ] for row in contents ] meta = { "imported_from": "txt", "source": source, "frame_style": frame_style, } return create_table(table_rows, meta=meta, *args, **kwargs)
python
def import_from_txt( filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs ): # TODO: (maybe) # enable parsing of non-fixed-width-columns # with old algorithm - that would just split columns # at the vertical separator character for the frame. # (if doing so, include an optional parameter) # Also, this fixes an outstanding unreported issue: # trying to parse tables whose field values # included a pipe char - "|" - would silently # yield bad results. source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding) raw_contents = source.fobj.read().decode(encoding).rstrip("\n") if frame_style is FRAME_SENTINEL: frame_style = _guess_frame_style(raw_contents) else: frame_style = _parse_frame_style(frame_style) contents = raw_contents.splitlines() del raw_contents if frame_style != "None": contents = contents[1:-1] del contents[1] else: # the table was possibly generated from another source. # check if the line we reserve as a separator is really empty. if not contents[1].strip(): del contents[1] col_positions = _parse_col_positions(frame_style, contents[0]) table_rows = [ [ row[start + 1 : end].strip() for start, end in zip(col_positions, col_positions[1:]) ] for row in contents ] meta = { "imported_from": "txt", "source": source, "frame_style": frame_style, } return create_table(table_rows, meta=meta, *args, **kwargs)
[ "def", "import_from_txt", "(", "filename_or_fobj", ",", "encoding", "=", "\"utf-8\"", ",", "frame_style", "=", "FRAME_SENTINEL", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: (maybe)", "# enable parsing of non-fixed-width-columns", "# with old algorithm - that would just split columns", "# at the vertical separator character for the frame.", "# (if doing so, include an optional parameter)", "# Also, this fixes an outstanding unreported issue:", "# trying to parse tables which fields values", "# included a Pipe char - \"|\" - would silently", "# yield bad results.", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "\"rb\"", ",", "plugin_name", "=", "\"txt\"", ",", "encoding", "=", "encoding", ")", "raw_contents", "=", "source", ".", "fobj", ".", "read", "(", ")", ".", "decode", "(", "encoding", ")", ".", "rstrip", "(", "\"\\n\"", ")", "if", "frame_style", "is", "FRAME_SENTINEL", ":", "frame_style", "=", "_guess_frame_style", "(", "raw_contents", ")", "else", ":", "frame_style", "=", "_parse_frame_style", "(", "frame_style", ")", "contents", "=", "raw_contents", ".", "splitlines", "(", ")", "del", "raw_contents", "if", "frame_style", "!=", "\"None\"", ":", "contents", "=", "contents", "[", "1", ":", "-", "1", "]", "del", "contents", "[", "1", "]", "else", ":", "# the table is possibly generated from other source.", "# check if the line we reserve as a separator is realy empty.", "if", "not", "contents", "[", "1", "]", ".", "strip", "(", ")", ":", "del", "contents", "[", "1", "]", "col_positions", "=", "_parse_col_positions", "(", "frame_style", ",", "contents", "[", "0", "]", ")", "table_rows", "=", "[", "[", "row", "[", "start", "+", "1", ":", "end", "]", ".", "strip", "(", ")", "for", "start", ",", "end", "in", "zip", "(", "col_positions", ",", "col_positions", "[", "1", ":", "]", ")", "]", "for", "row", "in", "contents", "]", "meta", "=", "{", "\"imported_from\"", ":", "\"txt\"", ",", "\"source\"", ":", "source", ",", "\"frame_style\"", ":", "frame_style", ",", "}", "return", "create_table", "(", "table_rows", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a rows.Table created from imported TXT file.
[ "Return", "a", "rows", ".", "Table", "created", "from", "imported", "TXT", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L130-L179
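A usage sketch; "table.txt" is a hypothetical file, such as one produced by export_to_txt below:

import rows

# The frame style ("ASCII", "single", "double" or "None") is guessed from the
# file contents when not passed explicitly.
table = rows.import_from_txt("table.txt")
print(table.fields)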
247,023
turicas/rows
rows/plugins/txt.py
export_to_txt
def export_to_txt( table, filename_or_fobj=None, encoding=None, frame_style="ASCII", safe_none_frame=True, *args, **kwargs ): """Export a `rows.Table` to text. This function can return the result as a string or save it into a file (via filename or file-like object). `encoding` could be `None` if no filename/file-like object is specified, then the return type will be `six.text_type`. `frame_style`: will select the frame style to be printed around data. Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is the default. Warning: no checks are made that the desired encoding supports the characters needed by the single and double frame styles. `safe_none_frame`: bool, defaults to True. Affects only output with frame_style == "None": column titles are left-aligned and have whitespace replaced with "_". This enables the output to be parseable. Otherwise, the generated table will look prettier but cannot be imported back. """ # TODO: will work only if table.fields is OrderedDict frame_style = _parse_frame_style(frame_style) frame = FRAMES[frame_style.lower()] serialized_table = serialize(table, *args, **kwargs) field_names = next(serialized_table) table_rows = list(serialized_table) max_sizes = _max_column_sizes(field_names, table_rows) dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names] if frame_style != "None" or not safe_none_frame: header = [field.center(max_sizes[field]) for field in field_names] else: header = [ field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names ] header = "{0} {1} {0}".format( frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header) ) top_split_line = ( frame["DOWN AND RIGHT"] + frame["DOWN AND HORIZONTAL"].join(dashes) + frame["DOWN AND LEFT"] ) body_split_line = ( frame["VERTICAL AND RIGHT"] + frame["VERTICAL AND HORIZONTAL"].join(dashes) + frame["VERTICAL AND LEFT"] ) bottom_split_line = ( frame["UP AND RIGHT"] + frame["UP AND HORIZONTAL"].join(dashes) + frame["UP AND LEFT"] ) result = [] if frame_style != "None": result += [top_split_line] result += [header, body_split_line] for row in table_rows: values = [ value.rjust(max_sizes[field_name]) for field_name, value in zip(field_names, row) ] row_data = " {} ".format(frame["VERTICAL"]).join(values) result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data)) if frame_style != "None": result.append(bottom_split_line) result.append("") data = "\n".join(result) if encoding is not None: data = data.encode(encoding) return export_data(filename_or_fobj, data, mode="wb")
python
def export_to_txt( table, filename_or_fobj=None, encoding=None, frame_style="ASCII", safe_none_frame=True, *args, **kwargs ): # TODO: will work only if table.fields is OrderedDict frame_style = _parse_frame_style(frame_style) frame = FRAMES[frame_style.lower()] serialized_table = serialize(table, *args, **kwargs) field_names = next(serialized_table) table_rows = list(serialized_table) max_sizes = _max_column_sizes(field_names, table_rows) dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names] if frame_style != "None" or not safe_none_frame: header = [field.center(max_sizes[field]) for field in field_names] else: header = [ field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names ] header = "{0} {1} {0}".format( frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header) ) top_split_line = ( frame["DOWN AND RIGHT"] + frame["DOWN AND HORIZONTAL"].join(dashes) + frame["DOWN AND LEFT"] ) body_split_line = ( frame["VERTICAL AND RIGHT"] + frame["VERTICAL AND HORIZONTAL"].join(dashes) + frame["VERTICAL AND LEFT"] ) bottom_split_line = ( frame["UP AND RIGHT"] + frame["UP AND HORIZONTAL"].join(dashes) + frame["UP AND LEFT"] ) result = [] if frame_style != "None": result += [top_split_line] result += [header, body_split_line] for row in table_rows: values = [ value.rjust(max_sizes[field_name]) for field_name, value in zip(field_names, row) ] row_data = " {} ".format(frame["VERTICAL"]).join(values) result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data)) if frame_style != "None": result.append(bottom_split_line) result.append("") data = "\n".join(result) if encoding is not None: data = data.encode(encoding) return export_data(filename_or_fobj, data, mode="wb")
[ "def", "export_to_txt", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "encoding", "=", "None", ",", "frame_style", "=", "\"ASCII\"", ",", "safe_none_frame", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: will work only if table.fields is OrderedDict", "frame_style", "=", "_parse_frame_style", "(", "frame_style", ")", "frame", "=", "FRAMES", "[", "frame_style", ".", "lower", "(", ")", "]", "serialized_table", "=", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "field_names", "=", "next", "(", "serialized_table", ")", "table_rows", "=", "list", "(", "serialized_table", ")", "max_sizes", "=", "_max_column_sizes", "(", "field_names", ",", "table_rows", ")", "dashes", "=", "[", "frame", "[", "\"HORIZONTAL\"", "]", "*", "(", "max_sizes", "[", "field", "]", "+", "2", ")", "for", "field", "in", "field_names", "]", "if", "frame_style", "!=", "\"None\"", "or", "not", "safe_none_frame", ":", "header", "=", "[", "field", ".", "center", "(", "max_sizes", "[", "field", "]", ")", "for", "field", "in", "field_names", "]", "else", ":", "header", "=", "[", "field", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ".", "ljust", "(", "max_sizes", "[", "field", "]", ")", "for", "field", "in", "field_names", "]", "header", "=", "\"{0} {1} {0}\"", ".", "format", "(", "frame", "[", "\"VERTICAL\"", "]", ",", "\" {} \"", ".", "format", "(", "frame", "[", "\"VERTICAL\"", "]", ")", ".", "join", "(", "header", ")", ")", "top_split_line", "=", "(", "frame", "[", "\"DOWN AND RIGHT\"", "]", "+", "frame", "[", "\"DOWN AND HORIZONTAL\"", "]", ".", "join", "(", "dashes", ")", "+", "frame", "[", "\"DOWN AND LEFT\"", "]", ")", "body_split_line", "=", "(", "frame", "[", "\"VERTICAL AND RIGHT\"", "]", "+", "frame", "[", "\"VERTICAL AND HORIZONTAL\"", "]", ".", "join", "(", "dashes", ")", "+", "frame", "[", "\"VERTICAL AND LEFT\"", "]", ")", "botton_split_line", "=", "(", "frame", "[", "\"UP AND RIGHT\"", "]", "+", "frame", "[", "\"UP AND HORIZONTAL\"", "]", ".", "join", "(", "dashes", ")", "+", "frame", "[", "\"UP AND LEFT\"", "]", ")", "result", "=", "[", "]", "if", "frame_style", "!=", "\"None\"", ":", "result", "+=", "[", "top_split_line", "]", "result", "+=", "[", "header", ",", "body_split_line", "]", "for", "row", "in", "table_rows", ":", "values", "=", "[", "value", ".", "rjust", "(", "max_sizes", "[", "field_name", "]", ")", "for", "field_name", ",", "value", "in", "zip", "(", "field_names", ",", "row", ")", "]", "row_data", "=", "\" {} \"", ".", "format", "(", "frame", "[", "\"VERTICAL\"", "]", ")", ".", "join", "(", "values", ")", "result", ".", "append", "(", "\"{0} {1} {0}\"", ".", "format", "(", "frame", "[", "\"VERTICAL\"", "]", ",", "row_data", ")", ")", "if", "frame_style", "!=", "\"None\"", ":", "result", ".", "append", "(", "botton_split_line", ")", "result", ".", "append", "(", "\"\"", ")", "data", "=", "\"\\n\"", ".", "join", "(", "result", ")", "if", "encoding", "is", "not", "None", ":", "data", "=", "data", ".", "encode", "(", "encoding", ")", "return", "export_data", "(", "filename_or_fobj", ",", "data", ",", "mode", "=", "\"wb\"", ")" ]
Export a `rows.Table` to text. This function can return the result as a string or save it into a file (via filename or file-like object). `encoding` could be `None` if no filename/file-like object is specified, then the return type will be `six.text_type`. `frame_style`: will select the frame style to be printed around data. Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is the default. Warning: no checks are made that the desired encoding supports the characters needed by the single and double frame styles. `safe_none_frame`: bool, defaults to True. Affects only output with frame_style == "None": column titles are left-aligned and have whitespace replaced with "_". This enables the output to be parseable. Otherwise, the generated table will look prettier but cannot be imported back.
[ "Export", "a", "rows", ".", "Table", "to", "text", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L182-L270
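A usage sketch for export_to_txt; with no file argument and no encoding it returns the rendered text, per the docstring above:

import rows

table = rows.import_from_dicts([{"city": "São Paulo", "population": 12252023}])
# ASCII frame by default; frame_style="None" plus safe_none_frame=True keeps
# the output re-importable by import_from_txt.
print(rows.export_to_txt(table))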
247,024
turicas/rows
rows/plugins/sqlite.py
import_from_sqlite
def import_from_sqlite( filename_or_connection, table_name="table1", query=None, query_args=None, *args, **kwargs ): """Return a rows.Table with data from SQLite database.""" source = get_source(filename_or_connection) connection = source.fobj cursor = connection.cursor() if query is None: if not _valid_table_name(table_name): raise ValueError("Invalid table name: {}".format(table_name)) query = SQL_SELECT_ALL.format(table_name=table_name) if query_args is None: query_args = tuple() table_rows = list(cursor.execute(query, query_args)) # TODO: may be lazy header = [six.text_type(info[0]) for info in cursor.description] cursor.close() # TODO: should close connection also? meta = {"imported_from": "sqlite", "source": source} return create_table([header] + table_rows, meta=meta, *args, **kwargs)
python
def import_from_sqlite( filename_or_connection, table_name="table1", query=None, query_args=None, *args, **kwargs ): source = get_source(filename_or_connection) connection = source.fobj cursor = connection.cursor() if query is None: if not _valid_table_name(table_name): raise ValueError("Invalid table name: {}".format(table_name)) query = SQL_SELECT_ALL.format(table_name=table_name) if query_args is None: query_args = tuple() table_rows = list(cursor.execute(query, query_args)) # TODO: may be lazy header = [six.text_type(info[0]) for info in cursor.description] cursor.close() # TODO: should close connection also? meta = {"imported_from": "sqlite", "source": source} return create_table([header] + table_rows, meta=meta, *args, **kwargs)
[ "def", "import_from_sqlite", "(", "filename_or_connection", ",", "table_name", "=", "\"table1\"", ",", "query", "=", "None", ",", "query_args", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "get_source", "(", "filename_or_connection", ")", "connection", "=", "source", ".", "fobj", "cursor", "=", "connection", ".", "cursor", "(", ")", "if", "query", "is", "None", ":", "if", "not", "_valid_table_name", "(", "table_name", ")", ":", "raise", "ValueError", "(", "\"Invalid table name: {}\"", ".", "format", "(", "table_name", ")", ")", "query", "=", "SQL_SELECT_ALL", ".", "format", "(", "table_name", "=", "table_name", ")", "if", "query_args", "is", "None", ":", "query_args", "=", "tuple", "(", ")", "table_rows", "=", "list", "(", "cursor", ".", "execute", "(", "query", ",", "query_args", ")", ")", "# TODO: may be lazy", "header", "=", "[", "six", ".", "text_type", "(", "info", "[", "0", "]", ")", "for", "info", "in", "cursor", ".", "description", "]", "cursor", ".", "close", "(", ")", "# TODO: should close connection also?", "meta", "=", "{", "\"imported_from\"", ":", "\"sqlite\"", ",", "\"source\"", ":", "source", "}", "return", "create_table", "(", "[", "header", "]", "+", "table_rows", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a rows.Table with data from SQLite database.
[ "Return", "a", "rows", ".", "Table", "with", "data", "from", "SQLite", "database", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/sqlite.py#L140-L168
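A usage sketch; the database file, table and column names are assumptions:

import rows

# Read a whole table by name, or run a custom parameterized query instead.
table = rows.import_from_sqlite("data.sqlite", table_name="table1")
adults = rows.import_from_sqlite(
    "data.sqlite", query="SELECT * FROM table1 WHERE age >= ?", query_args=(18,)
)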
247,025
turicas/rows
rows/plugins/xlsx.py
_cell_to_python
def _cell_to_python(cell): """Convert an openpyxl `Cell` object to the corresponding Python object.""" data_type, value = cell.data_type, cell.value if type(cell) is EmptyCell: return None elif data_type == "f" and value == "=TRUE()": return True elif data_type == "f" and value == "=FALSE()": return False elif cell.number_format.lower() == "yyyy-mm-dd": return str(value).split(" 00:00:00")[0] elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss": return str(value).split(".")[0] elif cell.number_format.endswith("%") and isinstance(value, Number): value = Decimal(str(value)) return "{:%}".format(value) elif value is None: return "" else: return value
python
def _cell_to_python(cell): data_type, value = cell.data_type, cell.value if type(cell) is EmptyCell: return None elif data_type == "f" and value == "=TRUE()": return True elif data_type == "f" and value == "=FALSE()": return False elif cell.number_format.lower() == "yyyy-mm-dd": return str(value).split(" 00:00:00")[0] elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss": return str(value).split(".")[0] elif cell.number_format.endswith("%") and isinstance(value, Number): value = Decimal(str(value)) return "{:%}".format(value) elif value is None: return "" else: return value
[ "def", "_cell_to_python", "(", "cell", ")", ":", "data_type", ",", "value", "=", "cell", ".", "data_type", ",", "cell", ".", "value", "if", "type", "(", "cell", ")", "is", "EmptyCell", ":", "return", "None", "elif", "data_type", "==", "\"f\"", "and", "value", "==", "\"=TRUE()\"", ":", "return", "True", "elif", "data_type", "==", "\"f\"", "and", "value", "==", "\"=FALSE()\"", ":", "return", "False", "elif", "cell", ".", "number_format", ".", "lower", "(", ")", "==", "\"yyyy-mm-dd\"", ":", "return", "str", "(", "value", ")", ".", "split", "(", "\" 00:00:00\"", ")", "[", "0", "]", "elif", "cell", ".", "number_format", ".", "lower", "(", ")", "==", "\"yyyy-mm-dd hh:mm:ss\"", ":", "return", "str", "(", "value", ")", ".", "split", "(", "\".\"", ")", "[", "0", "]", "elif", "cell", ".", "number_format", ".", "endswith", "(", "\"%\"", ")", "and", "isinstance", "(", "value", ",", "Number", ")", ":", "value", "=", "Decimal", "(", "str", "(", "value", ")", ")", "return", "\"{:%}\"", ".", "format", "(", "value", ")", "elif", "value", "is", "None", ":", "return", "\"\"", "else", ":", "return", "value" ]
Convert an openpyxl `Cell` object to the corresponding Python object.
[ "Convert", "a", "PyOpenXL", "s", "Cell", "object", "to", "the", "corresponding", "Python", "object", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L32-L55
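A sketch of exercising the helper directly on a hypothetical workbook; it is private and normally called only by import_from_xlsx below:

from openpyxl import load_workbook
from rows.plugins.xlsx import _cell_to_python

sheet = load_workbook("spreadsheet.xlsx", read_only=True).active
# Normalizes each cell: formula booleans to True/False, dates to strings,
# percent-formatted numbers to "NN%" strings, empty cells to None.
first_row = [_cell_to_python(cell) for cell in next(sheet.iter_rows())]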
247,026
turicas/rows
rows/plugins/xlsx.py
import_from_xlsx
def import_from_xlsx( filename_or_fobj, sheet_name=None, sheet_index=0, start_row=None, start_column=None, end_row=None, end_column=None, workbook_kwargs=None, *args, **kwargs ): """Return a rows.Table created from imported XLSX file. workbook_kwargs will be passed to openpyxl.load_workbook """ workbook_kwargs = workbook_kwargs or {} if "read_only" not in workbook_kwargs: workbook_kwargs["read_only"] = True workbook = load_workbook(filename_or_fobj, **workbook_kwargs) if sheet_name is None: sheet_name = workbook.sheetnames[sheet_index] sheet = workbook[sheet_name] # The openpyxl library reads rows and columns starting from 1 and ending on # sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we # need to adjust the ranges accordingly. min_row, min_column = sheet.min_row - 1, sheet.min_column - 1 max_row, max_column = sheet.max_row - 1, sheet.max_column - 1 # TODO: consider adding a parameter `ignore_padding=True` and when it's # True, consider `start_row` starting from `sheet.min_row` and # `start_column` starting from `sheet.min_col`. start_row = start_row if start_row is not None else min_row end_row = end_row if end_row is not None else max_row start_column = start_column if start_column is not None else min_column end_column = end_column if end_column is not None else max_column table_rows = [] is_empty = lambda row: all(cell is None for cell in row) selected_rows = sheet.iter_rows( min_row=start_row + 1, max_row=end_row + 1, min_col=start_column + 1, max_col=end_column + 1, ) for row in selected_rows: row = [_cell_to_python(cell) for cell in row] if not is_empty(row): table_rows.append(row) source = Source.from_file(filename_or_fobj, plugin_name="xlsx") source.fobj.close() # TODO: pass a parameter to Source.from_file so it won't open the file metadata = {"imported_from": "xlsx", "source": source, "name": sheet_name} return create_table(table_rows, meta=metadata, *args, **kwargs)
python
def import_from_xlsx( filename_or_fobj, sheet_name=None, sheet_index=0, start_row=None, start_column=None, end_row=None, end_column=None, workbook_kwargs=None, *args, **kwargs ): workbook_kwargs = workbook_kwargs or {} if "read_only" not in workbook_kwargs: workbook_kwargs["read_only"] = True workbook = load_workbook(filename_or_fobj, **workbook_kwargs) if sheet_name is None: sheet_name = workbook.sheetnames[sheet_index] sheet = workbook[sheet_name] # The openpyxl library reads rows and columns starting from 1 and ending on # sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we # need to adjust the ranges accordingly. min_row, min_column = sheet.min_row - 1, sheet.min_column - 1 max_row, max_column = sheet.max_row - 1, sheet.max_column - 1 # TODO: consider adding a parameter `ignore_padding=True` and when it's # True, consider `start_row` starting from `sheet.min_row` and # `start_column` starting from `sheet.min_col`. start_row = start_row if start_row is not None else min_row end_row = end_row if end_row is not None else max_row start_column = start_column if start_column is not None else min_column end_column = end_column if end_column is not None else max_column table_rows = [] is_empty = lambda row: all(cell is None for cell in row) selected_rows = sheet.iter_rows( min_row=start_row + 1, max_row=end_row + 1, min_col=start_column + 1, max_col=end_column + 1, ) for row in selected_rows: row = [_cell_to_python(cell) for cell in row] if not is_empty(row): table_rows.append(row) source = Source.from_file(filename_or_fobj, plugin_name="xlsx") source.fobj.close() # TODO: pass a parameter to Source.from_file so it won't open the file metadata = {"imported_from": "xlsx", "source": source, "name": sheet_name} return create_table(table_rows, meta=metadata, *args, **kwargs)
[ "def", "import_from_xlsx", "(", "filename_or_fobj", ",", "sheet_name", "=", "None", ",", "sheet_index", "=", "0", ",", "start_row", "=", "None", ",", "start_column", "=", "None", ",", "end_row", "=", "None", ",", "end_column", "=", "None", ",", "workbook_kwargs", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "workbook_kwargs", "=", "workbook_kwargs", "or", "{", "}", "if", "\"read_only\"", "not", "in", "workbook_kwargs", ":", "workbook_kwargs", "[", "\"read_only\"", "]", "=", "True", "workbook", "=", "load_workbook", "(", "filename_or_fobj", ",", "*", "*", "workbook_kwargs", ")", "if", "sheet_name", "is", "None", ":", "sheet_name", "=", "workbook", ".", "sheetnames", "[", "sheet_index", "]", "sheet", "=", "workbook", "[", "sheet_name", "]", "# The openpyxl library reads rows and columns starting from 1 and ending on", "# sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we", "# need to adjust the ranges accordingly.", "min_row", ",", "min_column", "=", "sheet", ".", "min_row", "-", "1", ",", "sheet", ".", "min_column", "-", "1", "max_row", ",", "max_column", "=", "sheet", ".", "max_row", "-", "1", ",", "sheet", ".", "max_column", "-", "1", "# TODO: consider adding a parameter `ignore_padding=True` and when it's", "# True, consider `start_row` starting from `sheet.min_row` and", "# `start_column` starting from `sheet.min_col`.", "start_row", "=", "start_row", "if", "start_row", "is", "not", "None", "else", "min_row", "end_row", "=", "end_row", "if", "end_row", "is", "not", "None", "else", "max_row", "start_column", "=", "start_column", "if", "start_column", "is", "not", "None", "else", "min_column", "end_column", "=", "end_column", "if", "end_column", "is", "not", "None", "else", "max_column", "table_rows", "=", "[", "]", "is_empty", "=", "lambda", "row", ":", "all", "(", "cell", "is", "None", "for", "cell", "in", "row", ")", "selected_rows", "=", "sheet", ".", "iter_rows", "(", "min_row", "=", "start_row", "+", "1", ",", "max_row", "=", "end_row", "+", "1", ",", "min_col", "=", "start_column", "+", "1", ",", "max_col", "=", "end_column", "+", "1", ",", ")", "for", "row", "in", "selected_rows", ":", "row", "=", "[", "_cell_to_python", "(", "cell", ")", "for", "cell", "in", "row", "]", "if", "not", "is_empty", "(", "row", ")", ":", "table_rows", ".", "append", "(", "row", ")", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"xlsx\"", ")", "source", ".", "fobj", ".", "close", "(", ")", "# TODO: pass a parameter to Source.from_file so it won't open the file", "metadata", "=", "{", "\"imported_from\"", ":", "\"xlsx\"", ",", "\"source\"", ":", "source", ",", "\"name\"", ":", "sheet_name", "}", "return", "create_table", "(", "table_rows", ",", "meta", "=", "metadata", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a rows.Table created from imported XLSX file. workbook_kwargs will be passed to openpyxl.load_workbook
[ "Return", "a", "rows", ".", "Table", "created", "from", "imported", "XLSX", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L58-L113
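A usage sketch; the file, sheet name and slice bounds are assumptions:

import rows

# 0-based bounds select a rectangular slice; fully empty rows are dropped.
table = rows.import_from_xlsx("budget.xlsx", sheet_name="2019", start_row=0, end_column=4)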
247,027
turicas/rows
rows/plugins/xlsx.py
export_to_xlsx
def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs): """Export the rows.Table to XLSX file and return the saved file.""" workbook = Workbook() sheet = workbook.active sheet.title = sheet_name prepared_table = prepare_to_export(table, *args, **kwargs) # Write header field_names = next(prepared_table) for col_index, field_name in enumerate(field_names): cell = sheet.cell(row=1, column=col_index + 1) cell.value = field_name # Write sheet rows _convert_row = _python_to_cell(list(map(table.fields.get, field_names))) for row_index, row in enumerate(prepared_table, start=1): for col_index, (value, number_format) in enumerate(_convert_row(row)): cell = sheet.cell(row=row_index + 1, column=col_index + 1) cell.value = value if number_format is not None: cell.number_format = number_format return_result = False if filename_or_fobj is None: filename_or_fobj = BytesIO() return_result = True source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx") workbook.save(source.fobj) source.fobj.flush() if return_result: source.fobj.seek(0) result = source.fobj.read() else: result = source.fobj if source.should_close: source.fobj.close() return result
python
def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs): workbook = Workbook() sheet = workbook.active sheet.title = sheet_name prepared_table = prepare_to_export(table, *args, **kwargs) # Write header field_names = next(prepared_table) for col_index, field_name in enumerate(field_names): cell = sheet.cell(row=1, column=col_index + 1) cell.value = field_name # Write sheet rows _convert_row = _python_to_cell(list(map(table.fields.get, field_names))) for row_index, row in enumerate(prepared_table, start=1): for col_index, (value, number_format) in enumerate(_convert_row(row)): cell = sheet.cell(row=row_index + 1, column=col_index + 1) cell.value = value if number_format is not None: cell.number_format = number_format return_result = False if filename_or_fobj is None: filename_or_fobj = BytesIO() return_result = True source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx") workbook.save(source.fobj) source.fobj.flush() if return_result: source.fobj.seek(0) result = source.fobj.read() else: result = source.fobj if source.should_close: source.fobj.close() return result
[ "def", "export_to_xlsx", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "sheet_name", "=", "\"Sheet1\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "workbook", "=", "Workbook", "(", ")", "sheet", "=", "workbook", ".", "active", "sheet", ".", "title", "=", "sheet_name", "prepared_table", "=", "prepare_to_export", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Write header", "field_names", "=", "next", "(", "prepared_table", ")", "for", "col_index", ",", "field_name", "in", "enumerate", "(", "field_names", ")", ":", "cell", "=", "sheet", ".", "cell", "(", "row", "=", "1", ",", "column", "=", "col_index", "+", "1", ")", "cell", ".", "value", "=", "field_name", "# Write sheet rows", "_convert_row", "=", "_python_to_cell", "(", "list", "(", "map", "(", "table", ".", "fields", ".", "get", ",", "field_names", ")", ")", ")", "for", "row_index", ",", "row", "in", "enumerate", "(", "prepared_table", ",", "start", "=", "1", ")", ":", "for", "col_index", ",", "(", "value", ",", "number_format", ")", "in", "enumerate", "(", "_convert_row", "(", "row", ")", ")", ":", "cell", "=", "sheet", ".", "cell", "(", "row", "=", "row_index", "+", "1", ",", "column", "=", "col_index", "+", "1", ")", "cell", ".", "value", "=", "value", "if", "number_format", "is", "not", "None", ":", "cell", ".", "number_format", "=", "number_format", "return_result", "=", "False", "if", "filename_or_fobj", "is", "None", ":", "filename_or_fobj", "=", "BytesIO", "(", ")", "return_result", "=", "True", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "\"wb\"", ",", "plugin_name", "=", "\"xlsx\"", ")", "workbook", ".", "save", "(", "source", ".", "fobj", ")", "source", ".", "fobj", ".", "flush", "(", ")", "if", "return_result", ":", "source", ".", "fobj", ".", "seek", "(", "0", ")", "result", "=", "source", ".", "fobj", ".", "read", "(", ")", "else", ":", "result", "=", "source", ".", "fobj", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "return", "result" ]
Export the rows.Table to XLSX file and return the saved file.
[ "Export", "the", "rows", ".", "Table", "to", "XLSX", "file", "and", "return", "the", "saved", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L152-L193
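A usage sketch mirroring export_to_xls above; the names are assumptions:

import rows

table = rows.import_from_dicts([{"product": "pen", "price": "1.50"}])
# With no file argument the XLSX contents are returned as bytes.
data = rows.export_to_xlsx(table)
with open("products.xlsx", "wb") as fobj:
    fobj.write(data)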
247,028
turicas/rows
examples/library/organizaciones.py
download_organizations
def download_organizations(): "Download organizations JSON and extract its properties" response = requests.get(URL) data = response.json() organizations = [organization["properties"] for organization in data["features"]] return rows.import_from_dicts(organizations)
python
def download_organizations(): "Download organizations JSON and extract its properties" response = requests.get(URL) data = response.json() organizations = [organization["properties"] for organization in data["features"]] return rows.import_from_dicts(organizations)
[ "def", "download_organizations", "(", ")", ":", "response", "=", "requests", ".", "get", "(", "URL", ")", "data", "=", "response", ".", "json", "(", ")", "organizations", "=", "[", "organization", "[", "\"properties\"", "]", "for", "organization", "in", "data", "[", "\"features\"", "]", "]", "return", "rows", ".", "import_from_dicts", "(", "organizations", ")" ]
Download organizations JSON and extract its properties
[ "Download", "organizations", "JSON", "and", "extract", "its", "properties" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/organizaciones.py#L15-L21
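The same import_from_dicts pattern as a sketch, with inline data standing in for the downloaded JSON:

import rows

features = [{"name": "Org A", "city": "Madrid"}, {"name": "Org B", "city": "Lima"}]
# import_from_dicts detects field names and types from the dicts.
table = rows.import_from_dicts(features)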
247,029
turicas/rows
rows/plugins/plugin_html.py
import_from_html
def import_from_html( filename_or_fobj, encoding="utf-8", index=0, ignore_colspan=True, preserve_html=False, properties=False, table_tag="table", row_tag="tr", column_tag="td|th", *args, **kwargs ): """Return rows.Table from HTML file.""" source = Source.from_file( filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding ) html = source.fobj.read().decode(source.encoding) html_tree = document_fromstring(html) tables = html_tree.xpath("//{}".format(table_tag)) table = tables[index] # TODO: set meta's "name" from @id or @name (if available) strip_tags(table, "thead") strip_tags(table, "tbody") row_elements = table.xpath(row_tag) table_rows = [ _get_row( row, column_tag=column_tag, preserve_html=preserve_html, properties=properties, ) for row in row_elements ] if properties: table_rows[0][-1] = "properties" if preserve_html and kwargs.get("fields", None) is None: # The field names will be the first table row, so we need to strip HTML # from it even if `preserve_html` is `True` (it's `True` only for rows, # not for the header). table_rows[0] = list(map(_extract_node_text, row_elements[0])) if ignore_colspan: max_columns = max(map(len, table_rows)) table_rows = [row for row in table_rows if len(row) == max_columns] meta = {"imported_from": "html", "source": source} return create_table(table_rows, meta=meta, *args, **kwargs)
python
def import_from_html( filename_or_fobj, encoding="utf-8", index=0, ignore_colspan=True, preserve_html=False, properties=False, table_tag="table", row_tag="tr", column_tag="td|th", *args, **kwargs ): source = Source.from_file( filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding ) html = source.fobj.read().decode(source.encoding) html_tree = document_fromstring(html) tables = html_tree.xpath("//{}".format(table_tag)) table = tables[index] # TODO: set meta's "name" from @id or @name (if available) strip_tags(table, "thead") strip_tags(table, "tbody") row_elements = table.xpath(row_tag) table_rows = [ _get_row( row, column_tag=column_tag, preserve_html=preserve_html, properties=properties, ) for row in row_elements ] if properties: table_rows[0][-1] = "properties" if preserve_html and kwargs.get("fields", None) is None: # The field names will be the first table row, so we need to strip HTML # from it even if `preserve_html` is `True` (it's `True` only for rows, # not for the header). table_rows[0] = list(map(_extract_node_text, row_elements[0])) if ignore_colspan: max_columns = max(map(len, table_rows)) table_rows = [row for row in table_rows if len(row) == max_columns] meta = {"imported_from": "html", "source": source} return create_table(table_rows, meta=meta, *args, **kwargs)
[ "def", "import_from_html", "(", "filename_or_fobj", ",", "encoding", "=", "\"utf-8\"", ",", "index", "=", "0", ",", "ignore_colspan", "=", "True", ",", "preserve_html", "=", "False", ",", "properties", "=", "False", ",", "table_tag", "=", "\"table\"", ",", "row_tag", "=", "\"tr\"", ",", "column_tag", "=", "\"td|th\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"html\"", ",", "mode", "=", "\"rb\"", ",", "encoding", "=", "encoding", ")", "html", "=", "source", ".", "fobj", ".", "read", "(", ")", ".", "decode", "(", "source", ".", "encoding", ")", "html_tree", "=", "document_fromstring", "(", "html", ")", "tables", "=", "html_tree", ".", "xpath", "(", "\"//{}\"", ".", "format", "(", "table_tag", ")", ")", "table", "=", "tables", "[", "index", "]", "# TODO: set meta's \"name\" from @id or @name (if available)", "strip_tags", "(", "table", ",", "\"thead\"", ")", "strip_tags", "(", "table", ",", "\"tbody\"", ")", "row_elements", "=", "table", ".", "xpath", "(", "row_tag", ")", "table_rows", "=", "[", "_get_row", "(", "row", ",", "column_tag", "=", "column_tag", ",", "preserve_html", "=", "preserve_html", ",", "properties", "=", "properties", ",", ")", "for", "row", "in", "row_elements", "]", "if", "properties", ":", "table_rows", "[", "0", "]", "[", "-", "1", "]", "=", "\"properties\"", "if", "preserve_html", "and", "kwargs", ".", "get", "(", "\"fields\"", ",", "None", ")", "is", "None", ":", "# The field names will be the first table row, so we need to strip HTML", "# from it even if `preserve_html` is `True` (it's `True` only for rows,", "# not for the header).", "table_rows", "[", "0", "]", "=", "list", "(", "map", "(", "_extract_node_text", ",", "row_elements", "[", "0", "]", ")", ")", "if", "ignore_colspan", ":", "max_columns", "=", "max", "(", "map", "(", "len", ",", "table_rows", ")", ")", "table_rows", "=", "[", "row", "for", "row", "in", "table_rows", "if", "len", "(", "row", ")", "==", "max_columns", "]", "meta", "=", "{", "\"imported_from\"", ":", "\"html\"", ",", "\"source\"", ":", "source", "}", "return", "create_table", "(", "table_rows", ",", "meta", "=", "meta", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return rows.Table from HTML file.
[ "Return", "rows", ".", "Table", "from", "HTML", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L68-L121
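A usage sketch; the file name and table index are assumptions:

import rows

# index selects which <table> on the page to parse; preserve_html keeps cell
# markup for data rows (header names are still stripped, as noted above).
table = rows.import_from_html("page.html", index=1, preserve_html=True)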
247,030
turicas/rows
rows/plugins/plugin_html.py
export_to_html
def export_to_html( table, filename_or_fobj=None, encoding="utf-8", caption=False, *args, **kwargs ): """Export and return rows.Table data to HTML file.""" serialized_table = serialize(table, *args, **kwargs) fields = next(serialized_table) result = ["<table>\n\n"] if caption and table.name: result.extend([" <caption>", table.name, "</caption>\n\n"]) result.extend([" <thead>\n", " <tr>\n"]) # TODO: set @name/@id if self.meta["name"] is set header = [" <th> {} </th>\n".format(field) for field in fields] result.extend(header) result.extend([" </tr>\n", " </thead>\n", "\n", " <tbody>\n", "\n"]) for index, row in enumerate(serialized_table, start=1): css_class = "odd" if index % 2 == 1 else "even" result.append(' <tr class="{}">\n'.format(css_class)) for value in row: result.extend([" <td> ", escape(value), " </td>\n"]) result.append(" </tr>\n\n") result.append(" </tbody>\n\n</table>\n") html = "".join(result).encode(encoding) return export_data(filename_or_fobj, html, mode="wb")
python
def export_to_html( table, filename_or_fobj=None, encoding="utf-8", caption=False, *args, **kwargs ): serialized_table = serialize(table, *args, **kwargs) fields = next(serialized_table) result = ["<table>\n\n"] if caption and table.name: result.extend([" <caption>", table.name, "</caption>\n\n"]) result.extend([" <thead>\n", " <tr>\n"]) # TODO: set @name/@id if self.meta["name"] is set header = [" <th> {} </th>\n".format(field) for field in fields] result.extend(header) result.extend([" </tr>\n", " </thead>\n", "\n", " <tbody>\n", "\n"]) for index, row in enumerate(serialized_table, start=1): css_class = "odd" if index % 2 == 1 else "even" result.append(' <tr class="{}">\n'.format(css_class)) for value in row: result.extend([" <td> ", escape(value), " </td>\n"]) result.append(" </tr>\n\n") result.append(" </tbody>\n\n</table>\n") html = "".join(result).encode(encoding) return export_data(filename_or_fobj, html, mode="wb")
[ "def", "export_to_html", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "encoding", "=", "\"utf-8\"", ",", "caption", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serialized_table", "=", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "fields", "=", "next", "(", "serialized_table", ")", "result", "=", "[", "\"<table>\\n\\n\"", "]", "if", "caption", "and", "table", ".", "name", ":", "result", ".", "extend", "(", "[", "\" <caption>\"", ",", "table", ".", "name", ",", "\"</caption>\\n\\n\"", "]", ")", "result", ".", "extend", "(", "[", "\" <thead>\\n\"", ",", "\" <tr>\\n\"", "]", ")", "# TODO: set @name/@id if self.meta[\"name\"] is set", "header", "=", "[", "\" <th> {} </th>\\n\"", ".", "format", "(", "field", ")", "for", "field", "in", "fields", "]", "result", ".", "extend", "(", "header", ")", "result", ".", "extend", "(", "[", "\" </tr>\\n\"", ",", "\" </thead>\\n\"", ",", "\"\\n\"", ",", "\" <tbody>\\n\"", ",", "\"\\n\"", "]", ")", "for", "index", ",", "row", "in", "enumerate", "(", "serialized_table", ",", "start", "=", "1", ")", ":", "css_class", "=", "\"odd\"", "if", "index", "%", "2", "==", "1", "else", "\"even\"", "result", ".", "append", "(", "' <tr class=\"{}\">\\n'", ".", "format", "(", "css_class", ")", ")", "for", "value", "in", "row", ":", "result", ".", "extend", "(", "[", "\" <td> \"", ",", "escape", "(", "value", ")", ",", "\" </td>\\n\"", "]", ")", "result", ".", "append", "(", "\" </tr>\\n\\n\"", ")", "result", ".", "append", "(", "\" </tbody>\\n\\n</table>\\n\"", ")", "html", "=", "\"\"", ".", "join", "(", "result", ")", ".", "encode", "(", "encoding", ")", "return", "export_data", "(", "filename_or_fobj", ",", "html", ",", "mode", "=", "\"wb\"", ")" ]
Export and return rows.Table data to HTML file.
[ "Export", "and", "return", "rows", ".", "Table", "data", "to", "HTML", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L124-L148
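A usage sketch; with no file argument the encoded HTML is returned:

import rows

table = rows.import_from_dicts([{"language": "Portuguese", "speakers": 260}])
html = rows.export_to_html(table)  # bytes, UTF-8 by default
print(html.decode("utf-8"))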
247,031
turicas/rows
rows/plugins/plugin_html.py
_extract_node_text
def _extract_node_text(node): """Extract text from a given lxml node.""" texts = map( six.text_type.strip, map(six.text_type, map(unescape, node.xpath(".//text()"))) ) return " ".join(text for text in texts if text)
python
def _extract_node_text(node): texts = map( six.text_type.strip, map(six.text_type, map(unescape, node.xpath(".//text()"))) ) return " ".join(text for text in texts if text)
[ "def", "_extract_node_text", "(", "node", ")", ":", "texts", "=", "map", "(", "six", ".", "text_type", ".", "strip", ",", "map", "(", "six", ".", "text_type", ",", "map", "(", "unescape", ",", "node", ".", "xpath", "(", "\".//text()\"", ")", ")", ")", ")", "return", "\" \"", ".", "join", "(", "text", "for", "text", "in", "texts", "if", "text", ")" ]
Extract text from a given lxml node.
[ "Extract", "text", "from", "a", "given", "lxml", "node", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L151-L157
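A sketch of the helper on an inline fragment; it is private, so the import is for illustration only:

from lxml.html import document_fromstring
from rows.plugins.plugin_html import _extract_node_text

node = document_fromstring("<td> Hello <b>world</b> </td>").xpath("//td")[0]
print(_extract_node_text(node))  # "Hello world" - nested text, whitespace-normalized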
247,032
turicas/rows
rows/plugins/plugin_html.py
count_tables
def count_tables(filename_or_fobj, encoding="utf-8", table_tag="table"): """Read the given file and return the number of HTML table tags found in it.""" source = Source.from_file( filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding ) html = source.fobj.read().decode(source.encoding) html_tree = document_fromstring(html) tables = html_tree.xpath("//{}".format(table_tag)) result = len(tables) if source.should_close: source.fobj.close() return result
python
def count_tables(filename_or_fobj, encoding="utf-8", table_tag="table"): source = Source.from_file( filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding ) html = source.fobj.read().decode(source.encoding) html_tree = document_fromstring(html) tables = html_tree.xpath("//{}".format(table_tag)) result = len(tables) if source.should_close: source.fobj.close() return result
[ "def", "count_tables", "(", "filename_or_fobj", ",", "encoding", "=", "\"utf-8\"", ",", "table_tag", "=", "\"table\"", ")", ":", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"html\"", ",", "mode", "=", "\"rb\"", ",", "encoding", "=", "encoding", ")", "html", "=", "source", ".", "fobj", ".", "read", "(", ")", ".", "decode", "(", "source", ".", "encoding", ")", "html_tree", "=", "document_fromstring", "(", "html", ")", "tables", "=", "html_tree", ".", "xpath", "(", "\"//{}\"", ".", "format", "(", "table_tag", ")", ")", "result", "=", "len", "(", "tables", ")", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "return", "result" ]
Read the given file and return the number of HTML table tags it contains.
[ "Read", "the", "given", "file", "and", "return", "the", "number", "of", "HTML", "table", "tags", "it", "contains", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L160-L174
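A small usage sketch for count_tables (illustrative; the import path follows this record's rows/plugins/plugin_html.py):

# Count <table> tags in an HTML document held in memory.
import io

from rows.plugins.plugin_html import count_tables

html = b"<html><body><table></table><table></table></body></html>"
print(count_tables(io.BytesIO(html)))  # 2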
247,033
turicas/rows
rows/plugins/plugin_html.py
tag_to_dict
def tag_to_dict(html): """Extract tag's attributes into a `dict`.""" element = document_fromstring(html).xpath("//html/body/child::*")[0] attributes = dict(element.attrib) attributes["text"] = element.text_content() return attributes
python
def tag_to_dict(html): element = document_fromstring(html).xpath("//html/body/child::*")[0] attributes = dict(element.attrib) attributes["text"] = element.text_content() return attributes
[ "def", "tag_to_dict", "(", "html", ")", ":", "element", "=", "document_fromstring", "(", "html", ")", ".", "xpath", "(", "\"//html/body/child::*\"", ")", "[", "0", "]", "attributes", "=", "dict", "(", "element", ".", "attrib", ")", "attributes", "[", "\"text\"", "]", "=", "element", ".", "text_content", "(", ")", "return", "attributes" ]
Extract tag's attributes into a `dict`.
[ "Extract", "tag", "s", "attributes", "into", "a", "dict", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L177-L183
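A sketch of tag_to_dict applied to a single tag (illustrative; the attribute names and text are made up):

# The tag's attributes come back as dict keys, plus a "text" entry with
# the tag's text content.
from rows.plugins.plugin_html import tag_to_dict

print(tag_to_dict('<div class="name" id="person">Alice</div>'))
# {'class': 'name', 'id': 'person', 'text': 'Alice'}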
247,034
turicas/rows
rows/plugins/utils.py
create_table
def create_table( data, meta=None, fields=None, skip_header=True, import_fields=None, samples=None, force_types=None, max_rows=None, *args, **kwargs ): """Create a rows.Table object based on data rows and some configurations - `skip_header` is only used if `fields` is set - `samples` is only used if `fields` is `None`. If samples=None, all data is filled in memory - use with caution. - `force_types` is only used if `fields` is `None` - `import_fields` can be used either if `fields` is set or not, the resulting fields will seek its order - `fields` must always be in the same order as the data """ table_rows = iter(data) force_types = force_types or {} if import_fields is not None: import_fields = make_header(import_fields) # TODO: test max_rows if fields is None: # autodetect field types # TODO: may add `type_hints` parameter so autodetection can be easier # (plugins may specify some possible field types). header = make_header(next(table_rows)) if samples is not None: sample_rows = list(islice(table_rows, 0, samples)) table_rows = chain(sample_rows, table_rows) else: if max_rows is not None and max_rows > 0: sample_rows = table_rows = list(islice(table_rows, max_rows)) else: sample_rows = table_rows = list(table_rows) # Detect field types using only the desired columns detected_fields = detect_types( header, sample_rows, skip_indexes=[ index for index, field in enumerate(header) if field in force_types or field not in (import_fields or header) ], *args, **kwargs ) # Check if any field was added during detecting process new_fields = [ field_name for field_name in detected_fields.keys() if field_name not in header ] # Finally create the `fields` with both header and new field names, # based on detected fields `and force_types` fields = OrderedDict( [ (field_name, detected_fields.get(field_name, TextField)) for field_name in header + new_fields ] ) fields.update(force_types) # Update `header` and `import_fields` based on new `fields` header = list(fields.keys()) if import_fields is None: import_fields = header else: # using provided field types if not isinstance(fields, OrderedDict): raise ValueError("`fields` must be an `OrderedDict`") if skip_header: # If we're skipping the header probably this row is not trustable # (can be data or garbage). next(table_rows) header = make_header(list(fields.keys())) if import_fields is None: import_fields = header fields = OrderedDict( [(field_name, fields[key]) for field_name, key in zip(header, fields)] ) diff = set(import_fields) - set(header) if diff: field_names = ", ".join('"{}"'.format(field) for field in diff) raise ValueError("Invalid field names: {}".format(field_names)) fields = OrderedDict( [(field_name, fields[field_name]) for field_name in import_fields] ) get_row = get_items(*map(header.index, import_fields)) table = Table(fields=fields, meta=meta) if max_rows is not None and max_rows > 0: table_rows = islice(table_rows, max_rows) table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows) source = table.meta.get("source", None) if source is not None: if source.should_close: source.fobj.close() if source.should_delete and Path(source.uri).exists(): unlink(source.uri) return table
python
def create_table( data, meta=None, fields=None, skip_header=True, import_fields=None, samples=None, force_types=None, max_rows=None, *args, **kwargs ): table_rows = iter(data) force_types = force_types or {} if import_fields is not None: import_fields = make_header(import_fields) # TODO: test max_rows if fields is None: # autodetect field types # TODO: may add `type_hints` parameter so autodetection can be easier # (plugins may specify some possible field types). header = make_header(next(table_rows)) if samples is not None: sample_rows = list(islice(table_rows, 0, samples)) table_rows = chain(sample_rows, table_rows) else: if max_rows is not None and max_rows > 0: sample_rows = table_rows = list(islice(table_rows, max_rows)) else: sample_rows = table_rows = list(table_rows) # Detect field types using only the desired columns detected_fields = detect_types( header, sample_rows, skip_indexes=[ index for index, field in enumerate(header) if field in force_types or field not in (import_fields or header) ], *args, **kwargs ) # Check if any field was added during detecting process new_fields = [ field_name for field_name in detected_fields.keys() if field_name not in header ] # Finally create the `fields` with both header and new field names, # based on detected fields `and force_types` fields = OrderedDict( [ (field_name, detected_fields.get(field_name, TextField)) for field_name in header + new_fields ] ) fields.update(force_types) # Update `header` and `import_fields` based on new `fields` header = list(fields.keys()) if import_fields is None: import_fields = header else: # using provided field types if not isinstance(fields, OrderedDict): raise ValueError("`fields` must be an `OrderedDict`") if skip_header: # If we're skipping the header probably this row is not trustable # (can be data or garbage). next(table_rows) header = make_header(list(fields.keys())) if import_fields is None: import_fields = header fields = OrderedDict( [(field_name, fields[key]) for field_name, key in zip(header, fields)] ) diff = set(import_fields) - set(header) if diff: field_names = ", ".join('"{}"'.format(field) for field in diff) raise ValueError("Invalid field names: {}".format(field_names)) fields = OrderedDict( [(field_name, fields[field_name]) for field_name in import_fields] ) get_row = get_items(*map(header.index, import_fields)) table = Table(fields=fields, meta=meta) if max_rows is not None and max_rows > 0: table_rows = islice(table_rows, max_rows) table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows) source = table.meta.get("source", None) if source is not None: if source.should_close: source.fobj.close() if source.should_delete and Path(source.uri).exists(): unlink(source.uri) return table
[ "def", "create_table", "(", "data", ",", "meta", "=", "None", ",", "fields", "=", "None", ",", "skip_header", "=", "True", ",", "import_fields", "=", "None", ",", "samples", "=", "None", ",", "force_types", "=", "None", ",", "max_rows", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "table_rows", "=", "iter", "(", "data", ")", "force_types", "=", "force_types", "or", "{", "}", "if", "import_fields", "is", "not", "None", ":", "import_fields", "=", "make_header", "(", "import_fields", ")", "# TODO: test max_rows", "if", "fields", "is", "None", ":", "# autodetect field types", "# TODO: may add `type_hints` parameter so autodetection can be easier", "# (plugins may specify some possible field types).", "header", "=", "make_header", "(", "next", "(", "table_rows", ")", ")", "if", "samples", "is", "not", "None", ":", "sample_rows", "=", "list", "(", "islice", "(", "table_rows", ",", "0", ",", "samples", ")", ")", "table_rows", "=", "chain", "(", "sample_rows", ",", "table_rows", ")", "else", ":", "if", "max_rows", "is", "not", "None", "and", "max_rows", ">", "0", ":", "sample_rows", "=", "table_rows", "=", "list", "(", "islice", "(", "table_rows", ",", "max_rows", ")", ")", "else", ":", "sample_rows", "=", "table_rows", "=", "list", "(", "table_rows", ")", "# Detect field types using only the desired columns", "detected_fields", "=", "detect_types", "(", "header", ",", "sample_rows", ",", "skip_indexes", "=", "[", "index", "for", "index", ",", "field", "in", "enumerate", "(", "header", ")", "if", "field", "in", "force_types", "or", "field", "not", "in", "(", "import_fields", "or", "header", ")", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Check if any field was added during detecting process", "new_fields", "=", "[", "field_name", "for", "field_name", "in", "detected_fields", ".", "keys", "(", ")", "if", "field_name", "not", "in", "header", "]", "# Finally create the `fields` with both header and new field names,", "# based on detected fields `and force_types`", "fields", "=", "OrderedDict", "(", "[", "(", "field_name", ",", "detected_fields", ".", "get", "(", "field_name", ",", "TextField", ")", ")", "for", "field_name", "in", "header", "+", "new_fields", "]", ")", "fields", ".", "update", "(", "force_types", ")", "# Update `header` and `import_fields` based on new `fields`", "header", "=", "list", "(", "fields", ".", "keys", "(", ")", ")", "if", "import_fields", "is", "None", ":", "import_fields", "=", "header", "else", ":", "# using provided field types", "if", "not", "isinstance", "(", "fields", ",", "OrderedDict", ")", ":", "raise", "ValueError", "(", "\"`fields` must be an `OrderedDict`\"", ")", "if", "skip_header", ":", "# If we're skipping the header probably this row is not trustable", "# (can be data or garbage).", "next", "(", "table_rows", ")", "header", "=", "make_header", "(", "list", "(", "fields", ".", "keys", "(", ")", ")", ")", "if", "import_fields", "is", "None", ":", "import_fields", "=", "header", "fields", "=", "OrderedDict", "(", "[", "(", "field_name", ",", "fields", "[", "key", "]", ")", "for", "field_name", ",", "key", "in", "zip", "(", "header", ",", "fields", ")", "]", ")", "diff", "=", "set", "(", "import_fields", ")", "-", "set", "(", "header", ")", "if", "diff", ":", "field_names", "=", "\", \"", ".", "join", "(", "'\"{}\"'", ".", "format", "(", "field", ")", "for", "field", "in", "diff", ")", "raise", "ValueError", "(", "\"Invalid field names: {}\"", ".", "format", "(", "field_names", ")", ")", "fields", "=", "OrderedDict", "(", 
"[", "(", "field_name", ",", "fields", "[", "field_name", "]", ")", "for", "field_name", "in", "import_fields", "]", ")", "get_row", "=", "get_items", "(", "*", "map", "(", "header", ".", "index", ",", "import_fields", ")", ")", "table", "=", "Table", "(", "fields", "=", "fields", ",", "meta", "=", "meta", ")", "if", "max_rows", "is", "not", "None", "and", "max_rows", ">", "0", ":", "table_rows", "=", "islice", "(", "table_rows", ",", "max_rows", ")", "table", ".", "extend", "(", "dict", "(", "zip", "(", "import_fields", ",", "get_row", "(", "row", ")", ")", ")", "for", "row", "in", "table_rows", ")", "source", "=", "table", ".", "meta", ".", "get", "(", "\"source\"", ",", "None", ")", "if", "source", "is", "not", "None", ":", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "if", "source", ".", "should_delete", "and", "Path", "(", "source", ".", "uri", ")", ".", "exists", "(", ")", ":", "unlink", "(", "source", ".", "uri", ")", "return", "table" ]
Create a rows.Table object based on data rows and some configurations - `skip_header` is only used if `fields` is set - `samples` is only used if `fields` is `None`. If samples=None, all data is loaded into memory - use with caution. - `force_types` is only used if `fields` is `None` - `import_fields` can be used whether `fields` is set or not; the resulting fields will follow its order - `fields` must always be in the same order as the data
[ "Create", "a", "rows", ".", "Table", "object", "based", "on", "data", "rows", "and", "some", "configurations" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/utils.py#L75-L189
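A sketch of calling create_table directly (illustrative; plugins normally call it on your behalf). With fields=None the column types are detected from the data, and force_types pins a column regardless of detection; the sample data is made up:

from rows import fields
from rows.plugins.utils import create_table

data = [
    ["name", "age", "joined"],
    ["Alice", "30", "2015-01-01"],
    ["Bob", "25", "2016-06-30"],
]
# Detect types from the rows, but force `age` to stay textual.
table = create_table(data, force_types={"age": fields.TextField})
print(table.fields)  # OrderedDict of field name -> detected/forced type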
247,035
turicas/rows
rows/plugins/utils.py
export_data
def export_data(filename_or_fobj, data, mode="w"): """Return the object ready to be exported or only data if filename_or_fobj is not passed.""" if filename_or_fobj is None: return data _, fobj = get_filename_and_fobj(filename_or_fobj, mode=mode) source = Source.from_file(filename_or_fobj, mode=mode, plugin_name=None) source.fobj.write(data) source.fobj.flush() return source.fobj
python
def export_data(filename_or_fobj, data, mode="w"): if filename_or_fobj is None: return data _, fobj = get_filename_and_fobj(filename_or_fobj, mode=mode) source = Source.from_file(filename_or_fobj, mode=mode, plugin_name=None) source.fobj.write(data) source.fobj.flush() return source.fobj
[ "def", "export_data", "(", "filename_or_fobj", ",", "data", ",", "mode", "=", "\"w\"", ")", ":", "if", "filename_or_fobj", "is", "None", ":", "return", "data", "_", ",", "fobj", "=", "get_filename_and_fobj", "(", "filename_or_fobj", ",", "mode", "=", "mode", ")", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "mode", "=", "mode", ",", "plugin_name", "=", "None", ")", "source", ".", "fobj", ".", "write", "(", "data", ")", "source", ".", "fobj", ".", "flush", "(", ")", "return", "source", ".", "fobj" ]
Return the file object ready to be exported, or only the data if filename_or_fobj is not passed.
[ "Return", "the", "file", "object", "ready", "to", "be", "exported", "or", "only", "the", "data", "if", "filename_or_fobj", "is", "not", "passed", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/utils.py#L236-L246
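A sketch of export_data's two return modes (illustrative; it assumes Source.from_file accepts an in-memory BytesIO, as the other plugins in this file do):

import io

from rows.plugins.utils import export_data

print(export_data(None, b"hello"))  # b'hello' -- data returned untouched

buf = io.BytesIO()
export_data(buf, b"hello", mode="wb")  # written and flushed into buf
print(buf.getvalue())  # b'hello'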
247,036
turicas/rows
rows/plugins/plugin_csv.py
read_sample
def read_sample(fobj, sample): """Read `sample` bytes from `fobj` and return the cursor to where it was.""" cursor = fobj.tell() data = fobj.read(sample) fobj.seek(cursor) return data
python
def read_sample(fobj, sample): cursor = fobj.tell() data = fobj.read(sample) fobj.seek(cursor) return data
[ "def", "read_sample", "(", "fobj", ",", "sample", ")", ":", "cursor", "=", "fobj", ".", "tell", "(", ")", "data", "=", "fobj", ".", "read", "(", "sample", ")", "fobj", ".", "seek", "(", "cursor", ")", "return", "data" ]
Read `sample` bytes from `fobj` and return the cursor to where it was.
[ "Read", "sample", "bytes", "from", "fobj", "and", "return", "the", "cursor", "to", "where", "it", "was", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_csv.py#L103-L108
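A sketch of read_sample, which peeks at the start of a file without moving the caller's read position (illustrative data):

import io

from rows.plugins.plugin_csv import read_sample

fobj = io.BytesIO(b"id;name\n1;Alice\n")
print(read_sample(fobj, 8))  # b'id;name\n'
print(fobj.tell())           # 0 -- the cursor was restored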
247,037
turicas/rows
rows/plugins/plugin_csv.py
export_to_csv
def export_to_csv( table, filename_or_fobj=None, encoding="utf-8", dialect=unicodecsv.excel, batch_size=100, callback=None, *args, **kwargs ): """Export a `rows.Table` to a CSV file. If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='wb')`. If not filename/fobj is provided, the function returns a string with CSV contents. """ # TODO: will work only if table.fields is OrderedDict # TODO: should use fobj? What about creating a method like json.dumps? return_data, should_close = False, None if filename_or_fobj is None: filename_or_fobj = BytesIO() return_data = should_close = True source = Source.from_file( filename_or_fobj, plugin_name="csv", mode="wb", encoding=encoding, should_close=should_close, ) # TODO: may use `io.BufferedWriter` instead of `ipartition` so user can # choose the real size (in Bytes) when to flush to the file system, instead # number of rows writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect) if callback is None: for batch in ipartition(serialize(table, *args, **kwargs), batch_size): writer.writerows(batch) else: serialized = serialize(table, *args, **kwargs) writer.writerow(next(serialized)) # First, write the header total = 0 for batch in ipartition(serialized, batch_size): writer.writerows(batch) total += len(batch) callback(total) if return_data: source.fobj.seek(0) result = source.fobj.read() else: source.fobj.flush() result = source.fobj if source.should_close: source.fobj.close() return result
python
def export_to_csv( table, filename_or_fobj=None, encoding="utf-8", dialect=unicodecsv.excel, batch_size=100, callback=None, *args, **kwargs ): # TODO: will work only if table.fields is OrderedDict # TODO: should use fobj? What about creating a method like json.dumps? return_data, should_close = False, None if filename_or_fobj is None: filename_or_fobj = BytesIO() return_data = should_close = True source = Source.from_file( filename_or_fobj, plugin_name="csv", mode="wb", encoding=encoding, should_close=should_close, ) # TODO: may use `io.BufferedWriter` instead of `ipartition` so user can # choose the real size (in Bytes) when to flush to the file system, instead # number of rows writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect) if callback is None: for batch in ipartition(serialize(table, *args, **kwargs), batch_size): writer.writerows(batch) else: serialized = serialize(table, *args, **kwargs) writer.writerow(next(serialized)) # First, write the header total = 0 for batch in ipartition(serialized, batch_size): writer.writerows(batch) total += len(batch) callback(total) if return_data: source.fobj.seek(0) result = source.fobj.read() else: source.fobj.flush() result = source.fobj if source.should_close: source.fobj.close() return result
[ "def", "export_to_csv", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "encoding", "=", "\"utf-8\"", ",", "dialect", "=", "unicodecsv", ".", "excel", ",", "batch_size", "=", "100", ",", "callback", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: will work only if table.fields is OrderedDict", "# TODO: should use fobj? What about creating a method like json.dumps?", "return_data", ",", "should_close", "=", "False", ",", "None", "if", "filename_or_fobj", "is", "None", ":", "filename_or_fobj", "=", "BytesIO", "(", ")", "return_data", "=", "should_close", "=", "True", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"csv\"", ",", "mode", "=", "\"wb\"", ",", "encoding", "=", "encoding", ",", "should_close", "=", "should_close", ",", ")", "# TODO: may use `io.BufferedWriter` instead of `ipartition` so user can", "# choose the real size (in Bytes) when to flush to the file system, instead", "# number of rows", "writer", "=", "unicodecsv", ".", "writer", "(", "source", ".", "fobj", ",", "encoding", "=", "encoding", ",", "dialect", "=", "dialect", ")", "if", "callback", "is", "None", ":", "for", "batch", "in", "ipartition", "(", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", ",", "batch_size", ")", ":", "writer", ".", "writerows", "(", "batch", ")", "else", ":", "serialized", "=", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "writer", ".", "writerow", "(", "next", "(", "serialized", ")", ")", "# First, write the header", "total", "=", "0", "for", "batch", "in", "ipartition", "(", "serialized", ",", "batch_size", ")", ":", "writer", ".", "writerows", "(", "batch", ")", "total", "+=", "len", "(", "batch", ")", "callback", "(", "total", ")", "if", "return_data", ":", "source", ".", "fobj", ".", "seek", "(", "0", ")", "result", "=", "source", ".", "fobj", ".", "read", "(", ")", "else", ":", "source", ".", "fobj", ".", "flush", "(", ")", "result", "=", "source", ".", "fobj", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "return", "result" ]
Export a `rows.Table` to a CSV file. If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='wb')`. If no filename/fobj is provided, the function returns the CSV contents as bytes.
[ "Export", "a", "rows", ".", "Table", "to", "a", "CSV", "file", "." ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_csv.py#L139-L201
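A usage sketch for export_to_csv, including the progress callback, which receives the running row count after each batch (the filename and data are made up):

import rows

table = rows.import_from_dicts([{"n": i} for i in range(1000)])
rows.export_to_csv(
    table,
    "numbers.csv",
    batch_size=250,
    callback=lambda total: print(total, "rows written"),
)

# Without a filename/fobj, the CSV contents come back as bytes.
csv_bytes = rows.export_to_csv(table)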
247,038
turicas/rows
rows/operations.py
join
def join(keys, tables): """Merge a list of `Table` objects using `keys` to group rows""" # Make new (merged) Table fields fields = OrderedDict() for table in tables: fields.update(table.fields) # TODO: may raise an error if a same field is different in some tables # Check if all keys are inside merged Table's fields fields_keys = set(fields.keys()) for key in keys: if key not in fields_keys: raise ValueError('Invalid key: "{}"'.format(key)) # Group rows by key, without missing ordering none_fields = lambda: OrderedDict({field: None for field in fields.keys()}) data = OrderedDict() for table in tables: for row in table: row_key = tuple([getattr(row, key) for key in keys]) if row_key not in data: data[row_key] = none_fields() data[row_key].update(row._asdict()) merged = Table(fields=fields) merged.extend(data.values()) return merged
python
def join(keys, tables): # Make new (merged) Table fields fields = OrderedDict() for table in tables: fields.update(table.fields) # TODO: may raise an error if a same field is different in some tables # Check if all keys are inside merged Table's fields fields_keys = set(fields.keys()) for key in keys: if key not in fields_keys: raise ValueError('Invalid key: "{}"'.format(key)) # Group rows by key, without missing ordering none_fields = lambda: OrderedDict({field: None for field in fields.keys()}) data = OrderedDict() for table in tables: for row in table: row_key = tuple([getattr(row, key) for key in keys]) if row_key not in data: data[row_key] = none_fields() data[row_key].update(row._asdict()) merged = Table(fields=fields) merged.extend(data.values()) return merged
[ "def", "join", "(", "keys", ",", "tables", ")", ":", "# Make new (merged) Table fields", "fields", "=", "OrderedDict", "(", ")", "for", "table", "in", "tables", ":", "fields", ".", "update", "(", "table", ".", "fields", ")", "# TODO: may raise an error if a same field is different in some tables", "# Check if all keys are inside merged Table's fields", "fields_keys", "=", "set", "(", "fields", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "fields_keys", ":", "raise", "ValueError", "(", "'Invalid key: \"{}\"'", ".", "format", "(", "key", ")", ")", "# Group rows by key, without missing ordering", "none_fields", "=", "lambda", ":", "OrderedDict", "(", "{", "field", ":", "None", "for", "field", "in", "fields", ".", "keys", "(", ")", "}", ")", "data", "=", "OrderedDict", "(", ")", "for", "table", "in", "tables", ":", "for", "row", "in", "table", ":", "row_key", "=", "tuple", "(", "[", "getattr", "(", "row", ",", "key", ")", "for", "key", "in", "keys", "]", ")", "if", "row_key", "not", "in", "data", ":", "data", "[", "row_key", "]", "=", "none_fields", "(", ")", "data", "[", "row_key", "]", ".", "update", "(", "row", ".", "_asdict", "(", ")", ")", "merged", "=", "Table", "(", "fields", "=", "fields", ")", "merged", ".", "extend", "(", "data", ".", "values", "(", ")", ")", "return", "merged" ]
Merge a list of `Table` objects using `keys` to group rows
[ "Merge", "a", "list", "of", "Table", "objects", "using", "keys", "to", "group", "rows" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/operations.py#L26-L53
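A sketch of join: rows sharing the same key tuple are merged into one output row, and fields missing from a table are filled with None (illustrative data):

import rows
from rows.operations import join

people = rows.import_from_dicts([{"id": 1, "name": "Alice"}])
ages = rows.import_from_dicts([{"id": 1, "age": 30}, {"id": 2, "age": 25}])

merged = join(keys=["id"], tables=[people, ages])
for row in merged:
    print(row)  # id=1 gets both name and age; id=2 gets name=None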
247,039
turicas/rows
rows/operations.py
transform
def transform(fields, function, *tables): "Return a new table based on other tables and a transformation function" new_table = Table(fields=fields) for table in tables: for row in filter(bool, map(lambda row: function(row, table), table)): new_table.append(row) return new_table
python
def transform(fields, function, *tables): "Return a new table based on other tables and a transformation function" new_table = Table(fields=fields) for table in tables: for row in filter(bool, map(lambda row: function(row, table), table)): new_table.append(row) return new_table
[ "def", "transform", "(", "fields", ",", "function", ",", "*", "tables", ")", ":", "new_table", "=", "Table", "(", "fields", "=", "fields", ")", "for", "table", "in", "tables", ":", "for", "row", "in", "filter", "(", "bool", ",", "map", "(", "lambda", "row", ":", "function", "(", "row", ",", "table", ")", ",", "table", ")", ")", ":", "new_table", ".", "append", "(", "row", ")", "return", "new_table" ]
Return a new table based on other tables and a transformation function
[ "Return", "a", "new", "table", "based", "on", "other", "tables", "and", "a", "transformation", "function" ]
c74da41ae9ed091356b803a64f8a30c641c5fc45
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/operations.py#L56-L65
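A sketch of transform: the function receives (row, table) and returns a new row, or a falsy value to drop that row (illustrative data and field layout):

from collections import OrderedDict

import rows
from rows import fields
from rows.operations import transform

source = rows.import_from_dicts([{"name": "alice"}, {"name": "bob"}])

def upper_name(row, table):
    # Returning a dict appends it to the new table; None would drop it.
    return {"name": row.name.upper()}

new_fields = OrderedDict([("name", fields.TextField)])
result = transform(new_fields, upper_name, source)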
247,040
jupyterhub/kubespawner
kubespawner/objects.py
make_pvc
def make_pvc( name, storage_class, access_modes, storage, labels=None, annotations=None, ): """ Make a k8s pvc specification for running a user notebook. Parameters ---------- name: Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. storage_class: String of the name of the k8s Storage Class to use. access_modes: A list of specifying what access mode the pod should have towards the pvc storage: The ammount of storage needed for the pvc """ pvc = V1PersistentVolumeClaim() pvc.kind = "PersistentVolumeClaim" pvc.api_version = "v1" pvc.metadata = V1ObjectMeta() pvc.metadata.name = name pvc.metadata.annotations = (annotations or {}).copy() pvc.metadata.labels = (labels or {}).copy() pvc.spec = V1PersistentVolumeClaimSpec() pvc.spec.access_modes = access_modes pvc.spec.resources = V1ResourceRequirements() pvc.spec.resources.requests = {"storage": storage} if storage_class: pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class}) pvc.spec.storage_class_name = storage_class return pvc
python
def make_pvc( name, storage_class, access_modes, storage, labels=None, annotations=None, ): pvc = V1PersistentVolumeClaim() pvc.kind = "PersistentVolumeClaim" pvc.api_version = "v1" pvc.metadata = V1ObjectMeta() pvc.metadata.name = name pvc.metadata.annotations = (annotations or {}).copy() pvc.metadata.labels = (labels or {}).copy() pvc.spec = V1PersistentVolumeClaimSpec() pvc.spec.access_modes = access_modes pvc.spec.resources = V1ResourceRequirements() pvc.spec.resources.requests = {"storage": storage} if storage_class: pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class}) pvc.spec.storage_class_name = storage_class return pvc
[ "def", "make_pvc", "(", "name", ",", "storage_class", ",", "access_modes", ",", "storage", ",", "labels", "=", "None", ",", "annotations", "=", "None", ",", ")", ":", "pvc", "=", "V1PersistentVolumeClaim", "(", ")", "pvc", ".", "kind", "=", "\"PersistentVolumeClaim\"", "pvc", ".", "api_version", "=", "\"v1\"", "pvc", ".", "metadata", "=", "V1ObjectMeta", "(", ")", "pvc", ".", "metadata", ".", "name", "=", "name", "pvc", ".", "metadata", ".", "annotations", "=", "(", "annotations", "or", "{", "}", ")", ".", "copy", "(", ")", "pvc", ".", "metadata", ".", "labels", "=", "(", "labels", "or", "{", "}", ")", ".", "copy", "(", ")", "pvc", ".", "spec", "=", "V1PersistentVolumeClaimSpec", "(", ")", "pvc", ".", "spec", ".", "access_modes", "=", "access_modes", "pvc", ".", "spec", ".", "resources", "=", "V1ResourceRequirements", "(", ")", "pvc", ".", "spec", ".", "resources", ".", "requests", "=", "{", "\"storage\"", ":", "storage", "}", "if", "storage_class", ":", "pvc", ".", "metadata", ".", "annotations", ".", "update", "(", "{", "\"volume.beta.kubernetes.io/storage-class\"", ":", "storage_class", "}", ")", "pvc", ".", "spec", ".", "storage_class_name", "=", "storage_class", "return", "pvc" ]
Make a k8s pvc specification for running a user notebook. Parameters ---------- name: Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. storage_class: String of the name of the k8s Storage Class to use. access_modes: A list specifying which access modes the pod should have for the pvc storage: The amount of storage needed for the pvc
[ "Make", "a", "k8s", "pvc", "specification", "for", "running", "a", "user", "notebook", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/objects.py#L393-L433
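A hypothetical call to make_pvc (the import path follows this record's kubespawner/objects.py and requires the kubernetes client package; the name, storage class and sizes are illustrative values):

from kubespawner.objects import make_pvc

pvc = make_pvc(
    name="claim-alice",
    storage_class="standard",
    access_modes=["ReadWriteOnce"],
    storage="10Gi",
    labels={"heritage": "jupyterhub"},
)
print(pvc.spec.resources.requests)  # {'storage': '10Gi'}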
247,041
jupyterhub/kubespawner
kubespawner/objects.py
make_ingress
def make_ingress( name, routespec, target, data ): """ Returns an ingress, service, endpoint object that'll work for this service """ # move beta imports here, # which are more sensitive to kubernetes version # and will change when they move out of beta from kubernetes.client.models import ( V1beta1Ingress, V1beta1IngressSpec, V1beta1IngressRule, V1beta1HTTPIngressRuleValue, V1beta1HTTPIngressPath, V1beta1IngressBackend, ) meta = V1ObjectMeta( name=name, annotations={ 'hub.jupyter.org/proxy-data': json.dumps(data), 'hub.jupyter.org/proxy-routespec': routespec, 'hub.jupyter.org/proxy-target': target }, labels={ 'heritage': 'jupyterhub', 'component': 'singleuser-server', 'hub.jupyter.org/proxy-route': 'true' } ) if routespec.startswith('/'): host = None path = routespec else: host, path = routespec.split('/', 1) target_parts = urlparse(target) target_ip = target_parts.hostname target_port = target_parts.port target_is_ip = re.match('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', target_ip) is not None # Make endpoint object if target_is_ip: endpoint = V1Endpoints( kind='Endpoints', metadata=meta, subsets=[ V1EndpointSubset( addresses=[V1EndpointAddress(ip=target_ip)], ports=[V1EndpointPort(port=target_port)] ) ] ) else: endpoint = None # Make service object if target_is_ip: service = V1Service( kind='Service', metadata=meta, spec=V1ServiceSpec( type='ClusterIP', external_name='', ports=[V1ServicePort(port=target_port, target_port=target_port)] ) ) else: service = V1Service( kind='Service', metadata=meta, spec=V1ServiceSpec( type='ExternalName', external_name=target_ip, cluster_ip='', ports=[V1ServicePort(port=target_port, target_port=target_port)], ), ) # Make Ingress object ingress = V1beta1Ingress( kind='Ingress', metadata=meta, spec=V1beta1IngressSpec( rules=[V1beta1IngressRule( host=host, http=V1beta1HTTPIngressRuleValue( paths=[ V1beta1HTTPIngressPath( path=path, backend=V1beta1IngressBackend( service_name=name, service_port=target_port ) ) ] ) )] ) ) return endpoint, service, ingress
python
def make_ingress( name, routespec, target, data ): # move beta imports here, # which are more sensitive to kubernetes version # and will change when they move out of beta from kubernetes.client.models import ( V1beta1Ingress, V1beta1IngressSpec, V1beta1IngressRule, V1beta1HTTPIngressRuleValue, V1beta1HTTPIngressPath, V1beta1IngressBackend, ) meta = V1ObjectMeta( name=name, annotations={ 'hub.jupyter.org/proxy-data': json.dumps(data), 'hub.jupyter.org/proxy-routespec': routespec, 'hub.jupyter.org/proxy-target': target }, labels={ 'heritage': 'jupyterhub', 'component': 'singleuser-server', 'hub.jupyter.org/proxy-route': 'true' } ) if routespec.startswith('/'): host = None path = routespec else: host, path = routespec.split('/', 1) target_parts = urlparse(target) target_ip = target_parts.hostname target_port = target_parts.port target_is_ip = re.match('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', target_ip) is not None # Make endpoint object if target_is_ip: endpoint = V1Endpoints( kind='Endpoints', metadata=meta, subsets=[ V1EndpointSubset( addresses=[V1EndpointAddress(ip=target_ip)], ports=[V1EndpointPort(port=target_port)] ) ] ) else: endpoint = None # Make service object if target_is_ip: service = V1Service( kind='Service', metadata=meta, spec=V1ServiceSpec( type='ClusterIP', external_name='', ports=[V1ServicePort(port=target_port, target_port=target_port)] ) ) else: service = V1Service( kind='Service', metadata=meta, spec=V1ServiceSpec( type='ExternalName', external_name=target_ip, cluster_ip='', ports=[V1ServicePort(port=target_port, target_port=target_port)], ), ) # Make Ingress object ingress = V1beta1Ingress( kind='Ingress', metadata=meta, spec=V1beta1IngressSpec( rules=[V1beta1IngressRule( host=host, http=V1beta1HTTPIngressRuleValue( paths=[ V1beta1HTTPIngressPath( path=path, backend=V1beta1IngressBackend( service_name=name, service_port=target_port ) ) ] ) )] ) ) return endpoint, service, ingress
[ "def", "make_ingress", "(", "name", ",", "routespec", ",", "target", ",", "data", ")", ":", "# move beta imports here,", "# which are more sensitive to kubernetes version", "# and will change when they move out of beta", "from", "kubernetes", ".", "client", ".", "models", "import", "(", "V1beta1Ingress", ",", "V1beta1IngressSpec", ",", "V1beta1IngressRule", ",", "V1beta1HTTPIngressRuleValue", ",", "V1beta1HTTPIngressPath", ",", "V1beta1IngressBackend", ",", ")", "meta", "=", "V1ObjectMeta", "(", "name", "=", "name", ",", "annotations", "=", "{", "'hub.jupyter.org/proxy-data'", ":", "json", ".", "dumps", "(", "data", ")", ",", "'hub.jupyter.org/proxy-routespec'", ":", "routespec", ",", "'hub.jupyter.org/proxy-target'", ":", "target", "}", ",", "labels", "=", "{", "'heritage'", ":", "'jupyterhub'", ",", "'component'", ":", "'singleuser-server'", ",", "'hub.jupyter.org/proxy-route'", ":", "'true'", "}", ")", "if", "routespec", ".", "startswith", "(", "'/'", ")", ":", "host", "=", "None", "path", "=", "routespec", "else", ":", "host", ",", "path", "=", "routespec", ".", "split", "(", "'/'", ",", "1", ")", "target_parts", "=", "urlparse", "(", "target", ")", "target_ip", "=", "target_parts", ".", "hostname", "target_port", "=", "target_parts", ".", "port", "target_is_ip", "=", "re", ".", "match", "(", "'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$'", ",", "target_ip", ")", "is", "not", "None", "# Make endpoint object", "if", "target_is_ip", ":", "endpoint", "=", "V1Endpoints", "(", "kind", "=", "'Endpoints'", ",", "metadata", "=", "meta", ",", "subsets", "=", "[", "V1EndpointSubset", "(", "addresses", "=", "[", "V1EndpointAddress", "(", "ip", "=", "target_ip", ")", "]", ",", "ports", "=", "[", "V1EndpointPort", "(", "port", "=", "target_port", ")", "]", ")", "]", ")", "else", ":", "endpoint", "=", "None", "# Make service object", "if", "target_is_ip", ":", "service", "=", "V1Service", "(", "kind", "=", "'Service'", ",", "metadata", "=", "meta", ",", "spec", "=", "V1ServiceSpec", "(", "type", "=", "'ClusterIP'", ",", "external_name", "=", "''", ",", "ports", "=", "[", "V1ServicePort", "(", "port", "=", "target_port", ",", "target_port", "=", "target_port", ")", "]", ")", ")", "else", ":", "service", "=", "V1Service", "(", "kind", "=", "'Service'", ",", "metadata", "=", "meta", ",", "spec", "=", "V1ServiceSpec", "(", "type", "=", "'ExternalName'", ",", "external_name", "=", "target_ip", ",", "cluster_ip", "=", "''", ",", "ports", "=", "[", "V1ServicePort", "(", "port", "=", "target_port", ",", "target_port", "=", "target_port", ")", "]", ",", ")", ",", ")", "# Make Ingress object", "ingress", "=", "V1beta1Ingress", "(", "kind", "=", "'Ingress'", ",", "metadata", "=", "meta", ",", "spec", "=", "V1beta1IngressSpec", "(", "rules", "=", "[", "V1beta1IngressRule", "(", "host", "=", "host", ",", "http", "=", "V1beta1HTTPIngressRuleValue", "(", "paths", "=", "[", "V1beta1HTTPIngressPath", "(", "path", "=", "path", ",", "backend", "=", "V1beta1IngressBackend", "(", "service_name", "=", "name", ",", "service_port", "=", "target_port", ")", ")", "]", ")", ")", "]", ")", ")", "return", "endpoint", ",", "service", ",", "ingress" ]
Return the endpoint, service, and ingress objects that will work for this service
[ "Return", "the", "endpoint", "service", "and", "ingress", "objects", "that", "will", "work", "for", "this", "service" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/objects.py#L435-L541
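A hypothetical call to make_ingress (illustrative; it needs a kubernetes client version that still ships the v1beta1 ingress models, as this pinned sha does). An IP target yields a ClusterIP service plus a manual Endpoints object, while a hostname target yields an ExternalName service and no Endpoints:

from kubespawner.objects import make_ingress

endpoint, service, ingress = make_ingress(
    name="jupyter-alice",
    routespec="example.com/user/alice",
    target="http://10.0.0.12:8888",
    data={"user": "alice"},
)
print(endpoint is not None)  # True -- the target is an IP address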
247,042
jupyterhub/kubespawner
kubespawner/clients.py
shared_client
def shared_client(ClientType, *args, **kwargs): """Return a single shared kubernetes client instance A weak reference to the instance is cached, so that concurrent calls to shared_client will all return the same instance until all references to the client are cleared. """ kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs)) cache_key = (ClientType, args, kwarg_key) client = None if cache_key in _client_cache: # resolve cached weakref # client can still be None after this! client = _client_cache[cache_key]() if client is None: Client = getattr(kubernetes.client, ClientType) client = Client(*args, **kwargs) # cache weakref so that clients can be garbage collected _client_cache[cache_key] = weakref.ref(client) return client
python
def shared_client(ClientType, *args, **kwargs): kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs)) cache_key = (ClientType, args, kwarg_key) client = None if cache_key in _client_cache: # resolve cached weakref # client can still be None after this! client = _client_cache[cache_key]() if client is None: Client = getattr(kubernetes.client, ClientType) client = Client(*args, **kwargs) # cache weakref so that clients can be garbage collected _client_cache[cache_key] = weakref.ref(client) return client
[ "def", "shared_client", "(", "ClientType", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwarg_key", "=", "tuple", "(", "(", "key", ",", "kwargs", "[", "key", "]", ")", "for", "key", "in", "sorted", "(", "kwargs", ")", ")", "cache_key", "=", "(", "ClientType", ",", "args", ",", "kwarg_key", ")", "client", "=", "None", "if", "cache_key", "in", "_client_cache", ":", "# resolve cached weakref", "# client can still be None after this!", "client", "=", "_client_cache", "[", "cache_key", "]", "(", ")", "if", "client", "is", "None", ":", "Client", "=", "getattr", "(", "kubernetes", ".", "client", ",", "ClientType", ")", "client", "=", "Client", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# cache weakref so that clients can be garbage collected", "_client_cache", "[", "cache_key", "]", "=", "weakref", ".", "ref", "(", "client", ")", "return", "client" ]
Return a single shared kubernetes client instance A weak reference to the instance is cached, so that concurrent calls to shared_client will all return the same instance until all references to the client are cleared.
[ "Return", "a", "single", "shared", "kubernetes", "client", "instance" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/clients.py#L25-L46
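A sketch of shared_client's weakref caching (illustrative; it assumes the kubernetes package is importable, since the factory resolves names like "CoreV1Api" on kubernetes.client):

from kubespawner.clients import shared_client

c1 = shared_client("CoreV1Api")
c2 = shared_client("CoreV1Api")
print(c1 is c2)  # True -- same instance while a strong reference exists

# Once every strong reference is dropped, the weakref can be collected
# and a later call may construct a brand-new client.
del c1, c2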
247,043
jupyterhub/kubespawner
kubespawner/utils.py
generate_hashed_slug
def generate_hashed_slug(slug, limit=63, hash_length=6): """ Generate a unique name that's within a certain length limit Most k8s objects have a 63 char name limit. We wanna be able to compress larger names down to that if required, while still maintaining some amount of legibility about what the objects really are. If the length of the slug is shorter than the limit - hash_length, we just return slug directly. If not, we truncate the slug to (limit - hash_length) characters, hash the slug and append hash_length characters from the hash to the end of the truncated slug. This ensures that these names are always unique no matter what. """ if len(slug) < (limit - hash_length): return slug slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest() return '{prefix}-{hash}'.format( prefix=slug[:limit - hash_length - 1], hash=slug_hash[:hash_length], ).lower()
python
def generate_hashed_slug(slug, limit=63, hash_length=6): if len(slug) < (limit - hash_length): return slug slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest() return '{prefix}-{hash}'.format( prefix=slug[:limit - hash_length - 1], hash=slug_hash[:hash_length], ).lower()
[ "def", "generate_hashed_slug", "(", "slug", ",", "limit", "=", "63", ",", "hash_length", "=", "6", ")", ":", "if", "len", "(", "slug", ")", "<", "(", "limit", "-", "hash_length", ")", ":", "return", "slug", "slug_hash", "=", "hashlib", ".", "sha256", "(", "slug", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "return", "'{prefix}-{hash}'", ".", "format", "(", "prefix", "=", "slug", "[", ":", "limit", "-", "hash_length", "-", "1", "]", ",", "hash", "=", "slug_hash", "[", ":", "hash_length", "]", ",", ")", ".", "lower", "(", ")" ]
Generate a unique name that's within a certain length limit Most k8s objects have a 63 char name limit. We want to be able to compress larger names down to that if required, while still maintaining some amount of legibility about what the objects really are. If the length of the slug is shorter than the limit - hash_length, we just return the slug directly. If not, we truncate the slug to (limit - hash_length) characters, hash the slug and append hash_length characters from the hash to the end of the truncated slug. This ensures that these names are always unique no matter what.
[ "Generate", "a", "unique", "name", "that", "s", "within", "a", "certain", "length", "limit" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L7-L29
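A sketch of the truncate-and-hash behavior described in the docstring (the slugs are made up):

from kubespawner.utils import generate_hashed_slug

print(generate_hashed_slug("jupyter-alice"))  # short: returned unchanged

slug = generate_hashed_slug("jupyter-" + "a" * 100)
print(len(slug))  # 63 -- truncated prefix + '-' + 6 sha256 hex chars
print(slug[-7])   # '-'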
247,044
jupyterhub/kubespawner
kubespawner/utils.py
get_k8s_model
def get_k8s_model(model_type, model_dict): """ Returns an instance of type specified model_type from an model instance or represantative dictionary. """ model_dict = copy.deepcopy(model_dict) if isinstance(model_dict, model_type): return model_dict elif isinstance(model_dict, dict): # convert the dictionaries camelCase keys to snake_case keys model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict) # use the dictionary keys to initialize a model of given type return model_type(**model_dict) else: raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__))
python
def get_k8s_model(model_type, model_dict): model_dict = copy.deepcopy(model_dict) if isinstance(model_dict, model_type): return model_dict elif isinstance(model_dict, dict): # convert the dictionaries camelCase keys to snake_case keys model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict) # use the dictionary keys to initialize a model of given type return model_type(**model_dict) else: raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__))
[ "def", "get_k8s_model", "(", "model_type", ",", "model_dict", ")", ":", "model_dict", "=", "copy", ".", "deepcopy", "(", "model_dict", ")", "if", "isinstance", "(", "model_dict", ",", "model_type", ")", ":", "return", "model_dict", "elif", "isinstance", "(", "model_dict", ",", "dict", ")", ":", "# convert the dictionaries camelCase keys to snake_case keys", "model_dict", "=", "_map_dict_keys_to_model_attributes", "(", "model_type", ",", "model_dict", ")", "# use the dictionary keys to initialize a model of given type", "return", "model_type", "(", "*", "*", "model_dict", ")", "else", ":", "raise", "AttributeError", "(", "\"Expected object of type 'dict' (or '{}') but got '{}'.\"", ".", "format", "(", "model_type", ".", "__name__", ",", "type", "(", "model_dict", ")", ".", "__name__", ")", ")" ]
Returns an instance of the specified model_type from a model instance or representative dictionary.
[ "Returns", "an", "instance", "of", "the", "specified", "model_type", "from", "a", "model", "instance", "or", "representative", "dictionary", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L75-L90
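A sketch of get_k8s_model mapping a camelCase dict onto a kubernetes model class (V1Toleration is just one convenient model to demonstrate with):

from kubernetes.client.models import V1Toleration

from kubespawner.utils import get_k8s_model

toleration = get_k8s_model(
    V1Toleration,
    {"key": "dedicated", "operator": "Equal",
     "value": "user", "effect": "NoSchedule"},
)
print(type(toleration).__name__)  # V1Toleration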
247,045
jupyterhub/kubespawner
kubespawner/utils.py
_get_k8s_model_dict
def _get_k8s_model_dict(model_type, model): """ Returns a dictionary representation of a provided model type """ model = copy.deepcopy(model) if isinstance(model, model_type): return model.to_dict() elif isinstance(model, dict): return _map_dict_keys_to_model_attributes(model_type, model) else: raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__))
python
def _get_k8s_model_dict(model_type, model): model = copy.deepcopy(model) if isinstance(model, model_type): return model.to_dict() elif isinstance(model, dict): return _map_dict_keys_to_model_attributes(model_type, model) else: raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__))
[ "def", "_get_k8s_model_dict", "(", "model_type", ",", "model", ")", ":", "model", "=", "copy", ".", "deepcopy", "(", "model", ")", "if", "isinstance", "(", "model", ",", "model_type", ")", ":", "return", "model", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "model", ",", "dict", ")", ":", "return", "_map_dict_keys_to_model_attributes", "(", "model_type", ",", "model", ")", "else", ":", "raise", "AttributeError", "(", "\"Expected object of type '{}' (or 'dict') but got '{}'.\"", ".", "format", "(", "model_type", ".", "__name__", ",", "type", "(", "model", ")", ".", "__name__", ")", ")" ]
Returns a dictionary representation of the provided model
[ "Returns", "a", "dictionary", "representation", "of", "the", "provided", "model" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L92-L103
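A sketch of the inverse helper _get_k8s_model_dict (illustrative; note it is private API, so the import is for demonstration only):

from kubernetes.client.models import V1Toleration

from kubespawner.utils import _get_k8s_model_dict

d = _get_k8s_model_dict(V1Toleration, V1Toleration(key="dedicated"))
print(d["key"])  # 'dedicated'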
247,046
jupyterhub/kubespawner
kubespawner/reflector.py
NamespacedResourceReflector._list_and_update
def _list_and_update(self): """ Update current list of resources by doing a full fetch. Overwrites all current resource info. """ initial_resources = getattr(self.api, self.list_method_name)( self.namespace, label_selector=self.label_selector, field_selector=self.field_selector, _request_timeout=self.request_timeout, ) # This is an atomic operation on the dictionary! self.resources = {p.metadata.name: p for p in initial_resources.items} # return the resource version so we can hook up a watch return initial_resources.metadata.resource_version
python
def _list_and_update(self): initial_resources = getattr(self.api, self.list_method_name)( self.namespace, label_selector=self.label_selector, field_selector=self.field_selector, _request_timeout=self.request_timeout, ) # This is an atomic operation on the dictionary! self.resources = {p.metadata.name: p for p in initial_resources.items} # return the resource version so we can hook up a watch return initial_resources.metadata.resource_version
[ "def", "_list_and_update", "(", "self", ")", ":", "initial_resources", "=", "getattr", "(", "self", ".", "api", ",", "self", ".", "list_method_name", ")", "(", "self", ".", "namespace", ",", "label_selector", "=", "self", ".", "label_selector", ",", "field_selector", "=", "self", ".", "field_selector", ",", "_request_timeout", "=", "self", ".", "request_timeout", ",", ")", "# This is an atomic operation on the dictionary!", "self", ".", "resources", "=", "{", "p", ".", "metadata", ".", "name", ":", "p", "for", "p", "in", "initial_resources", ".", "items", "}", "# return the resource version so we can hook up a watch", "return", "initial_resources", ".", "metadata", ".", "resource_version" ]
Update the current list of resources by doing a full fetch. Overwrites all current resource info.
[ "Update", "the", "current", "list", "of", "resources", "by", "doing", "a", "full", "fetch", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L147-L162
247,047
jupyterhub/kubespawner
kubespawner/reflector.py
NamespacedResourceReflector._watch_and_update
def _watch_and_update(self): """ Keeps the current list of resources up-to-date This method is to be run not on the main thread! We first fetch the list of current resources, and store that. Then we register to be notified of changes to those resources, and keep our local store up-to-date based on these notifications. We also perform exponential backoff, giving up after we hit 32s wait time. This should protect against network connections dropping and intermittent unavailability of the api-server. Every time we recover from an exception we also do a full fetch, to pick up changes that might've been missed in the time we were not doing a watch. Note that we're playing a bit with fire here, by updating a dictionary in this thread while it is probably being read in another thread without using locks! However, dictionary access itself is atomic, and as long as we don't try to mutate them (do a 'fetch / modify / update' cycle on them), we should be ok! """ selectors = [] log_name = "" if self.label_selector: selectors.append("label selector=%r" % self.label_selector) if self.field_selector: selectors.append("field selector=%r" % self.field_selector) log_selector = ', '.join(selectors) cur_delay = 0.1 self.log.info( "watching for %s with %s in namespace %s", self.kind, log_selector, self.namespace, ) while True: self.log.debug("Connecting %s watcher", self.kind) start = time.monotonic() w = watch.Watch() try: resource_version = self._list_and_update() if not self.first_load_future.done(): # signal that we've loaded our initial data self.first_load_future.set_result(None) watch_args = { 'namespace': self.namespace, 'label_selector': self.label_selector, 'field_selector': self.field_selector, 'resource_version': resource_version, } if self.request_timeout: # set network receive timeout watch_args['_request_timeout'] = self.request_timeout if self.timeout_seconds: # set watch timeout watch_args['timeout_seconds'] = self.timeout_seconds # in case of timeout_seconds, the w.stream just exits (no exception thrown) # -> we stop the watcher and start a new one for ev in w.stream( getattr(self.api, self.list_method_name), **watch_args ): cur_delay = 0.1 resource = ev['object'] if ev['type'] == 'DELETED': # This is an atomic delete operation on the dictionary! self.resources.pop(resource.metadata.name, None) else: # This is an atomic operation on the dictionary! self.resources[resource.metadata.name] = resource if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break watch_duration = time.monotonic() - start if watch_duration >= self.restart_seconds: self.log.debug( "Restarting %s watcher after %i seconds", self.kind, watch_duration, ) break except ReadTimeoutError: # network read time out, just continue and restart the watch # this could be due to a network problem or just low activity self.log.warning("Read timeout watching %s, reconnecting", self.kind) continue except Exception: cur_delay = cur_delay * 2 if cur_delay > 30: self.log.exception("Watching resources never recovered, giving up") if self.on_failure: self.on_failure() return self.log.exception("Error when watching resources, retrying in %ss", cur_delay) time.sleep(cur_delay) continue else: # no events on watch, reconnect self.log.debug("%s watcher timeout", self.kind) finally: w.stop() if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break self.log.warning("%s watcher finished", self.kind)
python
def _watch_and_update(self): selectors = [] log_name = "" if self.label_selector: selectors.append("label selector=%r" % self.label_selector) if self.field_selector: selectors.append("field selector=%r" % self.field_selector) log_selector = ', '.join(selectors) cur_delay = 0.1 self.log.info( "watching for %s with %s in namespace %s", self.kind, log_selector, self.namespace, ) while True: self.log.debug("Connecting %s watcher", self.kind) start = time.monotonic() w = watch.Watch() try: resource_version = self._list_and_update() if not self.first_load_future.done(): # signal that we've loaded our initial data self.first_load_future.set_result(None) watch_args = { 'namespace': self.namespace, 'label_selector': self.label_selector, 'field_selector': self.field_selector, 'resource_version': resource_version, } if self.request_timeout: # set network receive timeout watch_args['_request_timeout'] = self.request_timeout if self.timeout_seconds: # set watch timeout watch_args['timeout_seconds'] = self.timeout_seconds # in case of timeout_seconds, the w.stream just exits (no exception thrown) # -> we stop the watcher and start a new one for ev in w.stream( getattr(self.api, self.list_method_name), **watch_args ): cur_delay = 0.1 resource = ev['object'] if ev['type'] == 'DELETED': # This is an atomic delete operation on the dictionary! self.resources.pop(resource.metadata.name, None) else: # This is an atomic operation on the dictionary! self.resources[resource.metadata.name] = resource if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break watch_duration = time.monotonic() - start if watch_duration >= self.restart_seconds: self.log.debug( "Restarting %s watcher after %i seconds", self.kind, watch_duration, ) break except ReadTimeoutError: # network read time out, just continue and restart the watch # this could be due to a network problem or just low activity self.log.warning("Read timeout watching %s, reconnecting", self.kind) continue except Exception: cur_delay = cur_delay * 2 if cur_delay > 30: self.log.exception("Watching resources never recovered, giving up") if self.on_failure: self.on_failure() return self.log.exception("Error when watching resources, retrying in %ss", cur_delay) time.sleep(cur_delay) continue else: # no events on watch, reconnect self.log.debug("%s watcher timeout", self.kind) finally: w.stop() if self._stop_event.is_set(): self.log.info("%s watcher stopped", self.kind) break self.log.warning("%s watcher finished", self.kind)
[ "def", "_watch_and_update", "(", "self", ")", ":", "selectors", "=", "[", "]", "log_name", "=", "\"\"", "if", "self", ".", "label_selector", ":", "selectors", ".", "append", "(", "\"label selector=%r\"", "%", "self", ".", "label_selector", ")", "if", "self", ".", "field_selector", ":", "selectors", ".", "append", "(", "\"field selector=%r\"", "%", "self", ".", "field_selector", ")", "log_selector", "=", "', '", ".", "join", "(", "selectors", ")", "cur_delay", "=", "0.1", "self", ".", "log", ".", "info", "(", "\"watching for %s with %s in namespace %s\"", ",", "self", ".", "kind", ",", "log_selector", ",", "self", ".", "namespace", ",", ")", "while", "True", ":", "self", ".", "log", ".", "debug", "(", "\"Connecting %s watcher\"", ",", "self", ".", "kind", ")", "start", "=", "time", ".", "monotonic", "(", ")", "w", "=", "watch", ".", "Watch", "(", ")", "try", ":", "resource_version", "=", "self", ".", "_list_and_update", "(", ")", "if", "not", "self", ".", "first_load_future", ".", "done", "(", ")", ":", "# signal that we've loaded our initial data", "self", ".", "first_load_future", ".", "set_result", "(", "None", ")", "watch_args", "=", "{", "'namespace'", ":", "self", ".", "namespace", ",", "'label_selector'", ":", "self", ".", "label_selector", ",", "'field_selector'", ":", "self", ".", "field_selector", ",", "'resource_version'", ":", "resource_version", ",", "}", "if", "self", ".", "request_timeout", ":", "# set network receive timeout", "watch_args", "[", "'_request_timeout'", "]", "=", "self", ".", "request_timeout", "if", "self", ".", "timeout_seconds", ":", "# set watch timeout", "watch_args", "[", "'timeout_seconds'", "]", "=", "self", ".", "timeout_seconds", "# in case of timeout_seconds, the w.stream just exits (no exception thrown)", "# -> we stop the watcher and start a new one", "for", "ev", "in", "w", ".", "stream", "(", "getattr", "(", "self", ".", "api", ",", "self", ".", "list_method_name", ")", ",", "*", "*", "watch_args", ")", ":", "cur_delay", "=", "0.1", "resource", "=", "ev", "[", "'object'", "]", "if", "ev", "[", "'type'", "]", "==", "'DELETED'", ":", "# This is an atomic delete operation on the dictionary!", "self", ".", "resources", ".", "pop", "(", "resource", ".", "metadata", ".", "name", ",", "None", ")", "else", ":", "# This is an atomic operation on the dictionary!", "self", ".", "resources", "[", "resource", ".", "metadata", ".", "name", "]", "=", "resource", "if", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "self", ".", "log", ".", "info", "(", "\"%s watcher stopped\"", ",", "self", ".", "kind", ")", "break", "watch_duration", "=", "time", ".", "monotonic", "(", ")", "-", "start", "if", "watch_duration", ">=", "self", ".", "restart_seconds", ":", "self", ".", "log", ".", "debug", "(", "\"Restarting %s watcher after %i seconds\"", ",", "self", ".", "kind", ",", "watch_duration", ",", ")", "break", "except", "ReadTimeoutError", ":", "# network read time out, just continue and restart the watch", "# this could be due to a network problem or just low activity", "self", ".", "log", ".", "warning", "(", "\"Read timeout watching %s, reconnecting\"", ",", "self", ".", "kind", ")", "continue", "except", "Exception", ":", "cur_delay", "=", "cur_delay", "*", "2", "if", "cur_delay", ">", "30", ":", "self", ".", "log", ".", "exception", "(", "\"Watching resources never recovered, giving up\"", ")", "if", "self", ".", "on_failure", ":", "self", ".", "on_failure", "(", ")", "return", "self", ".", "log", ".", "exception", "(", "\"Error when watching 
resources, retrying in %ss\"", ",", "cur_delay", ")", "time", ".", "sleep", "(", "cur_delay", ")", "continue", "else", ":", "# no events on watch, reconnect", "self", ".", "log", ".", "debug", "(", "\"%s watcher timeout\"", ",", "self", ".", "kind", ")", "finally", ":", "w", ".", "stop", "(", ")", "if", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "self", ".", "log", ".", "info", "(", "\"%s watcher stopped\"", ",", "self", ".", "kind", ")", "break", "self", ".", "log", ".", "warning", "(", "\"%s watcher finished\"", ",", "self", ".", "kind", ")" ]
Keeps the current list of resources up-to-date This method is not meant to run on the main thread! We first fetch the list of current resources, and store that. Then we register to be notified of changes to those resources, and keep our local store up-to-date based on these notifications. We also perform exponential backoff, giving up after we hit 32s wait time. This should protect against network connections dropping and intermittent unavailability of the api-server. Every time we recover from an exception we also do a full fetch, to pick up changes that might've been missed in the time we were not doing a watch. Note that we're playing a bit with fire here, by updating a dictionary in this thread while it is probably being read in another thread without using locks! However, dictionary access itself is atomic, and as long as we don't try to mutate them (do a 'fetch / modify / update' cycle on them), we should be ok!
[ "Keeps", "the", "current", "list", "of", "resources", "up", "-", "to", "-", "date" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L164-L269
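The docstring above describes the reconnect strategy in words; as a minimal sketch, the same exponential-backoff loop can be written standalone. `watch_once` and `on_failure` are hypothetical stand-ins for the body of `_watch_and_update` and the failure callback; this is not the library's API.

import time

def watch_with_backoff(watch_once, on_failure=None, max_delay=30):
    # watch_once: hypothetical callable that runs one watch cycle and raises on error
    delay = 0.1
    while True:
        try:
            watch_once()
            delay = 0.1  # any successful cycle resets the backoff
        except Exception:
            delay *= 2
            if delay > max_delay:  # give up once the wait time exceeds the cap
                if on_failure:
                    on_failure()
                return
            time.sleep(delay)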
247,048
jupyterhub/kubespawner
kubespawner/reflector.py
NamespacedResourceReflector.start
def start(self): """ Start the reflection process! We'll do a blocking read of all resources first, so that we don't race with any operations that are checking the state of the pod store - such as polls. This should be called only once at the start of program initialization (when the singleton is being created), and not afterwards! """ if hasattr(self, 'watch_thread'): raise ValueError('Thread watching for resources is already running') self._list_and_update() self.watch_thread = threading.Thread(target=self._watch_and_update) # If the watch_thread is only thread left alive, exit app self.watch_thread.daemon = True self.watch_thread.start()
python
def start(self): if hasattr(self, 'watch_thread'): raise ValueError('Thread watching for resources is already running') self._list_and_update() self.watch_thread = threading.Thread(target=self._watch_and_update) # If the watch_thread is only thread left alive, exit app self.watch_thread.daemon = True self.watch_thread.start()
[ "def", "start", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'watch_thread'", ")", ":", "raise", "ValueError", "(", "'Thread watching for resources is already running'", ")", "self", ".", "_list_and_update", "(", ")", "self", ".", "watch_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_watch_and_update", ")", "# If the watch_thread is only thread left alive, exit app", "self", ".", "watch_thread", ".", "daemon", "=", "True", "self", ".", "watch_thread", ".", "start", "(", ")" ]
Start the reflection process! We'll do a blocking read of all resources first, so that we don't race with any operations that are checking the state of the pod store - such as polls. This should be called only once at the start of program initialization (when the singleton is being created), and not afterwards!
[ "Start", "the", "reflection", "process!" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L271-L288
247,049
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.get_pod_manifest
def get_pod_manifest(self): """ Make a pod manifest that will spawn current user's notebook pod. """ if callable(self.uid): uid = yield gen.maybe_future(self.uid(self)) else: uid = self.uid if callable(self.gid): gid = yield gen.maybe_future(self.gid(self)) else: gid = self.gid if callable(self.fs_gid): fs_gid = yield gen.maybe_future(self.fs_gid(self)) else: fs_gid = self.fs_gid if callable(self.supplemental_gids): supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self)) else: supplemental_gids = self.supplemental_gids if self.cmd: real_cmd = self.cmd + self.get_args() else: real_cmd = None labels = self._build_pod_labels(self._expand_all(self.extra_labels)) annotations = self._build_common_annotations(self._expand_all(self.extra_annotations)) return make_pod( name=self.pod_name, cmd=real_cmd, port=self.port, image=self.image, image_pull_policy=self.image_pull_policy, image_pull_secret=self.image_pull_secrets, node_selector=self.node_selector, run_as_uid=uid, run_as_gid=gid, fs_gid=fs_gid, supplemental_gids=supplemental_gids, run_privileged=self.privileged, env=self.get_env(), volumes=self._expand_all(self.volumes), volume_mounts=self._expand_all(self.volume_mounts), working_dir=self.working_dir, labels=labels, annotations=annotations, cpu_limit=self.cpu_limit, cpu_guarantee=self.cpu_guarantee, mem_limit=self.mem_limit, mem_guarantee=self.mem_guarantee, extra_resource_limits=self.extra_resource_limits, extra_resource_guarantees=self.extra_resource_guarantees, lifecycle_hooks=self.lifecycle_hooks, init_containers=self._expand_all(self.init_containers), service_account=self.service_account, extra_container_config=self.extra_container_config, extra_pod_config=self.extra_pod_config, extra_containers=self._expand_all(self.extra_containers), scheduler_name=self.scheduler_name, tolerations=self.tolerations, node_affinity_preferred=self.node_affinity_preferred, node_affinity_required=self.node_affinity_required, pod_affinity_preferred=self.pod_affinity_preferred, pod_affinity_required=self.pod_affinity_required, pod_anti_affinity_preferred=self.pod_anti_affinity_preferred, pod_anti_affinity_required=self.pod_anti_affinity_required, priority_class_name=self.priority_class_name, logger=self.log, )
python
def get_pod_manifest(self): if callable(self.uid): uid = yield gen.maybe_future(self.uid(self)) else: uid = self.uid if callable(self.gid): gid = yield gen.maybe_future(self.gid(self)) else: gid = self.gid if callable(self.fs_gid): fs_gid = yield gen.maybe_future(self.fs_gid(self)) else: fs_gid = self.fs_gid if callable(self.supplemental_gids): supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self)) else: supplemental_gids = self.supplemental_gids if self.cmd: real_cmd = self.cmd + self.get_args() else: real_cmd = None labels = self._build_pod_labels(self._expand_all(self.extra_labels)) annotations = self._build_common_annotations(self._expand_all(self.extra_annotations)) return make_pod( name=self.pod_name, cmd=real_cmd, port=self.port, image=self.image, image_pull_policy=self.image_pull_policy, image_pull_secret=self.image_pull_secrets, node_selector=self.node_selector, run_as_uid=uid, run_as_gid=gid, fs_gid=fs_gid, supplemental_gids=supplemental_gids, run_privileged=self.privileged, env=self.get_env(), volumes=self._expand_all(self.volumes), volume_mounts=self._expand_all(self.volume_mounts), working_dir=self.working_dir, labels=labels, annotations=annotations, cpu_limit=self.cpu_limit, cpu_guarantee=self.cpu_guarantee, mem_limit=self.mem_limit, mem_guarantee=self.mem_guarantee, extra_resource_limits=self.extra_resource_limits, extra_resource_guarantees=self.extra_resource_guarantees, lifecycle_hooks=self.lifecycle_hooks, init_containers=self._expand_all(self.init_containers), service_account=self.service_account, extra_container_config=self.extra_container_config, extra_pod_config=self.extra_pod_config, extra_containers=self._expand_all(self.extra_containers), scheduler_name=self.scheduler_name, tolerations=self.tolerations, node_affinity_preferred=self.node_affinity_preferred, node_affinity_required=self.node_affinity_required, pod_affinity_preferred=self.pod_affinity_preferred, pod_affinity_required=self.pod_affinity_required, pod_anti_affinity_preferred=self.pod_anti_affinity_preferred, pod_anti_affinity_required=self.pod_anti_affinity_required, priority_class_name=self.priority_class_name, logger=self.log, )
[ "def", "get_pod_manifest", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "uid", ")", ":", "uid", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "uid", "(", "self", ")", ")", "else", ":", "uid", "=", "self", ".", "uid", "if", "callable", "(", "self", ".", "gid", ")", ":", "gid", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "gid", "(", "self", ")", ")", "else", ":", "gid", "=", "self", ".", "gid", "if", "callable", "(", "self", ".", "fs_gid", ")", ":", "fs_gid", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "fs_gid", "(", "self", ")", ")", "else", ":", "fs_gid", "=", "self", ".", "fs_gid", "if", "callable", "(", "self", ".", "supplemental_gids", ")", ":", "supplemental_gids", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "supplemental_gids", "(", "self", ")", ")", "else", ":", "supplemental_gids", "=", "self", ".", "supplemental_gids", "if", "self", ".", "cmd", ":", "real_cmd", "=", "self", ".", "cmd", "+", "self", ".", "get_args", "(", ")", "else", ":", "real_cmd", "=", "None", "labels", "=", "self", ".", "_build_pod_labels", "(", "self", ".", "_expand_all", "(", "self", ".", "extra_labels", ")", ")", "annotations", "=", "self", ".", "_build_common_annotations", "(", "self", ".", "_expand_all", "(", "self", ".", "extra_annotations", ")", ")", "return", "make_pod", "(", "name", "=", "self", ".", "pod_name", ",", "cmd", "=", "real_cmd", ",", "port", "=", "self", ".", "port", ",", "image", "=", "self", ".", "image", ",", "image_pull_policy", "=", "self", ".", "image_pull_policy", ",", "image_pull_secret", "=", "self", ".", "image_pull_secrets", ",", "node_selector", "=", "self", ".", "node_selector", ",", "run_as_uid", "=", "uid", ",", "run_as_gid", "=", "gid", ",", "fs_gid", "=", "fs_gid", ",", "supplemental_gids", "=", "supplemental_gids", ",", "run_privileged", "=", "self", ".", "privileged", ",", "env", "=", "self", ".", "get_env", "(", ")", ",", "volumes", "=", "self", ".", "_expand_all", "(", "self", ".", "volumes", ")", ",", "volume_mounts", "=", "self", ".", "_expand_all", "(", "self", ".", "volume_mounts", ")", ",", "working_dir", "=", "self", ".", "working_dir", ",", "labels", "=", "labels", ",", "annotations", "=", "annotations", ",", "cpu_limit", "=", "self", ".", "cpu_limit", ",", "cpu_guarantee", "=", "self", ".", "cpu_guarantee", ",", "mem_limit", "=", "self", ".", "mem_limit", ",", "mem_guarantee", "=", "self", ".", "mem_guarantee", ",", "extra_resource_limits", "=", "self", ".", "extra_resource_limits", ",", "extra_resource_guarantees", "=", "self", ".", "extra_resource_guarantees", ",", "lifecycle_hooks", "=", "self", ".", "lifecycle_hooks", ",", "init_containers", "=", "self", ".", "_expand_all", "(", "self", ".", "init_containers", ")", ",", "service_account", "=", "self", ".", "service_account", ",", "extra_container_config", "=", "self", ".", "extra_container_config", ",", "extra_pod_config", "=", "self", ".", "extra_pod_config", ",", "extra_containers", "=", "self", ".", "_expand_all", "(", "self", ".", "extra_containers", ")", ",", "scheduler_name", "=", "self", ".", "scheduler_name", ",", "tolerations", "=", "self", ".", "tolerations", ",", "node_affinity_preferred", "=", "self", ".", "node_affinity_preferred", ",", "node_affinity_required", "=", "self", ".", "node_affinity_required", ",", "pod_affinity_preferred", "=", "self", ".", "pod_affinity_preferred", ",", "pod_affinity_required", "=", "self", ".", "pod_affinity_required", ",", "pod_anti_affinity_preferred", "=", "self", ".", 
"pod_anti_affinity_preferred", ",", "pod_anti_affinity_required", "=", "self", ".", "pod_anti_affinity_required", ",", "priority_class_name", "=", "self", ".", "priority_class_name", ",", "logger", "=", "self", ".", "log", ",", ")" ]
Make a pod manifest that will spawn current user's notebook pod.
[ "Make", "a", "pod", "manifest", "that", "will", "spawn", "current", "user", "s", "notebook", "pod", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1303-L1376
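`get_pod_manifest` repeats the same resolution pattern four times: a configured value may be a plain value, a callable, or a callable returning a Future. A hedged sketch of that pattern as a standalone helper (the name `resolve` is hypothetical; the real code inlines this logic for each attribute):

from tornado import gen

@gen.coroutine
def resolve(value, spawner):
    # Plain values pass through; callables are invoked with the spawner,
    # and gen.maybe_future lets them return either a value or a Future.
    if callable(value):
        value = yield gen.maybe_future(value(spawner))
    raise gen.Return(value)

# e.g. uid could be configured as 1000 or as lambda spawner: 1000
# uid = yield resolve(self.uid, self)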
247,050
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.get_pvc_manifest
def get_pvc_manifest(self): """ Make a pvc manifest that will spawn current user's pvc. """ labels = self._build_common_labels(self._expand_all(self.storage_extra_labels)) labels.update({ 'component': 'singleuser-storage' }) annotations = self._build_common_annotations({}) return make_pvc( name=self.pvc_name, storage_class=self.storage_class, access_modes=self.storage_access_modes, storage=self.storage_capacity, labels=labels, annotations=annotations )
python
def get_pvc_manifest(self): labels = self._build_common_labels(self._expand_all(self.storage_extra_labels)) labels.update({ 'component': 'singleuser-storage' }) annotations = self._build_common_annotations({}) return make_pvc( name=self.pvc_name, storage_class=self.storage_class, access_modes=self.storage_access_modes, storage=self.storage_capacity, labels=labels, annotations=annotations )
[ "def", "get_pvc_manifest", "(", "self", ")", ":", "labels", "=", "self", ".", "_build_common_labels", "(", "self", ".", "_expand_all", "(", "self", ".", "storage_extra_labels", ")", ")", "labels", ".", "update", "(", "{", "'component'", ":", "'singleuser-storage'", "}", ")", "annotations", "=", "self", ".", "_build_common_annotations", "(", "{", "}", ")", "return", "make_pvc", "(", "name", "=", "self", ".", "pvc_name", ",", "storage_class", "=", "self", ".", "storage_class", ",", "access_modes", "=", "self", ".", "storage_access_modes", ",", "storage", "=", "self", ".", "storage_capacity", ",", "labels", "=", "labels", ",", "annotations", "=", "annotations", ")" ]
Make a pvc manifest that will spawn current user's pvc.
[ "Make", "a", "pvc", "manifest", "that", "will", "spawn", "current", "user", "s", "pvc", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1378-L1396
247,051
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.is_pod_running
def is_pod_running(self, pod): """ Check if the given pod is running pod must be a dictionary representing a Pod kubernetes API object. """ # FIXME: Validate if this is really the best way is_running = ( pod is not None and pod.status.phase == 'Running' and pod.status.pod_ip is not None and pod.metadata.deletion_timestamp is None and all([cs.ready for cs in pod.status.container_statuses]) ) return is_running
python
def is_pod_running(self, pod): # FIXME: Validate if this is really the best way is_running = ( pod is not None and pod.status.phase == 'Running' and pod.status.pod_ip is not None and pod.metadata.deletion_timestamp is None and all([cs.ready for cs in pod.status.container_statuses]) ) return is_running
[ "def", "is_pod_running", "(", "self", ",", "pod", ")", ":", "# FIXME: Validate if this is really the best way", "is_running", "=", "(", "pod", "is", "not", "None", "and", "pod", ".", "status", ".", "phase", "==", "'Running'", "and", "pod", ".", "status", ".", "pod_ip", "is", "not", "None", "and", "pod", ".", "metadata", ".", "deletion_timestamp", "is", "None", "and", "all", "(", "[", "cs", ".", "ready", "for", "cs", "in", "pod", ".", "status", ".", "container_statuses", "]", ")", ")", "return", "is_running" ]
Check if the given pod is running pod must be a dictionary representing a Pod kubernetes API object.
[ "Check", "if", "the", "given", "pod", "is", "running" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1398-L1412
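Note that despite the docstring's wording, the implementation reads attribute-style objects (as returned by the kubernetes Python client), not plain dictionaries. A self-contained check with SimpleNamespace stand-ins illustrates the predicate:

from types import SimpleNamespace as NS

def is_pod_running(pod):
    return (
        pod is not None
        and pod.status.phase == 'Running'
        and pod.status.pod_ip is not None
        and pod.metadata.deletion_timestamp is None
        and all(cs.ready for cs in pod.status.container_statuses)
    )

pod = NS(
    status=NS(phase='Running', pod_ip='10.0.0.5',
              container_statuses=[NS(ready=True)]),
    metadata=NS(deletion_timestamp=None),
)
assert is_pod_running(pod)                      # all conditions hold
pod.status.container_statuses[0].ready = False
assert not is_pod_running(pod)                  # one non-ready container fails it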
247,052
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.get_env
def get_env(self): """Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env """ env = super(KubeSpawner, self).get_env() # deprecate image env['JUPYTER_IMAGE_SPEC'] = self.image env['JUPYTER_IMAGE'] = self.image return env
python
def get_env(self): env = super(KubeSpawner, self).get_env() # deprecate image env['JUPYTER_IMAGE_SPEC'] = self.image env['JUPYTER_IMAGE'] = self.image return env
[ "def", "get_env", "(", "self", ")", ":", "env", "=", "super", "(", "KubeSpawner", ",", "self", ")", ".", "get_env", "(", ")", "# deprecate image", "env", "[", "'JUPYTER_IMAGE_SPEC'", "]", "=", "self", ".", "image", "env", "[", "'JUPYTER_IMAGE'", "]", "=", "self", ".", "image", "return", "env" ]
Return the environment dict to use for the Spawner. See also: jupyterhub.Spawner.get_env
[ "Return", "the", "environment", "dict", "to", "use", "for", "the", "Spawner", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1429-L1440
247,053
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.poll
def poll(self): """ Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running. """ # have to wait for first load of data before we have a valid answer if not self.pod_reflector.first_load_future.done(): yield self.pod_reflector.first_load_future data = self.pod_reflector.pods.get(self.pod_name, None) if data is not None: if data.status.phase == 'Pending': return None ctr_stat = data.status.container_statuses if ctr_stat is None: # No status, no container (we hope) # This seems to happen when a pod is idle-culled. return 1 for c in ctr_stat: # return exit code if notebook container has terminated if c.name == 'notebook': if c.state.terminated: # call self.stop to delete the pod if self.delete_stopped_pods: yield self.stop(now=True) return c.state.terminated.exit_code break # None means pod is running or starting up return None # pod doesn't exist or has been deleted return 1
python
def poll(self): # have to wait for first load of data before we have a valid answer if not self.pod_reflector.first_load_future.done(): yield self.pod_reflector.first_load_future data = self.pod_reflector.pods.get(self.pod_name, None) if data is not None: if data.status.phase == 'Pending': return None ctr_stat = data.status.container_statuses if ctr_stat is None: # No status, no container (we hope) # This seems to happen when a pod is idle-culled. return 1 for c in ctr_stat: # return exit code if notebook container has terminated if c.name == 'notebook': if c.state.terminated: # call self.stop to delete the pod if self.delete_stopped_pods: yield self.stop(now=True) return c.state.terminated.exit_code break # None means pod is running or starting up return None # pod doesn't exist or has been deleted return 1
[ "def", "poll", "(", "self", ")", ":", "# have to wait for first load of data before we have a valid answer", "if", "not", "self", ".", "pod_reflector", ".", "first_load_future", ".", "done", "(", ")", ":", "yield", "self", ".", "pod_reflector", ".", "first_load_future", "data", "=", "self", ".", "pod_reflector", ".", "pods", ".", "get", "(", "self", ".", "pod_name", ",", "None", ")", "if", "data", "is", "not", "None", ":", "if", "data", ".", "status", ".", "phase", "==", "'Pending'", ":", "return", "None", "ctr_stat", "=", "data", ".", "status", ".", "container_statuses", "if", "ctr_stat", "is", "None", ":", "# No status, no container (we hope)", "# This seems to happen when a pod is idle-culled.", "return", "1", "for", "c", "in", "ctr_stat", ":", "# return exit code if notebook container has terminated", "if", "c", ".", "name", "==", "'notebook'", ":", "if", "c", ".", "state", ".", "terminated", ":", "# call self.stop to delete the pod", "if", "self", ".", "delete_stopped_pods", ":", "yield", "self", ".", "stop", "(", "now", "=", "True", ")", "return", "c", ".", "state", ".", "terminated", ".", "exit_code", "break", "# None means pod is running or starting up", "return", "None", "# pod doesn't exist or has been deleted", "return", "1" ]
Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running.
[ "Check", "if", "the", "pod", "is", "still", "running", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1456-L1492
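The subtlety the docstring warns about — a clean exit returns 0, which is falsy — is easy to get wrong on the caller's side. A small sketch of how poll() results should be interpreted (the helper name is hypothetical):

def interpret_poll(status):
    # None -> still running; 0 -> clean exit; anything else -> exit code / unknown
    if status is None:          # must be an identity check, not `if not status`
        return 'running'
    return 'stopped with exit code %s' % status

assert interpret_poll(None) == 'running'
assert interpret_poll(0) == 'stopped with exit code 0'
assert interpret_poll(1) == 'stopped with exit code 1'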
247,054
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.events
def events(self): """Filter event-reflector to just our events Returns list of all events that match our pod_name since our ._last_event (if defined). ._last_event is set at the beginning of .start(). """ if not self.event_reflector: return [] events = [] for event in self.event_reflector.events: if event.involved_object.name != self.pod_name: # only consider events for my pod name continue if self._last_event and event.metadata.uid == self._last_event: # saw last_event marker, ignore any previous events # and only consider future events # only include events *after* our _last_event marker events = [] else: events.append(event) return events
python
def events(self): if not self.event_reflector: return [] events = [] for event in self.event_reflector.events: if event.involved_object.name != self.pod_name: # only consider events for my pod name continue if self._last_event and event.metadata.uid == self._last_event: # saw last_event marker, ignore any previous events # and only consider future events # only include events *after* our _last_event marker events = [] else: events.append(event) return events
[ "def", "events", "(", "self", ")", ":", "if", "not", "self", ".", "event_reflector", ":", "return", "[", "]", "events", "=", "[", "]", "for", "event", "in", "self", ".", "event_reflector", ".", "events", ":", "if", "event", ".", "involved_object", ".", "name", "!=", "self", ".", "pod_name", ":", "# only consider events for my pod name", "continue", "if", "self", ".", "_last_event", "and", "event", ".", "metadata", ".", "uid", "==", "self", ".", "_last_event", ":", "# saw last_event marker, ignore any previous events", "# and only consider future events", "# only include events *after* our _last_event marker", "events", "=", "[", "]", "else", ":", "events", ".", "append", "(", "event", ")", "return", "events" ]
Filter event-reflector to just our events Returns list of all events that match our pod_name since our ._last_event (if defined). ._last_event is set at the beginning of .start().
[ "Filter", "event", "-", "reflector", "to", "just", "our", "events" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1499-L1522
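The marker logic can be exercised in isolation: everything up to and including the `_last_event` uid is discarded by resetting the accumulator. A minimal sketch with a namedtuple stand-in for kubernetes event objects:

from collections import namedtuple

Event = namedtuple('Event', ['uid'])

def events_since(events, last_uid):
    out = []
    for ev in events:
        if last_uid and ev.uid == last_uid:
            out = []            # drop the marker and everything before it
        else:
            out.append(ev)
    return out

evs = [Event('a'), Event('b'), Event('c')]
assert [e.uid for e in events_since(evs, 'b')] == ['c']
assert [e.uid for e in events_since(evs, None)] == ['a', 'b', 'c']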
247,055
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner._start_reflector
def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs): """Start a shared reflector on the KubeSpawner class key: key for the reflector (e.g. 'pod' or 'events') Reflector: Reflector class to be instantiated kwargs: extra keyword-args to be relayed to ReflectorClass If replace=False and the pod reflector is already running, do nothing. If replace=True, a running pod reflector will be stopped and a new one started (for recovering from possible errors). """ main_loop = IOLoop.current() def on_reflector_failure(): self.log.critical( "%s reflector failed, halting Hub.", key.title(), ) sys.exit(1) previous_reflector = self.__class__.reflectors.get(key) if replace or not previous_reflector: self.__class__.reflectors[key] = ReflectorClass( parent=self, namespace=self.namespace, on_failure=on_reflector_failure, **kwargs, ) if replace and previous_reflector: # we replaced the reflector, stop the old one previous_reflector.stop() # return the current reflector return self.__class__.reflectors[key]
python
def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs): main_loop = IOLoop.current() def on_reflector_failure(): self.log.critical( "%s reflector failed, halting Hub.", key.title(), ) sys.exit(1) previous_reflector = self.__class__.reflectors.get(key) if replace or not previous_reflector: self.__class__.reflectors[key] = ReflectorClass( parent=self, namespace=self.namespace, on_failure=on_reflector_failure, **kwargs, ) if replace and previous_reflector: # we replaced the reflector, stop the old one previous_reflector.stop() # return the current reflector return self.__class__.reflectors[key]
[ "def", "_start_reflector", "(", "self", ",", "key", ",", "ReflectorClass", ",", "replace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "main_loop", "=", "IOLoop", ".", "current", "(", ")", "def", "on_reflector_failure", "(", ")", ":", "self", ".", "log", ".", "critical", "(", "\"%s reflector failed, halting Hub.\"", ",", "key", ".", "title", "(", ")", ",", ")", "sys", ".", "exit", "(", "1", ")", "previous_reflector", "=", "self", ".", "__class__", ".", "reflectors", ".", "get", "(", "key", ")", "if", "replace", "or", "not", "previous_reflector", ":", "self", ".", "__class__", ".", "reflectors", "[", "key", "]", "=", "ReflectorClass", "(", "parent", "=", "self", ",", "namespace", "=", "self", ".", "namespace", ",", "on_failure", "=", "on_reflector_failure", ",", "*", "*", "kwargs", ",", ")", "if", "replace", "and", "previous_reflector", ":", "# we replaced the reflector, stop the old one", "previous_reflector", ".", "stop", "(", ")", "# return the current reflector", "return", "self", ".", "__class__", ".", "reflectors", "[", "key", "]" ]
Start a shared reflector on the KubeSpawner class key: key for the reflector (e.g. 'pod' or 'events') Reflector: Reflector class to be instantiated kwargs: extra keyword-args to be relayed to ReflectorClass If replace=False and the pod reflector is already running, do nothing. If replace=True, a running pod reflector will be stopped and a new one started (for recovering from possible errors).
[ "Start", "a", "shared", "reflector", "on", "the", "KubeSpawner", "class" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1566-L1603
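A design detail worth noting: on replace=True the new reflector is constructed before the old one is stopped, so there is no window in which nothing is watching. The shared-state sketch below (with a hypothetical `factory` callable standing in for ReflectorClass construction) isolates that ordering:

class Owner:
    reflectors = {}   # class attribute: shared by every instance of Owner

    def _start(self, key, factory, replace=False):
        previous = self.__class__.reflectors.get(key)
        if replace or not previous:
            self.__class__.reflectors[key] = factory()  # new one starts first
        if replace and previous:
            previous.stop()                             # then the old one stops
        return self.__class__.reflectors[key]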
247,056
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner._start_watching_events
def _start_watching_events(self, replace=False): """Start the events reflector If replace=False and the event reflector is already running, do nothing. If replace=True, a running pod reflector will be stopped and a new one started (for recovering from possible errors). """ return self._start_reflector( "events", EventReflector, fields={"involvedObject.kind": "Pod"}, replace=replace, )
python
def _start_watching_events(self, replace=False): return self._start_reflector( "events", EventReflector, fields={"involvedObject.kind": "Pod"}, replace=replace, )
[ "def", "_start_watching_events", "(", "self", ",", "replace", "=", "False", ")", ":", "return", "self", ".", "_start_reflector", "(", "\"events\"", ",", "EventReflector", ",", "fields", "=", "{", "\"involvedObject.kind\"", ":", "\"Pod\"", "}", ",", "replace", "=", "replace", ",", ")" ]
Start the events reflector If replace=False and the event reflector is already running, do nothing. If replace=True, a running pod reflector will be stopped and a new one started (for recovering from possible errors).
[ "Start", "the", "events", "reflector" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1606-L1620
247,057
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner._options_form_default
def _options_form_default(self): ''' Build the form template according to the `profile_list` setting. Returns: '' when no `profile_list` has been defined The rendered template (using jinja2) when `profile_list` is defined. ''' if not self.profile_list: return '' if callable(self.profile_list): return self._render_options_form_dynamically else: return self._render_options_form(self.profile_list)
python
def _options_form_default(self): ''' Build the form template according to the `profile_list` setting. Returns: '' when no `profile_list` has been defined The rendered template (using jinja2) when `profile_list` is defined. ''' if not self.profile_list: return '' if callable(self.profile_list): return self._render_options_form_dynamically else: return self._render_options_form(self.profile_list)
[ "def", "_options_form_default", "(", "self", ")", ":", "if", "not", "self", ".", "profile_list", ":", "return", "''", "if", "callable", "(", "self", ".", "profile_list", ")", ":", "return", "self", ".", "_render_options_form_dynamically", "else", ":", "return", "self", ".", "_render_options_form", "(", "self", ".", "profile_list", ")" ]
Build the form template according to the `profile_list` setting. Returns: '' when no `profile_list` has been defined The rendered template (using jinja2) when `profile_list` is defined.
[ "Build", "the", "form", "template", "according", "to", "the", "profile_list", "setting", "." ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1824-L1837
247,058
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.options_from_form
def options_from_form(self, formdata): """get the option selected by the user on the form This only constructs the user_options dict, it should not actually load any options. That is done later in `.load_user_options()` Args: formdata: user selection returned by the form To access the value, you can use the `get` accessor and the name of the html element, for example:: formdata.get('profile',[0]) to get the value of the form named "profile", as defined in `form_template`:: <select class="form-control" name="profile"...> </select> Returns: user_options (dict): the selected profile in the user_options form, e.g. ``{"profile": "8 CPUs"}`` """ if not self.profile_list or self._profile_list is None: return formdata # Default to first profile if somehow none is provided try: selected_profile = int(formdata.get('profile', [0])[0]) options = self._profile_list[selected_profile] except (TypeError, IndexError, ValueError): raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None)) return { 'profile': options['display_name'] }
python
def options_from_form(self, formdata): if not self.profile_list or self._profile_list is None: return formdata # Default to first profile if somehow none is provided try: selected_profile = int(formdata.get('profile', [0])[0]) options = self._profile_list[selected_profile] except (TypeError, IndexError, ValueError): raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None)) return { 'profile': options['display_name'] }
[ "def", "options_from_form", "(", "self", ",", "formdata", ")", ":", "if", "not", "self", ".", "profile_list", "or", "self", ".", "_profile_list", "is", "None", ":", "return", "formdata", "# Default to first profile if somehow none is provided", "try", ":", "selected_profile", "=", "int", "(", "formdata", ".", "get", "(", "'profile'", ",", "[", "0", "]", ")", "[", "0", "]", ")", "options", "=", "self", ".", "_profile_list", "[", "selected_profile", "]", "except", "(", "TypeError", ",", "IndexError", ",", "ValueError", ")", ":", "raise", "web", ".", "HTTPError", "(", "400", ",", "\"No such profile: %i\"", ",", "formdata", ".", "get", "(", "'profile'", ",", "None", ")", ")", "return", "{", "'profile'", ":", "options", "[", "'display_name'", "]", "}" ]
get the option selected by the user on the form This only constructs the user_options dict, it should not actually load any options. That is done later in `.load_user_options()` Args: formdata: user selection returned by the form To access the value, you can use the `get` accessor and the name of the html element, for example:: formdata.get('profile',[0]) to get the value of the form named "profile", as defined in `form_template`:: <select class="form-control" name="profile"...> </select> Returns: user_options (dict): the selected profile in the user_options form, e.g. ``{"profile": "8 CPUs"}``
[ "get", "the", "option", "selected", "by", "the", "user", "on", "the", "form" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1839-L1873
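Tornado delivers each form field as a list of values, which is why the code indexes [0] before calling int(). A hypothetical submission makes the mapping from form index to profile concrete:

formdata = {'profile': ['1']}   # hypothetical POST body: second <option> chosen
profile_list = [{'display_name': '2 CPUs'}, {'display_name': '8 CPUs'}]

index = int(formdata.get('profile', [0])[0])          # -> 1
assert profile_list[index]['display_name'] == '8 CPUs'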
247,059
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner._load_profile
def _load_profile(self, profile_name): """Load a profile by name Called by load_user_options """ # find the profile default_profile = self._profile_list[0] for profile in self._profile_list: if profile.get('default', False): # explicit default, not the first default_profile = profile if profile['display_name'] == profile_name: break else: if profile_name: # name specified, but not found raise ValueError("No such profile: %s. Options include: %s" % ( profile_name, ', '.join(p['display_name'] for p in self._profile_list) )) else: # no name specified, use the default profile = default_profile self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name']) kubespawner_override = profile.get('kubespawner_override', {}) for k, v in kubespawner_override.items(): if callable(v): v = v(self) self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v) else: self.log.debug(".. overriding KubeSpawner value %s=%s", k, v) setattr(self, k, v)
python
def _load_profile(self, profile_name): # find the profile default_profile = self._profile_list[0] for profile in self._profile_list: if profile.get('default', False): # explicit default, not the first default_profile = profile if profile['display_name'] == profile_name: break else: if profile_name: # name specified, but not found raise ValueError("No such profile: %s. Options include: %s" % ( profile_name, ', '.join(p['display_name'] for p in self._profile_list) )) else: # no name specified, use the default profile = default_profile self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name']) kubespawner_override = profile.get('kubespawner_override', {}) for k, v in kubespawner_override.items(): if callable(v): v = v(self) self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v) else: self.log.debug(".. overriding KubeSpawner value %s=%s", k, v) setattr(self, k, v)
[ "def", "_load_profile", "(", "self", ",", "profile_name", ")", ":", "# find the profile", "default_profile", "=", "self", ".", "_profile_list", "[", "0", "]", "for", "profile", "in", "self", ".", "_profile_list", ":", "if", "profile", ".", "get", "(", "'default'", ",", "False", ")", ":", "# explicit default, not the first", "default_profile", "=", "profile", "if", "profile", "[", "'display_name'", "]", "==", "profile_name", ":", "break", "else", ":", "if", "profile_name", ":", "# name specified, but not found", "raise", "ValueError", "(", "\"No such profile: %s. Options include: %s\"", "%", "(", "profile_name", ",", "', '", ".", "join", "(", "p", "[", "'display_name'", "]", "for", "p", "in", "self", ".", "_profile_list", ")", ")", ")", "else", ":", "# no name specified, use the default", "profile", "=", "default_profile", "self", ".", "log", ".", "debug", "(", "\"Applying KubeSpawner override for profile '%s'\"", ",", "profile", "[", "'display_name'", "]", ")", "kubespawner_override", "=", "profile", ".", "get", "(", "'kubespawner_override'", ",", "{", "}", ")", "for", "k", ",", "v", "in", "kubespawner_override", ".", "items", "(", ")", ":", "if", "callable", "(", "v", ")", ":", "v", "=", "v", "(", "self", ")", "self", ".", "log", ".", "debug", "(", "\".. overriding KubeSpawner value %s=%s (callable result)\"", ",", "k", ",", "v", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\".. overriding KubeSpawner value %s=%s\"", ",", "k", ",", "v", ")", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
Load a profile by name Called by load_user_options
[ "Load", "a", "profile", "by", "name" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1876-L1909
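The selection logic leans on Python's for/else: the else branch runs only when the loop finishes without a break, i.e. when no profile matched. A stripped-down sketch of just that control flow (the sample profile dicts are hypothetical):

profiles = [
    {'display_name': 'small'},
    {'display_name': 'large', 'default': True},
]

def pick_profile(profiles, name=None):
    default = profiles[0]
    for p in profiles:
        if p.get('default', False):
            default = p                 # an explicit default beats "first"
        if p['display_name'] == name:
            break                       # found: the else clause is skipped
    else:
        if name:
            raise ValueError('No such profile: %s' % name)
        p = default                     # no name requested: use the default
    return p

assert pick_profile(profiles)['display_name'] == 'large'
assert pick_profile(profiles, 'small')['display_name'] == 'small'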
247,060
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.load_user_options
def load_user_options(self): """Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options. """ if self._profile_list is None: if callable(self.profile_list): self._profile_list = yield gen.maybe_future(self.profile_list(self)) else: self._profile_list = self.profile_list if self._profile_list: yield self._load_profile(self.user_options.get('profile', None))
python
def load_user_options(self): if self._profile_list is None: if callable(self.profile_list): self._profile_list = yield gen.maybe_future(self.profile_list(self)) else: self._profile_list = self.profile_list if self._profile_list: yield self._load_profile(self.user_options.get('profile', None))
[ "def", "load_user_options", "(", "self", ")", ":", "if", "self", ".", "_profile_list", "is", "None", ":", "if", "callable", "(", "self", ".", "profile_list", ")", ":", "self", ".", "_profile_list", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "profile_list", "(", "self", ")", ")", "else", ":", "self", ".", "_profile_list", "=", "self", ".", "profile_list", "if", "self", ".", "_profile_list", ":", "yield", "self", ".", "_load_profile", "(", "self", ".", "user_options", ".", "get", "(", "'profile'", ",", "None", ")", ")" ]
Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options.
[ "Load", "user", "options", "from", "self", ".", "user_options", "dict" ]
46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1912-L1926
247,061
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
list_motors
def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs): """ This is a generator function that enumerates all tacho motors that match the provided arguments. Parameters: name_pattern: pattern that device name should match. For example, 'motor*'. Default value: '*'. keyword arguments: used for matching the corresponding device attributes. For example, driver_name='lego-ev3-l-motor', or address=['outB', 'outC']. When argument value is a list, then a match against any entry of the list is enough. """ class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME) return (Motor(name_pattern=name, name_exact=True) for name in list_device_names(class_path, name_pattern, **kwargs))
python
def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs): class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME) return (Motor(name_pattern=name, name_exact=True) for name in list_device_names(class_path, name_pattern, **kwargs))
[ "def", "list_motors", "(", "name_pattern", "=", "Motor", ".", "SYSTEM_DEVICE_NAME_CONVENTION", ",", "*", "*", "kwargs", ")", ":", "class_path", "=", "abspath", "(", "Device", ".", "DEVICE_ROOT_PATH", "+", "'/'", "+", "Motor", ".", "SYSTEM_CLASS_NAME", ")", "return", "(", "Motor", "(", "name_pattern", "=", "name", ",", "name_exact", "=", "True", ")", "for", "name", "in", "list_device_names", "(", "class_path", ",", "name_pattern", ",", "*", "*", "kwargs", ")", ")" ]
This is a generator function that enumerates all tacho motors that match the provided arguments. Parameters: name_pattern: pattern that device name should match. For example, 'motor*'. Default value: '*'. keyword arguments: used for matching the corresponding device attributes. For example, driver_name='lego-ev3-l-motor', or address=['outB', 'outC']. When argument value is a list, then a match against any entry of the list is enough.
[ "This", "is", "a", "generator", "function", "that", "enumerates", "all", "tacho", "motors", "that", "match", "the", "provided", "arguments", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1060-L1077
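Typical usage on an ev3dev device, assuming at least one large motor is attached (the driver name below is the docstring's own example):

from ev3dev2.motor import list_motors

# Enumerate every connected large EV3 motor and report where it is plugged in.
for motor in list_motors(driver_name='lego-ev3-l-motor'):
    print(motor.address, motor.driver_name)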
247,062
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
SpeedRPS.to_native_units
def to_native_units(self, motor): """ Return the native speed measurement required to achieve desired rotations-per-second """ assert abs(self.rotations_per_second) <= motor.max_rps,\ "invalid rotations-per-second: {} max RPS is {}, {} was requested".format( motor, motor.max_rps, self.rotations_per_second) return self.rotations_per_second/motor.max_rps * motor.max_speed
python
def to_native_units(self, motor): assert abs(self.rotations_per_second) <= motor.max_rps,\ "invalid rotations-per-second: {} max RPS is {}, {} was requested".format( motor, motor.max_rps, self.rotations_per_second) return self.rotations_per_second/motor.max_rps * motor.max_speed
[ "def", "to_native_units", "(", "self", ",", "motor", ")", ":", "assert", "abs", "(", "self", ".", "rotations_per_second", ")", "<=", "motor", ".", "max_rps", ",", "\"invalid rotations-per-second: {} max RPS is {}, {} was requested\"", ".", "format", "(", "motor", ",", "motor", ".", "max_rps", ",", "self", ".", "rotations_per_second", ")", "return", "self", ".", "rotations_per_second", "/", "motor", ".", "max_rps", "*", "motor", ".", "max_speed" ]
Return the native speed measurement required to achieve desired rotations-per-second
[ "Return", "the", "native", "speed", "measurement", "required", "to", "achieve", "desired", "rotations", "-", "per", "-", "second" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L155-L162
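All four Speed* classes (RPS here, and the RPM/DPS/DPM variants below) apply the same linear mapping: native = requested / max_in_unit * max_speed. A sketch with a hypothetical motor whose numbers are plausible for an EV3 large motor:

from ev3dev2.motor import SpeedRPS

class FakeMotor:                 # hypothetical stand-in, not a real device
    max_speed = 1050             # tacho counts per second
    max_rps = 1050 / 360         # one rotation == 360 tacho counts

# 1.5 rotations/s -> 1.5 * 360 = 540 tacho counts per second
print(SpeedRPS(1.5).to_native_units(FakeMotor()))   # -> 540.0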
247,063
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
SpeedRPM.to_native_units
def to_native_units(self, motor): """ Return the native speed measurement required to achieve desired rotations-per-minute """ assert abs(self.rotations_per_minute) <= motor.max_rpm,\ "invalid rotations-per-minute: {} max RPM is {}, {} was requested".format( motor, motor.max_rpm, self.rotations_per_minute) return self.rotations_per_minute/motor.max_rpm * motor.max_speed
python
def to_native_units(self, motor): assert abs(self.rotations_per_minute) <= motor.max_rpm,\ "invalid rotations-per-minute: {} max RPM is {}, {} was requested".format( motor, motor.max_rpm, self.rotations_per_minute) return self.rotations_per_minute/motor.max_rpm * motor.max_speed
[ "def", "to_native_units", "(", "self", ",", "motor", ")", ":", "assert", "abs", "(", "self", ".", "rotations_per_minute", ")", "<=", "motor", ".", "max_rpm", ",", "\"invalid rotations-per-minute: {} max RPM is {}, {} was requested\"", ".", "format", "(", "motor", ",", "motor", ".", "max_rpm", ",", "self", ".", "rotations_per_minute", ")", "return", "self", ".", "rotations_per_minute", "/", "motor", ".", "max_rpm", "*", "motor", ".", "max_speed" ]
Return the native speed measurement required to achieve desired rotations-per-minute
[ "Return", "the", "native", "speed", "measurement", "required", "to", "achieve", "desired", "rotations", "-", "per", "-", "minute" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L180-L187
247,064
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
SpeedDPS.to_native_units
def to_native_units(self, motor): """ Return the native speed measurement required to achieve desired degrees-per-second """ assert abs(self.degrees_per_second) <= motor.max_dps,\ "invalid degrees-per-second: {} max DPS is {}, {} was requested".format( motor, motor.max_dps, self.degrees_per_second) return self.degrees_per_second/motor.max_dps * motor.max_speed
python
def to_native_units(self, motor): assert abs(self.degrees_per_second) <= motor.max_dps,\ "invalid degrees-per-second: {} max DPS is {}, {} was requested".format( motor, motor.max_dps, self.degrees_per_second) return self.degrees_per_second/motor.max_dps * motor.max_speed
[ "def", "to_native_units", "(", "self", ",", "motor", ")", ":", "assert", "abs", "(", "self", ".", "degrees_per_second", ")", "<=", "motor", ".", "max_dps", ",", "\"invalid degrees-per-second: {} max DPS is {}, {} was requested\"", ".", "format", "(", "motor", ",", "motor", ".", "max_dps", ",", "self", ".", "degrees_per_second", ")", "return", "self", ".", "degrees_per_second", "/", "motor", ".", "max_dps", "*", "motor", ".", "max_speed" ]
Return the native speed measurement required to achieve desired degrees-per-second
[ "Return", "the", "native", "speed", "measurement", "required", "to", "achieve", "desired", "degrees", "-", "per", "-", "second" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L205-L212
247,065
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
SpeedDPM.to_native_units
def to_native_units(self, motor): """ Return the native speed measurement required to achieve desired degrees-per-minute """ assert abs(self.degrees_per_minute) <= motor.max_dpm,\ "invalid degrees-per-minute: {} max DPM is {}, {} was requested".format( motor, motor.max_dpm, self.degrees_per_minute) return self.degrees_per_minute/motor.max_dpm * motor.max_speed
python
def to_native_units(self, motor): assert abs(self.degrees_per_minute) <= motor.max_dpm,\ "invalid degrees-per-minute: {} max DPM is {}, {} was requested".format( motor, motor.max_dpm, self.degrees_per_minute) return self.degrees_per_minute/motor.max_dpm * motor.max_speed
[ "def", "to_native_units", "(", "self", ",", "motor", ")", ":", "assert", "abs", "(", "self", ".", "degrees_per_minute", ")", "<=", "motor", ".", "max_dpm", ",", "\"invalid degrees-per-minute: {} max DPM is {}, {} was requested\"", ".", "format", "(", "motor", ",", "motor", ".", "max_dpm", ",", "self", ".", "degrees_per_minute", ")", "return", "self", ".", "degrees_per_minute", "/", "motor", ".", "max_dpm", "*", "motor", ".", "max_speed" ]
Return the native speed measurement required to achieve desired degrees-per-minute
[ "Return", "the", "native", "speed", "measurement", "required", "to", "achieve", "desired", "degrees", "-", "per", "-", "minute" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L230-L237
247,066
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.address
def address(self): """ Returns the name of the port that this motor is connected to. """ self._address, value = self.get_attr_string(self._address, 'address') return value
python
def address(self): self._address, value = self.get_attr_string(self._address, 'address') return value
[ "def", "address", "(", "self", ")", ":", "self", ".", "_address", ",", "value", "=", "self", ".", "get_attr_string", "(", "self", ".", "_address", ",", "'address'", ")", "return", "value" ]
Returns the name of the port that this motor is connected to.
[ "Returns", "the", "name", "of", "the", "port", "that", "this", "motor", "is", "connected", "to", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L403-L408
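The `self._address, value = self.get_attr_string(...)` shape recurs through every property below: the device keeps a cached file handle per attribute and rewinds it on each read. A heavily simplified sketch of that pattern (the real Device helpers also handle missing devices and retries; the names here are hypothetical):

def read_attr(handle, path):
    # Open the sysfs file once, then seek(0) and re-read on later calls.
    if handle is None:
        handle = open(path)
    handle.seek(0)
    return handle, handle.read().strip()

# handle, value = read_attr(handle, '/sys/class/tacho-motor/motor0/address')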
247,067
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.commands
def commands(self): """ Returns a list of commands that are supported by the motor controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported. - `run-forever` will cause the motor to run until another command is sent. - `run-to-abs-pos` will run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. - `run-to-rel-pos` will run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`. - `run-timed` will run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`. - `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp` while running *will* take effect immediately. - `stop` will stop any of the run commands before they are complete using the action specified by `stop_action`. - `reset` will reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor. """ (self._commands, value) = self.get_cached_attr_set(self._commands, 'commands') return value
python
def commands(self): (self._commands, value) = self.get_cached_attr_set(self._commands, 'commands') return value
[ "def", "commands", "(", "self", ")", ":", "(", "self", ".", "_commands", ",", "value", ")", "=", "self", ".", "get_cached_attr_set", "(", "self", ".", "_commands", ",", "'commands'", ")", "return", "value" ]
Returns a list of commands that are supported by the motor controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`, `run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported. - `run-forever` will cause the motor to run until another command is sent. - `run-to-abs-pos` will run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`. - `run-to-rel-pos` will run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`. - `run-timed` will run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`. - `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`. Unlike other run commands, changing `duty_cycle_sp` while running *will* take effect immediately. - `stop` will stop any of the run commands before they are complete using the action specified by `stop_action`. - `reset` will reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor.
[ "Returns", "a", "list", "of", "commands", "that", "are", "supported", "by", "the", "motor", "controller", ".", "Possible", "values", "are", "run", "-", "forever", "run", "-", "to", "-", "abs", "-", "pos", "run", "-", "to", "-", "rel", "-", "pos", "run", "-", "timed", "run", "-", "direct", "stop", "and", "reset", ".", "Not", "all", "commands", "may", "be", "supported", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L423-L446
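A hypothetical usage guard built on this property — checking that a command is supported before writing it (the port and setpoint below are illustrative):

from ev3dev2.motor import Motor, OUTPUT_A

m = Motor(address=OUTPUT_A)
if 'run-to-rel-pos' in m.commands:
    m.position_sp = 360          # one full rotation relative to here
    m.command = 'run-to-rel-pos'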
247,068
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.driver_name
def driver_name(self): """ Returns the name of the driver that provides this tacho motor device. """ (self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name') return value
python
def driver_name(self): (self._driver_name, value) = self.get_cached_attr_string(self._driver_name, 'driver_name') return value
[ "def", "driver_name", "(", "self", ")", ":", "(", "self", ".", "_driver_name", ",", "value", ")", "=", "self", ".", "get_cached_attr_string", "(", "self", ".", "_driver_name", ",", "'driver_name'", ")", "return", "value" ]
Returns the name of the driver that provides this tacho motor device.
[ "Returns", "the", "name", "of", "the", "driver", "that", "provides", "this", "tacho", "motor", "device", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L469-L474
247,069
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.duty_cycle
def duty_cycle(self): """ Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100. """ self._duty_cycle, value = self.get_attr_int(self._duty_cycle, 'duty_cycle') return value
python
def duty_cycle(self): self._duty_cycle, value = self.get_attr_int(self._duty_cycle, 'duty_cycle') return value
[ "def", "duty_cycle", "(", "self", ")", ":", "self", ".", "_duty_cycle", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_duty_cycle", ",", "'duty_cycle'", ")", "return", "value" ]
Returns the current duty cycle of the motor. Units are percent. Values are -100 to 100.
[ "Returns", "the", "current", "duty", "cycle", "of", "the", "motor", ".", "Units", "are", "percent", ".", "Values", "are", "-", "100", "to", "100", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L477-L483
247,070
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.duty_cycle_sp
def duty_cycle_sp(self): """ Writing sets the duty cycle setpoint. Reading returns the current value. Units are in percent. Valid values are -100 to 100. A negative value causes the motor to rotate in reverse. """ self._duty_cycle_sp, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp') return value
python
def duty_cycle_sp(self): self._duty_cycle_sp, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp') return value
[ "def", "duty_cycle_sp", "(", "self", ")", ":", "self", ".", "_duty_cycle_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_duty_cycle_sp", ",", "'duty_cycle_sp'", ")", "return", "value" ]
Writing sets the duty cycle setpoint. Reading returns the current value. Units are in percent. Valid values are -100 to 100. A negative value causes the motor to rotate in reverse.
[ "Writing", "sets", "the", "duty", "cycle", "setpoint", ".", "Reading", "returns", "the", "current", "value", ".", "Units", "are", "in", "percent", ".", "Valid", "values", "are", "-", "100", "to", "100", ".", "A", "negative", "value", "causes", "the", "motor", "to", "rotate", "in", "reverse", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L486-L493
247,071
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.polarity
def polarity(self): """ Sets the polarity of the motor. With `normal` polarity, a positive duty cycle will cause the motor to rotate clockwise. With `inversed` polarity, a positive duty cycle will cause the motor to rotate counter-clockwise. Valid values are `normal` and `inversed`. """ self._polarity, value = self.get_attr_string(self._polarity, 'polarity') return value
python
def polarity(self): self._polarity, value = self.get_attr_string(self._polarity, 'polarity') return value
[ "def", "polarity", "(", "self", ")", ":", "self", ".", "_polarity", ",", "value", "=", "self", ".", "get_attr_string", "(", "self", ".", "_polarity", ",", "'polarity'", ")", "return", "value" ]
Sets the polarity of the motor. With `normal` polarity, a positive duty cycle will cause the motor to rotate clockwise. With `inversed` polarity, a positive duty cycle will cause the motor to rotate counter-clockwise. Valid values are `normal` and `inversed`.
[ "Sets", "the", "polarity", "of", "the", "motor", ".", "With", "normal", "polarity", "a", "positive", "duty", "cycle", "will", "cause", "the", "motor", "to", "rotate", "clockwise", ".", "With", "inversed", "polarity", "a", "positive", "duty", "cycle", "will", "cause", "the", "motor", "to", "rotate", "counter", "-", "clockwise", ".", "Valid", "values", "are", "normal", "and", "inversed", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L510-L518
247,072
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.position
def position(self):
    """
    Returns the current position of the motor in pulses of the rotary
    encoder. When the motor rotates clockwise, the position will increase.
    Likewise, rotating counter-clockwise causes the position to decrease.
    Writing will set the position to that value.
    """
    self._position, value = self.get_attr_int(self._position, 'position')
    return value
python
def position(self):
    self._position, value = self.get_attr_int(self._position, 'position')
    return value
[ "def", "position", "(", "self", ")", ":", "self", ".", "_position", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_position", ",", "'position'", ")", "return", "value" ]
Returns the current position of the motor in pulses of the rotary encoder. When the motor rotates clockwise, the position will increase. Likewise, rotating counter-clockwise causes the position to decrease. Writing will set the position to that value.
[ "Returns", "the", "current", "position", "of", "the", "motor", "in", "pulses", "of", "the", "rotary", "encoder", ".", "When", "the", "motor", "rotates", "clockwise", "the", "position", "will", "increase", ".", "Likewise", "rotating", "counter", "-", "clockwise", "causes", "the", "position", "to", "decrease", ".", "Writing", "will", "set", "the", "position", "to", "that", "value", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L525-L533
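Because the position attribute is writable, the encoder can be re-zeroed before a move. A sketch, assuming an EV3 large motor (360 encoder pulses per rotation, so pulses roughly correspond to degrees):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.position = 0             # re-zero the rotary encoder
m.on_for_degrees(25, 90)   # quarter turn at 25% of max_speed
print(m.position)          # roughly 90 on an EV3 large motor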
247,073
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.position_p
def position_p(self):
    """
    The proportional constant for the position PID.
    """
    self._position_p, value = self.get_attr_int(self._position_p, 'hold_pid/Kp')
    return value
python
def position_p(self):
    self._position_p, value = self.get_attr_int(self._position_p, 'hold_pid/Kp')
    return value
[ "def", "position_p", "(", "self", ")", ":", "self", ".", "_position_p", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_position_p", ",", "'hold_pid/Kp'", ")", "return", "value" ]
The proportional constant for the position PID.
[ "The", "proportional", "constant", "for", "the", "position", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L540-L545
247,074
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.position_i
def position_i(self):
    """
    The integral constant for the position PID.
    """
    self._position_i, value = self.get_attr_int(self._position_i, 'hold_pid/Ki')
    return value
python
def position_i(self):
    self._position_i, value = self.get_attr_int(self._position_i, 'hold_pid/Ki')
    return value
[ "def", "position_i", "(", "self", ")", ":", "self", ".", "_position_i", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_position_i", ",", "'hold_pid/Ki'", ")", "return", "value" ]
The integral constant for the position PID.
[ "The", "integral", "constant", "for", "the", "position", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L552-L557
247,075
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.position_d
def position_d(self):
    """
    The derivative constant for the position PID.
    """
    self._position_d, value = self.get_attr_int(self._position_d, 'hold_pid/Kd')
    return value
python
def position_d(self):
    self._position_d, value = self.get_attr_int(self._position_d, 'hold_pid/Kd')
    return value
[ "def", "position_d", "(", "self", ")", ":", "self", ".", "_position_d", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_position_d", ",", "'hold_pid/Kd'", ")", "return", "value" ]
The derivative constant for the position PID.
[ "The", "derivative", "constant", "for", "the", "position", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L564-L569
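The three hold-PID constants above are read/write properties backed by hold_pid/Kp, Ki and Kd. A hedged tuning sketch (the doubled Kp is purely illustrative, not a recommended value):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
print(m.position_p, m.position_i, m.position_d)  # current hold_pid/Kp, Ki, Kd
m.position_p = m.position_p * 2                  # stiffer position hold (illustrative)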
247,076
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.max_speed
def max_speed(self):
    """
    Returns the maximum value that is accepted by the `speed_sp` attribute. This
    may be slightly different than the maximum speed that a particular motor can
    reach - it's the maximum theoretical speed.
    """
    (self._max_speed, value) = self.get_cached_attr_int(self._max_speed, 'max_speed')
    return value
python
def max_speed(self):
    (self._max_speed, value) = self.get_cached_attr_int(self._max_speed, 'max_speed')
    return value
[ "def", "max_speed", "(", "self", ")", ":", "(", "self", ".", "_max_speed", ",", "value", ")", "=", "self", ".", "get_cached_attr_int", "(", "self", ".", "_max_speed", ",", "'max_speed'", ")", "return", "value" ]
Returns the maximum value that is accepted by the `speed_sp` attribute. This may be slightly different than the maximum speed that a particular motor can reach - it's the maximum theoretical speed.
[ "Returns", "the", "maximum", "value", "that", "is", "accepted", "by", "the", "speed_sp", "attribute", ".", "This", "may", "be", "slightly", "different", "than", "the", "maximum", "speed", "that", "a", "particular", "motor", "can", "reach", "-", "it", "s", "the", "maximum", "theoretical", "speed", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L591-L598
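max_speed is the natural scale factor for speed setpoints. A short sketch, assuming a motor on port A:

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
print(m.max_speed)                        # e.g. 1050 tacho counts/sec on an EV3 large motor
m.run_forever(speed_sp=m.max_speed // 2)  # request half of the theoretical maximum
m.stop()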
247,077
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.ramp_up_sp
def ramp_up_sp(self):
    """
    Writing sets the ramp up setpoint. Reading returns the current value. Units
    are in milliseconds and must be positive. When set to a non-zero value, the
    motor speed will increase from 0 to 100% of `max_speed` over the span of this
    setpoint. The actual ramp time is the ratio of the difference between
    `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_up_sp`.
    """
    self._ramp_up_sp, value = self.get_attr_int(self._ramp_up_sp, 'ramp_up_sp')
    return value
python
def ramp_up_sp(self):
    self._ramp_up_sp, value = self.get_attr_int(self._ramp_up_sp, 'ramp_up_sp')
    return value
[ "def", "ramp_up_sp", "(", "self", ")", ":", "self", ".", "_ramp_up_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_ramp_up_sp", ",", "'ramp_up_sp'", ")", "return", "value" ]
Writing sets the ramp up setpoint. Reading returns the current value. Units are in milliseconds and must be positive. When set to a non-zero value, the motor speed will increase from 0 to 100% of `max_speed` over the span of this setpoint. The actual ramp time is the ratio of the difference between `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_up_sp`.
[ "Writing", "sets", "the", "ramp", "up", "setpoint", ".", "Reading", "returns", "the", "current", "value", ".", "Units", "are", "in", "milliseconds", "and", "must", "be", "positive", ".", "When", "set", "to", "a", "non", "-", "zero", "value", "the", "motor", "speed", "will", "increase", "from", "0", "to", "100%", "of", "max_speed", "over", "the", "span", "of", "this", "setpoint", ".", "The", "actual", "ramp", "time", "is", "the", "ratio", "of", "the", "difference", "between", "the", "speed_sp", "and", "the", "current", "speed", "and", "max_speed", "multiplied", "by", "ramp_up_sp", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L628-L637
247,078
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.ramp_down_sp
def ramp_down_sp(self):
    """
    Writing sets the ramp down setpoint. Reading returns the current value. Units
    are in milliseconds and must be positive. When set to a non-zero value, the
    motor speed will decrease from 100% of `max_speed` to 0 over the span of this
    setpoint. The actual ramp time is the ratio of the difference between
    `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_down_sp`.
    """
    self._ramp_down_sp, value = self.get_attr_int(self._ramp_down_sp, 'ramp_down_sp')
    return value
python
def ramp_down_sp(self):
    self._ramp_down_sp, value = self.get_attr_int(self._ramp_down_sp, 'ramp_down_sp')
    return value
[ "def", "ramp_down_sp", "(", "self", ")", ":", "self", ".", "_ramp_down_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_ramp_down_sp", ",", "'ramp_down_sp'", ")", "return", "value" ]
Writing sets the ramp down setpoint. Reading returns the current value. Units are in milliseconds and must be positive. When set to a non-zero value, the motor speed will decrease from 100% of `max_speed` to 0 over the span of this setpoint. The actual ramp time is the ratio of the difference between `speed_sp` and the current `speed` to `max_speed`, multiplied by `ramp_down_sp`.
[ "Writing", "sets", "the", "ramp", "down", "setpoint", ".", "Reading", "returns", "the", "current", "value", ".", "Units", "are", "in", "milliseconds", "and", "must", "be", "positive", ".", "When", "set", "to", "a", "non", "-", "zero", "value", "the", "motor", "speed", "will", "decrease", "from", "0", "to", "100%", "of", "max_speed", "over", "the", "span", "of", "this", "setpoint", ".", "The", "actual", "ramp", "time", "is", "the", "ratio", "of", "the", "difference", "between", "the", "speed_sp", "and", "the", "current", "speed", "and", "max_speed", "multiplied", "by", "ramp_down_sp", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L644-L653
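A sketch combining both ramp setpoints with a timed run (motor on port A and all values are illustrative assumptions):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.ramp_up_sp = 1000   # ms to ramp from 0 to max_speed
m.ramp_down_sp = 500  # ms to ramp from max_speed back to 0
m.run_timed(time_sp=3000, speed_sp=m.max_speed // 2)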
247,079
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.speed_p
def speed_p(self):
    """
    The proportional constant for the speed regulation PID.
    """
    self._speed_p, value = self.get_attr_int(self._speed_p, 'speed_pid/Kp')
    return value
python
def speed_p(self):
    self._speed_p, value = self.get_attr_int(self._speed_p, 'speed_pid/Kp')
    return value
[ "def", "speed_p", "(", "self", ")", ":", "self", ".", "_speed_p", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_speed_p", ",", "'speed_pid/Kp'", ")", "return", "value" ]
The proportional constant for the speed regulation PID.
[ "The", "proportional", "constant", "for", "the", "speed", "regulation", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L660-L665
247,080
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.speed_i
def speed_i(self):
    """
    The integral constant for the speed regulation PID.
    """
    self._speed_i, value = self.get_attr_int(self._speed_i, 'speed_pid/Ki')
    return value
python
def speed_i(self):
    self._speed_i, value = self.get_attr_int(self._speed_i, 'speed_pid/Ki')
    return value
[ "def", "speed_i", "(", "self", ")", ":", "self", ".", "_speed_i", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_speed_i", ",", "'speed_pid/Ki'", ")", "return", "value" ]
The integral constant for the speed regulation PID.
[ "The", "integral", "constant", "for", "the", "speed", "regulation", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L672-L677
247,081
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.speed_d
def speed_d(self):
    """
    The derivative constant for the speed regulation PID.
    """
    self._speed_d, value = self.get_attr_int(self._speed_d, 'speed_pid/Kd')
    return value
python
def speed_d(self):
    self._speed_d, value = self.get_attr_int(self._speed_d, 'speed_pid/Kd')
    return value
[ "def", "speed_d", "(", "self", ")", ":", "self", ".", "_speed_d", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_speed_d", ",", "'speed_pid/Kd'", ")", "return", "value" ]
The derivative constant for the speed regulation PID.
[ "The", "derivative", "constant", "for", "the", "speed", "regulation", "PID", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L684-L689
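As with the hold PID, the speed-regulation constants can be inspected and written. A minimal sketch (motor on port A assumed):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
print(m.speed_p, m.speed_i, m.speed_d)  # current speed_pid/Kp, Ki, Kd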
247,082
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.state
def state(self):
    """
    Reading returns a list of state flags. Possible flags are
    `running`, `ramping`, `holding`, `overloaded` and `stalled`.
    """
    self._state, value = self.get_attr_set(self._state, 'state')
    return value
python
def state(self):
    self._state, value = self.get_attr_set(self._state, 'state')
    return value
[ "def", "state", "(", "self", ")", ":", "self", ".", "_state", ",", "value", "=", "self", ".", "get_attr_set", "(", "self", ".", "_state", ",", "'state'", ")", "return", "value" ]
Reading returns a list of state flags. Possible flags are `running`, `ramping`, `holding`, `overloaded` and `stalled`.
[ "Reading", "returns", "a", "list", "of", "state", "flags", ".", "Possible", "flags", "are", "running", "ramping", "holding", "overloaded", "and", "stalled", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L696-L702
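A polling sketch built on the state flags (motor on port A and the setpoint are assumptions):

import time
from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.run_forever(speed_sp=300)
time.sleep(0.1)
if 'stalled' in m.state:  # state is a collection of flags, e.g. ['running', 'stalled']
    m.stop()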
247,083
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.stop_action
def stop_action(self):
    """
    Reading returns the current stop action. Writing sets the stop action.
    The value determines the motor's behavior when `command` is set to `stop`.
    Also, it determines the motor's behavior when a run command completes. See
    `stop_actions` for a list of possible values.
    """
    self._stop_action, value = self.get_attr_string(self._stop_action, 'stop_action')
    return value
python
def stop_action(self):
    self._stop_action, value = self.get_attr_string(self._stop_action, 'stop_action')
    return value
[ "def", "stop_action", "(", "self", ")", ":", "self", ".", "_stop_action", ",", "value", "=", "self", ".", "get_attr_string", "(", "self", ".", "_stop_action", ",", "'stop_action'", ")", "return", "value" ]
Reading returns the current stop action. Writing sets the stop action. The value determines the motor's behavior when `command` is set to `stop`. Also, it determines the motor's behavior when a run command completes. See `stop_actions` for a list of possible values.
[ "Reading", "returns", "the", "current", "stop", "action", ".", "Writing", "sets", "the", "stop", "action", ".", "The", "value", "determines", "the", "motors", "behavior", "when", "command", "is", "set", "to", "stop", ".", "Also", "it", "determines", "the", "motors", "behavior", "when", "a", "run", "command", "completes", ".", "See", "stop_actions", "for", "a", "list", "of", "possible", "values", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L705-L713
247,084
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.stop_actions
def stop_actions(self):
    """
    Returns a list of stop actions supported by the motor controller.
    Possible values are `coast`, `brake` and `hold`. `coast` means that power will
    be removed from the motor and it will freely coast to a stop.
    `brake` means that power will be removed from the motor and a passive
    electrical load will be placed on the motor. This is usually done by
    shorting the motor terminals together. This load will absorb the energy
    from the rotation of the motor and cause it to stop more quickly
    than coasting. `hold` does not remove power from the motor. Instead it
    actively tries to hold the motor at the current position. If an external
    force tries to turn the motor, the motor will 'push back' to maintain
    its position.
    """
    (self._stop_actions, value) = self.get_cached_attr_set(self._stop_actions, 'stop_actions')
    return value
python
def stop_actions(self):
    (self._stop_actions, value) = self.get_cached_attr_set(self._stop_actions, 'stop_actions')
    return value
[ "def", "stop_actions", "(", "self", ")", ":", "(", "self", ".", "_stop_actions", ",", "value", ")", "=", "self", ".", "get_cached_attr_set", "(", "self", ".", "_stop_actions", ",", "'stop_actions'", ")", "return", "value" ]
Returns a list of stop actions supported by the motor controller. Possible values are `coast`, `brake` and `hold`. `coast` means that power will be removed from the motor and it will freely coast to a stop. `brake` means that power will be removed from the motor and a passive electrical load will be placed on the motor. This is usually done by shorting the motor terminals together. This load will absorb the energy from the rotation of the motor and cause it to stop more quickly than coasting. `hold` does not remove power from the motor. Instead it actively tries to hold the motor at the current position. If an external force tries to turn the motor, the motor will 'push back' to maintain its position.
[ "Returns", "a", "list", "of", "stop", "actions", "supported", "by", "the", "motor", "controller", ".", "Possible", "values", "are", "coast", "brake", "and", "hold", ".", "coast", "means", "that", "power", "will", "be", "removed", "from", "the", "motor", "and", "it", "will", "freely", "coast", "to", "a", "stop", ".", "brake", "means", "that", "power", "will", "be", "removed", "from", "the", "motor", "and", "a", "passive", "electrical", "load", "will", "be", "placed", "on", "the", "motor", ".", "This", "is", "usually", "done", "by", "shorting", "the", "motor", "terminals", "together", ".", "This", "load", "will", "absorb", "the", "energy", "from", "the", "rotation", "of", "the", "motors", "and", "cause", "the", "motor", "to", "stop", "more", "quickly", "than", "coasting", ".", "hold", "does", "not", "remove", "power", "from", "the", "motor", ".", "Instead", "it", "actively", "tries", "to", "hold", "the", "motor", "at", "the", "current", "position", ".", "If", "an", "external", "force", "tries", "to", "turn", "the", "motor", "the", "motor", "will", "push", "back", "to", "maintain", "its", "position", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L720-L734
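A sketch that checks the supported actions before selecting one; run_timed is used here because the low-level run commands honor whatever stop_action has been set (port and values assumed):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
if 'hold' in m.stop_actions:
    m.stop_action = 'hold'  # actively hold position once the run completes
m.run_timed(time_sp=1000, speed_sp=300)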
247,085
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.time_sp
def time_sp(self):
    """
    Writing specifies the amount of time the motor will run when using the
    `run-timed` command. Reading returns the current value. Units are in
    milliseconds.
    """
    self._time_sp, value = self.get_attr_int(self._time_sp, 'time_sp')
    return value
python
def time_sp(self):
    self._time_sp, value = self.get_attr_int(self._time_sp, 'time_sp')
    return value
[ "def", "time_sp", "(", "self", ")", ":", "self", ".", "_time_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_time_sp", ",", "'time_sp'", ")", "return", "value" ]
Writing specifies the amount of time the motor will run when using the `run-timed` command. Reading returns the current value. Units are in milliseconds.
[ "Writing", "specifies", "the", "amount", "of", "time", "the", "motor", "will", "run", "when", "using", "the", "run", "-", "timed", "command", ".", "Reading", "returns", "the", "current", "value", ".", "Units", "are", "in", "milliseconds", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L737-L744
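time_sp only takes effect with the run-timed command. A minimal sketch (port and values assumed):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.time_sp = 1500   # run-timed duration in milliseconds
m.speed_sp = 300
m.run_timed()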
247,086
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.run_forever
def run_forever(self, **kwargs):
    """
    Run the motor until another command is sent.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_FOREVER
python
def run_forever(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_FOREVER
[ "def", "run_forever", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RUN_FOREVER" ]
Run the motor until another command is sent.
[ "Run", "the", "motor", "until", "another", "command", "is", "sent", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L750-L756
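The **kwargs pattern above writes each keyword to the attribute of the same name before issuing the command, so setpoints can be passed inline. A sketch (port and values assumed):

import time
from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.run_forever(speed_sp=400)  # equivalent to: m.speed_sp = 400; m.command = 'run-forever'
time.sleep(2)
m.stop()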
247,087
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.run_to_abs_pos
def run_to_abs_pos(self, **kwargs):
    """
    Run to an absolute position specified by `position_sp` and then
    stop using the action specified in `stop_action`.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TO_ABS_POS
python
def run_to_abs_pos(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TO_ABS_POS
[ "def", "run_to_abs_pos", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RUN_TO_ABS_POS" ]
Run to an absolute position specified by `position_sp` and then stop using the action specified in `stop_action`.
[ "Run", "to", "an", "absolute", "position", "specified", "by", "position_sp", "and", "then", "stop", "using", "the", "action", "specified", "in", "stop_action", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L758-L765
247,088
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.run_to_rel_pos
def run_to_rel_pos(self, **kwargs):
    """
    Run to a position relative to the current `position` value.
    The new position will be current `position` + `position_sp`.
    When the new position is reached, the motor will stop using
    the action specified by `stop_action`.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TO_REL_POS
python
def run_to_rel_pos(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TO_REL_POS
[ "def", "run_to_rel_pos", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RUN_TO_REL_POS" ]
Run to a position relative to the current `position` value. The new position will be current `position` + `position_sp`. When the new position is reached, the motor will stop using the action specified by `stop_action`.
[ "Run", "to", "a", "position", "relative", "to", "the", "current", "position", "value", ".", "The", "new", "position", "will", "be", "current", "position", "+", "position_sp", ".", "When", "the", "new", "position", "is", "reached", "the", "motor", "will", "stop", "using", "the", "action", "specified", "by", "stop_action", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L767-L776
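A sketch contrasting the absolute and relative position runs (motor on port A assumed; wait_while('running') blocks until each move finishes):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.stop_action = 'hold'
m.run_to_abs_pos(position_sp=0, speed_sp=300)   # back to the encoder zero mark
m.wait_while('running')
m.run_to_rel_pos(position_sp=90, speed_sp=300)  # then a further 90 pulses forward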
247,089
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.run_timed
def run_timed(self, **kwargs):
    """
    Run the motor for the amount of time specified in `time_sp`
    and then stop the motor using the action specified by `stop_action`.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TIMED
python
def run_timed(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RUN_TIMED
[ "def", "run_timed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RUN_TIMED" ]
Run the motor for the amount of time specified in `time_sp` and then stop the motor using the action specified by `stop_action`.
[ "Run", "the", "motor", "for", "the", "amount", "of", "time", "specified", "in", "time_sp", "and", "then", "stop", "the", "motor", "using", "the", "action", "specified", "by", "stop_action", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L778-L785
247,090
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.stop
def stop(self, **kwargs):
    """
    Stop any of the run commands before they are complete using the
    action specified by `stop_action`.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_STOP
python
def stop(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_STOP
[ "def", "stop", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_STOP" ]
Stop any of the run commands before they are complete using the action specified by `stop_action`.
[ "Stop", "any", "of", "the", "run", "commands", "before", "they", "are", "complete", "using", "the", "action", "specified", "by", "stop_action", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L797-L804
247,091
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.reset
def reset(self, **kwargs):
    """
    Reset all of the motor parameter attributes to their default value.
    This will also have the effect of stopping the motor.
    """
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RESET
python
def reset(self, **kwargs):
    for key in kwargs:
        setattr(self, key, kwargs[key])
    self.command = self.COMMAND_RESET
[ "def", "reset", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "self", ".", "command", "=", "self", ".", "COMMAND_RESET" ]
Reset all of the motor parameter attributes to their default value. This will also have the effect of stopping the motor.
[ "Reset", "all", "of", "the", "motor", "parameter", "attributes", "to", "their", "default", "value", ".", "This", "will", "also", "have", "the", "effect", "of", "stopping", "the", "motor", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L806-L813
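A sketch of interrupting a run and then restoring defaults (port and values are illustrative):

import time
from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.run_forever(speed_sp=500)
time.sleep(1)
m.stop(stop_action='coast')  # interrupt the run; coast to rest
m.reset()                    # restore default attribute values (also stops the motor)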
247,092
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.on_for_rotations
def on_for_rotations(self, speed, rotations, brake=True, block=True):
    """
    Rotate the motor at ``speed`` for ``rotations``

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.
    """
    speed_sp = self._speed_native_units(speed)
    self._set_rel_position_degrees_and_speed_sp(rotations * 360, speed_sp)
    self._set_brake(brake)
    self.run_to_rel_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
python
def on_for_rotations(self, speed, rotations, brake=True, block=True):
    speed_sp = self._speed_native_units(speed)
    self._set_rel_position_degrees_and_speed_sp(rotations * 360, speed_sp)
    self._set_brake(brake)
    self.run_to_rel_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
[ "def", "on_for_rotations", "(", "self", ",", "speed", ",", "rotations", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "speed_sp", "=", "self", ".", "_speed_native_units", "(", "speed", ")", "self", ".", "_set_rel_position_degrees_and_speed_sp", "(", "rotations", "*", "360", ",", "speed_sp", ")", "self", ".", "_set_brake", "(", "brake", ")", "self", ".", "run_to_rel_pos", "(", ")", "if", "block", ":", "self", ".", "wait_until", "(", "'running'", ",", "timeout", "=", "WAIT_RUNNING_TIMEOUT", ")", "self", ".", "wait_until_not_moving", "(", ")" ]
Rotate the motor at ``speed`` for ``rotations``. ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units.
[ "Rotate", "the", "motor", "at", "speed", "for", "rotations" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L958-L972
247,093
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.on_for_degrees
def on_for_degrees(self, speed, degrees, brake=True, block=True):
    """
    Rotate the motor at ``speed`` for ``degrees``

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.
    """
    speed_sp = self._speed_native_units(speed)
    self._set_rel_position_degrees_and_speed_sp(degrees, speed_sp)
    self._set_brake(brake)
    self.run_to_rel_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
python
def on_for_degrees(self, speed, degrees, brake=True, block=True):
    speed_sp = self._speed_native_units(speed)
    self._set_rel_position_degrees_and_speed_sp(degrees, speed_sp)
    self._set_brake(brake)
    self.run_to_rel_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
[ "def", "on_for_degrees", "(", "self", ",", "speed", ",", "degrees", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "speed_sp", "=", "self", ".", "_speed_native_units", "(", "speed", ")", "self", ".", "_set_rel_position_degrees_and_speed_sp", "(", "degrees", ",", "speed_sp", ")", "self", ".", "_set_brake", "(", "brake", ")", "self", ".", "run_to_rel_pos", "(", ")", "if", "block", ":", "self", ".", "wait_until", "(", "'running'", ",", "timeout", "=", "WAIT_RUNNING_TIMEOUT", ")", "self", ".", "wait_until_not_moving", "(", ")" ]
Rotate the motor at ``speed`` for ``degrees``. ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units.
[ "Rotate", "the", "motor", "at", "speed", "for", "degrees" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L974-L988
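A sketch of the two relative high-level moves above; plain numbers are percentages of max_speed, while SpeedValue objects such as SpeedRPM select other units (port and values assumed):

from ev3dev2.motor import LargeMotor, OUTPUT_A, SpeedRPM

m = LargeMotor(OUTPUT_A)
m.on_for_rotations(50, 2)            # two rotations at 50% of max_speed
m.on_for_degrees(SpeedRPM(30), -90)  # quarter turn backwards at 30 RPM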
247,094
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.on_to_position
def on_to_position(self, speed, position, brake=True, block=True):
    """
    Rotate the motor at ``speed`` to ``position``

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.
    """
    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self.position_sp = position
    self._set_brake(brake)
    self.run_to_abs_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
python
def on_to_position(self, speed, position, brake=True, block=True):
    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self.position_sp = position
    self._set_brake(brake)
    self.run_to_abs_pos()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
[ "def", "on_to_position", "(", "self", ",", "speed", ",", "position", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "speed", "=", "self", ".", "_speed_native_units", "(", "speed", ")", "self", ".", "speed_sp", "=", "int", "(", "round", "(", "speed", ")", ")", "self", ".", "position_sp", "=", "position", "self", ".", "_set_brake", "(", "brake", ")", "self", ".", "run_to_abs_pos", "(", ")", "if", "block", ":", "self", ".", "wait_until", "(", "'running'", ",", "timeout", "=", "WAIT_RUNNING_TIMEOUT", ")", "self", ".", "wait_until_not_moving", "(", ")" ]
Rotate the motor at ``speed`` to ``position``. ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units.
[ "Rotate", "the", "motor", "at", "speed", "to", "position" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L990-L1005
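on_to_position targets an absolute encoder position. A sketch that assumes the encoder was zeroed beforehand (port and values illustrative):

from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.position = 0
m.on_to_position(25, 180)  # absolute target of 180 pulses (roughly degrees on EV3 motors)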
247,095
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.on_for_seconds
def on_for_seconds(self, speed, seconds, brake=True, block=True):
    """
    Rotate the motor at ``speed`` for ``seconds``

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.
    """
    if seconds < 0:
        raise ValueError("seconds is negative ({})".format(seconds))

    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self.time_sp = int(seconds * 1000)
    self._set_brake(brake)
    self.run_timed()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
python
def on_for_seconds(self, speed, seconds, brake=True, block=True):
    if seconds < 0:
        raise ValueError("seconds is negative ({})".format(seconds))

    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self.time_sp = int(seconds * 1000)
    self._set_brake(brake)
    self.run_timed()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
[ "def", "on_for_seconds", "(", "self", ",", "speed", ",", "seconds", ",", "brake", "=", "True", ",", "block", "=", "True", ")", ":", "if", "seconds", "<", "0", ":", "raise", "ValueError", "(", "\"seconds is negative ({})\"", ".", "format", "(", "seconds", ")", ")", "speed", "=", "self", ".", "_speed_native_units", "(", "speed", ")", "self", ".", "speed_sp", "=", "int", "(", "round", "(", "speed", ")", ")", "self", ".", "time_sp", "=", "int", "(", "seconds", "*", "1000", ")", "self", ".", "_set_brake", "(", "brake", ")", "self", ".", "run_timed", "(", ")", "if", "block", ":", "self", ".", "wait_until", "(", "'running'", ",", "timeout", "=", "WAIT_RUNNING_TIMEOUT", ")", "self", ".", "wait_until_not_moving", "(", ")" ]
Rotate the motor at ``speed`` for ``seconds``. ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units.
[ "Rotate", "the", "motor", "at", "speed", "for", "seconds" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1007-L1026
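Since the method converts seconds to milliseconds internally, fractional durations work. A minimal sketch (port and values assumed):

from ev3dev2.motor import LargeMotor, OUTPUT_A, SpeedPercent

m = LargeMotor(OUTPUT_A)
m.on_for_seconds(SpeedPercent(75), 1.5)  # 1.5 s at 75% speed; blocks until done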
247,096
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
Motor.on
def on(self, speed, brake=True, block=False):
    """
    Rotate the motor at ``speed`` forever

    ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue`
    object, enabling use of other units.

    Note that `block` is False by default; this differs from the other
    `on_for_XYZ` methods.
    """
    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self._set_brake(brake)
    self.run_forever()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
python
def on(self, speed, brake=True, block=False):
    speed = self._speed_native_units(speed)
    self.speed_sp = int(round(speed))
    self._set_brake(brake)
    self.run_forever()

    if block:
        self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
        self.wait_until_not_moving()
[ "def", "on", "(", "self", ",", "speed", ",", "brake", "=", "True", ",", "block", "=", "False", ")", ":", "speed", "=", "self", ".", "_speed_native_units", "(", "speed", ")", "self", ".", "speed_sp", "=", "int", "(", "round", "(", "speed", ")", ")", "self", ".", "_set_brake", "(", "brake", ")", "self", ".", "run_forever", "(", ")", "if", "block", ":", "self", ".", "wait_until", "(", "'running'", ",", "timeout", "=", "WAIT_RUNNING_TIMEOUT", ")", "self", ".", "wait_until_not_moving", "(", ")" ]
Rotate the motor at ``speed`` forever. ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units. Note that `block` is False by default; this differs from the other `on_for_XYZ` methods.
[ "Rotate", "the", "motor", "at", "speed", "for", "forever" ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1028-L1045
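Because block defaults to False here, on() returns immediately. A sketch pairing it with off() (port and values assumed):

import time
from ev3dev2.motor import LargeMotor, OUTPUT_A

m = LargeMotor(OUTPUT_A)
m.on(30)       # returns immediately; motor keeps running at 30% speed
time.sleep(2)
m.off()        # stop with brake (the default)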
247,097
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
DcMotor.commands
def commands(self):
    """
    Returns a list of commands supported by the motor
    controller.
    """
    self._commands, value = self.get_attr_set(self._commands, 'commands')
    return value
python
def commands(self):
    self._commands, value = self.get_attr_set(self._commands, 'commands')
    return value
[ "def", "commands", "(", "self", ")", ":", "self", ".", "_commands", ",", "value", "=", "self", ".", "get_attr_set", "(", "self", ".", "_commands", ",", "'commands'", ")", "return", "value" ]
Returns a list of commands supported by the motor controller.
[ "Returns", "a", "list", "of", "commands", "supported", "by", "the", "motor", "controller", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1215-L1221
247,098
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
DcMotor.stop_actions
def stop_actions(self):
    """
    Gets a list of stop actions. Valid values are `coast`
    and `brake`.
    """
    self._stop_actions, value = self.get_attr_set(self._stop_actions, 'stop_actions')
    return value
python
def stop_actions(self):
    self._stop_actions, value = self.get_attr_set(self._stop_actions, 'stop_actions')
    return value
[ "def", "stop_actions", "(", "self", ")", ":", "self", ".", "_stop_actions", ",", "value", "=", "self", ".", "get_attr_set", "(", "self", ".", "_stop_actions", ",", "'stop_actions'", ")", "return", "value" ]
Gets a list of stop actions. Valid values are `coast` and `brake`.
[ "Gets", "a", "list", "of", "stop", "actions", ".", "Valid", "values", "are", "coast", "and", "brake", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1317-L1323
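A hedged sketch for a plain DC motor (for example an RCX or Power Functions motor through an adapter); port B and the printed command list are assumptions:

from ev3dev2.motor import DcMotor, OUTPUT_B

dc = DcMotor(OUTPUT_B)
print(dc.commands)                # typically includes 'run-forever', 'run-timed' and 'stop'
dc.run_forever(duty_cycle_sp=60)  # DC motors are driven by duty cycle, not speed regulation
dc.stop(stop_action='coast')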
247,099
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
ServoMotor.mid_pulse_sp
def mid_pulse_sp(self):
    """
    Used to set the pulse size in microseconds for the signal that tells the
    servo to drive to the mid position_sp. Default value is 1500. Valid
    values are 1300 to 1700. For example, on a 180 degree servo, this would be
    90 degrees. On a continuous rotation servo, this is the 'neutral'
    position_sp where the motor does not turn. You must write to the
    position_sp attribute for changes to this attribute to take effect.
    """
    self._mid_pulse_sp, value = self.get_attr_int(self._mid_pulse_sp, 'mid_pulse_sp')
    return value
python
def mid_pulse_sp(self):
    self._mid_pulse_sp, value = self.get_attr_int(self._mid_pulse_sp, 'mid_pulse_sp')
    return value
[ "def", "mid_pulse_sp", "(", "self", ")", ":", "self", ".", "_mid_pulse_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_mid_pulse_sp", ",", "'mid_pulse_sp'", ")", "return", "value" ]
Used to set the pulse size in microseconds for the signal that tells the servo to drive to the mid position_sp. Default value is 1500. Valid values are 1300 to 1700. For example, on a 180 degree servo, this would be 90 degrees. On a continuous rotation servo, this is the 'neutral' position_sp where the motor does not turn. You must write to the position_sp attribute for changes to this attribute to take effect.
[ "Used", "to", "set", "the", "pulse", "size", "in", "milliseconds", "for", "the", "signal", "that", "tells", "the", "servo", "to", "drive", "to", "the", "mid", "position_sp", ".", "Default", "value", "is", "1500", ".", "Valid", "values", "are", "1300", "to", "1700", ".", "For", "example", "on", "a", "180", "degree", "servo", "this", "would", "be", "90", "degrees", ".", "On", "continuous", "rotation", "servo", "this", "is", "the", "neutral", "position_sp", "where", "the", "motor", "does", "not", "turn", ".", "You", "must", "write", "to", "the", "position_sp", "attribute", "for", "changes", "to", "this", "attribute", "to", "take", "effect", "." ]
afc98d35004b533dc161a01f7c966e78607d7c1e
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1494-L1504
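A hedged sketch for a hobby servo (a servo controller is required; port C is an assumption). position_sp runs from -100 (min pulse) through 0 (mid pulse) to 100 (max pulse):

from ev3dev2.motor import ServoMotor, OUTPUT_C

servo = ServoMotor(OUTPUT_C)
servo.mid_pulse_sp = 1500  # neutral pulse width
servo.position_sp = 0      # drive to the mid position
servo.command = 'run'      # start generating pulses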