id | repo | path | func_name | original_string | language | docstring | sha | url |
---|---|---|---|---|---|---|---|---|
5,000 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._init_job_from_response | def _init_job_from_response(self, response):
""" Helper function to create a Job instance from a response. """
job = None
if response and 'jobReference' in response:
job = _job.Job(job_id=response['jobReference']['jobId'], context=self._context)
return job | python | Helper function to create a Job instance from a response. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L393-L398 |
5,001 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.extract_async | def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True,
compress=False):
"""Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None.
"""
format = format.upper()
if format == 'JSON':
format = 'NEWLINE_DELIMITED_JSON'
if format == 'CSV' and csv_delimiter is None:
csv_delimiter = ','
try:
response = self._api.table_extract(self._name_parts, destination, format, compress,
csv_delimiter, csv_header)
return self._init_job_from_response(response)
except Exception as e:
raise google.datalab.JobError(location=traceback.format_exc(), message=str(e),
reason=str(type(e))) | python | Starts a job to export the table to GCS.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the export Job if it was started successfully; else None. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L400-L426 |
5,002 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.extract | def extract(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
"""Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None.
"""
job = self.extract_async(destination, format=format, csv_delimiter=csv_delimiter,
csv_header=csv_header, compress=compress)
if job is not None:
job.wait()
return job | python | Exports the table to GCS; blocks until complete.
Args:
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to False.
Returns:
A Job object for the completed export Job if it was started successfully; else None. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L428-L446 |
5,003 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.load_async | def load_async(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied.
"""
if source_format == 'csv':
source_format = 'CSV'
elif source_format == 'json':
source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
if not(mode == 'create' or mode == 'append' or mode == 'overwrite'):
raise Exception("Invalid mode %s" % mode)
if csv_options is None:
csv_options = _csv_options.CSVOptions()
try:
response = self._api.jobs_insert_load(source, self._name_parts,
append=(mode == 'append'),
overwrite=(mode == 'overwrite'),
create=(mode == 'create'),
source_format=source_format,
field_delimiter=csv_options.delimiter,
allow_jagged_rows=csv_options.allow_jagged_rows,
allow_quoted_newlines=csv_options.allow_quoted_newlines,
encoding=csv_options.encoding.upper(),
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
quote=csv_options.quote,
skip_leading_rows=csv_options.skip_leading_rows)
except Exception as e:
raise e
return self._init_job_from_response(response) | python | Starts importing a table from GCS and return a Future.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the import if it was started successfully or None if not.
Raises:
Exception if the load job failed to be started or invalid arguments were supplied. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L448-L499 |
5,004 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.load | def load(self, source, mode='create', source_format='csv', csv_options=None,
ignore_unknown_values=False, max_bad_records=0):
""" Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
"""
job = self.load_async(source,
mode=mode,
source_format=source_format,
csv_options=csv_options,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records)
if job is not None:
job.wait()
return job | python | Load the table from GCS.
Args:
source: the URL of the source objects(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L501-L529 |
5,005 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._get_row_fetcher | def _get_row_fetcher(self, start_row=0, max_rows=None, page_size=_DEFAULT_PAGE_SIZE):
""" Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete.
"""
if not start_row:
start_row = 0
elif start_row < 0: # We are measuring from the table end
if self.length >= 0:
start_row += self.length
else:
raise Exception('Cannot use negative indices for table of unknown length')
schema = self.schema._bq_schema
name_parts = self._name_parts
def _retrieve_rows(page_token, count):
page_rows = []
if max_rows and count >= max_rows:
page_token = None
else:
if max_rows and page_size > (max_rows - count):
max_results = max_rows - count
else:
max_results = page_size
try:
if page_token:
response = self._api.tabledata_list(name_parts, page_token=page_token,
max_results=max_results)
else:
response = self._api.tabledata_list(name_parts, start_index=start_row,
max_results=max_results)
except Exception as e:
raise e
page_token = response['pageToken'] if 'pageToken' in response else None
if 'rows' in response:
page_rows = response['rows']
rows = []
for row_dict in page_rows:
rows.append(_parser.Parser.parse_row(schema, row_dict))
return rows, page_token
return _retrieve_rows | python | Get a function that can retrieve a page of rows.
The function returned is a closure so that it can have a signature suitable for use
by Iterator.
Args:
start_row: the row to start fetching from; default 0.
max_rows: the maximum number of rows to fetch (across all calls, not per-call). Default
is None which means no limit.
page_size: the maximum number of results to fetch per page; default 1024.
Returns:
A function that can be called repeatedly with a page token and running count, and that
will return an array of rows and a next page token; when the returned page token is None
the fetch is complete. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L531-L588 |
5,006 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.schema | def schema(self):
"""Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises
Exception if the request could not be executed or the response was malformed.
"""
if not self._schema:
try:
self._load_info()
self._schema = _schema.Schema(self._info['schema']['fields'])
except KeyError:
raise Exception('Unexpected table response: missing schema')
return self._schema | python | Retrieves the schema of the table.
Returns:
A Schema object containing a list of schema fields and associated metadata.
Raises
Exception if the request could not be executed or the response was malformed. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L662-L676 |
5,007 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.snapshot | def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) | python | def snapshot(self, at):
""" Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use snapshot() on an already decorated table")
value = Table._convert_decorator_time(at)
return Table("%s@%s" % (self._full_name, str(value)), context=self._context) | [
"def",
"snapshot",
"(",
"self",
",",
"at",
")",
":",
"if",
"self",
".",
"_name_parts",
".",
"decorator",
"!=",
"''",
":",
"raise",
"Exception",
"(",
"\"Cannot use snapshot() on an already decorated table\"",
")",
"value",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"at",
")",
"return",
"Table",
"(",
"\"%s@%s\"",
"%",
"(",
"self",
".",
"_full_name",
",",
"str",
"(",
"value",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a new Table which is a snapshot of this table at the specified time.
Args:
at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past. Passing None will get a reference the oldest snapshot.
Note that using a datetime will get a snapshot at an absolute point in time, while
a timedelta will provide a varying snapshot; any queries issued against such a Table
will be done against a snapshot that has an age relative to the execution time of the
query.
Returns:
A new Table object referencing the snapshot.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L803-L826 |
5,008 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.window | def window(self, begin, end=None):
""" Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the snapshot; if None, then the current time is used. The types and
interpretation of values is as for start.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use window() on an already decorated table")
start = Table._convert_decorator_time(begin)
if end is None:
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
# Both values must have the same sign
if (start > 0 >= stop) or (stop > 0 >= start):
raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
(str(begin), str(end)))
# start must be less than stop
if start > stop:
raise Exception("window: Between arguments: begin must be before end: %s, %s" %
(str(begin), str(end)))
return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context) | python | def window(self, begin, end=None):
""" Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the snapshot; if None, then the current time is used. The types and
interpretation of values is as for start.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid.
"""
if self._name_parts.decorator != '':
raise Exception("Cannot use window() on an already decorated table")
start = Table._convert_decorator_time(begin)
if end is None:
if isinstance(begin, datetime.timedelta):
end = datetime.timedelta(0)
else:
end = datetime.datetime.utcnow()
stop = Table._convert_decorator_time(end)
# Both values must have the same sign
if (start > 0 >= stop) or (stop > 0 >= start):
raise Exception("window: Between arguments must both be absolute or relative: %s, %s" %
(str(begin), str(end)))
# start must be less than stop
if start > stop:
raise Exception("window: Between arguments: begin must be before end: %s, %s" %
(str(begin), str(end)))
return Table("%s@%s-%s" % (self._full_name, str(start), str(stop)), context=self._context) | [
"def",
"window",
"(",
"self",
",",
"begin",
",",
"end",
"=",
"None",
")",
":",
"if",
"self",
".",
"_name_parts",
".",
"decorator",
"!=",
"''",
":",
"raise",
"Exception",
"(",
"\"Cannot use window() on an already decorated table\"",
")",
"start",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"begin",
")",
"if",
"end",
"is",
"None",
":",
"if",
"isinstance",
"(",
"begin",
",",
"datetime",
".",
"timedelta",
")",
":",
"end",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
")",
"else",
":",
"end",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"stop",
"=",
"Table",
".",
"_convert_decorator_time",
"(",
"end",
")",
"# Both values must have the same sign",
"if",
"(",
"start",
">",
"0",
">=",
"stop",
")",
"or",
"(",
"stop",
">",
"0",
">=",
"start",
")",
":",
"raise",
"Exception",
"(",
"\"window: Between arguments must both be absolute or relative: %s, %s\"",
"%",
"(",
"str",
"(",
"begin",
")",
",",
"str",
"(",
"end",
")",
")",
")",
"# start must be less than stop",
"if",
"start",
">",
"stop",
":",
"raise",
"Exception",
"(",
"\"window: Between arguments: begin must be before end: %s, %s\"",
"%",
"(",
"str",
"(",
"begin",
")",
",",
"str",
"(",
"end",
")",
")",
")",
"return",
"Table",
"(",
"\"%s@%s-%s\"",
"%",
"(",
"self",
".",
"_full_name",
",",
"str",
"(",
"start",
")",
",",
"str",
"(",
"stop",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a new Table limited to the rows added to this Table during the specified time range.
Args:
begin: the start time of the window. This can be a Python datetime (absolute) or timedelta
(relative to current time). The result must be after the table was created and no more
than seven days in the past.
Note that using a relative value will provide a varying snapshot, not a fixed
snapshot; any queries issued against such a Table will be done against a snapshot
that has an age relative to the execution time of the query.
end: the end time of the snapshot; if None, then the current time is used. The types and
interpretation of values is as for start.
Returns:
A new Table object referencing the window.
Raises:
An exception if this Table is already decorated, or if the time specified is invalid. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L828-L870 |
5,009 | googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/transform.py | serialize_example | def serialize_example(transformed_json_data, features, feature_indices, target_name):
"""Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
Returns:
The text line representation of an instance in libsvm format.
"""
import six
import tensorflow as tf
from trainer import feature_transforms
line = str(transformed_json_data[target_name][0])
for name, info in feature_indices:
if features[name]['transform'] in [feature_transforms.IDENTITY_TRANSFORM,
feature_transforms.SCALE_TRANSFORM]:
line += ' %d:%s' % (info['index_start'], str(transformed_json_data[name][0]))
elif features[name]['transform'] in [feature_transforms.ONE_HOT_TRANSFORM,
feature_transforms.MULTI_HOT_TRANSFORM]:
for i in range(info['size']):
if i in transformed_json_data[name]:
line += ' %d:1' % (info['index_start'] + i)
elif features[name]['transform'] in [feature_transforms.IMAGE_TRANSFORM]:
for i in range(info['size']):
line += ' %d:%s' % (info['index_start'] + i, str(transformed_json_data[name][i]))
return line | python | Makes an instance of data in libsvm format.
Args:
transformed_json_data: dict of transformed data.
features: features config.
feature_indices: output of feature_transforms.get_transformed_feature_indices()
Returns:
The text line representation of an instance in libsvm format. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/transform.py#L392-L421 |
5,010 | googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.delete | def delete(self, delete_contents=False):
"""Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if table was nonexistent).
"""
if not self.exists():
raise Exception('Cannot delete non-existent dataset %s' % self._full_name)
try:
self._api.datasets_delete(self._name_parts, delete_contents=delete_contents)
except Exception as e:
raise e
self._info = None
return None | python | Issues a request to delete the dataset.
Args:
delete_contents: if True, any tables and views in the dataset will be deleted. If False
and the dataset is non-empty an exception will be raised.
Returns:
None on success.
Raises:
Exception if the delete fails (including if table was nonexistent). | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L101-L119 |
5,011 | googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.create | def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self | python | Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L121-L141 |
5,012 | googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.update | def update(self, friendly_name=None, description=None):
""" Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns:
"""
self._get_info()
if self._info:
if friendly_name:
self._info['friendlyName'] = friendly_name
if description:
self._info['description'] = description
try:
self._api.datasets_update(self._name_parts, self._info)
except Exception as e:
raise e
finally:
self._info = None | python | Selectively updates Dataset information.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
Returns: | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L143-L164 |
5,013 | googledatalab/pydatalab | google/datalab/bigquery/_view.py | View.query | def query(self):
"""The Query that defines the view."""
if not self.exists():
return None
self._table._load_info()
if 'view' in self._table._info and 'query' in self._table._info['view']:
return _query.Query(self._table._info['view']['query'])
return None | python | The Query that defines the view. | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_view.py#L70-L77 |
5,014 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | run_numerical_categorical_analysis | def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/min/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels) | python | def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/min/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels) | [
"def",
"run_numerical_categorical_analysis",
"(",
"args",
",",
"schema_list",
")",
":",
"header",
"=",
"[",
"column",
"[",
"'name'",
"]",
"for",
"column",
"in",
"schema_list",
"]",
"input_files",
"=",
"file_io",
".",
"get_matching_files",
"(",
"args",
".",
"input_file_pattern",
")",
"# Check the schema is valid",
"for",
"col_schema",
"in",
"schema_list",
":",
"col_type",
"=",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"col_type",
"!=",
"'string'",
"and",
"col_type",
"!=",
"'integer'",
"and",
"col_type",
"!=",
"'float'",
":",
"raise",
"ValueError",
"(",
"'Schema contains an unsupported type %s.'",
"%",
"col_type",
")",
"# initialize the results",
"def",
"_init_numerical_results",
"(",
")",
":",
"return",
"{",
"'min'",
":",
"float",
"(",
"'inf'",
")",
",",
"'max'",
":",
"float",
"(",
"'-inf'",
")",
",",
"'count'",
":",
"0",
",",
"'sum'",
":",
"0.0",
"}",
"numerical_results",
"=",
"collections",
".",
"defaultdict",
"(",
"_init_numerical_results",
")",
"categorical_results",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"# for each file, update the numerical stats from that file, and update the set",
"# of unique labels.",
"for",
"input_file",
"in",
"input_files",
":",
"with",
"file_io",
".",
"FileIO",
"(",
"input_file",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"parsed_line",
"=",
"dict",
"(",
"zip",
"(",
"header",
",",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
")",
")",
"for",
"col_schema",
"in",
"schema_list",
":",
"col_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"col_type",
"=",
"col_schema",
"[",
"'type'",
"]",
"if",
"col_type",
".",
"lower",
"(",
")",
"==",
"'string'",
":",
"categorical_results",
"[",
"col_name",
"]",
".",
"update",
"(",
"[",
"parsed_line",
"[",
"col_name",
"]",
"]",
")",
"else",
":",
"# numerical column.",
"# if empty, skip",
"if",
"not",
"parsed_line",
"[",
"col_name",
"]",
".",
"strip",
"(",
")",
":",
"continue",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'min'",
"]",
"=",
"(",
"min",
"(",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'min'",
"]",
",",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
")",
")",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'max'",
"]",
"=",
"(",
"max",
"(",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'max'",
"]",
",",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
")",
")",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"+=",
"1",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"+=",
"float",
"(",
"parsed_line",
"[",
"col_name",
"]",
")",
"# Update numerical_results to just have min/min/mean",
"for",
"col_schema",
"in",
"schema_list",
":",
"if",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"!=",
"'string'",
":",
"col_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"mean",
"=",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"/",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"del",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'sum'",
"]",
"del",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'count'",
"]",
"numerical_results",
"[",
"col_name",
"]",
"[",
"'mean'",
"]",
"=",
"mean",
"# Write the numerical_results to a json file.",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"NUMERICAL_ANALYSIS_FILE",
")",
",",
"json",
".",
"dumps",
"(",
"numerical_results",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"# Write the vocab files. Each label is on its own line.",
"for",
"name",
",",
"unique_labels",
"in",
"six",
".",
"iteritems",
"(",
"categorical_results",
")",
":",
"labels",
"=",
"'\\n'",
".",
"join",
"(",
"list",
"(",
"unique_labels",
")",
")",
"file_io",
".",
"write_string_to_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"CATEGORICAL_ANALYSIS_FILE",
"%",
"name",
")",
",",
"labels",
")"
] | Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types. | [
"Makes",
"the",
"numerical",
"and",
"categorical",
"analysis",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L69-L144 |
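Illustration (not part of the dataset record above): a minimal standalone sketch of the same single-pass pattern — accumulate min/max/sum/count per numeric column and a vocabulary set per string column, then keep only min/max/mean. The column names and sample rows are invented for the example.

import collections

def analyze_rows(rows, numeric_cols, string_cols):
  # Running stats per numeric column; vocabulary set per string column.
  stats = {c: {'min': float('inf'), 'max': float('-inf'), 'count': 0, 'sum': 0.0}
           for c in numeric_cols}
  vocab = collections.defaultdict(set)
  for row in rows:
    for c in numeric_cols:
      value = str(row.get(c, ''))
      if not value.strip():
        continue  # skip empty cells, as the analysis above does
      v = float(value)
      stats[c]['min'] = min(stats[c]['min'], v)
      stats[c]['max'] = max(stats[c]['max'], v)
      stats[c]['count'] += 1
      stats[c]['sum'] += v
    for c in string_cols:
      vocab[c].add(row[c])
  # Replace sum/count with the mean, mirroring the function above.
  for c in numeric_cols:
    s = stats[c]
    s['mean'] = s['sum'] / s['count']
    del s['sum'], s['count']
  return stats, vocab

rows = [{'age': '34', 'city': 'Seattle'}, {'age': '28', 'city': 'Austin'}]
print(analyze_rows(rows, numeric_cols=['age'], string_cols=['city']))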
5,015 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | run_analysis | def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True) | python | def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True) | [
"def",
"run_analysis",
"(",
"args",
")",
":",
"# Read the schema and input feature types",
"schema_list",
"=",
"json",
".",
"loads",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"args",
".",
"schema_file",
")",
")",
"run_numerical_categorical_analysis",
"(",
"args",
",",
"schema_list",
")",
"# Also save a copy of the schema in the output folder.",
"file_io",
".",
"copy",
"(",
"args",
".",
"schema_file",
",",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"SCHEMA_FILE",
")",
",",
"overwrite",
"=",
"True",
")"
] | Builds an analysis files for training. | [
"Builds",
"an",
"analysis",
"files",
"for",
"training",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py#L147-L159 |
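A hedged sketch of how this driver might be invoked. The import line and all paths are assumptions for illustration; run_analysis only needs an object carrying input_file_pattern, output_dir and schema_file attributes.

import argparse
from local_preprocess import run_analysis  # assumption: the module above is importable under this name

args = argparse.Namespace(
    input_file_pattern='./data/train-*.csv',  # placeholder CSV shards matching the schema
    output_dir='./analysis_out',              # placeholder; analysis files and the schema copy land here
    schema_file='./data/schema.json')         # placeholder; a JSON list of {"name": ..., "type": ...} entries
run_analysis(args)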
5,016 | googledatalab/pydatalab | google/datalab/utils/commands/_html.py | Html._repr_html_ | def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) | python | def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"parts",
"=",
"[",
"]",
"if",
"self",
".",
"_class",
":",
"parts",
".",
"append",
"(",
"'<div id=\"hh_%s\" class=\"%s\">%s</div>'",
"%",
"(",
"self",
".",
"_id",
",",
"self",
".",
"_class",
",",
"self",
".",
"_markup",
")",
")",
"else",
":",
"parts",
".",
"append",
"(",
"'<div id=\"hh_%s\">%s</div>'",
"%",
"(",
"self",
".",
"_id",
",",
"self",
".",
"_markup",
")",
")",
"if",
"len",
"(",
"self",
".",
"_script",
")",
"!=",
"0",
":",
"parts",
".",
"append",
"(",
"'<script>'",
")",
"parts",
".",
"append",
"(",
"'require(['",
")",
"parts",
".",
"append",
"(",
"','",
".",
"join",
"(",
"[",
"'\"%s\"'",
"%",
"d",
"[",
"0",
"]",
"for",
"d",
"in",
"self",
".",
"_dependencies",
"]",
")",
")",
"parts",
".",
"append",
"(",
"'], function('",
")",
"parts",
".",
"append",
"(",
"','",
".",
"join",
"(",
"[",
"d",
"[",
"1",
"]",
"for",
"d",
"in",
"self",
".",
"_dependencies",
"]",
")",
")",
"parts",
".",
"append",
"(",
"') {'",
")",
"parts",
".",
"append",
"(",
"self",
".",
"_script",
")",
"parts",
".",
"append",
"(",
"'});'",
")",
"parts",
".",
"append",
"(",
"'</script>'",
")",
"return",
"''",
".",
"join",
"(",
"parts",
")"
] | Generates the HTML representation. | [
"Generates",
"the",
"HTML",
"representation",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L64-L84 |
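A standalone sketch of the same assembly pattern (not the class itself): a div wrapping the markup, plus an optional script wrapped in a require([...]) call whose module list and parameter list come from (module, name) dependency pairs.

def wrap_html(elem_id, markup, script='', dependencies=(), css_class=None):
  # dependencies: sequence of (module_path, parameter_name) pairs, as in the method above.
  parts = []
  if css_class:
    parts.append('<div id="hh_%s" class="%s">%s</div>' % (elem_id, css_class, markup))
  else:
    parts.append('<div id="hh_%s">%s</div>' % (elem_id, markup))
  if script:
    parts.append('<script>require([%s], function(%s) {%s});</script>' % (
        ','.join('"%s"' % mod for mod, _ in dependencies),
        ','.join(name for _, name in dependencies),
        script))
  return ''.join(parts)

print(wrap_html('42', '<b>hello</b>', 'console.log(d3);', [('d3', 'd3')]))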
5,017 | googledatalab/pydatalab | google/datalab/utils/commands/_html.py | HtmlBuilder._render_objects | def _render_objects(self, items, attributes=None, datatype='object'):
"""Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data.
"""
if not items:
return
if datatype == 'chartdata':
if not attributes:
attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
items = items['rows']
indices = {attributes[i]: i for i in range(0, len(attributes))}
num_segments = len(self._segments)
self._segments.append('<table>')
first = True
for o in items:
if first:
first = False
if datatype == 'dict' and not attributes:
attributes = list(o.keys())
if attributes is not None:
self._segments.append('<tr>')
for attr in attributes:
self._segments.append('<th>%s</th>' % attr)
self._segments.append('</tr>')
self._segments.append('<tr>')
if attributes is None:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))
else:
for attr in attributes:
if datatype == 'dict':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True))
elif datatype == 'chartdata':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'],
nbsp=True))
else:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr),
nbsp=True))
self._segments.append('</tr>')
self._segments.append('</table>')
if first:
# The table was empty; drop it from the segments.
self._segments = self._segments[:num_segments] | python | def _render_objects(self, items, attributes=None, datatype='object'):
"""Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data.
"""
if not items:
return
if datatype == 'chartdata':
if not attributes:
attributes = [items['cols'][i]['label'] for i in range(0, len(items['cols']))]
items = items['rows']
indices = {attributes[i]: i for i in range(0, len(attributes))}
num_segments = len(self._segments)
self._segments.append('<table>')
first = True
for o in items:
if first:
first = False
if datatype == 'dict' and not attributes:
attributes = list(o.keys())
if attributes is not None:
self._segments.append('<tr>')
for attr in attributes:
self._segments.append('<th>%s</th>' % attr)
self._segments.append('</tr>')
self._segments.append('<tr>')
if attributes is None:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o))
else:
for attr in attributes:
if datatype == 'dict':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.get(attr, None), nbsp=True))
elif datatype == 'chartdata':
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o['c'][indices[attr]]['v'],
nbsp=True))
else:
self._segments.append('<td>%s</td>' % HtmlBuilder._format(o.__getattribute__(attr),
nbsp=True))
self._segments.append('</tr>')
self._segments.append('</table>')
if first:
# The table was empty; drop it from the segments.
self._segments = self._segments[:num_segments] | [
"def",
"_render_objects",
"(",
"self",
",",
"items",
",",
"attributes",
"=",
"None",
",",
"datatype",
"=",
"'object'",
")",
":",
"if",
"not",
"items",
":",
"return",
"if",
"datatype",
"==",
"'chartdata'",
":",
"if",
"not",
"attributes",
":",
"attributes",
"=",
"[",
"items",
"[",
"'cols'",
"]",
"[",
"i",
"]",
"[",
"'label'",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"items",
"[",
"'cols'",
"]",
")",
")",
"]",
"items",
"=",
"items",
"[",
"'rows'",
"]",
"indices",
"=",
"{",
"attributes",
"[",
"i",
"]",
":",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"attributes",
")",
")",
"}",
"num_segments",
"=",
"len",
"(",
"self",
".",
"_segments",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'<table>'",
")",
"first",
"=",
"True",
"for",
"o",
"in",
"items",
":",
"if",
"first",
":",
"first",
"=",
"False",
"if",
"datatype",
"==",
"'dict'",
"and",
"not",
"attributes",
":",
"attributes",
"=",
"list",
"(",
"o",
".",
"keys",
"(",
")",
")",
"if",
"attributes",
"is",
"not",
"None",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<tr>'",
")",
"for",
"attr",
"in",
"attributes",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<th>%s</th>'",
"%",
"attr",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</tr>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'<tr>'",
")",
"if",
"attributes",
"is",
"None",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
")",
")",
"else",
":",
"for",
"attr",
"in",
"attributes",
":",
"if",
"datatype",
"==",
"'dict'",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
".",
"get",
"(",
"attr",
",",
"None",
")",
",",
"nbsp",
"=",
"True",
")",
")",
"elif",
"datatype",
"==",
"'chartdata'",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
"[",
"'c'",
"]",
"[",
"indices",
"[",
"attr",
"]",
"]",
"[",
"'v'",
"]",
",",
"nbsp",
"=",
"True",
")",
")",
"else",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<td>%s</td>'",
"%",
"HtmlBuilder",
".",
"_format",
"(",
"o",
".",
"__getattribute__",
"(",
"attr",
")",
",",
"nbsp",
"=",
"True",
")",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</tr>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</table>'",
")",
"if",
"first",
":",
"# The table was empty; drop it from the segments.",
"self",
".",
"_segments",
"=",
"self",
".",
"_segments",
"[",
":",
"num_segments",
"]"
] | Renders an HTML table with the specified list of objects.
Args:
items: the iterable collection of objects to render.
attributes: the optional list of properties or keys to render.
datatype: the type of data; one of 'object' for Python objects, 'dict' for a list
of dictionaries, or 'chartdata' for Google chart data. | [
"Renders",
"an",
"HTML",
"table",
"with",
"the",
"specified",
"list",
"of",
"objects",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L96-L149 |
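For the common 'dict' case, the method above turns a list of dictionaries into an HTML table whose header row comes from the first item's keys. A minimal standalone equivalent, leaving out the 'object' and 'chartdata' branches:

def dicts_to_html_table(items, attributes=None):
  if not items:
    return ''
  segments = ['<table>']
  for i, item in enumerate(items):
    if i == 0:
      attributes = attributes or list(item.keys())
      segments.append('<tr>' + ''.join('<th>%s</th>' % a for a in attributes) + '</tr>')
    segments.append('<tr>' +
                    ''.join('<td>%s</td>' % item.get(a, '&nbsp;') for a in attributes) +
                    '</tr>')
  segments.append('</table>')
  return ''.join(segments)

print(dicts_to_html_table([{'name': 'a', 'count': 1}, {'name': 'b', 'count': 2}]))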
5,018 | googledatalab/pydatalab | google/datalab/utils/commands/_html.py | HtmlBuilder._render_list | def _render_list(self, items, empty='<pre><empty></pre>'):
"""Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty.
"""
if not items or len(items) == 0:
self._segments.append(empty)
return
self._segments.append('<ul>')
for o in items:
self._segments.append('<li>')
self._segments.append(str(o))
self._segments.append('</li>')
self._segments.append('</ul>') | python | def _render_list(self, items, empty='<pre><empty></pre>'):
"""Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty.
"""
if not items or len(items) == 0:
self._segments.append(empty)
return
self._segments.append('<ul>')
for o in items:
self._segments.append('<li>')
self._segments.append(str(o))
self._segments.append('</li>')
self._segments.append('</ul>') | [
"def",
"_render_list",
"(",
"self",
",",
"items",
",",
"empty",
"=",
"'<pre><empty></pre>'",
")",
":",
"if",
"not",
"items",
"or",
"len",
"(",
"items",
")",
"==",
"0",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"empty",
")",
"return",
"self",
".",
"_segments",
".",
"append",
"(",
"'<ul>'",
")",
"for",
"o",
"in",
"items",
":",
"self",
".",
"_segments",
".",
"append",
"(",
"'<li>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"str",
"(",
"o",
")",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</li>'",
")",
"self",
".",
"_segments",
".",
"append",
"(",
"'</ul>'",
")"
] | Renders an HTML list with the specified list of strings.
Args:
items: the iterable collection of objects to render.
empty: what to render if the list is None or empty. | [
"Renders",
"an",
"HTML",
"list",
"with",
"the",
"specified",
"list",
"of",
"strings",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_html.py#L161-L176 |
5,019 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.sample | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
sql = self._repr_sql_()
return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields,
sampling=sampling).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | python | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
sql = self._repr_sql_()
return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields,
sampling=sampling).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | [
"def",
"sample",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"# Do import here to avoid top-level circular dependencies.",
"from",
".",
"import",
"_query",
"sql",
"=",
"self",
".",
"_repr_sql_",
"(",
")",
"return",
"_query",
".",
"Query",
".",
"sampling_query",
"(",
"sql",
",",
"context",
"=",
"self",
".",
"_context",
",",
"count",
"=",
"count",
",",
"fields",
"=",
"fields",
",",
"sampling",
"=",
"sampling",
")",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Retrieves a sampling of data from the table.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or query response was malformed. | [
"Retrieves",
"a",
"sampling",
"of",
"data",
"from",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L248-L277 |
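A hedged usage sketch, assuming the legacy datalab API is installed and the table already exists. The table name is a placeholder, and to_dataframe is an assumption — it is not shown in this record.

import datalab.bigquery as bq

tbl = bq.Table('my-project:my_dataset.my_table')  # placeholder table name
# Ten rows of two fields using standard SQL; returns a QueryResultsTable.
results = tbl.sample(fields=['name', 'value'], count=10, dialect='standard')
df = results.to_dataframe()  # assumed helper for turning the results into a pandas DataFrame
print(df.head())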
5,020 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table._encode_dict_as_row | def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record | python | def _encode_dict_as_row(record, column_name_map):
""" Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary.
"""
for k in list(record.keys()):
v = record[k]
# If the column is a date, convert to ISO string.
if isinstance(v, pandas.Timestamp) or isinstance(v, datetime.datetime):
v = record[k] = record[k].isoformat()
# If k has invalid characters clean it up
if k not in column_name_map:
column_name_map[k] = ''.join(c for c in k if c in Table._VALID_COLUMN_NAME_CHARACTERS)
new_k = column_name_map[k]
if k != new_k:
record[new_k] = v
del record[k]
return record | [
"def",
"_encode_dict_as_row",
"(",
"record",
",",
"column_name_map",
")",
":",
"for",
"k",
"in",
"list",
"(",
"record",
".",
"keys",
"(",
")",
")",
":",
"v",
"=",
"record",
"[",
"k",
"]",
"# If the column is a date, convert to ISO string.",
"if",
"isinstance",
"(",
"v",
",",
"pandas",
".",
"Timestamp",
")",
"or",
"isinstance",
"(",
"v",
",",
"datetime",
".",
"datetime",
")",
":",
"v",
"=",
"record",
"[",
"k",
"]",
"=",
"record",
"[",
"k",
"]",
".",
"isoformat",
"(",
")",
"# If k has invalid characters clean it up",
"if",
"k",
"not",
"in",
"column_name_map",
":",
"column_name_map",
"[",
"k",
"]",
"=",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"k",
"if",
"c",
"in",
"Table",
".",
"_VALID_COLUMN_NAME_CHARACTERS",
")",
"new_k",
"=",
"column_name_map",
"[",
"k",
"]",
"if",
"k",
"!=",
"new_k",
":",
"record",
"[",
"new_k",
"]",
"=",
"v",
"del",
"record",
"[",
"k",
"]",
"return",
"record"
] | Encode a dictionary representing a table row in a form suitable for streaming to BQ.
This includes encoding timestamps as ISO-compatible strings and removing invalid
characters from column names.
Args:
record: a Python dictionary representing the table row.
column_name_map: a dictionary mapping dictionary keys to column names. This is initially
empty and built up by this method when it first encounters each column, then used as a
cache subsequently.
Returns:
The sanitized dictionary. | [
"Encode",
"a",
"dictionary",
"representing",
"a",
"table",
"row",
"in",
"a",
"form",
"suitable",
"for",
"streaming",
"to",
"BQ",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L280-L307 |
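The same sanitization idea in isolation: ISO-format datetime values and strip disallowed characters from keys. The allowed-character set below is an assumption, since the class constant is not shown in this record.

import datetime
import string

VALID_CHARS = set(string.ascii_letters + string.digits + '_')  # assumed definition

def encode_row(record):
  out = {}
  for key, value in record.items():
    if isinstance(value, datetime.datetime):
      value = value.isoformat()  # timestamps become ISO strings, as above
    clean_key = ''.join(c for c in key if c in VALID_CHARS)
    out[clean_key] = value
  return out

print(encode_row({'event time!': datetime.datetime(2024, 1, 1, 12, 0), 'count': 3}))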
5,021 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.insert_data | def insert_data(self, data, include_index=False, index_name=None):
""" Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed.
"""
# TODO(gram): we could create the Table here is it doesn't exist using a schema derived
# from the data. IIRC we decided not to but doing so seems less unwieldy that having to
# create it first and then validate the schema against it itself.
# There are BigQuery limits on the streaming API:
#
# max_rows_per_post = 500
# max_bytes_per_row = 20000
# max_rows_per_second = 10000
# max_bytes_per_post = 1000000
# max_bytes_per_second = 10000000
#
# It is non-trivial to enforce these here, and the max bytes per row is not something we
# can really control. As an approximation we enforce the 500 row limit
# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
max_rows_per_post = 500
post_interval = 0.05
# TODO(gram): add different exception types for each failure case.
if not self.exists():
raise Exception('Table %s does not exist.' % self._full_name)
data_schema = _schema.Schema.from_data(data)
if isinstance(data, list):
if include_index:
if not index_name:
index_name = 'Index'
data_schema._add_field(index_name, 'INTEGER')
table_schema = self.schema
# Do some validation of the two schema to make sure they are compatible.
for data_field in data_schema:
name = data_field.name
table_field = table_schema[name]
if table_field is None:
raise Exception('Table does not contain field %s' % name)
data_type = data_field.data_type
table_type = table_field.data_type
if table_type != data_type:
raise Exception('Field %s in data has type %s but in table has type %s' %
(name, data_type, table_type))
total_rows = len(data)
total_pushed = 0
job_id = uuid.uuid4().hex
rows = []
column_name_map = {}
is_dataframe = isinstance(data, pandas.DataFrame)
if is_dataframe:
# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
# drops the original index and uses an integer range.
gen = data.reset_index(drop=not include_index).iterrows()
else:
gen = enumerate(data)
for index, row in gen:
if is_dataframe:
row = row.to_dict()
elif include_index:
row[index_name] = index
rows.append({
'json': self._encode_dict_as_row(row, column_name_map),
'insertId': job_id + str(index)
})
total_pushed += 1
if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
try:
response = self._api.tabledata_insert_all(self._name_parts, rows)
except Exception as e:
raise e
if 'insertErrors' in response:
raise Exception('insertAll failed: %s' % response['insertErrors'])
time.sleep(post_interval) # Streaming API is rate-limited
rows = []
# Block until data is ready
while True:
self._info = self._api.tables_get(self._name_parts)
if 'streamingBuffer' not in self._info or \
'estimatedRows' not in self._info['streamingBuffer'] or \
int(self._info['streamingBuffer']['estimatedRows']) > 0:
break
time.sleep(2)
return self | python | def insert_data(self, data, include_index=False, index_name=None):
""" Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed.
"""
# TODO(gram): we could create the Table here is it doesn't exist using a schema derived
# from the data. IIRC we decided not to but doing so seems less unwieldy that having to
# create it first and then validate the schema against it itself.
# There are BigQuery limits on the streaming API:
#
# max_rows_per_post = 500
# max_bytes_per_row = 20000
# max_rows_per_second = 10000
# max_bytes_per_post = 1000000
# max_bytes_per_second = 10000000
#
# It is non-trivial to enforce these here, and the max bytes per row is not something we
# can really control. As an approximation we enforce the 500 row limit
# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).
max_rows_per_post = 500
post_interval = 0.05
# TODO(gram): add different exception types for each failure case.
if not self.exists():
raise Exception('Table %s does not exist.' % self._full_name)
data_schema = _schema.Schema.from_data(data)
if isinstance(data, list):
if include_index:
if not index_name:
index_name = 'Index'
data_schema._add_field(index_name, 'INTEGER')
table_schema = self.schema
# Do some validation of the two schema to make sure they are compatible.
for data_field in data_schema:
name = data_field.name
table_field = table_schema[name]
if table_field is None:
raise Exception('Table does not contain field %s' % name)
data_type = data_field.data_type
table_type = table_field.data_type
if table_type != data_type:
raise Exception('Field %s in data has type %s but in table has type %s' %
(name, data_type, table_type))
total_rows = len(data)
total_pushed = 0
job_id = uuid.uuid4().hex
rows = []
column_name_map = {}
is_dataframe = isinstance(data, pandas.DataFrame)
if is_dataframe:
# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)
# drops the original index and uses an integer range.
gen = data.reset_index(drop=not include_index).iterrows()
else:
gen = enumerate(data)
for index, row in gen:
if is_dataframe:
row = row.to_dict()
elif include_index:
row[index_name] = index
rows.append({
'json': self._encode_dict_as_row(row, column_name_map),
'insertId': job_id + str(index)
})
total_pushed += 1
if (total_pushed == total_rows) or (len(rows) == max_rows_per_post):
try:
response = self._api.tabledata_insert_all(self._name_parts, rows)
except Exception as e:
raise e
if 'insertErrors' in response:
raise Exception('insertAll failed: %s' % response['insertErrors'])
time.sleep(post_interval) # Streaming API is rate-limited
rows = []
# Block until data is ready
while True:
self._info = self._api.tables_get(self._name_parts)
if 'streamingBuffer' not in self._info or \
'estimatedRows' not in self._info['streamingBuffer'] or \
int(self._info['streamingBuffer']['estimatedRows']) > 0:
break
time.sleep(2)
return self | [
"def",
"insert_data",
"(",
"self",
",",
"data",
",",
"include_index",
"=",
"False",
",",
"index_name",
"=",
"None",
")",
":",
"# TODO(gram): we could create the Table here is it doesn't exist using a schema derived",
"# from the data. IIRC we decided not to but doing so seems less unwieldy that having to",
"# create it first and then validate the schema against it itself.",
"# There are BigQuery limits on the streaming API:",
"#",
"# max_rows_per_post = 500",
"# max_bytes_per_row = 20000",
"# max_rows_per_second = 10000",
"# max_bytes_per_post = 1000000",
"# max_bytes_per_second = 10000000",
"#",
"# It is non-trivial to enforce these here, and the max bytes per row is not something we",
"# can really control. As an approximation we enforce the 500 row limit",
"# with a 0.05 sec POST interval (to enforce the 10,000 rows per sec limit).",
"max_rows_per_post",
"=",
"500",
"post_interval",
"=",
"0.05",
"# TODO(gram): add different exception types for each failure case.",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist.'",
"%",
"self",
".",
"_full_name",
")",
"data_schema",
"=",
"_schema",
".",
"Schema",
".",
"from_data",
"(",
"data",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"if",
"include_index",
":",
"if",
"not",
"index_name",
":",
"index_name",
"=",
"'Index'",
"data_schema",
".",
"_add_field",
"(",
"index_name",
",",
"'INTEGER'",
")",
"table_schema",
"=",
"self",
".",
"schema",
"# Do some validation of the two schema to make sure they are compatible.",
"for",
"data_field",
"in",
"data_schema",
":",
"name",
"=",
"data_field",
".",
"name",
"table_field",
"=",
"table_schema",
"[",
"name",
"]",
"if",
"table_field",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Table does not contain field %s'",
"%",
"name",
")",
"data_type",
"=",
"data_field",
".",
"data_type",
"table_type",
"=",
"table_field",
".",
"data_type",
"if",
"table_type",
"!=",
"data_type",
":",
"raise",
"Exception",
"(",
"'Field %s in data has type %s but in table has type %s'",
"%",
"(",
"name",
",",
"data_type",
",",
"table_type",
")",
")",
"total_rows",
"=",
"len",
"(",
"data",
")",
"total_pushed",
"=",
"0",
"job_id",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"rows",
"=",
"[",
"]",
"column_name_map",
"=",
"{",
"}",
"is_dataframe",
"=",
"isinstance",
"(",
"data",
",",
"pandas",
".",
"DataFrame",
")",
"if",
"is_dataframe",
":",
"# reset_index creates a new dataframe so we don't affect the original. reset_index(drop=True)",
"# drops the original index and uses an integer range.",
"gen",
"=",
"data",
".",
"reset_index",
"(",
"drop",
"=",
"not",
"include_index",
")",
".",
"iterrows",
"(",
")",
"else",
":",
"gen",
"=",
"enumerate",
"(",
"data",
")",
"for",
"index",
",",
"row",
"in",
"gen",
":",
"if",
"is_dataframe",
":",
"row",
"=",
"row",
".",
"to_dict",
"(",
")",
"elif",
"include_index",
":",
"row",
"[",
"index_name",
"]",
"=",
"index",
"rows",
".",
"append",
"(",
"{",
"'json'",
":",
"self",
".",
"_encode_dict_as_row",
"(",
"row",
",",
"column_name_map",
")",
",",
"'insertId'",
":",
"job_id",
"+",
"str",
"(",
"index",
")",
"}",
")",
"total_pushed",
"+=",
"1",
"if",
"(",
"total_pushed",
"==",
"total_rows",
")",
"or",
"(",
"len",
"(",
"rows",
")",
"==",
"max_rows_per_post",
")",
":",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tabledata_insert_all",
"(",
"self",
".",
"_name_parts",
",",
"rows",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'insertErrors'",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'insertAll failed: %s'",
"%",
"response",
"[",
"'insertErrors'",
"]",
")",
"time",
".",
"sleep",
"(",
"post_interval",
")",
"# Streaming API is rate-limited",
"rows",
"=",
"[",
"]",
"# Block until data is ready",
"while",
"True",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"if",
"'streamingBuffer'",
"not",
"in",
"self",
".",
"_info",
"or",
"'estimatedRows'",
"not",
"in",
"self",
".",
"_info",
"[",
"'streamingBuffer'",
"]",
"or",
"int",
"(",
"self",
".",
"_info",
"[",
"'streamingBuffer'",
"]",
"[",
"'estimatedRows'",
"]",
")",
">",
"0",
":",
"break",
"time",
".",
"sleep",
"(",
"2",
")",
"return",
"self"
] | Insert the contents of a Pandas DataFrame or a list of dictionaries into the table.
The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per
second, as BigQuery has some limits on streaming rates.
Args:
data: the DataFrame or list to insert.
include_index: whether to include the DataFrame or list index as a column in the BQ table.
index_name: for a list, if include_index is True, this should be the name for the index.
If not specified, 'Index' will be used.
Returns:
The table.
Raises:
Exception if the table doesn't exist, the table's schema differs from the data's schema,
or the insert failed. | [
"Insert",
"the",
"contents",
"of",
"a",
"Pandas",
"DataFrame",
"or",
"a",
"list",
"of",
"dictionaries",
"into",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L309-L417 |
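A hedged usage sketch for streaming a small DataFrame into an existing table whose schema matches the frame. The table name is a placeholder and the bq.Table construction is an assumption; insert_data itself is the method documented above.

import pandas as pd
import datalab.bigquery as bq

df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
tbl = bq.Table('my_dataset.events')  # placeholder; the table must already exist with a matching schema
tbl.insert_data(df)                  # streamed in batches of at most 500 rows per POST, as described above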
5,022 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.range | def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(datalab.utils.Iterator(fetcher)) | python | def range(self, start_row=0, max_rows=None):
""" Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator.
"""
fetcher = self._get_row_fetcher(start_row=start_row, max_rows=max_rows)
return iter(datalab.utils.Iterator(fetcher)) | [
"def",
"range",
"(",
"self",
",",
"start_row",
"=",
"0",
",",
"max_rows",
"=",
"None",
")",
":",
"fetcher",
"=",
"self",
".",
"_get_row_fetcher",
"(",
"start_row",
"=",
"start_row",
",",
"max_rows",
"=",
"max_rows",
")",
"return",
"iter",
"(",
"datalab",
".",
"utils",
".",
"Iterator",
"(",
"fetcher",
")",
")"
] | Get an iterator to iterate through a set of table rows.
Args:
start_row: the row of the table at which to start the iteration (default 0)
max_rows: an upper limit on the number of rows to iterate through (default None)
Returns:
A row iterator. | [
"Get",
"an",
"iterator",
"to",
"iterate",
"through",
"a",
"set",
"of",
"table",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L614-L625 |
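Usage sketch: iterate over the first few rows of an existing table. The table name is a placeholder.

import datalab.bigquery as bq

tbl = bq.Table('my_dataset.events')  # placeholder
for row in tbl.range(start_row=0, max_rows=5):
  print(row)  # rows are yielded one at a time by the underlying fetcher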
5,023 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.to_file_async | def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed.
"""
self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header) | python | def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
"""Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed.
"""
self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header) | [
"def",
"to_file_async",
"(",
"self",
",",
"destination",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"','",
",",
"csv_header",
"=",
"True",
")",
":",
"self",
".",
"to_file",
"(",
"destination",
",",
"format",
"=",
"format",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"csv_header",
")"
] | Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed. | [
"Start",
"saving",
"the",
"results",
"to",
"a",
"local",
"file",
"in",
"CSV",
"format",
"and",
"return",
"a",
"Job",
"for",
"completion",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L680-L693 |
5,024 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.update | def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e | python | def update(self, friendly_name=None, description=None, expiry=None, schema=None):
""" Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
"""
self._load_info()
if friendly_name is not None:
self._info['friendlyName'] = friendly_name
if description is not None:
self._info['description'] = description
if expiry is not None:
if isinstance(expiry, datetime.datetime):
expiry = calendar.timegm(expiry.utctimetuple()) * 1000
self._info['expirationTime'] = expiry
if schema is not None:
if isinstance(schema, _schema.Schema):
schema = schema._bq_schema
self._info['schema'] = {'fields': schema}
try:
self._api.table_update(self._name_parts, self._info)
except datalab.utils.RequestException:
# The cached metadata is out of sync now; abandon it.
self._info = None
except Exception as e:
raise e | [
"def",
"update",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"expiry",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"self",
".",
"_load_info",
"(",
")",
"if",
"friendly_name",
"is",
"not",
"None",
":",
"self",
".",
"_info",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
"is",
"not",
"None",
":",
"self",
".",
"_info",
"[",
"'description'",
"]",
"=",
"description",
"if",
"expiry",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"expiry",
",",
"datetime",
".",
"datetime",
")",
":",
"expiry",
"=",
"calendar",
".",
"timegm",
"(",
"expiry",
".",
"utctimetuple",
"(",
")",
")",
"*",
"1000",
"self",
".",
"_info",
"[",
"'expirationTime'",
"]",
"=",
"expiry",
"if",
"schema",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"schema",
",",
"_schema",
".",
"Schema",
")",
":",
"schema",
"=",
"schema",
".",
"_bq_schema",
"self",
".",
"_info",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema",
"}",
"try",
":",
"self",
".",
"_api",
".",
"table_update",
"(",
"self",
".",
"_name_parts",
",",
"self",
".",
"_info",
")",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"# The cached metadata is out of sync now; abandon it.",
"self",
".",
"_info",
"=",
"None",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema. | [
"Selectively",
"updates",
"Table",
"information",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L712-L742 |
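Usage sketch for a metadata-only update; only the supplied fields change, and a datetime expiry is converted to milliseconds since the epoch, as the code above shows. The table name is a placeholder.

import datetime
import datalab.bigquery as bq

tbl = bq.Table('my_dataset.events')  # placeholder
tbl.update(description='Raw click events',
           expiry=datetime.datetime(2030, 1, 1))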
5,025 | googledatalab/pydatalab | datalab/bigquery/_table.py | Table.to_query | def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context) | python | def to_query(self, fields=None):
""" Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
# Do import here to avoid top-level circular dependencies.
from . import _query
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return _query.Query('SELECT %s FROM %s' % (fields, self._repr_sql_()), context=self._context) | [
"def",
"to_query",
"(",
"self",
",",
"fields",
"=",
"None",
")",
":",
"# Do import here to avoid top-level circular dependencies.",
"from",
".",
"import",
"_query",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"'*'",
"elif",
"isinstance",
"(",
"fields",
",",
"list",
")",
":",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"return",
"_query",
".",
"Query",
"(",
"'SELECT %s FROM %s'",
"%",
"(",
"fields",
",",
"self",
".",
"_repr_sql_",
"(",
")",
")",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Return a Query for this Table.
Args:
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table. | [
"Return",
"a",
"Query",
"for",
"this",
"Table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_table.py#L914-L930 |
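Usage sketch: build a Query over selected fields. Executing the query is omitted here because the Query API is not part of this record; the table name is a placeholder.

import datalab.bigquery as bq

tbl = bq.Table('my_dataset.events')  # placeholder
q = tbl.to_query(fields=['name', 'value'])
# q wraps a "SELECT name,value FROM <table>" statement built from the table's SQL name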
5,026 | googledatalab/pydatalab | datalab/storage/_item.py | Item.copy_to | def copy_to(self, new_key, bucket=None):
"""Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to new key.
Raises:
Exception if there was an error copying the item.
"""
if bucket is None:
bucket = self._bucket
try:
new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
except Exception as e:
raise e
return Item(bucket, new_key, new_info, context=self._context) | python | def copy_to(self, new_key, bucket=None):
"""Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to new key.
Raises:
Exception if there was an error copying the item.
"""
if bucket is None:
bucket = self._bucket
try:
new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
except Exception as e:
raise e
return Item(bucket, new_key, new_info, context=self._context) | [
"def",
"copy_to",
"(",
"self",
",",
"new_key",
",",
"bucket",
"=",
"None",
")",
":",
"if",
"bucket",
"is",
"None",
":",
"bucket",
"=",
"self",
".",
"_bucket",
"try",
":",
"new_info",
"=",
"self",
".",
"_api",
".",
"objects_copy",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
",",
"bucket",
",",
"new_key",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"Item",
"(",
"bucket",
",",
"new_key",
",",
"new_info",
",",
"context",
"=",
"self",
".",
"_context",
")"
] | Copies this item to the specified new key.
Args:
new_key: the new key to copy this item to.
bucket: the bucket of the new item; if None (the default) use the same bucket.
Returns:
An Item corresponding to new key.
Raises:
Exception if there was an error copying the item. | [
"Copies",
"this",
"item",
"to",
"the",
"specified",
"new",
"key",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L111-L128 |
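A hedged storage sketch, assuming the legacy datalab storage API; constructing an Item directly like this, plus the bucket and key names, are assumptions for illustration.

import datalab.storage as storage

item = storage.Item('my-bucket', 'reports/2016/summary.csv')      # placeholder bucket/key
backup = item.copy_to('backups/summary.csv')                      # same bucket by default
mirrored = item.copy_to('summary.csv', bucket='my-other-bucket')  # or copy across buckets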
5,027 | googledatalab/pydatalab | datalab/storage/_item.py | Item.exists | def exists(self):
""" Checks if the item exists. """
try:
return self.metadata is not None
except datalab.utils.RequestException:
return False
except Exception as e:
raise e | python | def exists(self):
""" Checks if the item exists. """
try:
return self.metadata is not None
except datalab.utils.RequestException:
return False
except Exception as e:
raise e | [
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"metadata",
"is",
"not",
"None",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Checks if the item exists. | [
"Checks",
"if",
"the",
"item",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L130-L137 |
5,028 | googledatalab/pydatalab | datalab/storage/_item.py | Item.delete | def delete(self):
"""Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item.
"""
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e | python | def delete(self):
"""Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item.
"""
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e | [
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
"(",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"objects_delete",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Deletes this item from its bucket.
Raises:
Exception if there was an error deleting the item. | [
"Deletes",
"this",
"item",
"from",
"its",
"bucket",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L139-L149 |
5,029 | googledatalab/pydatalab | datalab/storage/_item.py | Item.write_to | def write_to(self, content, content_type):
"""Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content.
"""
try:
self._api.object_upload(self._bucket, self._key, content, content_type)
except Exception as e:
raise e | python | def write_to(self, content, content_type):
"""Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content.
"""
try:
self._api.object_upload(self._bucket, self._key, content, content_type)
except Exception as e:
raise e | [
"def",
"write_to",
"(",
"self",
",",
"content",
",",
"content_type",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"object_upload",
"(",
"self",
".",
"_bucket",
",",
"self",
".",
"_key",
",",
"content",
",",
"content_type",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Writes text content to this item.
Args:
content: the text content to be written.
content_type: the type of text content.
Raises:
Exception if there was an error requesting the item's content. | [
"Writes",
"text",
"content",
"to",
"this",
"item",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L212-L224 |
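Usage sketch combining write_to with the exists and delete methods from the neighboring records; the Item construction and names are placeholders.

import datalab.storage as storage

item = storage.Item('my-bucket', 'notes/hello.txt')  # placeholder
item.write_to('hello, world\n', 'text/plain')
if item.exists():
  print('object written')
item.delete()  # per the delete record above, this is a no-op if the object is already gone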
5,030 | googledatalab/pydatalab | datalab/storage/_item.py | Items.contains | def contains(self, key):
"""Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item.
"""
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | python | def contains(self, key):
"""Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item.
"""
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | [
"def",
"contains",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"objects_get",
"(",
"self",
".",
"_bucket",
",",
"key",
")",
"except",
"datalab",
".",
"utils",
".",
"RequestException",
"as",
"e",
":",
"if",
"e",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"True"
] | Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item. | [
"Checks",
"if",
"the",
"specified",
"item",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L252-L270 |
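Usage sketch for the collection-level membership check. Reaching the Items collection through bucket.items() is an assumption — only contains() itself is documented in this record.

import datalab.storage as storage

bucket = storage.Bucket('my-bucket')  # placeholder; Bucket is assumed to expose its objects this way
items = bucket.items()                # assumption
if items.contains('reports/2016/summary.csv'):
  print('object exists')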
5,031 | googledatalab/pydatalab | google/datalab/utils/_http.py | Http.request | def request(url, args=None, data=None, headers=None, method=None,
credentials=None, raw_response=False, stats=None):
"""Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed.
"""
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
# Add querystring to the URL if there are any arguments.
if args is not None:
qs = urllib.parse.urlencode(args)
url = url + '?' + qs
# Setup method to POST if unspecified, and appropriate request headers
# if there is data to be sent within the request.
if data is not None:
if method is None:
method = 'POST'
if data != '':
# If there is a content type specified, use it (and the data) as-is.
# Otherwise, assume JSON, and serialize the data object.
if 'Content-Type' not in headers:
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(data))
else:
if method == 'POST':
headers['Content-Length'] = '0'
# If the method is still unset, i.e. it was unspecified, and there
# was no data to be POSTed, then default to GET request.
if method is None:
method = 'GET'
http = Http.http
# Authorize with credentials if given
if credentials is not None:
# Make a copy of the shared http instance before we modify it.
http = copy.copy(http)
http = google_auth_httplib2.AuthorizedHttp(credentials)
if stats is not None:
stats['duration'] = datetime.datetime.utcnow()
response = None
try:
log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
response, content = http.request(url,
method=method,
body=data,
headers=headers)
if 200 <= response.status < 300:
if raw_response:
return content
if type(content) == str:
return json.loads(content)
else:
return json.loads(str(content, encoding='UTF-8'))
else:
raise RequestException(response.status, content)
except ValueError:
raise Exception('Failed to process HTTP response.')
except httplib2.HttpLib2Error:
raise Exception('Failed to send HTTP request.')
finally:
if stats is not None:
stats['data_size'] = len(data)
stats['status'] = response.status
stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds() | python | def request(url, args=None, data=None, headers=None, method=None,
credentials=None, raw_response=False, stats=None):
"""Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed.
"""
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
# Add querystring to the URL if there are any arguments.
if args is not None:
qs = urllib.parse.urlencode(args)
url = url + '?' + qs
# Setup method to POST if unspecified, and appropriate request headers
# if there is data to be sent within the request.
if data is not None:
if method is None:
method = 'POST'
if data != '':
# If there is a content type specified, use it (and the data) as-is.
# Otherwise, assume JSON, and serialize the data object.
if 'Content-Type' not in headers:
data = json.dumps(data)
headers['Content-Type'] = 'application/json'
headers['Content-Length'] = str(len(data))
else:
if method == 'POST':
headers['Content-Length'] = '0'
# If the method is still unset, i.e. it was unspecified, and there
# was no data to be POSTed, then default to GET request.
if method is None:
method = 'GET'
http = Http.http
# Authorize with credentials if given
if credentials is not None:
# Make a copy of the shared http instance before we modify it.
http = copy.copy(http)
http = google_auth_httplib2.AuthorizedHttp(credentials)
if stats is not None:
stats['duration'] = datetime.datetime.utcnow()
response = None
try:
log.debug('request: method[%(method)s], url[%(url)s], body[%(data)s]' % locals())
response, content = http.request(url,
method=method,
body=data,
headers=headers)
if 200 <= response.status < 300:
if raw_response:
return content
if type(content) == str:
return json.loads(content)
else:
return json.loads(str(content, encoding='UTF-8'))
else:
raise RequestException(response.status, content)
except ValueError:
raise Exception('Failed to process HTTP response.')
except httplib2.HttpLib2Error:
raise Exception('Failed to send HTTP request.')
finally:
if stats is not None:
stats['data_size'] = len(data)
stats['status'] = response.status
stats['duration'] = (datetime.datetime.utcnow() - stats['duration']).total_seconds() | [
"def",
"request",
"(",
"url",
",",
"args",
"=",
"None",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"method",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"raw_response",
"=",
"False",
",",
"stats",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"# Add querystring to the URL if there are any arguments.",
"if",
"args",
"is",
"not",
"None",
":",
"qs",
"=",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"args",
")",
"url",
"=",
"url",
"+",
"'?'",
"+",
"qs",
"# Setup method to POST if unspecified, and appropriate request headers",
"# if there is data to be sent within the request.",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"'POST'",
"if",
"data",
"!=",
"''",
":",
"# If there is a content type specified, use it (and the data) as-is.",
"# Otherwise, assume JSON, and serialize the data object.",
"if",
"'Content-Type'",
"not",
"in",
"headers",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/json'",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"str",
"(",
"len",
"(",
"data",
")",
")",
"else",
":",
"if",
"method",
"==",
"'POST'",
":",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"'0'",
"# If the method is still unset, i.e. it was unspecified, and there",
"# was no data to be POSTed, then default to GET request.",
"if",
"method",
"is",
"None",
":",
"method",
"=",
"'GET'",
"http",
"=",
"Http",
".",
"http",
"# Authorize with credentials if given",
"if",
"credentials",
"is",
"not",
"None",
":",
"# Make a copy of the shared http instance before we modify it.",
"http",
"=",
"copy",
".",
"copy",
"(",
"http",
")",
"http",
"=",
"google_auth_httplib2",
".",
"AuthorizedHttp",
"(",
"credentials",
")",
"if",
"stats",
"is",
"not",
"None",
":",
"stats",
"[",
"'duration'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"response",
"=",
"None",
"try",
":",
"log",
".",
"debug",
"(",
"'request: method[%(method)s], url[%(url)s], body[%(data)s]'",
"%",
"locals",
"(",
")",
")",
"response",
",",
"content",
"=",
"http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"method",
",",
"body",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"if",
"200",
"<=",
"response",
".",
"status",
"<",
"300",
":",
"if",
"raw_response",
":",
"return",
"content",
"if",
"type",
"(",
"content",
")",
"==",
"str",
":",
"return",
"json",
".",
"loads",
"(",
"content",
")",
"else",
":",
"return",
"json",
".",
"loads",
"(",
"str",
"(",
"content",
",",
"encoding",
"=",
"'UTF-8'",
")",
")",
"else",
":",
"raise",
"RequestException",
"(",
"response",
".",
"status",
",",
"content",
")",
"except",
"ValueError",
":",
"raise",
"Exception",
"(",
"'Failed to process HTTP response.'",
")",
"except",
"httplib2",
".",
"HttpLib2Error",
":",
"raise",
"Exception",
"(",
"'Failed to send HTTP request.'",
")",
"finally",
":",
"if",
"stats",
"is",
"not",
"None",
":",
"stats",
"[",
"'data_size'",
"]",
"=",
"len",
"(",
"data",
")",
"stats",
"[",
"'status'",
"]",
"=",
"response",
".",
"status",
"stats",
"[",
"'duration'",
"]",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"stats",
"[",
"'duration'",
"]",
")",
".",
"total_seconds",
"(",
")"
] | Issues HTTP requests.
Args:
url: the URL to request.
args: optional query string arguments.
data: optional data to be sent within the request.
headers: optional headers to include in the request.
method: optional HTTP method to use. If unspecified this is inferred
(GET or POST) based on the existence of request data.
credentials: optional set of credentials to authorize the request.
raw_response: whether the raw response content should be returned as-is.
stats: an optional dictionary that, if provided, will be populated with some
useful info about the request, like 'duration' in seconds and 'data_size' in
bytes. These may be useful for optimizing access to rate-limited APIs.
Returns:
The parsed response object.
Raises:
Exception when the HTTP request fails or the response cannot be processed. | [
"Issues",
"HTTP",
"requests",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_http.py#L81-L167 |
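A sketch of calling the static Http.request helper with the optional stats dictionary described above. The URL and payload are placeholders; note that stats['data_size'] is computed from the request body in the finally block, so passing stats is only safe when a body is supplied.

from google.datalab.utils._http import Http

stats = {}
# Supplying a dict as data makes the helper serialize it to JSON and default the method to POST.
result = Http.request('https://example.com/api/echo', data={'ping': 'pong'}, stats=stats)
print(stats['status'], stats['data_size'], stats['duration'])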
5,032 | googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | _add_command | def _add_command(parser, subparser_fn, handler, cell_required=False,
cell_prohibited=False):
""" Create and initialize a pipeline subcommand handler. """
sub_parser = subparser_fn(parser)
sub_parser.set_defaults(func=lambda args, cell: _dispatch_handler(
args, cell, sub_parser, handler, cell_required=cell_required,
cell_prohibited=cell_prohibited)) | python | def _add_command(parser, subparser_fn, handler, cell_required=False,
cell_prohibited=False):
""" Create and initialize a pipeline subcommand handler. """
sub_parser = subparser_fn(parser)
sub_parser.set_defaults(func=lambda args, cell: _dispatch_handler(
args, cell, sub_parser, handler, cell_required=cell_required,
cell_prohibited=cell_prohibited)) | [
"def",
"_add_command",
"(",
"parser",
",",
"subparser_fn",
",",
"handler",
",",
"cell_required",
"=",
"False",
",",
"cell_prohibited",
"=",
"False",
")",
":",
"sub_parser",
"=",
"subparser_fn",
"(",
"parser",
")",
"sub_parser",
".",
"set_defaults",
"(",
"func",
"=",
"lambda",
"args",
",",
"cell",
":",
"_dispatch_handler",
"(",
"args",
",",
"cell",
",",
"sub_parser",
",",
"handler",
",",
"cell_required",
"=",
"cell_required",
",",
"cell_prohibited",
"=",
"cell_prohibited",
")",
")"
] | Create and initialize a pipeline subcommand handler. | [
"Create",
"and",
"initialize",
"a",
"pipeline",
"subcommand",
"handler",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L57-L63 |
5,033 | googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | pipeline | def pipeline(line, cell=None):
"""Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _pipeline_parser) | python | def pipeline(line, cell=None):
"""Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _pipeline_parser) | [
"def",
"pipeline",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"_pipeline_parser",
")"
] | Implements the pipeline cell magic for ipython notebooks.
The supported syntax is:
%%pipeline <command> [<args>]
<cell>
or:
%pipeline <command> [<args>]
Use %pipeline --help for a list of commands, or %pipeline <command> --help for
help on a specific command. | [
"Implements",
"the",
"pipeline",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L90-L105 |
5,034 | googledatalab/pydatalab | google/datalab/contrib/pipeline/commands/_pipeline.py | _dispatch_handler | def _dispatch_handler(args, cell, parser, handler, cell_required=False,
cell_prohibited=False):
""" Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid.
"""
if cell_prohibited:
if cell and len(cell.strip()):
parser.print_help()
raise Exception(
'Additional data is not supported with the %s command.' % parser.prog)
return handler(args)
if cell_required and not cell:
parser.print_help()
raise Exception('The %s command requires additional data' % parser.prog)
return handler(args, cell) | python | def _dispatch_handler(args, cell, parser, handler, cell_required=False,
cell_prohibited=False):
""" Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid.
"""
if cell_prohibited:
if cell and len(cell.strip()):
parser.print_help()
raise Exception(
'Additional data is not supported with the %s command.' % parser.prog)
return handler(args)
if cell_required and not cell:
parser.print_help()
raise Exception('The %s command requires additional data' % parser.prog)
return handler(args, cell) | [
"def",
"_dispatch_handler",
"(",
"args",
",",
"cell",
",",
"parser",
",",
"handler",
",",
"cell_required",
"=",
"False",
",",
"cell_prohibited",
"=",
"False",
")",
":",
"if",
"cell_prohibited",
":",
"if",
"cell",
"and",
"len",
"(",
"cell",
".",
"strip",
"(",
")",
")",
":",
"parser",
".",
"print_help",
"(",
")",
"raise",
"Exception",
"(",
"'Additional data is not supported with the %s command.'",
"%",
"parser",
".",
"prog",
")",
"return",
"handler",
"(",
"args",
")",
"if",
"cell_required",
"and",
"not",
"cell",
":",
"parser",
".",
"print_help",
"(",
")",
"raise",
"Exception",
"(",
"'The %s command requires additional data'",
"%",
"parser",
".",
"prog",
")",
"return",
"handler",
"(",
"args",
",",
"cell",
")"
] | Makes sure cell magics include cell and line magics don't, before
dispatching to handler.
Args:
args: the parsed arguments from the magic line.
cell: the contents of the cell, if any.
parser: the argument parser for <cmd>; used for error message.
handler: the handler to call if the cell present/absent check passes.
cell_required: True for cell magics, False for line magics that can't be
cell magics.
cell_prohibited: True for line magics, False for cell magics that can't be
line magics.
Returns:
The result of calling the handler.
Raises:
Exception if the invocation is not valid. | [
"Makes",
"sure",
"cell",
"magics",
"include",
"cell",
"and",
"line",
"magics",
"don",
"t",
"before",
"dispatching",
"to",
"handler",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/commands/_pipeline.py#L108-L138 |
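A hypothetical sketch of how _add_command and _dispatch_handler work together when a subcommand is registered; the subparser factory, the handler, and the parser.subcommand helper are assumptions, not part of the records above.

def _create_run_subparser(parser):
    sub = parser.subcommand('run', 'Run the pipeline described in the cell body.')  # assumed helper
    sub.add_argument('-n', '--name', help='pipeline name')
    return sub

def _run_handler(args, cell):
    print('would run %s with a %d-character spec' % (args['name'], len(cell)))

# cell_required=True means _dispatch_handler rejects invocations that lack a cell body.
_add_command(_pipeline_parser, _create_run_subparser, _run_handler, cell_required=True)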
5,035 | googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/feature_analysis.py | expand_defaults | def expand_defaults(schema, features):
"""Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the featurs file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type.
"""
schema_names = [x['name'] for x in schema]
# Add missing source columns
for name, transform in six.iteritems(features):
if 'source_column' not in transform:
transform['source_column'] = name
# Check source columns are in the schema and collect which are used.
used_schema_columns = []
for name, transform in six.iteritems(features):
if transform['source_column'] not in schema_names:
raise ValueError('source column %s is not in the schema for transform %s'
% (transform['source_column'], name))
used_schema_columns.append(transform['source_column'])
# Update default transformation based on schema.
for col_schema in schema:
schema_name = col_schema['name']
schema_type = col_schema['type'].lower()
if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
raise ValueError(('Only the following schema types are supported: %s'
% ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
if schema_name not in used_schema_columns:
# add the default transform to the features
if schema_type in constant.NUMERIC_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
'source_column': schema_name}
elif schema_type == constant.STRING_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
'source_column': schema_name}
else:
raise NotImplementedError('Unknown type %s' % schema_type) | python | def expand_defaults(schema, features):
"""Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the features file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type.
"""
schema_names = [x['name'] for x in schema]
# Add missing source columns
for name, transform in six.iteritems(features):
if 'source_column' not in transform:
transform['source_column'] = name
# Check source columns are in the schema and collect which are used.
used_schema_columns = []
for name, transform in six.iteritems(features):
if transform['source_column'] not in schema_names:
raise ValueError('source column %s is not in the schema for transform %s'
% (transform['source_column'], name))
used_schema_columns.append(transform['source_column'])
# Update default transformation based on schema.
for col_schema in schema:
schema_name = col_schema['name']
schema_type = col_schema['type'].lower()
if schema_type not in constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA]:
raise ValueError(('Only the following schema types are supported: %s'
% ' '.join(constant.NUMERIC_SCHEMA + [constant.STRING_SCHEMA])))
if schema_name not in used_schema_columns:
# add the default transform to the features
if schema_type in constant.NUMERIC_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_NUMERIC_TRANSFORM,
'source_column': schema_name}
elif schema_type == constant.STRING_SCHEMA:
features[schema_name] = {
'transform': constant.DEFAULT_CATEGORICAL_TRANSFORM,
'source_column': schema_name}
else:
raise NotImplementedError('Unknown type %s' % schema_type) | [
"def",
"expand_defaults",
"(",
"schema",
",",
"features",
")",
":",
"schema_names",
"=",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"schema",
"]",
"# Add missing source columns",
"for",
"name",
",",
"transform",
"in",
"six",
".",
"iteritems",
"(",
"features",
")",
":",
"if",
"'source_column'",
"not",
"in",
"transform",
":",
"transform",
"[",
"'source_column'",
"]",
"=",
"name",
"# Check source columns are in the schema and collect which are used.",
"used_schema_columns",
"=",
"[",
"]",
"for",
"name",
",",
"transform",
"in",
"six",
".",
"iteritems",
"(",
"features",
")",
":",
"if",
"transform",
"[",
"'source_column'",
"]",
"not",
"in",
"schema_names",
":",
"raise",
"ValueError",
"(",
"'source column %s is not in the schema for transform %s'",
"%",
"(",
"transform",
"[",
"'source_column'",
"]",
",",
"name",
")",
")",
"used_schema_columns",
".",
"append",
"(",
"transform",
"[",
"'source_column'",
"]",
")",
"# Update default transformation based on schema.",
"for",
"col_schema",
"in",
"schema",
":",
"schema_name",
"=",
"col_schema",
"[",
"'name'",
"]",
"schema_type",
"=",
"col_schema",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"if",
"schema_type",
"not",
"in",
"constant",
".",
"NUMERIC_SCHEMA",
"+",
"[",
"constant",
".",
"STRING_SCHEMA",
"]",
":",
"raise",
"ValueError",
"(",
"(",
"'Only the following schema types are supported: %s'",
"%",
"' '",
".",
"join",
"(",
"constant",
".",
"NUMERIC_SCHEMA",
"+",
"[",
"constant",
".",
"STRING_SCHEMA",
"]",
")",
")",
")",
"if",
"schema_name",
"not",
"in",
"used_schema_columns",
":",
"# add the default transform to the features",
"if",
"schema_type",
"in",
"constant",
".",
"NUMERIC_SCHEMA",
":",
"features",
"[",
"schema_name",
"]",
"=",
"{",
"'transform'",
":",
"constant",
".",
"DEFAULT_NUMERIC_TRANSFORM",
",",
"'source_column'",
":",
"schema_name",
"}",
"elif",
"schema_type",
"==",
"constant",
".",
"STRING_SCHEMA",
":",
"features",
"[",
"schema_name",
"]",
"=",
"{",
"'transform'",
":",
"constant",
".",
"DEFAULT_CATEGORICAL_TRANSFORM",
",",
"'source_column'",
":",
"schema_name",
"}",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unknown type %s'",
"%",
"schema_type",
")"
] | Add to features any default transformations.
Not every column in the schema has an explicit feature transformation listed
in the features file. For these columns, add a default transformation based on
the schema's type. The features dict is modified by this function call.
After this function call, every column in schema is used in a feature, and
every feature uses a column in the schema.
Args:
schema: schema list
features: features dict
Raises:
ValueError: if transform cannot be applied given schema type. | [
"Add",
"to",
"features",
"any",
"default",
"transformations",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/feature_analysis.py#L114-L167 |
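A small worked example of the expansion above, assuming 'integer' and 'string' are among the supported schema types and that the defaults come from constant.DEFAULT_NUMERIC_TRANSFORM and DEFAULT_CATEGORICAL_TRANSFORM; the transform name 'scale' is illustrative.

schema = [{'name': 'age', 'type': 'INTEGER'},
          {'name': 'city', 'type': 'STRING'}]
features = {'age': {'transform': 'scale'}}  # no source_column given

expand_defaults(schema, features)

# 'age' now carries source_column='age', and 'city', which was unused, picks up the
# default categorical transform keyed by its own column name.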
5,036 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _sample_cell | def _sample_cell(args, cell_body):
"""Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data.
"""
env = datalab.utils.commands.notebook_environment()
query = None
table = None
view = None
if args['query']:
query = _get_query_argument(args, cell_body, env)
elif args['table']:
table = _get_table(args['table'])
elif args['view']:
view = datalab.utils.commands.get_notebook_item(args['view'])
if not isinstance(view, datalab.bigquery.View):
raise Exception('%s is not a view' % args['view'])
else:
query = datalab.bigquery.Query(cell_body, values=env)
count = args['count']
method = args['method']
if method == 'random':
sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
elif method == 'hashed':
sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'],
count=count)
elif method == 'sorted':
ascending = args['order'] == 'ascending'
sampling = datalab.bigquery.Sampling.sorted(args['field'],
ascending=ascending,
count=count)
elif method == 'limit':
sampling = datalab.bigquery.Sampling.default(count=count)
else:
sampling = datalab.bigquery.Sampling.default(count=count)
if query:
results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])
elif view:
results = view.sample(sampling=sampling)
else:
results = table.sample(sampling=sampling)
if args['verbose']:
print(results.sql)
if args['profile']:
return datalab.utils.commands.profile_df(results.to_dataframe())
else:
return results | python | def _sample_cell(args, cell_body):
"""Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data.
"""
env = datalab.utils.commands.notebook_environment()
query = None
table = None
view = None
if args['query']:
query = _get_query_argument(args, cell_body, env)
elif args['table']:
table = _get_table(args['table'])
elif args['view']:
view = datalab.utils.commands.get_notebook_item(args['view'])
if not isinstance(view, datalab.bigquery.View):
raise Exception('%s is not a view' % args['view'])
else:
query = datalab.bigquery.Query(cell_body, values=env)
count = args['count']
method = args['method']
if method == 'random':
sampling = datalab.bigquery.Sampling.random(percent=args['percent'], count=count)
elif method == 'hashed':
sampling = datalab.bigquery.Sampling.hashed(field_name=args['field'], percent=args['percent'],
count=count)
elif method == 'sorted':
ascending = args['order'] == 'ascending'
sampling = datalab.bigquery.Sampling.sorted(args['field'],
ascending=ascending,
count=count)
elif method == 'limit':
sampling = datalab.bigquery.Sampling.default(count=count)
else:
sampling = datalab.bigquery.Sampling.default(count=count)
if query:
results = query.sample(sampling=sampling, dialect=args['dialect'], billing_tier=args['billing'])
elif view:
results = view.sample(sampling=sampling)
else:
results = table.sample(sampling=sampling)
if args['verbose']:
print(results.sql)
if args['profile']:
return datalab.utils.commands.profile_df(results.to_dataframe())
else:
return results | [
"def",
"_sample_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"env",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"query",
"=",
"None",
"table",
"=",
"None",
"view",
"=",
"None",
"if",
"args",
"[",
"'query'",
"]",
":",
"query",
"=",
"_get_query_argument",
"(",
"args",
",",
"cell_body",
",",
"env",
")",
"elif",
"args",
"[",
"'table'",
"]",
":",
"table",
"=",
"_get_table",
"(",
"args",
"[",
"'table'",
"]",
")",
"elif",
"args",
"[",
"'view'",
"]",
":",
"view",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"args",
"[",
"'view'",
"]",
")",
"if",
"not",
"isinstance",
"(",
"view",
",",
"datalab",
".",
"bigquery",
".",
"View",
")",
":",
"raise",
"Exception",
"(",
"'%s is not a view'",
"%",
"args",
"[",
"'view'",
"]",
")",
"else",
":",
"query",
"=",
"datalab",
".",
"bigquery",
".",
"Query",
"(",
"cell_body",
",",
"values",
"=",
"env",
")",
"count",
"=",
"args",
"[",
"'count'",
"]",
"method",
"=",
"args",
"[",
"'method'",
"]",
"if",
"method",
"==",
"'random'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"random",
"(",
"percent",
"=",
"args",
"[",
"'percent'",
"]",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'hashed'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"hashed",
"(",
"field_name",
"=",
"args",
"[",
"'field'",
"]",
",",
"percent",
"=",
"args",
"[",
"'percent'",
"]",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'sorted'",
":",
"ascending",
"=",
"args",
"[",
"'order'",
"]",
"==",
"'ascending'",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"sorted",
"(",
"args",
"[",
"'field'",
"]",
",",
"ascending",
"=",
"ascending",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'limit'",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"default",
"(",
"count",
"=",
"count",
")",
"else",
":",
"sampling",
"=",
"datalab",
".",
"bigquery",
".",
"Sampling",
".",
"default",
"(",
"count",
"=",
"count",
")",
"if",
"query",
":",
"results",
"=",
"query",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
",",
"dialect",
"=",
"args",
"[",
"'dialect'",
"]",
",",
"billing_tier",
"=",
"args",
"[",
"'billing'",
"]",
")",
"elif",
"view",
":",
"results",
"=",
"view",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
")",
"else",
":",
"results",
"=",
"table",
".",
"sample",
"(",
"sampling",
"=",
"sampling",
")",
"if",
"args",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"results",
".",
"sql",
")",
"if",
"args",
"[",
"'profile'",
"]",
":",
"return",
"datalab",
".",
"utils",
".",
"commands",
".",
"profile_df",
"(",
"results",
".",
"to_dataframe",
"(",
")",
")",
"else",
":",
"return",
"results"
] | Implements the bigquery sample cell magic for ipython notebooks.
Args:
args: the optional arguments following '%%bigquery sample'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The results of executing the sampling query, or a profile of the sample data. | [
"Implements",
"the",
"bigquery",
"sample",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L285-L339 |
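A hedged notebook example of the sample magic handled above; the table and column names are invented, the flag spellings follow the parsed argument names, and the cell body is interpreted as SQL as the docstring describes.

%%bigquery sample --count 10
SELECT request, COUNT(*) AS requests
FROM [mydataset.logs]
GROUP BY request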
5,037 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _create_cell | def _create_cell(args, cell_body):
"""Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'.
"""
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],
description=cell_body)
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
else:
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = datalab.utils.commands.parse_config(cell_body,
datalab.utils.commands.notebook_environment(),
as_dict=False)
schema = datalab.bigquery.Schema(record)
datalab.bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e)) | python | def _create_cell(args, cell_body):
"""Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'.
"""
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).create(friendly_name=args['friendly'],
description=cell_body)
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
else:
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = datalab.utils.commands.parse_config(cell_body,
datalab.utils.commands.notebook_environment(),
as_dict=False)
schema = datalab.bigquery.Schema(record)
datalab.bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e)) | [
"def",
"_create_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'dataset'",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"friendly_name",
"=",
"args",
"[",
"'friendly'",
"]",
",",
"description",
"=",
"cell_body",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"else",
":",
"if",
"cell_body",
"is",
"None",
":",
"print",
"(",
"'Failed to create %s: no schema specified'",
"%",
"args",
"[",
"'name'",
"]",
")",
"else",
":",
"try",
":",
"record",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"schema",
"=",
"datalab",
".",
"bigquery",
".",
"Schema",
"(",
"record",
")",
"datalab",
".",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"schema",
"=",
"schema",
",",
"overwrite",
"=",
"args",
"[",
"'overwrite'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")"
] | Implements the BigQuery cell magic used to create datasets and tables.
The supported syntax is:
%%bigquery create dataset -n|--name <name> [-f|--friendly <friendlyname>]
[<description>]
or:
%%bigquery create table -n|--name <tablename> [--overwrite]
[<YAML or JSON cell_body defining schema to use for tables>]
Args:
args: the argument following '%bigquery create <command>'. | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"create",
"datasets",
"and",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L342-L375 |
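An illustrative cell for the create magic, following the syntax quoted in its docstring; the table name is a placeholder and the schema is the usual list of name/type records.

%%bigquery create table --name mydataset.events --overwrite
[
  {"name": "id", "type": "INTEGER"},
  {"name": "label", "type": "STRING"}
]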
5,038 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _delete_cell | def _delete_cell(args, _):
"""Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'.
"""
# TODO(gram): add support for wildchars and multiple arguments at some point. The latter is
# easy, the former a bit more tricky if non-default projects are involved.
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e))
else:
try:
datalab.bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e)) | python | def _delete_cell(args, _):
"""Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'.
"""
# TODO(gram): add support for wildchars and multiple arguments at some point. The latter is
# easy, the former a bit more tricky if non-default projects are involved.
if args['command'] == 'dataset':
try:
datalab.bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e))
else:
try:
datalab.bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e)) | [
"def",
"_delete_cell",
"(",
"args",
",",
"_",
")",
":",
"# TODO(gram): add support for wildchars and multiple arguments at some point. The latter is",
"# easy, the former a bit more tricky if non-default projects are involved.",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'dataset'",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"else",
":",
"try",
":",
"datalab",
".",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")"
] | Implements the BigQuery cell magic used to delete datasets and tables.
The supported syntax is:
%%bigquery delete dataset -n|--name <name>
or:
%%bigquery delete table -n|--name <name>
Args:
args: the argument following '%bigquery delete <command>'. | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"delete",
"datasets",
"and",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L378-L403 |
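The matching delete magic, again with a placeholder name, mirroring the syntax in the docstring.

%%bigquery delete table --name mydataset.events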
5,039 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _udf_cell | def _udf_cell(args, js):
"""Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
"""
variable_name = args['module']
if not variable_name:
raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
# Parse out the input and output specification
spec_pattern = r'\{\{([^}]+)\}\}'
spec_part_pattern = r'[a-z_][a-z0-9_]*'
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception('The JavaScript must declare the input row and output emitter parameters '
'using valid jsdoc format comments.\n'
'The input row param declaration must be typed as {{field:type, field2:type}} '
'and the output emitter param declaration must be typed as '
'function({{field:type, field2:type}}.')
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception('Invalid input row param declaration. The jsdoc type expression must '
'define an object with field and type pairs.')
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
'define a function accepting an an object with field and type pairs.')
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
# Object names can contain any characters except \r and \n.
import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
imports = re.findall(import_pattern, js)
# Split the cell if necessary. We look for a 'function(' with no name and a header comment
# block with @param and assume this is the primary function, up to a closing '}' at the start
# of the line. The remaining cell content is used as support code.
split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code = ''
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
# Finally build the UDF object
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf | python | def _udf_cell(args, js):
"""Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
"""
variable_name = args['module']
if not variable_name:
raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
# Parse out the input and output specification
spec_pattern = r'\{\{([^}]+)\}\}'
spec_part_pattern = r'[a-z_][a-z0-9_]*'
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception('The JavaScript must declare the input row and output emitter parameters '
'using valid jsdoc format comments.\n'
'The input row param declaration must be typed as {{field:type, field2:type}} '
'and the output emitter param declaration must be typed as '
'function({{field:type, field2:type}}.')
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception('Invalid input row param declaration. The jsdoc type expression must '
'define an object with field and type pairs.')
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
'define a function accepting an an object with field and type pairs.')
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
# Object names can contain any characters except \r and \n.
import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
imports = re.findall(import_pattern, js)
# Split the cell if necessary. We look for a 'function(' with no name and a header comment
# block with @param and assume this is the primary function, up to a closing '}' at the start
# of the line. The remaining cell content is used as support code.
split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code = ''
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
# Finally build the UDF object
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf | [
"def",
"_udf_cell",
"(",
"args",
",",
"js",
")",
":",
"variable_name",
"=",
"args",
"[",
"'module'",
"]",
"if",
"not",
"variable_name",
":",
"raise",
"Exception",
"(",
"'Declaration must be of the form %%bigquery udf --module <variable name>'",
")",
"# Parse out the input and output specification",
"spec_pattern",
"=",
"r'\\{\\{([^}]+)\\}\\}'",
"spec_part_pattern",
"=",
"r'[a-z_][a-z0-9_]*'",
"specs",
"=",
"re",
".",
"findall",
"(",
"spec_pattern",
",",
"js",
")",
"if",
"len",
"(",
"specs",
")",
"<",
"2",
":",
"raise",
"Exception",
"(",
"'The JavaScript must declare the input row and output emitter parameters '",
"'using valid jsdoc format comments.\\n'",
"'The input row param declaration must be typed as {{field:type, field2:type}} '",
"'and the output emitter param declaration must be typed as '",
"'function({{field:type, field2:type}}.'",
")",
"inputs",
"=",
"[",
"]",
"input_spec_parts",
"=",
"re",
".",
"findall",
"(",
"spec_part_pattern",
",",
"specs",
"[",
"0",
"]",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"if",
"len",
"(",
"input_spec_parts",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid input row param declaration. The jsdoc type expression must '",
"'define an object with field and type pairs.'",
")",
"for",
"n",
",",
"t",
"in",
"zip",
"(",
"input_spec_parts",
"[",
"0",
":",
":",
"2",
"]",
",",
"input_spec_parts",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"inputs",
".",
"append",
"(",
"(",
"n",
",",
"t",
")",
")",
"outputs",
"=",
"[",
"]",
"output_spec_parts",
"=",
"re",
".",
"findall",
"(",
"spec_part_pattern",
",",
"specs",
"[",
"1",
"]",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"if",
"len",
"(",
"output_spec_parts",
")",
"%",
"2",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid output emitter param declaration. The jsdoc type expression must '",
"'define a function accepting an an object with field and type pairs.'",
")",
"for",
"n",
",",
"t",
"in",
"zip",
"(",
"output_spec_parts",
"[",
"0",
":",
":",
"2",
"]",
",",
"output_spec_parts",
"[",
"1",
":",
":",
"2",
"]",
")",
":",
"outputs",
".",
"append",
"(",
"(",
"n",
",",
"t",
")",
")",
"# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.",
"# Object names can contain any characters except \\r and \\n.",
"import_pattern",
"=",
"r'@import[\\s]+(gs://[a-z\\d][a-z\\d_\\.\\-]*[a-z\\d]/[^\\n\\r]+)'",
"imports",
"=",
"re",
".",
"findall",
"(",
"import_pattern",
",",
"js",
")",
"# Split the cell if necessary. We look for a 'function(' with no name and a header comment",
"# block with @param and assume this is the primary function, up to a closing '}' at the start",
"# of the line. The remaining cell content is used as support code.",
"split_pattern",
"=",
"r'(.*)(/\\*.*?@param.*?@param.*?\\*/\\w*\\n\\w*function\\w*\\(.*?^}\\n?)(.*)'",
"parts",
"=",
"re",
".",
"match",
"(",
"split_pattern",
",",
"js",
",",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
")",
"support_code",
"=",
"''",
"if",
"parts",
":",
"support_code",
"=",
"(",
"parts",
".",
"group",
"(",
"1",
")",
"+",
"parts",
".",
"group",
"(",
"3",
")",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"support_code",
")",
":",
"js",
"=",
"parts",
".",
"group",
"(",
"2",
")",
"# Finally build the UDF object",
"udf",
"=",
"datalab",
".",
"bigquery",
".",
"UDF",
"(",
"inputs",
",",
"outputs",
",",
"variable_name",
",",
"js",
",",
"support_code",
",",
"imports",
")",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"variable_name",
"]",
"=",
"udf"
] | Implements the bigquery_udf cell magic for ipython notebooks.
The supported syntax is:
%%bigquery udf --module <var>
<js function>
Args:
args: the optional arguments following '%%bigquery udf'.
js: the UDF declaration (inputs and outputs) and implementation in javascript.
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise. | [
"Implements",
"the",
"bigquery_udf",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L428-L492 |
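A sketch of the UDF cell shape that the parsing above expects: a jsdoc-style comment typing the input row and the output emitter, followed by an anonymous function. The module name and field names are invented.

%%bigquery udf --module extract_path
/**
 * @param {{url: string, status: integer}} row
 * @param function({{path: string, status: integer}}) emit
 */
function(row, emit) {
  emit({path: row.url.split('?')[0], status: row.status});
}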
5,040 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _pipeline_cell | def _pipeline_cell(args, cell_body):
"""Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable
"""
if args['action'] == 'deploy':
raise Exception('Deploying a pipeline is not yet supported')
env = {}
for key, value in datalab.utils.commands.notebook_environment().items():
if isinstance(value, datalab.bigquery._udf.UDF):
env[key] = value
query = _get_query_argument(args, cell_body, env)
if args['verbose']:
print(query.sql)
if args['action'] == 'dryrun':
print(query.sql)
result = query.execute_dry_run()
return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],
is_cached=result['cacheHit'])
if args['action'] == 'run':
return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
allow_large_results=args['large'], dialect=args['dialect'],
billing_tier=args['billing']).results | python | def _pipeline_cell(args, cell_body):
"""Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable
"""
if args['action'] == 'deploy':
raise Exception('Deploying a pipeline is not yet supported')
env = {}
for key, value in datalab.utils.commands.notebook_environment().items():
if isinstance(value, datalab.bigquery._udf.UDF):
env[key] = value
query = _get_query_argument(args, cell_body, env)
if args['verbose']:
print(query.sql)
if args['action'] == 'dryrun':
print(query.sql)
result = query.execute_dry_run()
return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],
is_cached=result['cacheHit'])
if args['action'] == 'run':
return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
allow_large_results=args['large'], dialect=args['dialect'],
billing_tier=args['billing']).results | [
"def",
"_pipeline_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'deploy'",
":",
"raise",
"Exception",
"(",
"'Deploying a pipeline is not yet supported'",
")",
"env",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"datalab",
".",
"bigquery",
".",
"_udf",
".",
"UDF",
")",
":",
"env",
"[",
"key",
"]",
"=",
"value",
"query",
"=",
"_get_query_argument",
"(",
"args",
",",
"cell_body",
",",
"env",
")",
"if",
"args",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"query",
".",
"sql",
")",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'dryrun'",
":",
"print",
"(",
"query",
".",
"sql",
")",
"result",
"=",
"query",
".",
"execute_dry_run",
"(",
")",
"return",
"datalab",
".",
"bigquery",
".",
"_query_stats",
".",
"QueryStats",
"(",
"total_bytes",
"=",
"result",
"[",
"'totalBytesProcessed'",
"]",
",",
"is_cached",
"=",
"result",
"[",
"'cacheHit'",
"]",
")",
"if",
"args",
"[",
"'action'",
"]",
"==",
"'run'",
":",
"return",
"query",
".",
"execute",
"(",
"args",
"[",
"'target'",
"]",
",",
"table_mode",
"=",
"args",
"[",
"'mode'",
"]",
",",
"use_cache",
"=",
"not",
"args",
"[",
"'nocache'",
"]",
",",
"allow_large_results",
"=",
"args",
"[",
"'large'",
"]",
",",
"dialect",
"=",
"args",
"[",
"'dialect'",
"]",
",",
"billing_tier",
"=",
"args",
"[",
"'billing'",
"]",
")",
".",
"results"
] | Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
The supported syntax is:
%%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
[<YAML or JSON cell_body or inline SQL>]
Args:
args: the arguments following '%bigquery pipeline'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
The QueryResultsTable | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"used",
"to",
"validate",
"execute",
"or",
"deploy",
"BQ",
"pipelines",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L516-L548 |
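A hedged example of running a query through the pipeline magic; the query module name and the --target/--mode flag spellings (derived from the parsed argument names) are assumptions.

%%bigquery pipeline --sql logs_query run --target mydataset.daily_summary --mode append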
5,041 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _table_line | def _table_line(args):
"""Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table.
"""
# TODO(gram): It would be good to turn _table_viewer into a class that has a registered
# renderer. That would allow this to return a table viewer object which is easier to test.
name = args['table']
table = _get_table(name)
if table and table.exists():
fields = args['cols'].split(',') if args['cols'] else None
html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
return IPython.core.display.HTML(html)
else:
raise Exception('Table %s does not exist; cannot display' % name) | python | def _table_line(args):
"""Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table.
"""
# TODO(gram): It would be good to turn _table_viewer into a class that has a registered
# renderer. That would allow this to return a table viewer object which is easier to test.
name = args['table']
table = _get_table(name)
if table and table.exists():
fields = args['cols'].split(',') if args['cols'] else None
html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
return IPython.core.display.HTML(html)
else:
raise Exception('Table %s does not exist; cannot display' % name) | [
"def",
"_table_line",
"(",
"args",
")",
":",
"# TODO(gram): It would be good to turn _table_viewer into a class that has a registered",
"# renderer. That would allow this to return a table viewer object which is easier to test.",
"name",
"=",
"args",
"[",
"'table'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"table",
"and",
"table",
".",
"exists",
"(",
")",
":",
"fields",
"=",
"args",
"[",
"'cols'",
"]",
".",
"split",
"(",
"','",
")",
"if",
"args",
"[",
"'cols'",
"]",
"else",
"None",
"html",
"=",
"_table_viewer",
"(",
"table",
",",
"rows_per_page",
"=",
"args",
"[",
"'rows'",
"]",
",",
"fields",
"=",
"fields",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist; cannot display'",
"%",
"name",
")"
] | Implements the BigQuery table magic used to display tables.
The supported syntax is:
%bigquery table -t|--table <name> <other args>
Args:
args: the arguments following '%bigquery table'.
Returns:
The HTML rendering for the table. | [
"Implements",
"the",
"BigQuery",
"table",
"magic",
"used",
"to",
"display",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L551-L571 |
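An illustrative invocation of the table viewer magic; the table name is a placeholder and the --cols/--rows spellings follow the parsed argument names.

%bigquery table --table mydataset.events --cols id,label --rows 25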
5,042 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _get_schema | def _get_schema(name):
""" Given a variable or table name, get the Schema if it exists. """
item = datalab.utils.commands.get_notebook_item(name)
if not item:
item = _get_table(name)
if isinstance(item, datalab.bigquery.Schema):
return item
if hasattr(item, 'schema') and isinstance(item.schema, datalab.bigquery._schema.Schema):
return item.schema
return None | python | def _get_schema(name):
""" Given a variable or table name, get the Schema if it exists. """
item = datalab.utils.commands.get_notebook_item(name)
if not item:
item = _get_table(name)
if isinstance(item, datalab.bigquery.Schema):
return item
if hasattr(item, 'schema') and isinstance(item.schema, datalab.bigquery._schema.Schema):
return item.schema
return None | [
"def",
"_get_schema",
"(",
"name",
")",
":",
"item",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"name",
")",
"if",
"not",
"item",
":",
"item",
"=",
"_get_table",
"(",
"name",
")",
"if",
"isinstance",
"(",
"item",
",",
"datalab",
".",
"bigquery",
".",
"Schema",
")",
":",
"return",
"item",
"if",
"hasattr",
"(",
"item",
",",
"'schema'",
")",
"and",
"isinstance",
"(",
"item",
".",
"schema",
",",
"datalab",
".",
"bigquery",
".",
"_schema",
".",
"Schema",
")",
":",
"return",
"item",
".",
"schema",
"return",
"None"
] | Given a variable or table name, get the Schema if it exists. | [
"Given",
"a",
"variable",
"or",
"table",
"name",
"get",
"the",
"Schema",
"if",
"it",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L574-L584 |
5,043 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _render_table | def _render_table(data, fields=None):
""" Helper to render a list of dictionaries as an HTML display object. """
return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_table(data, fields)) | python | def _render_table(data, fields=None):
""" Helper to render a list of dictionaries as an HTML display object. """
return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_table(data, fields)) | [
"def",
"_render_table",
"(",
"data",
",",
"fields",
"=",
"None",
")",
":",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"HtmlBuilder",
".",
"render_table",
"(",
"data",
",",
"fields",
")",
")"
] | Helper to render a list of dictionaries as an HTML display object. | [
"Helper",
"to",
"render",
"a",
"list",
"of",
"dictionaries",
"as",
"an",
"HTML",
"display",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L636-L638 |
5,044 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _datasets_line | def _datasets_line(args):
"""Implements the BigQuery datasets magic used to display datasets in a project.
The supported syntax is:
%bigquery datasets [-f <filter>] [-p|--project <project_id>]
Args:
args: the arguments following '%bigquery datasets'.
Returns:
The HTML rendering for the table of datasets.
"""
filter_ = args['filter'] if args['filter'] else '*'
return _render_list([str(dataset) for dataset in datalab.bigquery.Datasets(args['project'])
if fnmatch.fnmatch(str(dataset), filter_)]) | python | def _datasets_line(args):
"""Implements the BigQuery datasets magic used to display datasets in a project.
The supported syntax is:
%bigquery datasets [-f <filter>] [-p|--project <project_id>]
Args:
args: the arguments following '%bigquery datasets'.
Returns:
The HTML rendering for the table of datasets.
"""
filter_ = args['filter'] if args['filter'] else '*'
return _render_list([str(dataset) for dataset in datalab.bigquery.Datasets(args['project'])
if fnmatch.fnmatch(str(dataset), filter_)]) | [
"def",
"_datasets_line",
"(",
"args",
")",
":",
"filter_",
"=",
"args",
"[",
"'filter'",
"]",
"if",
"args",
"[",
"'filter'",
"]",
"else",
"'*'",
"return",
"_render_list",
"(",
"[",
"str",
"(",
"dataset",
")",
"for",
"dataset",
"in",
"datalab",
".",
"bigquery",
".",
"Datasets",
"(",
"args",
"[",
"'project'",
"]",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"str",
"(",
"dataset",
")",
",",
"filter_",
")",
"]",
")"
] | Implements the BigQuery datasets magic used to display datasets in a project.
The supported syntax is:
%bigquery datasets [-f <filter>] [-p|--project <project_id>]
Args:
args: the arguments following '%bigquery datasets'.
Returns:
The HTML rendering for the table of datasets. | [
"Implements",
"the",
"BigQuery",
"datasets",
"magic",
"used",
"to",
"display",
"datasets",
"in",
"a",
"project",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L646-L660 |
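A short usage sketch for the datasets magic documented above, written as notebook cells; the project id and the filter pattern are placeholders rather than values taken from this repository.

# List datasets in the default project.
%bigquery datasets

# List datasets in another project whose ids match a wildcard filter.
%bigquery datasets --project my-sample-project -f sales_*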
5,045 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _tables_line | def _tables_line(args):
"""Implements the BigQuery tables magic used to display tables in a dataset.
The supported syntax is:
%bigquery tables -p|--project <project_id> -d|--dataset <dataset_id>
Args:
args: the arguments following '%bigquery tables'.
Returns:
The HTML rendering for the list of tables.
"""
filter_ = args['filter'] if args['filter'] else '*'
if args['dataset']:
if args['project'] is None:
datasets = [datalab.bigquery.Dataset(args['dataset'])]
else:
datasets = [datalab.bigquery.Dataset((args['project'], args['dataset']))]
else:
datasets = datalab.bigquery.Datasets(args['project'])
tables = []
for dataset in datasets:
tables.extend([str(table) for table in dataset if fnmatch.fnmatch(str(table), filter_)])
return _render_list(tables) | python | def _tables_line(args):
"""Implements the BigQuery tables magic used to display tables in a dataset.
The supported syntax is:
%bigquery tables -p|--project <project_id> -d|--dataset <dataset_id>
Args:
args: the arguments following '%bigquery tables'.
Returns:
The HTML rendering for the list of tables.
"""
filter_ = args['filter'] if args['filter'] else '*'
if args['dataset']:
if args['project'] is None:
datasets = [datalab.bigquery.Dataset(args['dataset'])]
else:
datasets = [datalab.bigquery.Dataset((args['project'], args['dataset']))]
else:
datasets = datalab.bigquery.Datasets(args['project'])
tables = []
for dataset in datasets:
tables.extend([str(table) for table in dataset if fnmatch.fnmatch(str(table), filter_)])
return _render_list(tables) | [
"def",
"_tables_line",
"(",
"args",
")",
":",
"filter_",
"=",
"args",
"[",
"'filter'",
"]",
"if",
"args",
"[",
"'filter'",
"]",
"else",
"'*'",
"if",
"args",
"[",
"'dataset'",
"]",
":",
"if",
"args",
"[",
"'project'",
"]",
"is",
"None",
":",
"datasets",
"=",
"[",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'dataset'",
"]",
")",
"]",
"else",
":",
"datasets",
"=",
"[",
"datalab",
".",
"bigquery",
".",
"Dataset",
"(",
"(",
"args",
"[",
"'project'",
"]",
",",
"args",
"[",
"'dataset'",
"]",
")",
")",
"]",
"else",
":",
"datasets",
"=",
"datalab",
".",
"bigquery",
".",
"Datasets",
"(",
"args",
"[",
"'project'",
"]",
")",
"tables",
"=",
"[",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"tables",
".",
"extend",
"(",
"[",
"str",
"(",
"table",
")",
"for",
"table",
"in",
"dataset",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"str",
"(",
"table",
")",
",",
"filter_",
")",
"]",
")",
"return",
"_render_list",
"(",
"tables",
")"
] | Implements the BigQuery tables magic used to display tables in a dataset.
The supported syntax is:
%bigquery tables -p|--project <project_id> -d|--dataset <dataset_id>
Args:
args: the arguments following '%bigquery tables'.
Returns:
The HTML rendering for the list of tables. | [
"Implements",
"the",
"BigQuery",
"tables",
"magic",
"used",
"to",
"display",
"tables",
"in",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L663-L688 |
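A similar sketch for the tables magic above; the dataset and project ids are placeholders. Per the implementation, omitting --dataset walks every dataset in the project.

# List tables in one dataset of the current project.
%bigquery tables --dataset my_dataset

# List tables across all datasets of another project.
%bigquery tables --project my-sample-project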
5,046 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | _extract_line | def _extract_line(args):
"""Implements the BigQuery extract magic used to extract table data to GCS.
The supported syntax is:
%bigquery extract -S|--source <table> -D|--destination <url> <other_args>
Args:
args: the arguments following '%bigquery extract'.
Returns:
A message about whether the extract succeeded or failed.
"""
name = args['source']
source = datalab.utils.commands.get_notebook_item(name)
if not source:
source = _get_table(name)
if not source:
raise Exception('No source named %s found' % name)
elif isinstance(source, datalab.bigquery.Table) and not source.exists():
raise Exception('Table %s does not exist' % name)
else:
job = source.extract(args['destination'],
format='CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON',
compress=args['compress'],
csv_delimiter=args['delimiter'],
csv_header=args['header'])
if job.failed:
raise Exception('Extract failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Extract completed with errors: %s' % str(job.errors)) | python | def _extract_line(args):
"""Implements the BigQuery extract magic used to extract table data to GCS.
The supported syntax is:
%bigquery extract -S|--source <table> -D|--destination <url> <other_args>
Args:
args: the arguments following '%bigquery extract'.
Returns:
A message about whether the extract succeeded or failed.
"""
name = args['source']
source = datalab.utils.commands.get_notebook_item(name)
if not source:
source = _get_table(name)
if not source:
raise Exception('No source named %s found' % name)
elif isinstance(source, datalab.bigquery.Table) and not source.exists():
raise Exception('Table %s does not exist' % name)
else:
job = source.extract(args['destination'],
format='CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON',
compress=args['compress'],
csv_delimiter=args['delimiter'],
csv_header=args['header'])
if job.failed:
raise Exception('Extract failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Extract completed with errors: %s' % str(job.errors)) | [
"def",
"_extract_line",
"(",
"args",
")",
":",
"name",
"=",
"args",
"[",
"'source'",
"]",
"source",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"name",
")",
"if",
"not",
"source",
":",
"source",
"=",
"_get_table",
"(",
"name",
")",
"if",
"not",
"source",
":",
"raise",
"Exception",
"(",
"'No source named %s found'",
"%",
"name",
")",
"elif",
"isinstance",
"(",
"source",
",",
"datalab",
".",
"bigquery",
".",
"Table",
")",
"and",
"not",
"source",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist'",
"%",
"name",
")",
"else",
":",
"job",
"=",
"source",
".",
"extract",
"(",
"args",
"[",
"'destination'",
"]",
",",
"format",
"=",
"'CSV'",
"if",
"args",
"[",
"'format'",
"]",
"==",
"'csv'",
"else",
"'NEWLINE_DELIMITED_JSON'",
",",
"compress",
"=",
"args",
"[",
"'compress'",
"]",
",",
"csv_delimiter",
"=",
"args",
"[",
"'delimiter'",
"]",
",",
"csv_header",
"=",
"args",
"[",
"'header'",
"]",
")",
"if",
"job",
".",
"failed",
":",
"raise",
"Exception",
"(",
"'Extract failed: %s'",
"%",
"str",
"(",
"job",
".",
"fatal_error",
")",
")",
"elif",
"job",
".",
"errors",
":",
"raise",
"Exception",
"(",
"'Extract completed with errors: %s'",
"%",
"str",
"(",
"job",
".",
"errors",
")",
")"
] | Implements the BigQuery extract magic used to extract table data to GCS.
The supported syntax is:
%bigquery extract -S|--source <table> -D|--destination <url> <other_args>
Args:
args: the arguments following '%bigquery extract'.
Returns:
A message about whether the extract succeeded or failed. | [
"Implements",
"the",
"BigQuery",
"extract",
"magic",
"used",
"to",
"extract",
"table",
"data",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L691-L722 |
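A usage sketch for the extract magic above; the table name and GCS URI are placeholders. The magic raises if the source cannot be found or if the extract job reports errors.

# Export a table to GCS as CSV (the format branch shown in the code above).
%bigquery extract --source my_dataset.my_table --destination gs://my-bucket/exports/my_table.csv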
5,047 | googledatalab/pydatalab | datalab/bigquery/commands/_bigquery.py | bigquery | def bigquery(line, cell=None):
"""Implements the bigquery cell magic for ipython notebooks.
The supported syntax is:
%%bigquery <command> [<args>]
<cell>
or:
%bigquery <command> [<args>]
Use %bigquery --help for a list of commands, or %bigquery <command> --help for help
on a specific command.
"""
namespace = {}
if line.find('$') >= 0:
# We likely have variables to expand; get the appropriate context.
namespace = datalab.utils.commands.notebook_environment()
return datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser, namespace=namespace) | python | def bigquery(line, cell=None):
"""Implements the bigquery cell magic for ipython notebooks.
The supported syntax is:
%%bigquery <command> [<args>]
<cell>
or:
%bigquery <command> [<args>]
Use %bigquery --help for a list of commands, or %bigquery <command> --help for help
on a specific command.
"""
namespace = {}
if line.find('$') >= 0:
# We likely have variables to expand; get the appropriate context.
namespace = datalab.utils.commands.notebook_environment()
return datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser, namespace=namespace) | [
"def",
"bigquery",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"namespace",
"=",
"{",
"}",
"if",
"line",
".",
"find",
"(",
"'$'",
")",
">=",
"0",
":",
"# We likely have variables to expand; get the appropriate context.",
"namespace",
"=",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"return",
"datalab",
".",
"utils",
".",
"commands",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"_bigquery_parser",
",",
"namespace",
"=",
"namespace",
")"
] | Implements the bigquery cell magic for ipython notebooks.
The supported syntax is:
%%bigquery <command> [<args>]
<cell>
or:
%bigquery <command> [<args>]
Use %bigquery --help for a list of commands, or %bigquery <command> --help for help
on a specific command. | [
"Implements",
"the",
"bigquery",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/commands/_bigquery.py#L840-L860 |
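A sketch of the line form together with the $-prefixed variable expansion the implementation checks for; the variable and project names are placeholders, and %bigquery --help lists the available sub-commands.

# Notebook cell: $project is expanded from the notebook namespace.
project = 'my-sample-project'
%bigquery datasets --project $project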
5,048 | googledatalab/pydatalab | google/datalab/bigquery/_query_output.py | QueryOutput.table | def table(name=None, mode='create', use_cache=True, priority='interactive',
allow_large_results=False):
""" Construct a query output object where the result is a table
Args:
name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a name to be specified (default False).
"""
output = QueryOutput()
output._output_type = 'table'
output._table_name = name
output._table_mode = mode
output._use_cache = use_cache
output._priority = priority
output._allow_large_results = allow_large_results
return output | python | def table(name=None, mode='create', use_cache=True, priority='interactive',
allow_large_results=False):
""" Construct a query output object where the result is a table
Args:
name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a name to be specified (default False).
"""
output = QueryOutput()
output._output_type = 'table'
output._table_name = name
output._table_mode = mode
output._use_cache = use_cache
output._priority = priority
output._allow_large_results = allow_large_results
return output | [
"def",
"table",
"(",
"name",
"=",
"None",
",",
"mode",
"=",
"'create'",
",",
"use_cache",
"=",
"True",
",",
"priority",
"=",
"'interactive'",
",",
"allow_large_results",
"=",
"False",
")",
":",
"output",
"=",
"QueryOutput",
"(",
")",
"output",
".",
"_output_type",
"=",
"'table'",
"output",
".",
"_table_name",
"=",
"name",
"output",
".",
"_table_mode",
"=",
"mode",
"output",
".",
"_use_cache",
"=",
"use_cache",
"output",
".",
"_priority",
"=",
"priority",
"output",
".",
"_allow_large_results",
"=",
"allow_large_results",
"return",
"output"
] | Construct a query output object where the result is a table
Args:
name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a name to be specified (default False). | [
"Construct",
"a",
"query",
"output",
"object",
"where",
"the",
"result",
"is",
"a",
"table"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query_output.py#L19-L43 |
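A minimal sketch pairing this output object with a query, assuming the package-level Query class and its execute(output_options=...) signature; the dataset, table and SQL names are placeholders, not values from this file.

import google.datalab.bigquery as bq

sql = 'SELECT word, COUNT(*) AS n FROM `my_dataset.words` GROUP BY word'
output = bq.QueryOutput.table(name='my_dataset.word_counts', mode='overwrite',
                              allow_large_results=True)
job = bq.Query(sql).execute(output_options=output)
results = job.result()  # table object backed by my_dataset.word_counts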
5,049 | googledatalab/pydatalab | google/datalab/bigquery/_query_output.py | QueryOutput.file | def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False,
use_cache=True):
""" Construct a query output object where the result is either a local file or a GCS path
Note that there are two jobs that may need to be run sequentially, one to run the query,
and the second to extract the resulting table. These are wrapped by a single outer Job.
If the query has already been executed and you would prefer to get a Job just for the
extract, you can call extract[_async] on the QueryResultsTable returned by the query.
Args:
path: the destination path. Can either be a local or GCS URI (starting with gs://)
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use (default ',').
csv_header: for CSV exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False). Applies only to GCS URIs.
use_cache: whether to use cached results or not (default True).
"""
output = QueryOutput()
output._output_type = 'file'
output._file_path = path
output._file_format = format
output._csv_delimiter = csv_delimiter
output._csv_header = csv_header
output._compress_file = compress
return output | python | def file(path, format='csv', csv_delimiter=',', csv_header=True, compress=False,
use_cache=True):
""" Construct a query output object where the result is either a local file or a GCS path
Note that there are two jobs that may need to be run sequentially, one to run the query,
and the second to extract the resulting table. These are wrapped by a single outer Job.
If the query has already been executed and you would prefer to get a Job just for the
extract, you can call extract[_async] on the QueryResultsTable returned by the query.
Args:
path: the destination path. Can either be a local or GCS URI (starting with gs://)
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use (default ',').
csv_header: for CSV exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False). Applies only to GCS URIs.
use_cache: whether to use cached results or not (default True).
"""
output = QueryOutput()
output._output_type = 'file'
output._file_path = path
output._file_format = format
output._csv_delimiter = csv_delimiter
output._csv_header = csv_header
output._compress_file = compress
return output | [
"def",
"file",
"(",
"path",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"','",
",",
"csv_header",
"=",
"True",
",",
"compress",
"=",
"False",
",",
"use_cache",
"=",
"True",
")",
":",
"output",
"=",
"QueryOutput",
"(",
")",
"output",
".",
"_output_type",
"=",
"'file'",
"output",
".",
"_file_path",
"=",
"path",
"output",
".",
"_file_format",
"=",
"format",
"output",
".",
"_csv_delimiter",
"=",
"csv_delimiter",
"output",
".",
"_csv_header",
"=",
"csv_header",
"output",
".",
"_compress_file",
"=",
"compress",
"return",
"output"
] | Construct a query output object where the result is either a local file or a GCS path
Note that there are two jobs that may need to be run sequentially, one to run the query,
and the second to extract the resulting table. These are wrapped by a single outer Job.
If the query has already been executed and you would prefer to get a Job just for the
extract, you can call extract[_async] on the QueryResultsTable returned by the query.
Args:
path: the destination path. Can either be a local or GCS URI (starting with gs://)
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for CSV exports, the field delimiter to use (default ',').
csv_header: for CSV exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False). Applies only to GCS URIs.
use_cache: whether to use cached results or not (default True). | [
"Construct",
"a",
"query",
"output",
"object",
"where",
"the",
"result",
"is",
"either",
"a",
"local",
"file",
"or",
"a",
"GCS",
"path"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query_output.py#L46-L73 |
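A sketch of the file-output variant under the same Query.execute assumption; the GCS path and SQL are placeholders. As the docstring notes, a GCS destination wraps a query job plus an extract job inside one outer job.

import google.datalab.bigquery as bq

output = bq.QueryOutput.file('gs://my-bucket/exports/top_words.csv.gz',
                             format='csv', csv_header=True, compress=True)
bq.Query('SELECT word FROM `my_dataset.words` LIMIT 1000').execute(output_options=output)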
5,050 | googledatalab/pydatalab | google/datalab/bigquery/_query_output.py | QueryOutput.dataframe | def dataframe(start_row=0, max_rows=None, use_cache=True):
""" Construct a query output object where the result is a dataframe
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True).
"""
output = QueryOutput()
output._output_type = 'dataframe'
output._dataframe_start_row = start_row
output._dataframe_max_rows = max_rows
output._use_cache = use_cache
return output | python | def dataframe(start_row=0, max_rows=None, use_cache=True):
""" Construct a query output object where the result is a dataframe
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True).
"""
output = QueryOutput()
output._output_type = 'dataframe'
output._dataframe_start_row = start_row
output._dataframe_max_rows = max_rows
output._use_cache = use_cache
return output | [
"def",
"dataframe",
"(",
"start_row",
"=",
"0",
",",
"max_rows",
"=",
"None",
",",
"use_cache",
"=",
"True",
")",
":",
"output",
"=",
"QueryOutput",
"(",
")",
"output",
".",
"_output_type",
"=",
"'dataframe'",
"output",
".",
"_dataframe_start_row",
"=",
"start_row",
"output",
".",
"_dataframe_max_rows",
"=",
"max_rows",
"output",
".",
"_use_cache",
"=",
"use_cache",
"return",
"output"
] | Construct a query output object where the result is a dataframe
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True). | [
"Construct",
"a",
"query",
"output",
"object",
"where",
"the",
"result",
"is",
"a",
"dataframe"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query_output.py#L76-L89 |
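A sketch of the dataframe-output variant, again assuming Query.execute(output_options=...) and that the job's result() materializes a pandas DataFrame for this output type; the SQL is a placeholder.

import google.datalab.bigquery as bq

output = bq.QueryOutput.dataframe(max_rows=1000)
df = bq.Query('SELECT word, n FROM `my_dataset.word_counts`').execute(output_options=output).result()
df.head()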
5,051 | googledatalab/pydatalab | google/datalab/ml/_tensorboard.py | TensorBoard.list | def list():
"""List running TensorBoard instances."""
running_list = []
parser = argparse.ArgumentParser()
parser.add_argument('--logdir')
parser.add_argument('--port')
for p in psutil.process_iter():
if p.name() != 'tensorboard' or p.status() == psutil.STATUS_ZOMBIE:
continue
cmd_args = p.cmdline()
del cmd_args[0:2] # remove 'python' and 'tensorboard'
args = parser.parse_args(cmd_args)
running_list.append({'pid': p.pid, 'logdir': args.logdir, 'port': args.port})
return pd.DataFrame(running_list) | python | def list():
"""List running TensorBoard instances."""
running_list = []
parser = argparse.ArgumentParser()
parser.add_argument('--logdir')
parser.add_argument('--port')
for p in psutil.process_iter():
if p.name() != 'tensorboard' or p.status() == psutil.STATUS_ZOMBIE:
continue
cmd_args = p.cmdline()
del cmd_args[0:2] # remove 'python' and 'tensorboard'
args = parser.parse_args(cmd_args)
running_list.append({'pid': p.pid, 'logdir': args.logdir, 'port': args.port})
return pd.DataFrame(running_list) | [
"def",
"list",
"(",
")",
":",
"running_list",
"=",
"[",
"]",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--logdir'",
")",
"parser",
".",
"add_argument",
"(",
"'--port'",
")",
"for",
"p",
"in",
"psutil",
".",
"process_iter",
"(",
")",
":",
"if",
"p",
".",
"name",
"(",
")",
"!=",
"'tensorboard'",
"or",
"p",
".",
"status",
"(",
")",
"==",
"psutil",
".",
"STATUS_ZOMBIE",
":",
"continue",
"cmd_args",
"=",
"p",
".",
"cmdline",
"(",
")",
"del",
"cmd_args",
"[",
"0",
":",
"2",
"]",
"# remove 'python' and 'tensorboard'",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"cmd_args",
")",
"running_list",
".",
"append",
"(",
"{",
"'pid'",
":",
"p",
".",
"pid",
",",
"'logdir'",
":",
"args",
".",
"logdir",
",",
"'port'",
":",
"args",
".",
"port",
"}",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"running_list",
")"
] | List running TensorBoard instances. | [
"List",
"running",
"TensorBoard",
"instances",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_tensorboard.py#L33-L47 |
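A usage sketch for the static method above; it returns a pandas DataFrame, so the usual DataFrame operations apply.

from google.datalab.ml import TensorBoard

running = TensorBoard.list()  # columns: pid, logdir, port
print(running)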
5,052 | googledatalab/pydatalab | google/datalab/ml/_tensorboard.py | TensorBoard.start | def start(logdir):
"""Start a TensorBoard instance.
Args:
logdir: the logdir to run TensorBoard on.
Raises:
Exception if the instance cannot be started.
"""
if logdir.startswith('gs://'):
# Check that the user has access. TensorBoard will start successfully regardless
# of whether the user has read permissions, so we check permissions here to
# alert the user if needed.
datalab.storage._api.Api.verify_permitted_to_read(logdir)
port = datalab.utils.pick_unused_port()
args = ['tensorboard', '--logdir=' + logdir, '--port=' + str(port)]
p = subprocess.Popen(args)
retry = 10
while (retry > 0):
if datalab.utils.is_http_running_on(port):
basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)
html = '<p>TensorBoard was started successfully with pid %d. ' % p.pid
html += 'Click <a href="%s" target="_blank">here</a> to access it.</p>' % url
IPython.display.display_html(html, raw=True)
return p.pid
time.sleep(1)
retry -= 1
raise Exception('Cannot start TensorBoard.') | python | def start(logdir):
"""Start a TensorBoard instance.
Args:
logdir: the logdir to run TensorBoard on.
Raises:
Exception if the instance cannot be started.
"""
if logdir.startswith('gs://'):
# Check that the user has access. TensorBoard will start successfully regardless
# of whether the user has read permissions, so we check permissions here to
# alert the user if needed.
datalab.storage._api.Api.verify_permitted_to_read(logdir)
port = datalab.utils.pick_unused_port()
args = ['tensorboard', '--logdir=' + logdir, '--port=' + str(port)]
p = subprocess.Popen(args)
retry = 10
while (retry > 0):
if datalab.utils.is_http_running_on(port):
basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
url = '%s/_proxy/%d/' % (basepath.rstrip('/'), port)
html = '<p>TensorBoard was started successfully with pid %d. ' % p.pid
html += 'Click <a href="%s" target="_blank">here</a> to access it.</p>' % url
IPython.display.display_html(html, raw=True)
return p.pid
time.sleep(1)
retry -= 1
raise Exception('Cannot start TensorBoard.') | [
"def",
"start",
"(",
"logdir",
")",
":",
"if",
"logdir",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"# Check user does have access. TensorBoard will start successfully regardless",
"# the user has read permissions or not so we check permissions here to",
"# give user alerts if needed.",
"datalab",
".",
"storage",
".",
"_api",
".",
"Api",
".",
"verify_permitted_to_read",
"(",
"logdir",
")",
"port",
"=",
"datalab",
".",
"utils",
".",
"pick_unused_port",
"(",
")",
"args",
"=",
"[",
"'tensorboard'",
",",
"'--logdir='",
"+",
"logdir",
",",
"'--port='",
"+",
"str",
"(",
"port",
")",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
")",
"retry",
"=",
"10",
"while",
"(",
"retry",
">",
"0",
")",
":",
"if",
"datalab",
".",
"utils",
".",
"is_http_running_on",
"(",
"port",
")",
":",
"basepath",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'DATALAB_ENDPOINT_URL'",
",",
"''",
")",
"url",
"=",
"'%s/_proxy/%d/'",
"%",
"(",
"basepath",
".",
"rstrip",
"(",
"'/'",
")",
",",
"port",
")",
"html",
"=",
"'<p>TensorBoard was started successfully with pid %d. '",
"%",
"p",
".",
"pid",
"html",
"+=",
"'Click <a href=\"%s\" target=\"_blank\">here</a> to access it.</p>'",
"%",
"url",
"IPython",
".",
"display",
".",
"display_html",
"(",
"html",
",",
"raw",
"=",
"True",
")",
"return",
"p",
".",
"pid",
"time",
".",
"sleep",
"(",
"1",
")",
"retry",
"-=",
"1",
"raise",
"Exception",
"(",
"'Cannot start TensorBoard.'",
")"
] | Start a TensorBoard instance.
Args:
logdir: the logdir to run TensorBoard on.
Raises:
Exception if the instance cannot be started. | [
"Start",
"a",
"TensorBoard",
"instance",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_tensorboard.py#L50-L79 |
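A usage sketch for start(); the GCS log directory is a placeholder and a local directory works as well. The returned pid can later be handed to stop().

from google.datalab.ml import TensorBoard

pid = TensorBoard.start('gs://my-bucket/training/logs')  # also renders a proxy link in the notebook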
5,053 | googledatalab/pydatalab | google/datalab/ml/_tensorboard.py | TensorBoard.stop | def stop(pid):
"""Shut down a specific process.
Args:
pid: the pid of the process to shutdown.
"""
if psutil.pid_exists(pid):
try:
p = psutil.Process(pid)
p.kill()
except Exception:
pass | python | def stop(pid):
"""Shut down a specific process.
Args:
pid: the pid of the process to shutdown.
"""
if psutil.pid_exists(pid):
try:
p = psutil.Process(pid)
p.kill()
except Exception:
pass | [
"def",
"stop",
"(",
"pid",
")",
":",
"if",
"psutil",
".",
"pid_exists",
"(",
"pid",
")",
":",
"try",
":",
"p",
"=",
"psutil",
".",
"Process",
"(",
"pid",
")",
"p",
".",
"kill",
"(",
")",
"except",
"Exception",
":",
"pass"
] | Shut down a specific process.
Args:
pid: the pid of the process to shutdown. | [
"Shut",
"down",
"a",
"specific",
"process",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_tensorboard.py#L82-L93 |
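A sketch combining list() and stop() to shut down every running instance, guarded because list() returns an empty DataFrame when nothing is running.

from google.datalab.ml import TensorBoard

running = TensorBoard.list()
if not running.empty:
    for pid in running['pid']:
        TensorBoard.stop(int(pid))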
5,054 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py | EmbeddingsGraph.build_graph | def build_graph(self):
"""Forms the core by building a wrapper around the inception graph.
Here we add the necessary input & output tensors, to decode jpegs,
serialize embeddings, restore from checkpoint etc.
To use other Inception models modify this file. Note that to use other
models beside Inception, you should make sure input_shape matches
their input. Resizing or other modifications may be necessary as well.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A tensor containing raw image bytes as the input layer.
embedding: The embeddings tensor, that will be materialized later.
"""
import tensorflow as tf
input_jpeg = tf.placeholder(tf.string, shape=None)
image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)
# Note resize expects a batch_size, but we are feeding a single image.
# So we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
# convert_image_dtype also scales [0, uint8_max] -> [0 ,1).
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize_bilinear(
image, [self.HEIGHT, self.WIDTH], align_corners=False)
# Then rescale range to [-1, 1) for Inception.
image = tf.subtract(image, 0.5)
inception_input = tf.multiply(image, 2.0)
# Build Inception layers, which expect a tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
_, end_points = _inceptionlib.inception_v3(inception_input, is_training=False)
embedding = end_points['PreLogits']
return input_jpeg, embedding | python | def build_graph(self):
"""Forms the core by building a wrapper around the inception graph.
Here we add the necessary input & output tensors, to decode jpegs,
serialize embeddings, restore from checkpoint etc.
To use other Inception models modify this file. Note that to use other
models beside Inception, you should make sure input_shape matches
their input. Resizing or other modifications may be necessary as well.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A tensor containing raw image bytes as the input layer.
embedding: The embeddings tensor, that will be materialized later.
"""
import tensorflow as tf
input_jpeg = tf.placeholder(tf.string, shape=None)
image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)
# Note resize expects a batch_size, but we are feeding a single image.
# So we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
# convert_image_dtype also scales [0, uint8_max] -> [0 ,1).
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize_bilinear(
image, [self.HEIGHT, self.WIDTH], align_corners=False)
# Then rescale range to [-1, 1) for Inception.
image = tf.subtract(image, 0.5)
inception_input = tf.multiply(image, 2.0)
# Build Inception layers, which expect a tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
_, end_points = _inceptionlib.inception_v3(inception_input, is_training=False)
embedding = end_points['PreLogits']
return input_jpeg, embedding | [
"def",
"build_graph",
"(",
"self",
")",
":",
"import",
"tensorflow",
"as",
"tf",
"input_jpeg",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"shape",
"=",
"None",
")",
"image",
"=",
"tf",
".",
"image",
".",
"decode_jpeg",
"(",
"input_jpeg",
",",
"channels",
"=",
"self",
".",
"CHANNELS",
")",
"# Note resize expects a batch_size, but we are feeding a single image.",
"# So we have to expand then squeeze. Resize returns float32 in the",
"# range [0, uint8_max]",
"image",
"=",
"tf",
".",
"expand_dims",
"(",
"image",
",",
"0",
")",
"# convert_image_dtype also scales [0, uint8_max] -> [0 ,1).",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"image",
"=",
"tf",
".",
"image",
".",
"resize_bilinear",
"(",
"image",
",",
"[",
"self",
".",
"HEIGHT",
",",
"self",
".",
"WIDTH",
"]",
",",
"align_corners",
"=",
"False",
")",
"# Then rescale range to [-1, 1) for Inception.",
"image",
"=",
"tf",
".",
"subtract",
"(",
"image",
",",
"0.5",
")",
"inception_input",
"=",
"tf",
".",
"multiply",
"(",
"image",
",",
"2.0",
")",
"# Build Inception layers, which expect a tensor of type float from [-1, 1)",
"# and shape [batch_size, height, width, channels].",
"with",
"tf",
".",
"contrib",
".",
"slim",
".",
"arg_scope",
"(",
"_inceptionlib",
".",
"inception_v3_arg_scope",
"(",
")",
")",
":",
"_",
",",
"end_points",
"=",
"_inceptionlib",
".",
"inception_v3",
"(",
"inception_input",
",",
"is_training",
"=",
"False",
")",
"embedding",
"=",
"end_points",
"[",
"'PreLogits'",
"]",
"return",
"input_jpeg",
",",
"embedding"
] | Forms the core by building a wrapper around the inception graph.
Here we add the necessary input & output tensors, to decode jpegs,
serialize embeddings, restore from checkpoint etc.
To use other Inception models modify this file. Note that to use other
models beside Inception, you should make sure input_shape matches
their input. Resizing or other modifications may be necessary as well.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A tensor containing raw image bytes as the input layer.
embedding: The embeddings tensor, that will be materialized later. | [
"Forms",
"the",
"core",
"by",
"building",
"a",
"wrapper",
"around",
"the",
"inception",
"graph",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py#L126-L167 |
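A self-contained sketch of the preprocessing chain the wrapper builds ahead of Inception, written against the TensorFlow 1.x API this module targets; the 299x299x3 constants stand in for the class's HEIGHT/WIDTH/CHANNELS and match InceptionV3's expected input size. Feeding the Inception layers themselves additionally needs the slim inception_v3 definition and a restored checkpoint, which the surrounding class handles.

import tensorflow as tf  # TensorFlow 1.x style, as used by this module

HEIGHT, WIDTH, CHANNELS = 299, 299, 3  # stand-ins for the class constants

input_jpeg = tf.placeholder(tf.string, shape=None)
image = tf.image.decode_jpeg(input_jpeg, channels=CHANNELS)
image = tf.expand_dims(image, 0)                               # add a batch dimension
image = tf.image.convert_image_dtype(image, dtype=tf.float32)  # scale pixels to [0, 1)
image = tf.image.resize_bilinear(image, [HEIGHT, WIDTH], align_corners=False)
inception_input = tf.multiply(tf.subtract(image, 0.5), 2.0)    # shift/scale to [-1, 1)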
5,055 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py | EmbeddingsGraph.restore_from_checkpoint | def restore_from_checkpoint(self, checkpoint_path):
"""To restore inception model variables from the checkpoint file.
Some variables might be missing in the checkpoint file, so it only
loads the ones that are available, assuming the rest would be
initialized later.
Args:
checkpoint_path: Path to the checkpoint file for the Inception graph.
"""
import tensorflow as tf
# Get all variables to restore. Exclude Logits and AuxLogits because they
# depend on the input data and we do not need to initialize them from
# checkpoint.
all_vars = tf.contrib.slim.get_variables_to_restore(
exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])
saver = tf.train.Saver(all_vars)
saver.restore(self.tf_session, checkpoint_path) | python | def restore_from_checkpoint(self, checkpoint_path):
"""To restore inception model variables from the checkpoint file.
Some variables might be missing in the checkpoint file, so it only
loads the ones that are available, assuming the rest would be
initialized later.
Args:
checkpoint_path: Path to the checkpoint file for the Inception graph.
"""
import tensorflow as tf
# Get all variables to restore. Exclude Logits and AuxLogits because they
# depend on the input data and we do not need to initialize them from
# checkpoint.
all_vars = tf.contrib.slim.get_variables_to_restore(
exclude=['InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step'])
saver = tf.train.Saver(all_vars)
saver.restore(self.tf_session, checkpoint_path) | [
"def",
"restore_from_checkpoint",
"(",
"self",
",",
"checkpoint_path",
")",
":",
"import",
"tensorflow",
"as",
"tf",
"# Get all variables to restore. Exclude Logits and AuxLogits because they",
"# depend on the input data and we do not need to intialize them from",
"# checkpoint.",
"all_vars",
"=",
"tf",
".",
"contrib",
".",
"slim",
".",
"get_variables_to_restore",
"(",
"exclude",
"=",
"[",
"'InceptionV3/AuxLogits'",
",",
"'InceptionV3/Logits'",
",",
"'global_step'",
"]",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"all_vars",
")",
"saver",
".",
"restore",
"(",
"self",
".",
"tf_session",
",",
"checkpoint_path",
")"
] | To restore inception model variables from the checkpoint file.
Some variables might be missing in the checkpoint file, so it only
loads the ones that are available, assuming the rest would be
initialized later.
Args:
checkpoint_path: Path to the checkpoint file for the Inception graph. | [
"To",
"restore",
"inception",
"model",
"variables",
"from",
"the",
"checkpoint",
"file",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py#L169-L186 |
5,056 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py | EmbeddingsGraph.calculate_embedding | def calculate_embedding(self, batch_image_bytes):
"""Get the embeddings for a given JPEG image.
Args:
batch_image_bytes: As if returned from [ff.read() for ff in file_list].
Returns:
The Inception embeddings (bottleneck layer output)
"""
return self.tf_session.run(
self.embedding, feed_dict={self.input_jpeg: batch_image_bytes}) | python | def calculate_embedding(self, batch_image_bytes):
"""Get the embeddings for a given JPEG image.
Args:
batch_image_bytes: As if returned from [ff.read() for ff in file_list].
Returns:
The Inception embeddings (bottleneck layer output)
"""
return self.tf_session.run(
self.embedding, feed_dict={self.input_jpeg: batch_image_bytes}) | [
"def",
"calculate_embedding",
"(",
"self",
",",
"batch_image_bytes",
")",
":",
"return",
"self",
".",
"tf_session",
".",
"run",
"(",
"self",
".",
"embedding",
",",
"feed_dict",
"=",
"{",
"self",
".",
"input_jpeg",
":",
"batch_image_bytes",
"}",
")"
] | Get the embeddings for a given JPEG image.
Args:
batch_image_bytes: As if returned from [ff.read() for ff in file_list].
Returns:
The Inception embeddings (bottleneck layer output) | [
"Get",
"the",
"embeddings",
"for",
"a",
"given",
"JPEG",
"image",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_preprocess.py#L188-L198 |
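A hypothetical end-to-end sketch: the EmbeddingsGraph constructor is not shown in this section, so `graph` below is assumed to be an already-built instance whose checkpoint has been restored; the image paths are placeholders.

import tensorflow as tf

paths = ['gs://my-bucket/images/a.jpg', 'gs://my-bucket/images/b.jpg']
batch_image_bytes = [tf.gfile.GFile(p, 'rb').read() for p in paths]

# `graph` is an assumed EmbeddingsGraph instance (built and restored elsewhere).
embeddings = graph.calculate_embedding(batch_image_bytes)
print(embeddings.shape)  # roughly (2, 1, 1, 2048) for InceptionV3 PreLogits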
5,057 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.add_final_training_ops | def add_final_training_ops(self,
embeddings,
all_labels_count,
bottleneck_tensor_size,
hidden_layer_size=BOTTLENECK_TENSOR_SIZE / 4,
dropout_keep_prob=None):
"""Adds a new softmax and fully-connected layer for training.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
This function can be customized to add arbitrary layers for
application-specific requirements.
Args:
embeddings: The embedding (bottleneck) tensor.
all_labels_count: The number of all labels including the default label.
bottleneck_tensor_size: The number of embeddings.
hidden_layer_size: The size of the hidden_layer. Roughly, 1/4 of the
bottleneck tensor size.
dropout_keep_prob: the percentage of activation values that are retained.
Returns:
softmax: The softmax or tensor. It stores the final scores.
logits: The logits tensor.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
embeddings,
shape=[None, bottleneck_tensor_size],
name='ReshapeSqueezed')
bottleneck_with_no_gradient = tf.stop_gradient(bottleneck_input)
with tf.name_scope('Wx_plus_b'):
hidden = layers.fully_connected(bottleneck_with_no_gradient,
hidden_layer_size)
# We need a dropout when the size of the dataset is rather small.
if dropout_keep_prob:
hidden = tf.nn.dropout(hidden, dropout_keep_prob)
logits = layers.fully_connected(
hidden, all_labels_count, activation_fn=None)
softmax = tf.nn.softmax(logits, name='softmax')
return softmax, logits | python | def add_final_training_ops(self,
embeddings,
all_labels_count,
bottleneck_tensor_size,
hidden_layer_size=BOTTLENECK_TENSOR_SIZE / 4,
dropout_keep_prob=None):
"""Adds a new softmax and fully-connected layer for training.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
This function can be customized to add arbitrary layers for
application-specific requirements.
Args:
embeddings: The embedding (bottleneck) tensor.
all_labels_count: The number of all labels including the default label.
bottleneck_tensor_size: The number of embeddings.
hidden_layer_size: The size of the hidden_layer. Roughly, 1/4 of the
bottleneck tensor size.
dropout_keep_prob: the percentage of activation values that are retained.
Returns:
softmax: The softmax or tensor. It stores the final scores.
logits: The logits tensor.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
embeddings,
shape=[None, bottleneck_tensor_size],
name='ReshapeSqueezed')
bottleneck_with_no_gradient = tf.stop_gradient(bottleneck_input)
with tf.name_scope('Wx_plus_b'):
hidden = layers.fully_connected(bottleneck_with_no_gradient,
hidden_layer_size)
# We need a dropout when the size of the dataset is rather small.
if dropout_keep_prob:
hidden = tf.nn.dropout(hidden, dropout_keep_prob)
logits = layers.fully_connected(
hidden, all_labels_count, activation_fn=None)
softmax = tf.nn.softmax(logits, name='softmax')
return softmax, logits | [
"def",
"add_final_training_ops",
"(",
"self",
",",
"embeddings",
",",
"all_labels_count",
",",
"bottleneck_tensor_size",
",",
"hidden_layer_size",
"=",
"BOTTLENECK_TENSOR_SIZE",
"/",
"4",
",",
"dropout_keep_prob",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'input'",
")",
":",
"bottleneck_input",
"=",
"tf",
".",
"placeholder_with_default",
"(",
"embeddings",
",",
"shape",
"=",
"[",
"None",
",",
"bottleneck_tensor_size",
"]",
",",
"name",
"=",
"'ReshapeSqueezed'",
")",
"bottleneck_with_no_gradient",
"=",
"tf",
".",
"stop_gradient",
"(",
"bottleneck_input",
")",
"with",
"tf",
".",
"name_scope",
"(",
"'Wx_plus_b'",
")",
":",
"hidden",
"=",
"layers",
".",
"fully_connected",
"(",
"bottleneck_with_no_gradient",
",",
"hidden_layer_size",
")",
"# We need a dropout when the size of the dataset is rather small.",
"if",
"dropout_keep_prob",
":",
"hidden",
"=",
"tf",
".",
"nn",
".",
"dropout",
"(",
"hidden",
",",
"dropout_keep_prob",
")",
"logits",
"=",
"layers",
".",
"fully_connected",
"(",
"hidden",
",",
"all_labels_count",
",",
"activation_fn",
"=",
"None",
")",
"softmax",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
",",
"name",
"=",
"'softmax'",
")",
"return",
"softmax",
",",
"logits"
] | Adds a new softmax and fully-connected layer for training.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
This function can be customized to add arbitrary layers for
application-specific requirements.
Args:
embeddings: The embedding (bottleneck) tensor.
all_labels_count: The number of all labels including the default label.
bottleneck_tensor_size: The number of embeddings.
hidden_layer_size: The size of the hidden_layer. Roughly, 1/4 of the
bottleneck tensor size.
dropout_keep_prob: the percentage of activation values that are retained.
Returns:
softmax: The softmax or tensor. It stores the final scores.
logits: The logits tensor. | [
"Adds",
"a",
"new",
"softmax",
"and",
"fully",
"-",
"connected",
"layer",
"for",
"training",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L71-L112 |
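A compact sketch of the layer stack this method assembles, using tf.contrib.layers directly; the bottleneck width of 2048 matches InceptionV3's PreLogits and the label count of five is an arbitrary example, both assumptions rather than values read from this section.

import tensorflow as tf
from tensorflow.contrib import layers  # TensorFlow 1.x

BOTTLENECK_TENSOR_SIZE = 2048  # InceptionV3 PreLogits width (assumed constant)
ALL_LABELS_COUNT = 5 + 1       # example: five real labels plus the default label

embeddings = tf.placeholder(tf.float32, shape=[None, BOTTLENECK_TENSOR_SIZE])
hidden = layers.fully_connected(tf.stop_gradient(embeddings), BOTTLENECK_TENSOR_SIZE // 4)
hidden = tf.nn.dropout(hidden, keep_prob=0.5)    # applied only during training
logits = layers.fully_connected(hidden, ALL_LABELS_COUNT, activation_fn=None)
softmax = tf.nn.softmax(logits, name='softmax')  # per-example class scores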
5,058 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.build_inception_graph | def build_inception_graph(self):
"""Builds an inception graph and add the necessary input & output tensors.
To use other Inception models modify this file. Also preprocessing must be
modified accordingly.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A placeholder for jpeg string batch that allows feeding the
Inception layer with image bytes for prediction.
inception_embeddings: The embeddings tensor.
"""
image_str_tensor = tf.placeholder(tf.string, shape=[None])
# The CloudML Prediction API always "feeds" the Tensorflow graph with
# dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar
# strings because it cannot guarantee a batch of images would have
# the same output size. We use tf.map_fn to give decode_jpeg a scalar
# string from dynamic batches.
image = tf.map_fn(
_util.decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
# convert_image_dtype, also scales [0, uint8_max] -> [0 ,1).
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Then shift images to [-1, 1) for Inception.
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
# Build Inception layers, which expect A tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
_, end_points = _inceptionlib.inception_v3(image, is_training=False)
inception_embeddings = end_points['PreLogits']
inception_embeddings = tf.squeeze(
inception_embeddings, [1, 2], name='SpatialSqueeze')
return image_str_tensor, inception_embeddings | python | def build_inception_graph(self):
"""Builds an inception graph and add the necessary input & output tensors.
To use other Inception models modify this file. Also preprocessing must be
modified accordingly.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A placeholder for jpeg string batch that allows feeding the
Inception layer with image bytes for prediction.
inception_embeddings: The embeddings tensor.
"""
image_str_tensor = tf.placeholder(tf.string, shape=[None])
# The CloudML Prediction API always "feeds" the Tensorflow graph with
# dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar
# strings because it cannot guarantee a batch of images would have
# the same output size. We use tf.map_fn to give decode_jpeg a scalar
# string from dynamic batches.
image = tf.map_fn(
_util.decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
# convert_image_dtype, also scales [0, uint8_max] -> [0 ,1).
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Then shift images to [-1, 1) for Inception.
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
# Build Inception layers, which expect A tensor of type float from [-1, 1)
# and shape [batch_size, height, width, channels].
with slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
_, end_points = _inceptionlib.inception_v3(image, is_training=False)
inception_embeddings = end_points['PreLogits']
inception_embeddings = tf.squeeze(
inception_embeddings, [1, 2], name='SpatialSqueeze')
return image_str_tensor, inception_embeddings | [
"def",
"build_inception_graph",
"(",
"self",
")",
":",
"image_str_tensor",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"shape",
"=",
"[",
"None",
"]",
")",
"# The CloudML Prediction API always \"feeds\" the Tensorflow graph with",
"# dynamic batch sizes e.g. (?,). decode_jpeg only processes scalar",
"# strings because it cannot guarantee a batch of images would have",
"# the same output size. We use tf.map_fn to give decode_jpeg a scalar",
"# string from dynamic batches.",
"image",
"=",
"tf",
".",
"map_fn",
"(",
"_util",
".",
"decode_and_resize",
",",
"image_str_tensor",
",",
"back_prop",
"=",
"False",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"# convert_image_dtype, also scales [0, uint8_max] -> [0 ,1).",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# Then shift images to [-1, 1) for Inception.",
"image",
"=",
"tf",
".",
"subtract",
"(",
"image",
",",
"0.5",
")",
"image",
"=",
"tf",
".",
"multiply",
"(",
"image",
",",
"2.0",
")",
"# Build Inception layers, which expect A tensor of type float from [-1, 1)",
"# and shape [batch_size, height, width, channels].",
"with",
"slim",
".",
"arg_scope",
"(",
"_inceptionlib",
".",
"inception_v3_arg_scope",
"(",
")",
")",
":",
"_",
",",
"end_points",
"=",
"_inceptionlib",
".",
"inception_v3",
"(",
"image",
",",
"is_training",
"=",
"False",
")",
"inception_embeddings",
"=",
"end_points",
"[",
"'PreLogits'",
"]",
"inception_embeddings",
"=",
"tf",
".",
"squeeze",
"(",
"inception_embeddings",
",",
"[",
"1",
",",
"2",
"]",
",",
"name",
"=",
"'SpatialSqueeze'",
")",
"return",
"image_str_tensor",
",",
"inception_embeddings"
] | Builds an inception graph and adds the necessary input & output tensors.
To use other Inception models modify this file. Also preprocessing must be
modified accordingly.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A placeholder for jpeg string batch that allows feeding the
Inception layer with image bytes for prediction.
inception_embeddings: The embeddings tensor. | [
"Builds",
"an",
"inception",
"graph",
"and",
"add",
"the",
"necessary",
"input",
"&",
"output",
"tensors",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L114-L152 |
5,059 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.build_graph | def build_graph(self, data_paths, batch_size, graph_mod):
"""Builds generic graph for training or eval."""
tensors = GraphReferences()
is_training = graph_mod == GraphMod.TRAIN
if data_paths:
_, tensors.examples = _util.read_examples(
data_paths,
batch_size,
shuffle=is_training,
num_epochs=None if is_training else 2)
else:
tensors.examples = tf.placeholder(tf.string, name='input', shape=(None,))
if graph_mod == GraphMod.PREDICT:
inception_input, inception_embeddings = self.build_inception_graph()
# Build the Inception graph. We later add final training layers
# to this graph. This is currently used only for prediction.
# For training, we use pre-processed data, so it is not needed.
embeddings = inception_embeddings
tensors.input_jpeg = inception_input
else:
# For training and evaluation we assume data is preprocessed, so the
# inputs are tf-examples.
# Generate placeholders for examples.
with tf.name_scope('inputs'):
feature_map = {
'image_uri':
tf.FixedLenFeature(
shape=[], dtype=tf.string, default_value=['']),
# Some images may have no labels. For those, we assume a default
# label. So the number of labels is label_count+1 for the default
# label.
'label':
tf.FixedLenFeature(
shape=[1], dtype=tf.int64,
default_value=[len(self.labels)]),
'embedding':
tf.FixedLenFeature(
shape=[BOTTLENECK_TENSOR_SIZE], dtype=tf.float32)
}
parsed = tf.parse_example(tensors.examples, features=feature_map)
labels = tf.squeeze(parsed['label'])
uris = tf.squeeze(parsed['image_uri'])
embeddings = parsed['embedding']
# We assume a default label, so the total number of labels is equal to
# label_count+1.
all_labels_count = len(self.labels) + 1
with tf.name_scope('final_ops'):
softmax, logits = self.add_final_training_ops(
embeddings,
all_labels_count,
BOTTLENECK_TENSOR_SIZE,
dropout_keep_prob=self.dropout if is_training else None)
# Prediction is the index of the label with the highest score. We are
# interested only in the top score.
prediction = tf.argmax(softmax, 1)
tensors.predictions = [prediction, softmax, embeddings]
if graph_mod == GraphMod.PREDICT:
return tensors
with tf.name_scope('evaluate'):
loss_value = loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
if is_training:
tensors.train, tensors.global_step = training(loss_value)
else:
tensors.global_step = tf.Variable(0, name='global_step', trainable=False)
tensors.uris = uris
# Add means across all batches.
loss_updates, loss_op = _util.loss(loss_value)
accuracy_updates, accuracy_op = _util.accuracy(logits, labels)
if not is_training:
tf.summary.scalar('accuracy', accuracy_op)
tf.summary.scalar('loss', loss_op)
tensors.metric_updates = loss_updates + accuracy_updates
tensors.metric_values = [loss_op, accuracy_op]
return tensors | python | def build_graph(self, data_paths, batch_size, graph_mod):
"""Builds generic graph for training or eval."""
tensors = GraphReferences()
is_training = graph_mod == GraphMod.TRAIN
if data_paths:
_, tensors.examples = _util.read_examples(
data_paths,
batch_size,
shuffle=is_training,
num_epochs=None if is_training else 2)
else:
tensors.examples = tf.placeholder(tf.string, name='input', shape=(None,))
if graph_mod == GraphMod.PREDICT:
inception_input, inception_embeddings = self.build_inception_graph()
# Build the Inception graph. We later add final training layers
# to this graph. This is currently used only for prediction.
# For training, we use pre-processed data, so it is not needed.
embeddings = inception_embeddings
tensors.input_jpeg = inception_input
else:
# For training and evaluation we assume data is preprocessed, so the
# inputs are tf-examples.
# Generate placeholders for examples.
with tf.name_scope('inputs'):
feature_map = {
'image_uri':
tf.FixedLenFeature(
shape=[], dtype=tf.string, default_value=['']),
# Some images may have no labels. For those, we assume a default
# label. So the number of labels is label_count+1 for the default
# label.
'label':
tf.FixedLenFeature(
shape=[1], dtype=tf.int64,
default_value=[len(self.labels)]),
'embedding':
tf.FixedLenFeature(
shape=[BOTTLENECK_TENSOR_SIZE], dtype=tf.float32)
}
parsed = tf.parse_example(tensors.examples, features=feature_map)
labels = tf.squeeze(parsed['label'])
uris = tf.squeeze(parsed['image_uri'])
embeddings = parsed['embedding']
# We assume a default label, so the total number of labels is equal to
# label_count+1.
all_labels_count = len(self.labels) + 1
with tf.name_scope('final_ops'):
softmax, logits = self.add_final_training_ops(
embeddings,
all_labels_count,
BOTTLENECK_TENSOR_SIZE,
dropout_keep_prob=self.dropout if is_training else None)
# Prediction is the index of the label with the highest score. We are
# interested only in the top score.
prediction = tf.argmax(softmax, 1)
tensors.predictions = [prediction, softmax, embeddings]
if graph_mod == GraphMod.PREDICT:
return tensors
with tf.name_scope('evaluate'):
loss_value = loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
if is_training:
tensors.train, tensors.global_step = training(loss_value)
else:
tensors.global_step = tf.Variable(0, name='global_step', trainable=False)
tensors.uris = uris
# Add means across all batches.
loss_updates, loss_op = _util.loss(loss_value)
accuracy_updates, accuracy_op = _util.accuracy(logits, labels)
if not is_training:
tf.summary.scalar('accuracy', accuracy_op)
tf.summary.scalar('loss', loss_op)
tensors.metric_updates = loss_updates + accuracy_updates
tensors.metric_values = [loss_op, accuracy_op]
return tensors | [
"def",
"build_graph",
"(",
"self",
",",
"data_paths",
",",
"batch_size",
",",
"graph_mod",
")",
":",
"tensors",
"=",
"GraphReferences",
"(",
")",
"is_training",
"=",
"graph_mod",
"==",
"GraphMod",
".",
"TRAIN",
"if",
"data_paths",
":",
"_",
",",
"tensors",
".",
"examples",
"=",
"_util",
".",
"read_examples",
"(",
"data_paths",
",",
"batch_size",
",",
"shuffle",
"=",
"is_training",
",",
"num_epochs",
"=",
"None",
"if",
"is_training",
"else",
"2",
")",
"else",
":",
"tensors",
".",
"examples",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"name",
"=",
"'input'",
",",
"shape",
"=",
"(",
"None",
",",
")",
")",
"if",
"graph_mod",
"==",
"GraphMod",
".",
"PREDICT",
":",
"inception_input",
",",
"inception_embeddings",
"=",
"self",
".",
"build_inception_graph",
"(",
")",
"# Build the Inception graph. We later add final training layers",
"# to this graph. This is currently used only for prediction.",
"# For training, we use pre-processed data, so it is not needed.",
"embeddings",
"=",
"inception_embeddings",
"tensors",
".",
"input_jpeg",
"=",
"inception_input",
"else",
":",
"# For training and evaluation we assume data is preprocessed, so the",
"# inputs are tf-examples.",
"# Generate placeholders for examples.",
"with",
"tf",
".",
"name_scope",
"(",
"'inputs'",
")",
":",
"feature_map",
"=",
"{",
"'image_uri'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"[",
"]",
",",
"dtype",
"=",
"tf",
".",
"string",
",",
"default_value",
"=",
"[",
"''",
"]",
")",
",",
"# Some images may have no labels. For those, we assume a default",
"# label. So the number of labels is label_count+1 for the default",
"# label.",
"'label'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"[",
"1",
"]",
",",
"dtype",
"=",
"tf",
".",
"int64",
",",
"default_value",
"=",
"[",
"len",
"(",
"self",
".",
"labels",
")",
"]",
")",
",",
"'embedding'",
":",
"tf",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"[",
"BOTTLENECK_TENSOR_SIZE",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"}",
"parsed",
"=",
"tf",
".",
"parse_example",
"(",
"tensors",
".",
"examples",
",",
"features",
"=",
"feature_map",
")",
"labels",
"=",
"tf",
".",
"squeeze",
"(",
"parsed",
"[",
"'label'",
"]",
")",
"uris",
"=",
"tf",
".",
"squeeze",
"(",
"parsed",
"[",
"'image_uri'",
"]",
")",
"embeddings",
"=",
"parsed",
"[",
"'embedding'",
"]",
"# We assume a default label, so the total number of labels is equal to",
"# label_count+1.",
"all_labels_count",
"=",
"len",
"(",
"self",
".",
"labels",
")",
"+",
"1",
"with",
"tf",
".",
"name_scope",
"(",
"'final_ops'",
")",
":",
"softmax",
",",
"logits",
"=",
"self",
".",
"add_final_training_ops",
"(",
"embeddings",
",",
"all_labels_count",
",",
"BOTTLENECK_TENSOR_SIZE",
",",
"dropout_keep_prob",
"=",
"self",
".",
"dropout",
"if",
"is_training",
"else",
"None",
")",
"# Prediction is the index of the label with the highest score. We are",
"# interested only in the top score.",
"prediction",
"=",
"tf",
".",
"argmax",
"(",
"softmax",
",",
"1",
")",
"tensors",
".",
"predictions",
"=",
"[",
"prediction",
",",
"softmax",
",",
"embeddings",
"]",
"if",
"graph_mod",
"==",
"GraphMod",
".",
"PREDICT",
":",
"return",
"tensors",
"with",
"tf",
".",
"name_scope",
"(",
"'evaluate'",
")",
":",
"loss_value",
"=",
"loss",
"(",
"logits",
",",
"labels",
")",
"# Add to the Graph the Ops that calculate and apply gradients.",
"if",
"is_training",
":",
"tensors",
".",
"train",
",",
"tensors",
".",
"global_step",
"=",
"training",
"(",
"loss_value",
")",
"else",
":",
"tensors",
".",
"global_step",
"=",
"tf",
".",
"Variable",
"(",
"0",
",",
"name",
"=",
"'global_step'",
",",
"trainable",
"=",
"False",
")",
"tensors",
".",
"uris",
"=",
"uris",
"# Add means across all batches.",
"loss_updates",
",",
"loss_op",
"=",
"_util",
".",
"loss",
"(",
"loss_value",
")",
"accuracy_updates",
",",
"accuracy_op",
"=",
"_util",
".",
"accuracy",
"(",
"logits",
",",
"labels",
")",
"if",
"not",
"is_training",
":",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'accuracy'",
",",
"accuracy_op",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'loss'",
",",
"loss_op",
")",
"tensors",
".",
"metric_updates",
"=",
"loss_updates",
"+",
"accuracy_updates",
"tensors",
".",
"metric_values",
"=",
"[",
"loss_op",
",",
"accuracy_op",
"]",
"return",
"tensors"
] | Builds generic graph for training or eval. | [
"Builds",
"generic",
"graph",
"for",
"training",
"or",
"eval",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L154-L237 |
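A note on the default-label convention used in build_graph above: the 'label' feature defaults to len(self.labels), so unlabeled examples parse to an extra class index and the final layer is sized accordingly. A tiny illustration of that bookkeeping (the label list here is hypothetical):

```python
# Minimal sketch of the default-label bookkeeping in build_graph.
# The label list is hypothetical; in the real model it comes from self.labels.
labels = ['daisy', 'rose', 'tulip']

default_label_index = len(labels)   # index parsed for examples with no label
all_labels_count = len(labels) + 1  # classifier output size includes the default class

print(default_label_index)  # 3
print(all_labels_count)     # 4
```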
5,060 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.restore_from_checkpoint | def restore_from_checkpoint(self, session, inception_checkpoint_file,
trained_checkpoint_file):
"""To restore model variables from the checkpoint file.
The graph is assumed to consist of an inception model and other
layers including a softmax and a fully connected layer. The former is
pre-trained and the latter is trained using the pre-processed data. So
we restore this from two checkpoint files.
Args:
session: The session to be used for restoring from checkpoint.
inception_checkpoint_file: Path to the checkpoint file for the Inception
graph.
trained_checkpoint_file: path to the trained checkpoint for the other
layers.
"""
inception_exclude_scopes = [
'InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step',
'final_ops'
]
reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
var_to_shape_map = reader.get_variable_to_shape_map()
# Get all variables to restore. Exclude Logits and AuxLogits because they
    # depend on the input data and we do not need to initialize them.
all_vars = tf.contrib.slim.get_variables_to_restore(
exclude=inception_exclude_scopes)
# Remove variables that do not exist in the inception checkpoint (for
# example the final softmax and fully-connected layers).
inception_vars = {
var.op.name: var
for var in all_vars if var.op.name in var_to_shape_map
}
inception_saver = tf.train.Saver(inception_vars)
inception_saver.restore(session, inception_checkpoint_file)
# Restore the rest of the variables from the trained checkpoint.
trained_vars = tf.contrib.slim.get_variables_to_restore(
exclude=inception_exclude_scopes + inception_vars.keys())
trained_saver = tf.train.Saver(trained_vars)
trained_saver.restore(session, trained_checkpoint_file) | python | def restore_from_checkpoint(self, session, inception_checkpoint_file,
trained_checkpoint_file):
"""To restore model variables from the checkpoint file.
The graph is assumed to consist of an inception model and other
layers including a softmax and a fully connected layer. The former is
pre-trained and the latter is trained using the pre-processed data. So
we restore this from two checkpoint files.
Args:
session: The session to be used for restoring from checkpoint.
inception_checkpoint_file: Path to the checkpoint file for the Inception
graph.
trained_checkpoint_file: path to the trained checkpoint for the other
layers.
"""
inception_exclude_scopes = [
'InceptionV3/AuxLogits', 'InceptionV3/Logits', 'global_step',
'final_ops'
]
reader = tf.train.NewCheckpointReader(inception_checkpoint_file)
var_to_shape_map = reader.get_variable_to_shape_map()
# Get all variables to restore. Exclude Logits and AuxLogits because they
    # depend on the input data and we do not need to initialize them.
all_vars = tf.contrib.slim.get_variables_to_restore(
exclude=inception_exclude_scopes)
# Remove variables that do not exist in the inception checkpoint (for
# example the final softmax and fully-connected layers).
inception_vars = {
var.op.name: var
for var in all_vars if var.op.name in var_to_shape_map
}
inception_saver = tf.train.Saver(inception_vars)
inception_saver.restore(session, inception_checkpoint_file)
# Restore the rest of the variables from the trained checkpoint.
trained_vars = tf.contrib.slim.get_variables_to_restore(
exclude=inception_exclude_scopes + inception_vars.keys())
trained_saver = tf.train.Saver(trained_vars)
trained_saver.restore(session, trained_checkpoint_file) | [
"def",
"restore_from_checkpoint",
"(",
"self",
",",
"session",
",",
"inception_checkpoint_file",
",",
"trained_checkpoint_file",
")",
":",
"inception_exclude_scopes",
"=",
"[",
"'InceptionV3/AuxLogits'",
",",
"'InceptionV3/Logits'",
",",
"'global_step'",
",",
"'final_ops'",
"]",
"reader",
"=",
"tf",
".",
"train",
".",
"NewCheckpointReader",
"(",
"inception_checkpoint_file",
")",
"var_to_shape_map",
"=",
"reader",
".",
"get_variable_to_shape_map",
"(",
")",
"# Get all variables to restore. Exclude Logits and AuxLogits because they",
"# depend on the input data and we do not need to intialize them.",
"all_vars",
"=",
"tf",
".",
"contrib",
".",
"slim",
".",
"get_variables_to_restore",
"(",
"exclude",
"=",
"inception_exclude_scopes",
")",
"# Remove variables that do not exist in the inception checkpoint (for",
"# example the final softmax and fully-connected layers).",
"inception_vars",
"=",
"{",
"var",
".",
"op",
".",
"name",
":",
"var",
"for",
"var",
"in",
"all_vars",
"if",
"var",
".",
"op",
".",
"name",
"in",
"var_to_shape_map",
"}",
"inception_saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"inception_vars",
")",
"inception_saver",
".",
"restore",
"(",
"session",
",",
"inception_checkpoint_file",
")",
"# Restore the rest of the variables from the trained checkpoint.",
"trained_vars",
"=",
"tf",
".",
"contrib",
".",
"slim",
".",
"get_variables_to_restore",
"(",
"exclude",
"=",
"inception_exclude_scopes",
"+",
"inception_vars",
".",
"keys",
"(",
")",
")",
"trained_saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
"trained_vars",
")",
"trained_saver",
".",
"restore",
"(",
"session",
",",
"trained_checkpoint_file",
")"
] | To restore model variables from the checkpoint file.
The graph is assumed to consist of an inception model and other
layers including a softmax and a fully connected layer. The former is
pre-trained and the latter is trained using the pre-processed data. So
we restore this from two checkpoint files.
Args:
session: The session to be used for restoring from checkpoint.
inception_checkpoint_file: Path to the checkpoint file for the Inception
graph.
trained_checkpoint_file: path to the trained checkpoint for the other
layers. | [
"To",
"restore",
"model",
"variables",
"from",
"the",
"checkpoint",
"file",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L245-L284 |
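The restore logic above splits the graph's variables between the Inception checkpoint and the trained checkpoint by checking which names the Inception checkpoint actually contains. Below is a TensorFlow-free sketch of that filtering step; the variable names are hypothetical stand-ins for real tf variables.

```python
# TensorFlow-free sketch of the two-checkpoint split in restore_from_checkpoint.
# Plain strings stand in for tf variables; all names here are hypothetical.
var_to_shape_map = {  # what the Inception checkpoint reader reports
    'InceptionV3/Conv2d_1a_3x3/weights': [3, 3, 3, 32],
    'InceptionV3/Conv2d_1a_3x3/BatchNorm/beta': [32],
}
all_vars = [  # what the assembled graph contains (minus the excluded scopes)
    'InceptionV3/Conv2d_1a_3x3/weights',
    'InceptionV3/Conv2d_1a_3x3/BatchNorm/beta',
    'final_ops/weights',
    'final_ops/biases',
]

# Restored from the Inception checkpoint: only names the checkpoint knows about.
inception_vars = {name: name for name in all_vars if name in var_to_shape_map}
# Everything else is restored from the trained checkpoint.
trained_vars = [name for name in all_vars if name not in inception_vars]

print(sorted(inception_vars))  # the two InceptionV3/... names
print(trained_vars)            # ['final_ops/weights', 'final_ops/biases']
```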
5,061 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.build_prediction_graph | def build_prediction_graph(self):
"""Builds prediction graph and registers appropriate endpoints."""
tensors = self.build_graph(None, 1, GraphMod.PREDICT)
keys_placeholder = tf.placeholder(tf.string, shape=[None])
inputs = {
'key': keys_placeholder,
'image_bytes': tensors.input_jpeg
}
# To extract the id, we need to add the identity function.
keys = tf.identity(keys_placeholder)
labels = self.labels + ['UNKNOWN']
labels_tensor = tf.constant(labels)
labels_table = tf.contrib.lookup.index_to_string_table_from_tensor(mapping=labels_tensor)
predicted_label = labels_table.lookup(tensors.predictions[0])
# Need to duplicate the labels by num_of_instances so the output is one batch
# (all output members share the same outer dimension).
# The labels are needed for client to match class scores list.
labels_tensor = tf.expand_dims(tf.constant(labels), 0)
num_instance = tf.shape(keys)
labels_tensors_n = tf.tile(labels_tensor, tf.concat(axis=0, values=[num_instance, [1]]))
outputs = {
'key': keys,
'prediction': predicted_label,
'labels': labels_tensors_n,
'scores': tensors.predictions[1],
}
return inputs, outputs | python | def build_prediction_graph(self):
"""Builds prediction graph and registers appropriate endpoints."""
tensors = self.build_graph(None, 1, GraphMod.PREDICT)
keys_placeholder = tf.placeholder(tf.string, shape=[None])
inputs = {
'key': keys_placeholder,
'image_bytes': tensors.input_jpeg
}
# To extract the id, we need to add the identity function.
keys = tf.identity(keys_placeholder)
labels = self.labels + ['UNKNOWN']
labels_tensor = tf.constant(labels)
labels_table = tf.contrib.lookup.index_to_string_table_from_tensor(mapping=labels_tensor)
predicted_label = labels_table.lookup(tensors.predictions[0])
# Need to duplicate the labels by num_of_instances so the output is one batch
# (all output members share the same outer dimension).
# The labels are needed for client to match class scores list.
labels_tensor = tf.expand_dims(tf.constant(labels), 0)
num_instance = tf.shape(keys)
labels_tensors_n = tf.tile(labels_tensor, tf.concat(axis=0, values=[num_instance, [1]]))
outputs = {
'key': keys,
'prediction': predicted_label,
'labels': labels_tensors_n,
'scores': tensors.predictions[1],
}
return inputs, outputs | [
"def",
"build_prediction_graph",
"(",
"self",
")",
":",
"tensors",
"=",
"self",
".",
"build_graph",
"(",
"None",
",",
"1",
",",
"GraphMod",
".",
"PREDICT",
")",
"keys_placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"string",
",",
"shape",
"=",
"[",
"None",
"]",
")",
"inputs",
"=",
"{",
"'key'",
":",
"keys_placeholder",
",",
"'image_bytes'",
":",
"tensors",
".",
"input_jpeg",
"}",
"# To extract the id, we need to add the identity function.",
"keys",
"=",
"tf",
".",
"identity",
"(",
"keys_placeholder",
")",
"labels",
"=",
"self",
".",
"labels",
"+",
"[",
"'UNKNOWN'",
"]",
"labels_tensor",
"=",
"tf",
".",
"constant",
"(",
"labels",
")",
"labels_table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_to_string_table_from_tensor",
"(",
"mapping",
"=",
"labels_tensor",
")",
"predicted_label",
"=",
"labels_table",
".",
"lookup",
"(",
"tensors",
".",
"predictions",
"[",
"0",
"]",
")",
"# Need to duplicate the labels by num_of_instances so the output is one batch",
"# (all output members share the same outer dimension).",
"# The labels are needed for client to match class scores list.",
"labels_tensor",
"=",
"tf",
".",
"expand_dims",
"(",
"tf",
".",
"constant",
"(",
"labels",
")",
",",
"0",
")",
"num_instance",
"=",
"tf",
".",
"shape",
"(",
"keys",
")",
"labels_tensors_n",
"=",
"tf",
".",
"tile",
"(",
"labels_tensor",
",",
"tf",
".",
"concat",
"(",
"axis",
"=",
"0",
",",
"values",
"=",
"[",
"num_instance",
",",
"[",
"1",
"]",
"]",
")",
")",
"outputs",
"=",
"{",
"'key'",
":",
"keys",
",",
"'prediction'",
":",
"predicted_label",
",",
"'labels'",
":",
"labels_tensors_n",
",",
"'scores'",
":",
"tensors",
".",
"predictions",
"[",
"1",
"]",
",",
"}",
"return",
"inputs",
",",
"outputs"
] | Builds prediction graph and registers appropriate endpoints. | [
"Builds",
"prediction",
"graph",
"and",
"registers",
"appropriate",
"endpoints",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L286-L317 |
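build_prediction_graph above tiles the label list so every instance in a batch carries its own copy, because all outputs must share the outer batch dimension. The same idea in plain Python, with a hypothetical label list and batch size:

```python
# Plain-Python sketch of the label tiling done with tf.tile in build_prediction_graph.
labels = ['daisy', 'rose', 'UNKNOWN']  # hypothetical labels plus the default class
num_instances = 2                      # hypothetical batch size (tf.shape(keys) in the graph)

# Equivalent of tf.tile(tf.expand_dims(labels, 0), [num_instances, 1]).
labels_per_instance = [list(labels) for _ in range(num_instances)]

print(labels_per_instance)
# [['daisy', 'rose', 'UNKNOWN'], ['daisy', 'rose', 'UNKNOWN']]
```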
5,062 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.export | def export(self, last_checkpoint, output_dir):
"""Builds a prediction graph and xports the model.
Args:
last_checkpoint: Path to the latest checkpoint file from training.
output_dir: Path to the folder to be used to output the model.
"""
logging.info('Exporting prediction graph to %s', output_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Build and save prediction meta graph and trained variable values.
inputs, outputs = self.build_prediction_graph()
signature_def_map = {
'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
}
init_op = tf.global_variables_initializer()
sess.run(init_op)
self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
last_checkpoint)
init_op_serving = control_flow_ops.group(
variables.local_variables_initializer(),
tf.tables_initializer())
builder = saved_model_builder.SavedModelBuilder(output_dir)
builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map=signature_def_map,
legacy_init_op=init_op_serving)
builder.save(False) | python | def export(self, last_checkpoint, output_dir):
"""Builds a prediction graph and xports the model.
Args:
last_checkpoint: Path to the latest checkpoint file from training.
output_dir: Path to the folder to be used to output the model.
"""
logging.info('Exporting prediction graph to %s', output_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Build and save prediction meta graph and trained variable values.
inputs, outputs = self.build_prediction_graph()
signature_def_map = {
'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
}
init_op = tf.global_variables_initializer()
sess.run(init_op)
self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
last_checkpoint)
init_op_serving = control_flow_ops.group(
variables.local_variables_initializer(),
tf.tables_initializer())
builder = saved_model_builder.SavedModelBuilder(output_dir)
builder.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map=signature_def_map,
legacy_init_op=init_op_serving)
builder.save(False) | [
"def",
"export",
"(",
"self",
",",
"last_checkpoint",
",",
"output_dir",
")",
":",
"logging",
".",
"info",
"(",
"'Exporting prediction graph to %s'",
",",
"output_dir",
")",
"with",
"tf",
".",
"Session",
"(",
"graph",
"=",
"tf",
".",
"Graph",
"(",
")",
")",
"as",
"sess",
":",
"# Build and save prediction meta graph and trained variable values.",
"inputs",
",",
"outputs",
"=",
"self",
".",
"build_prediction_graph",
"(",
")",
"signature_def_map",
"=",
"{",
"'serving_default'",
":",
"signature_def_utils",
".",
"predict_signature_def",
"(",
"inputs",
",",
"outputs",
")",
"}",
"init_op",
"=",
"tf",
".",
"global_variables_initializer",
"(",
")",
"sess",
".",
"run",
"(",
"init_op",
")",
"self",
".",
"restore_from_checkpoint",
"(",
"sess",
",",
"self",
".",
"inception_checkpoint_file",
",",
"last_checkpoint",
")",
"init_op_serving",
"=",
"control_flow_ops",
".",
"group",
"(",
"variables",
".",
"local_variables_initializer",
"(",
")",
",",
"tf",
".",
"tables_initializer",
"(",
")",
")",
"builder",
"=",
"saved_model_builder",
".",
"SavedModelBuilder",
"(",
"output_dir",
")",
"builder",
".",
"add_meta_graph_and_variables",
"(",
"sess",
",",
"[",
"tag_constants",
".",
"SERVING",
"]",
",",
"signature_def_map",
"=",
"signature_def_map",
",",
"legacy_init_op",
"=",
"init_op_serving",
")",
"builder",
".",
"save",
"(",
"False",
")"
] | Builds a prediction graph and exports the model.
Args:
last_checkpoint: Path to the latest checkpoint file from training.
output_dir: Path to the folder to be used to output the model. | [
"Builds",
"a",
"prediction",
"graph",
"and",
"xports",
"the",
"model",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L319-L346 |
5,063 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_model.py | Model.format_metric_values | def format_metric_values(self, metric_values):
"""Formats metric values - used for logging purpose."""
# Early in training, metric_values may actually be None.
loss_str = 'N/A'
accuracy_str = 'N/A'
try:
loss_str = 'loss: %.3f' % metric_values[0]
accuracy_str = 'accuracy: %.3f' % metric_values[1]
except (TypeError, IndexError):
pass
return '%s, %s' % (loss_str, accuracy_str) | python | def format_metric_values(self, metric_values):
"""Formats metric values - used for logging purpose."""
# Early in training, metric_values may actually be None.
loss_str = 'N/A'
accuracy_str = 'N/A'
try:
loss_str = 'loss: %.3f' % metric_values[0]
accuracy_str = 'accuracy: %.3f' % metric_values[1]
except (TypeError, IndexError):
pass
return '%s, %s' % (loss_str, accuracy_str) | [
"def",
"format_metric_values",
"(",
"self",
",",
"metric_values",
")",
":",
"# Early in training, metric_values may actually be None.",
"loss_str",
"=",
"'N/A'",
"accuracy_str",
"=",
"'N/A'",
"try",
":",
"loss_str",
"=",
"'loss: %.3f'",
"%",
"metric_values",
"[",
"0",
"]",
"accuracy_str",
"=",
"'accuracy: %.3f'",
"%",
"metric_values",
"[",
"1",
"]",
"except",
"(",
"TypeError",
",",
"IndexError",
")",
":",
"pass",
"return",
"'%s, %s'",
"%",
"(",
"loss_str",
",",
"accuracy_str",
")"
] | Formats metric values - used for logging purposes. | [
"Formats",
"metric",
"values",
"-",
"used",
"for",
"logging",
"purpose",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_model.py#L348-L360 |
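The try/except in format_metric_values is what lets it fall back to 'N/A' early in training, when metric_values may still be None or incomplete. A standalone copy of that logic, runnable on its own:

```python
# Standalone copy of the formatting logic in Model.format_metric_values.
def format_metric_values(metric_values):
  loss_str = 'N/A'
  accuracy_str = 'N/A'
  try:
    loss_str = 'loss: %.3f' % metric_values[0]
    accuracy_str = 'accuracy: %.3f' % metric_values[1]
  except (TypeError, IndexError):
    pass
  return '%s, %s' % (loss_str, accuracy_str)

print(format_metric_values(None))          # 'N/A, N/A' (indexing None raises TypeError)
print(format_metric_values([0.251]))       # 'loss: 0.251, N/A' (IndexError on accuracy)
print(format_metric_values([0.251, 0.9]))  # 'loss: 0.251, accuracy: 0.900'
```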
5,064 | googledatalab/pydatalab | google/datalab/ml/_util.py | package_and_copy | def package_and_copy(package_root_dir, setup_py, output_tar_path):
"""Repackage an CloudML package and copy it to a staging dir.
Args:
package_root_dir: the root dir to install package from. Usually you can get the path
from inside your module using a relative path to __file__.
setup_py: the path to setup.py.
output_tar_path: the GCS path of the output tarball package.
Raises:
ValueError if output_tar_path is not a GCS path, or setup_py does not exist.
"""
if not output_tar_path.startswith('gs://'):
raise ValueError('output_tar_path needs to be a GCS path.')
if not os.path.isfile(setup_py):
raise ValueError('Supplied file "%s" does not exist.' % setup_py)
dest_setup_py = os.path.join(package_root_dir, 'setup.py')
if dest_setup_py != setup_py:
# setuptools requires a "setup.py" in the current dir, so copy setup.py there.
# Also check if there is an existing setup.py. If so, back it up.
if os.path.isfile(dest_setup_py):
os.rename(dest_setup_py, dest_setup_py + '._bak_')
shutil.copyfile(setup_py, dest_setup_py)
tempdir = tempfile.mkdtemp()
previous_cwd = os.getcwd()
os.chdir(package_root_dir)
try:
# Repackage.
sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
subprocess.check_call(sdist)
# Copy to GCS.
source = os.path.join(tempdir, '*.tar.gz')
gscopy = ['gsutil', 'cp', source, output_tar_path]
subprocess.check_call(gscopy)
return
finally:
os.chdir(previous_cwd)
if dest_setup_py != setup_py:
os.remove(dest_setup_py)
if os.path.isfile(dest_setup_py + '._bak_'):
os.rename(dest_setup_py + '._bak_', dest_setup_py)
shutil.rmtree(tempdir) | python | def package_and_copy(package_root_dir, setup_py, output_tar_path):
"""Repackage an CloudML package and copy it to a staging dir.
Args:
package_root_dir: the root dir to install package from. Usually you can get the path
from inside your module using a relative path to __file__.
setup_py: the path to setup.py.
output_tar_path: the GCS path of the output tarball package.
Raises:
ValueError if output_tar_path is not a GCS path, or setup_py does not exist.
"""
if not output_tar_path.startswith('gs://'):
raise ValueError('output_tar_path needs to be a GCS path.')
if not os.path.isfile(setup_py):
raise ValueError('Supplied file "%s" does not exist.' % setup_py)
dest_setup_py = os.path.join(package_root_dir, 'setup.py')
if dest_setup_py != setup_py:
# setuptools requires a "setup.py" in the current dir, so copy setup.py there.
# Also check if there is an existing setup.py. If so, back it up.
if os.path.isfile(dest_setup_py):
os.rename(dest_setup_py, dest_setup_py + '._bak_')
shutil.copyfile(setup_py, dest_setup_py)
tempdir = tempfile.mkdtemp()
previous_cwd = os.getcwd()
os.chdir(package_root_dir)
try:
# Repackage.
sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
subprocess.check_call(sdist)
# Copy to GCS.
source = os.path.join(tempdir, '*.tar.gz')
gscopy = ['gsutil', 'cp', source, output_tar_path]
subprocess.check_call(gscopy)
return
finally:
os.chdir(previous_cwd)
if dest_setup_py != setup_py:
os.remove(dest_setup_py)
if os.path.isfile(dest_setup_py + '._bak_'):
os.rename(dest_setup_py + '._bak_', dest_setup_py)
shutil.rmtree(tempdir) | [
"def",
"package_and_copy",
"(",
"package_root_dir",
",",
"setup_py",
",",
"output_tar_path",
")",
":",
"if",
"not",
"output_tar_path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"raise",
"ValueError",
"(",
"'output_tar_path needs to be a GCS path.'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"setup_py",
")",
":",
"raise",
"ValueError",
"(",
"'Supplied file \"%s\" does not exist.'",
"%",
"setup_py",
")",
"dest_setup_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package_root_dir",
",",
"'setup.py'",
")",
"if",
"dest_setup_py",
"!=",
"setup_py",
":",
"# setuptools requires a \"setup.py\" in the current dir, so copy setup.py there.",
"# Also check if there is an existing setup.py. If so, back it up.",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"dest_setup_py",
")",
":",
"os",
".",
"rename",
"(",
"dest_setup_py",
",",
"dest_setup_py",
"+",
"'._bak_'",
")",
"shutil",
".",
"copyfile",
"(",
"setup_py",
",",
"dest_setup_py",
")",
"tempdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"previous_cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"package_root_dir",
")",
"try",
":",
"# Repackage.",
"sdist",
"=",
"[",
"'python'",
",",
"dest_setup_py",
",",
"'sdist'",
",",
"'--format=gztar'",
",",
"'-d'",
",",
"tempdir",
"]",
"subprocess",
".",
"check_call",
"(",
"sdist",
")",
"# Copy to GCS.",
"source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempdir",
",",
"'*.tar.gz'",
")",
"gscopy",
"=",
"[",
"'gsutil'",
",",
"'cp'",
",",
"source",
",",
"output_tar_path",
"]",
"subprocess",
".",
"check_call",
"(",
"gscopy",
")",
"return",
"finally",
":",
"os",
".",
"chdir",
"(",
"previous_cwd",
")",
"if",
"dest_setup_py",
"!=",
"setup_py",
":",
"os",
".",
"remove",
"(",
"dest_setup_py",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"dest_setup_py",
"+",
"'._bak_'",
")",
":",
"os",
".",
"rename",
"(",
"dest_setup_py",
"+",
"'._bak_'",
",",
"dest_setup_py",
")",
"shutil",
".",
"rmtree",
"(",
"tempdir",
")"
] | Repackage a CloudML package and copy it to a staging dir.
Args:
package_root_dir: the root dir to install package from. Usually you can get the path
from inside your module using a relative path to __file__.
setup_py: the path to setup.py.
output_tar_path: the GCS path of the output tarball package.
Raises:
ValueError if output_tar_path is not a GCS path, or setup_py does not exist. | [
"Repackage",
"an",
"CloudML",
"package",
"and",
"copy",
"it",
"to",
"a",
"staging",
"dir",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_util.py#L45-L88 |
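package_and_copy shells out twice: once to setuptools for the sdist and once to gsutil for the copy. The sketch below only builds those command lines with hypothetical paths and prints them instead of executing anything, which can help when debugging the call:

```python
# Sketch of the two command lines package_and_copy assembles; the paths are
# hypothetical and the commands are printed rather than executed.
import os
import tempfile

package_root_dir = '/tmp/my_package'  # hypothetical package root
dest_setup_py = os.path.join(package_root_dir, 'setup.py')
output_tar_path = 'gs://my-bucket/staging/my_package.tar.gz'  # must start with gs://
tempdir = tempfile.mkdtemp()

sdist = ['python', dest_setup_py, 'sdist', '--format=gztar', '-d', tempdir]
gscopy = ['gsutil', 'cp', os.path.join(tempdir, '*.tar.gz'), output_tar_path]

print(' '.join(sdist))
print(' '.join(gscopy))
```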
5,065 | googledatalab/pydatalab | google/datalab/ml/_util.py | read_file_to_string | def read_file_to_string(path):
"""Read a file into a string."""
bytes_string = tf.gfile.Open(path, 'r').read()
return dlutils.python_portable_string(bytes_string) | python | def read_file_to_string(path):
"""Read a file into a string."""
bytes_string = tf.gfile.Open(path, 'r').read()
return dlutils.python_portable_string(bytes_string) | [
"def",
"read_file_to_string",
"(",
"path",
")",
":",
"bytes_string",
"=",
"tf",
".",
"gfile",
".",
"Open",
"(",
"path",
",",
"'r'",
")",
".",
"read",
"(",
")",
"return",
"dlutils",
".",
"python_portable_string",
"(",
"bytes_string",
")"
] | Read a file into a string. | [
"Read",
"a",
"file",
"into",
"a",
"string",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_util.py#L91-L94 |
5,066 | googledatalab/pydatalab | datalab/data/commands/_sql.py | _date | def _date(val, offset=None):
""" A special pseudo-type for pipeline arguments.
This allows us to parse dates as Python datetimes, including special values like 'now'
and 'today', as well as apply offsets to the datetime.
Args:
val: a string containing the value for the datetime. This can be 'now', 'today' (midnight at
start of day), 'yesterday' (midnight at start of yesterday), or a formatted date that
will be passed to the datetime constructor. Note that 'now' etc are assumed to
be in UTC.
offset: for date arguments a string containing a comma-separated list of
relative offsets to apply of the form <n><u> where <n> is an integer and
      <u> is a single character unit (d=day, m=month, y=year, h=hour, M=minute).
Returns:
A Python datetime resulting from starting at <val> and applying the sequence of deltas
specified in <offset>.
"""
if val is None:
return val
if val == '' or val == 'now':
when = datetime.datetime.utcnow()
elif val == 'today':
dt = datetime.datetime.utcnow()
when = datetime.datetime(dt.year, dt.month, dt.day)
elif val == 'yesterday':
dt = datetime.datetime.utcnow() - datetime.timedelta(1)
when = datetime.datetime(dt.year, dt.month, dt.day)
else:
when = datetime.datetime.strptime(val, "%Y%m%d")
if offset is not None:
for part in offset.split(','):
unit = part[-1]
quantity = int(part[:-1])
# We can use timedelta for days and under, but not for years and months
if unit == 'y':
when = datetime.datetime(year=when.year + quantity, month=when.month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'm':
new_year = when.year
new_month = when.month + quantity
if new_month < 1:
new_month = -new_month
new_year += 1 + (new_month // 12)
new_month = 12 - new_month % 12
elif new_month > 12:
new_year += (new_month - 1) // 12
new_month = 1 + (new_month - 1) % 12
when = datetime.datetime(year=new_year, month=new_month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'd':
when += datetime.timedelta(days=quantity)
elif unit == 'h':
when += datetime.timedelta(hours=quantity)
elif unit == 'M':
when += datetime.timedelta(minutes=quantity)
return when | python | def _date(val, offset=None):
""" A special pseudo-type for pipeline arguments.
This allows us to parse dates as Python datetimes, including special values like 'now'
and 'today', as well as apply offsets to the datetime.
Args:
val: a string containing the value for the datetime. This can be 'now', 'today' (midnight at
start of day), 'yesterday' (midnight at start of yesterday), or a formatted date that
will be passed to the datetime constructor. Note that 'now' etc are assumed to
be in UTC.
offset: for date arguments a string containing a comma-separated list of
relative offsets to apply of the form <n><u> where <n> is an integer and
      <u> is a single character unit (d=day, m=month, y=year, h=hour, M=minute).
Returns:
A Python datetime resulting from starting at <val> and applying the sequence of deltas
specified in <offset>.
"""
if val is None:
return val
if val == '' or val == 'now':
when = datetime.datetime.utcnow()
elif val == 'today':
dt = datetime.datetime.utcnow()
when = datetime.datetime(dt.year, dt.month, dt.day)
elif val == 'yesterday':
dt = datetime.datetime.utcnow() - datetime.timedelta(1)
when = datetime.datetime(dt.year, dt.month, dt.day)
else:
when = datetime.datetime.strptime(val, "%Y%m%d")
if offset is not None:
for part in offset.split(','):
unit = part[-1]
quantity = int(part[:-1])
# We can use timedelta for days and under, but not for years and months
if unit == 'y':
when = datetime.datetime(year=when.year + quantity, month=when.month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'm':
new_year = when.year
new_month = when.month + quantity
if new_month < 1:
new_month = -new_month
new_year += 1 + (new_month // 12)
new_month = 12 - new_month % 12
elif new_month > 12:
new_year += (new_month - 1) // 12
new_month = 1 + (new_month - 1) % 12
when = datetime.datetime(year=new_year, month=new_month, day=when.day,
hour=when.hour, minute=when.minute)
elif unit == 'd':
when += datetime.timedelta(days=quantity)
elif unit == 'h':
when += datetime.timedelta(hours=quantity)
elif unit == 'M':
when += datetime.timedelta(minutes=quantity)
return when | [
"def",
"_date",
"(",
"val",
",",
"offset",
"=",
"None",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"val",
"if",
"val",
"==",
"''",
"or",
"val",
"==",
"'now'",
":",
"when",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"elif",
"val",
"==",
"'today'",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
"elif",
"val",
"==",
"'yesterday'",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"1",
")",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"dt",
".",
"year",
",",
"dt",
".",
"month",
",",
"dt",
".",
"day",
")",
"else",
":",
"when",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"val",
",",
"\"%Y%m%d\"",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"for",
"part",
"in",
"offset",
".",
"split",
"(",
"','",
")",
":",
"unit",
"=",
"part",
"[",
"-",
"1",
"]",
"quantity",
"=",
"int",
"(",
"part",
"[",
":",
"-",
"1",
"]",
")",
"# We can use timedelta for days and under, but not for years and months",
"if",
"unit",
"==",
"'y'",
":",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"year",
"=",
"when",
".",
"year",
"+",
"quantity",
",",
"month",
"=",
"when",
".",
"month",
",",
"day",
"=",
"when",
".",
"day",
",",
"hour",
"=",
"when",
".",
"hour",
",",
"minute",
"=",
"when",
".",
"minute",
")",
"elif",
"unit",
"==",
"'m'",
":",
"new_year",
"=",
"when",
".",
"year",
"new_month",
"=",
"when",
".",
"month",
"+",
"quantity",
"if",
"new_month",
"<",
"1",
":",
"new_month",
"=",
"-",
"new_month",
"new_year",
"+=",
"1",
"+",
"(",
"new_month",
"//",
"12",
")",
"new_month",
"=",
"12",
"-",
"new_month",
"%",
"12",
"elif",
"new_month",
">",
"12",
":",
"new_year",
"+=",
"(",
"new_month",
"-",
"1",
")",
"//",
"12",
"new_month",
"=",
"1",
"+",
"(",
"new_month",
"-",
"1",
")",
"%",
"12",
"when",
"=",
"datetime",
".",
"datetime",
"(",
"year",
"=",
"new_year",
",",
"month",
"=",
"new_month",
",",
"day",
"=",
"when",
".",
"day",
",",
"hour",
"=",
"when",
".",
"hour",
",",
"minute",
"=",
"when",
".",
"minute",
")",
"elif",
"unit",
"==",
"'d'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"quantity",
")",
"elif",
"unit",
"==",
"'h'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"quantity",
")",
"elif",
"unit",
"==",
"'M'",
":",
"when",
"+=",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"quantity",
")",
"return",
"when"
] | A special pseudo-type for pipeline arguments.
This allows us to parse dates as Python datetimes, including special values like 'now'
and 'today', as well as apply offsets to the datetime.
Args:
val: a string containing the value for the datetime. This can be 'now', 'today' (midnight at
start of day), 'yesterday' (midnight at start of yesterday), or a formatted date that
will be passed to the datetime constructor. Note that 'now' etc are assumed to
be in UTC.
offset: for date arguments a string containing a comma-separated list of
relative offsets to apply of the form <n><u> where <n> is an integer and
      <u> is a single character unit (d=day, m=month, y=year, h=hour, M=minute).
Returns:
A Python datetime resulting from starting at <val> and applying the sequence of deltas
specified in <offset>. | [
"A",
"special",
"pseudo",
"-",
"type",
"for",
"pipeline",
"arguments",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L119-L177 |
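The month branch of the offset handling in _date is the subtle part, since timedelta cannot express months. The sketch below lifts that rollover arithmetic into a standalone helper (the helper name and dates are hypothetical, the arithmetic is copied from the function body) and checks it on a positive and an in-year negative offset:

```python
import datetime

def add_months(when, quantity):
  # Same rollover arithmetic as the 'm' branch of _date.
  new_year = when.year
  new_month = when.month + quantity
  if new_month < 1:
    new_month = -new_month
    new_year += 1 + (new_month // 12)
    new_month = 12 - new_month % 12
  elif new_month > 12:
    new_year += (new_month - 1) // 12
    new_month = 1 + (new_month - 1) % 12
  return datetime.datetime(new_year, new_month, when.day, when.hour, when.minute)

start = datetime.datetime(2016, 11, 15)  # as produced by _date('20161115')
print(add_months(start, 3))   # 2017-02-15 00:00:00  (wraps into the next year)
print(add_months(start, -3))  # 2016-08-15 00:00:00
```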
5,067 | googledatalab/pydatalab | datalab/data/commands/_sql.py | _make_string_formatter | def _make_string_formatter(f, offset=None):
""" A closure-izer for string arguments that include a format and possibly an offset. """
format = f
delta = offset
return lambda v: time.strftime(format, (_date(v, delta)).timetuple()) | python | def _make_string_formatter(f, offset=None):
""" A closure-izer for string arguments that include a format and possibly an offset. """
format = f
delta = offset
return lambda v: time.strftime(format, (_date(v, delta)).timetuple()) | [
"def",
"_make_string_formatter",
"(",
"f",
",",
"offset",
"=",
"None",
")",
":",
"format",
"=",
"f",
"delta",
"=",
"offset",
"return",
"lambda",
"v",
":",
"time",
".",
"strftime",
"(",
"format",
",",
"(",
"_date",
"(",
"v",
",",
"delta",
")",
")",
".",
"timetuple",
"(",
")",
")"
] | A closure-izer for string arguments that include a format and possibly an offset. | [
"A",
"closure",
"-",
"izer",
"for",
"string",
"arguments",
"that",
"include",
"a",
"format",
"and",
"possibly",
"an",
"offset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L189-L193 |
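_make_string_formatter just captures a strftime format (and optionally an offset) in a closure and applies it to the resolved date's timetuple. A simplified, self-contained version that skips the _date offset handling; the format strings and date below are hypothetical:

```python
import datetime
import time

def make_string_formatter(fmt):
  # Simplified _make_string_formatter: capture the format in a closure and
  # apply it to a datetime's timetuple; the offset handling of _date is omitted.
  return lambda when: time.strftime(fmt, when.timetuple())

fmt_day = make_string_formatter('%Y%m%d')
fmt_table = make_string_formatter('logs_%Y%m%d')

d = datetime.datetime(2016, 11, 15)  # hypothetical resolved date
print(fmt_day(d))    # 20161115
print(fmt_table(d))  # logs_20161115
```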
5,068 | googledatalab/pydatalab | datalab/data/commands/_sql.py | _make_table_formatter | def _make_table_formatter(f, offset=None):
""" A closure-izer for table arguments that include a format and possibly an offset. """
format = f
delta = offset
return lambda v: _resolve_table(v, format, delta) | python | def _make_table_formatter(f, offset=None):
""" A closure-izer for table arguments that include a format and possibly an offset. """
format = f
delta = offset
return lambda v: _resolve_table(v, format, delta) | [
"def",
"_make_table_formatter",
"(",
"f",
",",
"offset",
"=",
"None",
")",
":",
"format",
"=",
"f",
"delta",
"=",
"offset",
"return",
"lambda",
"v",
":",
"_resolve_table",
"(",
"v",
",",
"format",
",",
"delta",
")"
] | A closure-izer for table arguments that include a format and possibly an offset. | [
"A",
"closure",
"-",
"izer",
"for",
"table",
"arguments",
"that",
"include",
"a",
"format",
"and",
"possibly",
"an",
"offset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L196-L200 |
5,069 | googledatalab/pydatalab | datalab/data/commands/_sql.py | _arguments | def _arguments(code, module):
"""Define pipeline arguments.
Args:
code: the Python code to execute that defines the arguments.
"""
arg_parser = CommandParser.create('')
try:
# Define our special argument 'types' and add them to the environment.
builtins = {'source': _table, 'datestring': _datestring}
env = {}
env.update(builtins)
# Execute the cell which should be one or more calls to arg().
exec(code, env)
# Iterate through the module dictionary. For any newly defined objects,
# add args to the parser.
for key in env:
# Skip internal/private stuff.
if key in builtins or key[0] == '_':
continue
# If we want to support importing query modules into other query modules, uncomment next 4
# Skip imports but add them to the module
# if isinstance(env[key], types.ModuleType):
# module.__dict__[key] = env[key]
# continue
val = env[key]
key = '--%s' % key
if isinstance(val, bool):
if val:
arg_parser.add_argument(key, default=val, action='store_true')
else:
arg_parser.add_argument(key, default=val, action='store_false')
elif isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float) \
or isinstance(val, int):
arg_parser.add_argument(key, default=val)
elif isinstance(val, list):
arg_parser.add_argument(key, default=val, nargs='+')
elif isinstance(val, tuple):
arg_parser.add_argument(key, default=list(val), nargs='+')
# Is this one of our pseudo-types for dates/tables?
elif isinstance(val, dict) and 'type' in val:
if val['type'] == 'datestring':
arg_parser.add_argument(key, default='',
type=_make_string_formatter(val['format'],
offset=val['offset']))
elif val['type'] == 'table':
if val['format'] is not None:
arg_parser.add_argument(key, default='',
type=_make_table_formatter(val['format'],
offset=val['offset']))
else:
arg_parser.add_argument(key, default=val['name'], type=_make_table)
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
except Exception as e:
print("%%sql arguments: %s from code '%s'" % (str(e), str(code)))
return arg_parser | python | def _arguments(code, module):
"""Define pipeline arguments.
Args:
code: the Python code to execute that defines the arguments.
"""
arg_parser = CommandParser.create('')
try:
# Define our special argument 'types' and add them to the environment.
builtins = {'source': _table, 'datestring': _datestring}
env = {}
env.update(builtins)
# Execute the cell which should be one or more calls to arg().
exec(code, env)
# Iterate through the module dictionary. For any newly defined objects,
# add args to the parser.
for key in env:
# Skip internal/private stuff.
if key in builtins or key[0] == '_':
continue
# If we want to support importing query modules into other query modules, uncomment next 4
# Skip imports but add them to the module
# if isinstance(env[key], types.ModuleType):
# module.__dict__[key] = env[key]
# continue
val = env[key]
key = '--%s' % key
if isinstance(val, bool):
if val:
arg_parser.add_argument(key, default=val, action='store_true')
else:
arg_parser.add_argument(key, default=val, action='store_false')
elif isinstance(val, basestring) or isinstance(val, int) or isinstance(val, float) \
or isinstance(val, int):
arg_parser.add_argument(key, default=val)
elif isinstance(val, list):
arg_parser.add_argument(key, default=val, nargs='+')
elif isinstance(val, tuple):
arg_parser.add_argument(key, default=list(val), nargs='+')
# Is this one of our pseudo-types for dates/tables?
elif isinstance(val, dict) and 'type' in val:
if val['type'] == 'datestring':
arg_parser.add_argument(key, default='',
type=_make_string_formatter(val['format'],
offset=val['offset']))
elif val['type'] == 'table':
if val['format'] is not None:
arg_parser.add_argument(key, default='',
type=_make_table_formatter(val['format'],
offset=val['offset']))
else:
arg_parser.add_argument(key, default=val['name'], type=_make_table)
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
except Exception as e:
print("%%sql arguments: %s from code '%s'" % (str(e), str(code)))
return arg_parser | [
"def",
"_arguments",
"(",
"code",
",",
"module",
")",
":",
"arg_parser",
"=",
"CommandParser",
".",
"create",
"(",
"''",
")",
"try",
":",
"# Define our special argument 'types' and add them to the environment.",
"builtins",
"=",
"{",
"'source'",
":",
"_table",
",",
"'datestring'",
":",
"_datestring",
"}",
"env",
"=",
"{",
"}",
"env",
".",
"update",
"(",
"builtins",
")",
"# Execute the cell which should be one or more calls to arg().",
"exec",
"(",
"code",
",",
"env",
")",
"# Iterate through the module dictionary. For any newly defined objects,",
"# add args to the parser.",
"for",
"key",
"in",
"env",
":",
"# Skip internal/private stuff.",
"if",
"key",
"in",
"builtins",
"or",
"key",
"[",
"0",
"]",
"==",
"'_'",
":",
"continue",
"# If we want to support importing query modules into other query modules, uncomment next 4",
"# Skip imports but add them to the module",
"# if isinstance(env[key], types.ModuleType):",
"# module.__dict__[key] = env[key]",
"# continue",
"val",
"=",
"env",
"[",
"key",
"]",
"key",
"=",
"'--%s'",
"%",
"key",
"if",
"isinstance",
"(",
"val",
",",
"bool",
")",
":",
"if",
"val",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"val",
",",
"action",
"=",
"'store_true'",
")",
"else",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"val",
",",
"action",
"=",
"'store_false'",
")",
"elif",
"isinstance",
"(",
"val",
",",
"basestring",
")",
"or",
"isinstance",
"(",
"val",
",",
"int",
")",
"or",
"isinstance",
"(",
"val",
",",
"float",
")",
"or",
"isinstance",
"(",
"val",
",",
"int",
")",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"val",
",",
"nargs",
"=",
"'+'",
")",
"elif",
"isinstance",
"(",
"val",
",",
"tuple",
")",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"list",
"(",
"val",
")",
",",
"nargs",
"=",
"'+'",
")",
"# Is this one of our pseudo-types for dates/tables?",
"elif",
"isinstance",
"(",
"val",
",",
"dict",
")",
"and",
"'type'",
"in",
"val",
":",
"if",
"val",
"[",
"'type'",
"]",
"==",
"'datestring'",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"''",
",",
"type",
"=",
"_make_string_formatter",
"(",
"val",
"[",
"'format'",
"]",
",",
"offset",
"=",
"val",
"[",
"'offset'",
"]",
")",
")",
"elif",
"val",
"[",
"'type'",
"]",
"==",
"'table'",
":",
"if",
"val",
"[",
"'format'",
"]",
"is",
"not",
"None",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"''",
",",
"type",
"=",
"_make_table_formatter",
"(",
"val",
"[",
"'format'",
"]",
",",
"offset",
"=",
"val",
"[",
"'offset'",
"]",
")",
")",
"else",
":",
"arg_parser",
".",
"add_argument",
"(",
"key",
",",
"default",
"=",
"val",
"[",
"'name'",
"]",
",",
"type",
"=",
"_make_table",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Cannot generate argument for %s of type %s'",
"%",
"(",
"key",
",",
"type",
"(",
"val",
")",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Cannot generate argument for %s of type %s'",
"%",
"(",
"key",
",",
"type",
"(",
"val",
")",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"%%sql arguments: %s from code '%s'\"",
"%",
"(",
"str",
"(",
"e",
")",
",",
"str",
"(",
"code",
")",
")",
")",
"return",
"arg_parser"
] | Define pipeline arguments.
Args:
code: the Python code to execute that defines the arguments. | [
"Define",
"pipeline",
"arguments",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L215-L281 |
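In essence, _arguments turns the variables assigned in the cell's Python prolog into --flags with defaults. A stripped-down sketch with plain argparse and hypothetical variables (the real code uses CommandParser plus the datestring/table pseudo-types):

```python
import argparse

# Hypothetical variables as they might be assigned in a %%sql cell's Python prolog.
cell_vars = {'threshold': 5, 'enabled': True, 'regions': ['us', 'eu']}

parser = argparse.ArgumentParser()
for key, val in cell_vars.items():
  flag = '--%s' % key
  if isinstance(val, bool):
    parser.add_argument(flag, default=val,
                        action='store_true' if val else 'store_false')
  elif isinstance(val, list):
    parser.add_argument(flag, default=val, nargs='+')
  else:
    parser.add_argument(flag, default=val)

print(parser.parse_args([]))                     # Namespace with the defaults
print(parser.parse_args(['--regions', 'asia']))  # regions overridden to ['asia']
```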
5,070 | googledatalab/pydatalab | datalab/data/commands/_sql.py | _split_cell | def _split_cell(cell, module):
""" Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module.
"""
lines = cell.split('\n')
code = None
last_def = -1
name = None
define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE)
define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE)
standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE)
# TODO(gram): a potential issue with this code is if we have leading Python code followed
# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see
# if we can address this.
for i, line in enumerate(lines):
define_match = define_re.match(line)
select_match = select_re.match(line)
standard_sql_match = standard_sql_re.match(line)
if i:
prior_content = ''.join(lines[:i]).strip()
if select_match:
# Avoid matching if previous token was '(' or if Standard SQL is found
# TODO: handle the possibility of comments immediately preceding SELECT
select_match = len(prior_content) == 0 or \
(prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
if standard_sql_match:
standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)
if define_match or select_match or standard_sql_match:
# If this is the first query, get the preceding Python code.
if code is None:
code = ('\n'.join(lines[:i])).strip()
if len(code):
code += '\n'
elif last_def >= 0:
# This is not the first query, so gather the previous query text.
query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:
# Avoid DEFINE query name\nSELECT ... being seen as an empty DEFINE followed by SELECT
continue
# Save the query
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
# And set the 'last' query to be this too
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
# Get the query name and strip off our syntactic sugar if appropriate.
if define_match:
name = define_match.group(1)
lines[i] = define_match.group(2)
else:
name = datalab.data._utils._SQL_MODULE_MAIN
# Save the starting line index of the new query
last_def = i
else:
define_wild_match = define_wild_re.match(line)
if define_wild_match:
raise Exception('Expected "DEFINE QUERY <name>"')
if last_def >= 0:
# We were in a query so save this tail query.
query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
if code is None:
code = ''
module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None) | python | def _split_cell(cell, module):
""" Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module.
"""
lines = cell.split('\n')
code = None
last_def = -1
name = None
define_wild_re = re.compile('^DEFINE\s+.*$', re.IGNORECASE)
define_re = re.compile('^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
select_re = re.compile('^SELECT\s*.*$', re.IGNORECASE)
standard_sql_re = re.compile('^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE)
# TODO(gram): a potential issue with this code is if we have leading Python code followed
# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see
# if we can address this.
for i, line in enumerate(lines):
define_match = define_re.match(line)
select_match = select_re.match(line)
standard_sql_match = standard_sql_re.match(line)
if i:
prior_content = ''.join(lines[:i]).strip()
if select_match:
# Avoid matching if previous token was '(' or if Standard SQL is found
# TODO: handle the possibility of comments immediately preceding SELECT
select_match = len(prior_content) == 0 or \
(prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
if standard_sql_match:
standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)
if define_match or select_match or standard_sql_match:
# If this is the first query, get the preceding Python code.
if code is None:
code = ('\n'.join(lines[:i])).strip()
if len(code):
code += '\n'
elif last_def >= 0:
# This is not the first query, so gather the previous query text.
query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:
# Avoid DEFINE query name\nSELECT ... being seen as an empty DEFINE followed by SELECT
continue
# Save the query
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
# And set the 'last' query to be this too
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
# Get the query name and strip off our syntactic sugar if appropriate.
if define_match:
name = define_match.group(1)
lines[i] = define_match.group(2)
else:
name = datalab.data._utils._SQL_MODULE_MAIN
# Save the starting line index of the new query
last_def = i
else:
define_wild_match = define_wild_re.match(line)
if define_wild_match:
raise Exception('Expected "DEFINE QUERY <name>"')
if last_def >= 0:
# We were in a query so save this tail query.
query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
if code is None:
code = ''
module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None) | [
"def",
"_split_cell",
"(",
"cell",
",",
"module",
")",
":",
"lines",
"=",
"cell",
".",
"split",
"(",
"'\\n'",
")",
"code",
"=",
"None",
"last_def",
"=",
"-",
"1",
"name",
"=",
"None",
"define_wild_re",
"=",
"re",
".",
"compile",
"(",
"'^DEFINE\\s+.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"define_re",
"=",
"re",
".",
"compile",
"(",
"'^DEFINE\\s+QUERY\\s+([A-Z]\\w*)\\s*?(.*)$'",
",",
"re",
".",
"IGNORECASE",
")",
"select_re",
"=",
"re",
".",
"compile",
"(",
"'^SELECT\\s*.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"standard_sql_re",
"=",
"re",
".",
"compile",
"(",
"'^(CREATE|WITH|INSERT|DELETE|UPDATE)\\s*.*$'",
",",
"re",
".",
"IGNORECASE",
")",
"# TODO(gram): a potential issue with this code is if we have leading Python code followed",
"# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see",
"# if we can address this.",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"define_match",
"=",
"define_re",
".",
"match",
"(",
"line",
")",
"select_match",
"=",
"select_re",
".",
"match",
"(",
"line",
")",
"standard_sql_match",
"=",
"standard_sql_re",
".",
"match",
"(",
"line",
")",
"if",
"i",
":",
"prior_content",
"=",
"''",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"]",
")",
".",
"strip",
"(",
")",
"if",
"select_match",
":",
"# Avoid matching if previous token was '(' or if Standard SQL is found",
"# TODO: handle the possibility of comments immediately preceding SELECT",
"select_match",
"=",
"len",
"(",
"prior_content",
")",
"==",
"0",
"or",
"(",
"prior_content",
"[",
"-",
"1",
"]",
"!=",
"'('",
"and",
"not",
"standard_sql_re",
".",
"match",
"(",
"prior_content",
")",
")",
"if",
"standard_sql_match",
":",
"standard_sql_match",
"=",
"len",
"(",
"prior_content",
")",
"==",
"0",
"or",
"not",
"standard_sql_re",
".",
"match",
"(",
"prior_content",
")",
"if",
"define_match",
"or",
"select_match",
"or",
"standard_sql_match",
":",
"# If this is the first query, get the preceding Python code.",
"if",
"code",
"is",
"None",
":",
"code",
"=",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
":",
"i",
"]",
")",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"code",
")",
":",
"code",
"+=",
"'\\n'",
"elif",
"last_def",
">=",
"0",
":",
"# This is not the first query, so gather the previous query text.",
"query",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"lines",
"[",
"last_def",
":",
"i",
"]",
"if",
"len",
"(",
"line",
")",
"]",
")",
".",
"strip",
"(",
")",
"if",
"select_match",
"and",
"name",
"!=",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_MAIN",
"and",
"len",
"(",
"query",
")",
"==",
"0",
":",
"# Avoid DEFINE query name\\nSELECT ... being seen as an empty DEFINE followed by SELECT",
"continue",
"# Save the query",
"statement",
"=",
"datalab",
".",
"data",
".",
"SqlStatement",
"(",
"query",
",",
"module",
")",
"module",
".",
"__dict__",
"[",
"name",
"]",
"=",
"statement",
"# And set the 'last' query to be this too",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
"]",
"=",
"statement",
"# Get the query name and strip off our syntactic sugar if appropriate.",
"if",
"define_match",
":",
"name",
"=",
"define_match",
".",
"group",
"(",
"1",
")",
"lines",
"[",
"i",
"]",
"=",
"define_match",
".",
"group",
"(",
"2",
")",
"else",
":",
"name",
"=",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_MAIN",
"# Save the starting line index of the new query",
"last_def",
"=",
"i",
"else",
":",
"define_wild_match",
"=",
"define_wild_re",
".",
"match",
"(",
"line",
")",
"if",
"define_wild_match",
":",
"raise",
"Exception",
"(",
"'Expected \"DEFINE QUERY <name>\"'",
")",
"if",
"last_def",
">=",
"0",
":",
"# We were in a query so save this tail query.",
"query",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"lines",
"[",
"last_def",
":",
"]",
"if",
"len",
"(",
"line",
")",
"]",
")",
".",
"strip",
"(",
")",
"statement",
"=",
"datalab",
".",
"data",
".",
"SqlStatement",
"(",
"query",
",",
"module",
")",
"module",
".",
"__dict__",
"[",
"name",
"]",
"=",
"statement",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
"]",
"=",
"statement",
"if",
"code",
"is",
"None",
":",
"code",
"=",
"''",
"module",
".",
"__dict__",
"[",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_ARGPARSE",
"]",
"=",
"_arguments",
"(",
"code",
",",
"module",
")",
"return",
"module",
".",
"__dict__",
".",
"get",
"(",
"datalab",
".",
"data",
".",
"_utils",
".",
"_SQL_MODULE_LAST",
",",
"None",
")"
] | Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module. | [
"Split",
"a",
"hybrid",
"%%sql",
"cell",
"into",
"the",
"Python",
"code",
"and",
"the",
"queries",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L284-L367 |
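For context, a minimal sketch of how this splitting behaves when called directly (it uses the private _split_cell helper documented in the row above; the cell text and module name are purely illustrative):

import imp
from datalab.data.commands._sql import _split_cell

cell = '\n'.join([
    'DEFINE QUERY logs',
    'SELECT path, latency FROM [myproject:mydataset.logs]',
    '',
    'SELECT COUNT(*) FROM [myproject:mydataset.logs]',
])
module = imp.new_module('my_queries')
default_query = _split_cell(cell, module)  # returns the last (unnamed) query
print(module.logs)       # the named DEFINE QUERY statement
print(default_query)     # the trailing SELECT, also stored as the module default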
5,071 | googledatalab/pydatalab | datalab/data/commands/_sql.py | sql_cell | def sql_cell(args, cell):
"""Implements the SQL cell magic for ipython notebooks.
The supported syntax is:
%%sql [--module <modulename>]
[<optional Python code for default argument values>]
[<optional named queries>]
[<optional unnamed query>]
At least one query should be present. Named queries should start with:
DEFINE QUERY <name>
on a line by itself.
Args:
args: the optional arguments following '%%sql'.
cell: the contents of the cell; Python code for arguments followed by SQL queries.
"""
name = args['module'] if args['module'] else '_sql_cell'
module = imp.new_module(name)
query = _split_cell(cell, module)
ipy = IPython.get_ipython()
if not args['module']:
# Execute now
if query:
return datalab.bigquery.Query(query, values=ipy.user_ns) \
.execute(dialect=args['dialect'], billing_tier=args['billing']).results
else:
# Add it as a module
sys.modules[name] = module
exec('import %s' % name, ipy.user_ns) | python | def sql_cell(args, cell):
"""Implements the SQL cell magic for ipython notebooks.
The supported syntax is:
%%sql [--module <modulename>]
[<optional Python code for default argument values>]
[<optional named queries>]
[<optional unnamed query>]
At least one query should be present. Named queries should start with:
DEFINE QUERY <name>
on a line by itself.
Args:
args: the optional arguments following '%%sql'.
cell: the contents of the cell; Python code for arguments followed by SQL queries.
"""
name = args['module'] if args['module'] else '_sql_cell'
module = imp.new_module(name)
query = _split_cell(cell, module)
ipy = IPython.get_ipython()
if not args['module']:
# Execute now
if query:
return datalab.bigquery.Query(query, values=ipy.user_ns) \
.execute(dialect=args['dialect'], billing_tier=args['billing']).results
else:
# Add it as a module
sys.modules[name] = module
exec('import %s' % name, ipy.user_ns) | [
"def",
"sql_cell",
"(",
"args",
",",
"cell",
")",
":",
"name",
"=",
"args",
"[",
"'module'",
"]",
"if",
"args",
"[",
"'module'",
"]",
"else",
"'_sql_cell'",
"module",
"=",
"imp",
".",
"new_module",
"(",
"name",
")",
"query",
"=",
"_split_cell",
"(",
"cell",
",",
"module",
")",
"ipy",
"=",
"IPython",
".",
"get_ipython",
"(",
")",
"if",
"not",
"args",
"[",
"'module'",
"]",
":",
"# Execute now",
"if",
"query",
":",
"return",
"datalab",
".",
"bigquery",
".",
"Query",
"(",
"query",
",",
"values",
"=",
"ipy",
".",
"user_ns",
")",
".",
"execute",
"(",
"dialect",
"=",
"args",
"[",
"'dialect'",
"]",
",",
"billing_tier",
"=",
"args",
"[",
"'billing'",
"]",
")",
".",
"results",
"else",
":",
"# Add it as a module",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"module",
"exec",
"(",
"'import %s'",
"%",
"name",
",",
"ipy",
".",
"user_ns",
")"
] | Implements the SQL cell magic for ipython notebooks.
The supported syntax is:
%%sql [--module <modulename>]
[<optional Python code for default argument values>]
[<optional named queries>]
[<optional unnamed query>]
At least one query should be present. Named queries should start with:
DEFINE QUERY <name>
on a line by itself.
Args:
args: the optional arguments following '%%sql'.
cell: the contents of the cell; Python code for arguments followed by SQL queries. | [
"Implements",
"the",
"SQL",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/commands/_sql.py#L370-L402 |
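A notebook cell using this magic might look like the following sketch; the project, dataset and the $threshold placeholder follow datalab's SQL-module conventions and are illustrative only:

%%sql --module queries
threshold = 100

DEFINE QUERY slow_requests
SELECT path, latency FROM [myproject:logs.requests] WHERE latency > $threshold

SELECT COUNT(*) FROM [myproject:logs.requests]

Because --module is given, nothing executes immediately: queries.slow_requests becomes a SqlStatement usable from other cells, and the trailing SELECT is stored as the module's default query.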
5,072 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py | get_reader_input_fn | def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
data_paths, batch_size, shuffle, num_epochs=None):
"""Builds input layer for training."""
def get_input_features():
"""Read the input features from the given data paths."""
_, examples = util.read_examples(
input_files=data_paths,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs)
features = util.parse_example_tensor(examples=examples,
train_config=train_config,
keep_target=True)
target_name = train_config['target_column']
target = features.pop(target_name)
features, target = util.preprocess_input(
features=features,
target=target,
train_config=train_config,
preprocess_output_dir=preprocess_output_dir,
model_type=model_type)
return features, target
# Return a function to input the features into the model from a data path.
return get_input_features | python | def get_reader_input_fn(train_config, preprocess_output_dir, model_type,
data_paths, batch_size, shuffle, num_epochs=None):
"""Builds input layer for training."""
def get_input_features():
"""Read the input features from the given data paths."""
_, examples = util.read_examples(
input_files=data_paths,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs)
features = util.parse_example_tensor(examples=examples,
train_config=train_config,
keep_target=True)
target_name = train_config['target_column']
target = features.pop(target_name)
features, target = util.preprocess_input(
features=features,
target=target,
train_config=train_config,
preprocess_output_dir=preprocess_output_dir,
model_type=model_type)
return features, target
# Return a function to input the features into the model from a data path.
return get_input_features | [
"def",
"get_reader_input_fn",
"(",
"train_config",
",",
"preprocess_output_dir",
",",
"model_type",
",",
"data_paths",
",",
"batch_size",
",",
"shuffle",
",",
"num_epochs",
"=",
"None",
")",
":",
"def",
"get_input_features",
"(",
")",
":",
"\"\"\"Read the input features from the given data paths.\"\"\"",
"_",
",",
"examples",
"=",
"util",
".",
"read_examples",
"(",
"input_files",
"=",
"data_paths",
",",
"batch_size",
"=",
"batch_size",
",",
"shuffle",
"=",
"shuffle",
",",
"num_epochs",
"=",
"num_epochs",
")",
"features",
"=",
"util",
".",
"parse_example_tensor",
"(",
"examples",
"=",
"examples",
",",
"train_config",
"=",
"train_config",
",",
"keep_target",
"=",
"True",
")",
"target_name",
"=",
"train_config",
"[",
"'target_column'",
"]",
"target",
"=",
"features",
".",
"pop",
"(",
"target_name",
")",
"features",
",",
"target",
"=",
"util",
".",
"preprocess_input",
"(",
"features",
"=",
"features",
",",
"target",
"=",
"target",
",",
"train_config",
"=",
"train_config",
",",
"preprocess_output_dir",
"=",
"preprocess_output_dir",
",",
"model_type",
"=",
"model_type",
")",
"return",
"features",
",",
"target",
"# Return a function to input the feaures into the model from a data path.",
"return",
"get_input_features"
] | Builds input layer for training. | [
"Builds",
"input",
"layer",
"for",
"training",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py#L30-L57 |
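A wiring sketch for the returned closure (the paths, the train_config object and the model type string are placeholders, not values prescribed by this function):

train_input_fn = get_reader_input_fn(
    train_config=train_config,                    # built elsewhere from the analysis output
    preprocess_output_dir='gs://my-bucket/analysis',
    model_type='dnn_classification',              # assumed example value
    data_paths=['gs://my-bucket/features_train*'],
    batch_size=100,
    shuffle=True,
    num_epochs=None)

features, target = train_input_fn()   # builds the input tensors when invoked
# A tf.contrib.learn estimator would typically receive it as
# estimator.fit(input_fn=train_input_fn, max_steps=1000)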
5,073 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py | main | def main(argv=None):
"""Run a Tensorflow model on the Iris dataset."""
args = parse_arguments(sys.argv if argv is None else argv)
tf.logging.set_verbosity(tf.logging.INFO)
learn_runner.run(
experiment_fn=get_experiment_fn(args),
output_dir=args.job_dir) | python | def main(argv=None):
"""Run a Tensorflow model on the Iris dataset."""
args = parse_arguments(sys.argv if argv is None else argv)
tf.logging.set_verbosity(tf.logging.INFO)
learn_runner.run(
experiment_fn=get_experiment_fn(args),
output_dir=args.job_dir) | [
"def",
"main",
"(",
"argv",
"=",
"None",
")",
":",
"args",
"=",
"parse_arguments",
"(",
"sys",
".",
"argv",
"if",
"argv",
"is",
"None",
"else",
"argv",
")",
"tf",
".",
"logging",
".",
"set_verbosity",
"(",
"tf",
".",
"logging",
".",
"INFO",
")",
"learn_runner",
".",
"run",
"(",
"experiment_fn",
"=",
"get_experiment_fn",
"(",
"args",
")",
",",
"output_dir",
"=",
"args",
".",
"job_dir",
")"
] | Run a Tensorflow model on the Iris dataset. | [
"Run",
"a",
"Tensorflow",
"model",
"on",
"the",
"Iris",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/task.py#L231-L238 |
5,074 | googledatalab/pydatalab | google/datalab/stackdriver/commands/_monitoring.py | sd | def sd(line, cell=None):
"""Implements the stackdriver cell magic for ipython notebooks.
Args:
line: the contents of the %sd line.
Returns:
The results of executing the cell.
"""
parser = google.datalab.utils.commands.CommandParser(prog='%sd', description=(
'Execute various Stackdriver related operations. Use "%sd '
'<stackdriver_product> -h" for help on a specific Stackdriver product.'))
# %%sd monitoring
_create_monitoring_subparser(parser)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser) | python | def sd(line, cell=None):
"""Implements the stackdriver cell magic for ipython notebooks.
Args:
line: the contents of the %sd line.
Returns:
The results of executing the cell.
"""
parser = google.datalab.utils.commands.CommandParser(prog='%sd', description=(
'Execute various Stackdriver related operations. Use "%sd '
'<stackdriver_product> -h" for help on a specific Stackdriver product.'))
# %%sd monitoring
_create_monitoring_subparser(parser)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser) | [
"def",
"sd",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"parser",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"CommandParser",
"(",
"prog",
"=",
"'%sd'",
",",
"description",
"=",
"(",
"'Execute various Stackdriver related operations. Use \"%sd '",
"'<stackdriver_product> -h\" for help on a specific Stackdriver product.'",
")",
")",
"# %%sd monitoring",
"_create_monitoring_subparser",
"(",
"parser",
")",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"parser",
")"
] | Implements the stackdriver cell magic for ipython notebooks.
Args:
line: the contents of the %sd line.
Returns:
The results of executing the cell. | [
"Implements",
"the",
"stackdriver",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/commands/_monitoring.py#L28-L42 |
5,075 | googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/task.py | make_prediction_output_tensors | def make_prediction_output_tensors(args, features, input_ops, model_fn_ops,
keep_target):
"""Makes the final prediction output layer."""
target_name = feature_transforms.get_target_name(features)
key_names = get_key_names(features)
outputs = {}
outputs.update({key_name: tf.squeeze(input_ops.features[key_name])
for key_name in key_names})
if is_classification_model(args.model):
# build maps from ints to the original categorical strings.
class_names = read_vocab(args, target_name)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=class_names,
default_value='UNKNOWN')
# Get the label of the input target.
if keep_target:
input_target_label = table.lookup(input_ops.features[target_name])
outputs[PG_TARGET] = tf.squeeze(input_target_label)
# TODO(brandondutra): get the score of the target label too.
probabilities = model_fn_ops.predictions['probabilities']
# if top_n == 0, this means use all the classes. We will use class names as
# probability labels.
if args.top_n == 0:
predicted_index = tf.argmax(probabilities, axis=1)
predicted = table.lookup(predicted_index)
outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted})
probabilities_list = tf.unstack(probabilities, axis=1)
for class_name, p in zip(class_names, probabilities_list):
outputs[class_name] = p
else:
top_n = args.top_n
# get top k labels and their scores.
(top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n)
top_k_labels = table.lookup(tf.to_int64(top_k_indices))
# Write the top_k values using 2*top_n columns.
num_digits = int(math.ceil(math.log(top_n, 10)))
if num_digits == 0:
num_digits = 1
for i in range(0, top_n):
# Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'. This
# makes sorting the columns easy.
padded_i = str(i + 1).zfill(num_digits)
if i == 0:
label_alias = PG_CLASSIFICATION_FIRST_LABEL
else:
label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i
label_tensor_name = (tf.squeeze(
tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1])))
if i == 0:
score_alias = PG_CLASSIFICATION_FIRST_SCORE
else:
score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i
score_tensor_name = (tf.squeeze(
tf.slice(top_k_values,
[0, i],
[tf.shape(top_k_values)[0], 1])))
outputs.update({label_alias: label_tensor_name,
score_alias: score_tensor_name})
else:
if keep_target:
outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name])
scores = model_fn_ops.predictions['scores']
outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores)
return outputs | python | def make_prediction_output_tensors(args, features, input_ops, model_fn_ops,
keep_target):
"""Makes the final prediction output layer."""
target_name = feature_transforms.get_target_name(features)
key_names = get_key_names(features)
outputs = {}
outputs.update({key_name: tf.squeeze(input_ops.features[key_name])
for key_name in key_names})
if is_classification_model(args.model):
# build maps from ints to the original categorical strings.
class_names = read_vocab(args, target_name)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping=class_names,
default_value='UNKNOWN')
# Get the label of the input target.
if keep_target:
input_target_label = table.lookup(input_ops.features[target_name])
outputs[PG_TARGET] = tf.squeeze(input_target_label)
# TODO(brandondutra): get the score of the target label too.
probabilities = model_fn_ops.predictions['probabilities']
# if top_n == 0, this means use all the classes. We will use class names as
# probability labels.
if args.top_n == 0:
predicted_index = tf.argmax(probabilities, axis=1)
predicted = table.lookup(predicted_index)
outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted})
probabilities_list = tf.unstack(probabilities, axis=1)
for class_name, p in zip(class_names, probabilities_list):
outputs[class_name] = p
else:
top_n = args.top_n
# get top k labels and their scores.
(top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n)
top_k_labels = table.lookup(tf.to_int64(top_k_indices))
# Write the top_k values using 2*top_n columns.
num_digits = int(math.ceil(math.log(top_n, 10)))
if num_digits == 0:
num_digits = 1
for i in range(0, top_n):
# Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'. This
# makes sorting the columns easy.
padded_i = str(i + 1).zfill(num_digits)
if i == 0:
label_alias = PG_CLASSIFICATION_FIRST_LABEL
else:
label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i
label_tensor_name = (tf.squeeze(
tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1])))
if i == 0:
score_alias = PG_CLASSIFICATION_FIRST_SCORE
else:
score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i
score_tensor_name = (tf.squeeze(
tf.slice(top_k_values,
[0, i],
[tf.shape(top_k_values)[0], 1])))
outputs.update({label_alias: label_tensor_name,
score_alias: score_tensor_name})
else:
if keep_target:
outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name])
scores = model_fn_ops.predictions['scores']
outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores)
return outputs | [
"def",
"make_prediction_output_tensors",
"(",
"args",
",",
"features",
",",
"input_ops",
",",
"model_fn_ops",
",",
"keep_target",
")",
":",
"target_name",
"=",
"feature_transforms",
".",
"get_target_name",
"(",
"features",
")",
"key_names",
"=",
"get_key_names",
"(",
"features",
")",
"outputs",
"=",
"{",
"}",
"outputs",
".",
"update",
"(",
"{",
"key_name",
":",
"tf",
".",
"squeeze",
"(",
"input_ops",
".",
"features",
"[",
"key_name",
"]",
")",
"for",
"key_name",
"in",
"key_names",
"}",
")",
"if",
"is_classification_model",
"(",
"args",
".",
"model",
")",
":",
"# build maps from ints to the origional categorical strings.",
"class_names",
"=",
"read_vocab",
"(",
"args",
",",
"target_name",
")",
"table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"index_to_string_table_from_tensor",
"(",
"mapping",
"=",
"class_names",
",",
"default_value",
"=",
"'UNKNOWN'",
")",
"# Get the label of the input target.",
"if",
"keep_target",
":",
"input_target_label",
"=",
"table",
".",
"lookup",
"(",
"input_ops",
".",
"features",
"[",
"target_name",
"]",
")",
"outputs",
"[",
"PG_TARGET",
"]",
"=",
"tf",
".",
"squeeze",
"(",
"input_target_label",
")",
"# TODO(brandondutra): get the score of the target label too.",
"probabilities",
"=",
"model_fn_ops",
".",
"predictions",
"[",
"'probabilities'",
"]",
"# if top_n == 0, this means use all the classes. We will use class names as",
"# probabilities labels.",
"if",
"args",
".",
"top_n",
"==",
"0",
":",
"predicted_index",
"=",
"tf",
".",
"argmax",
"(",
"probabilities",
",",
"axis",
"=",
"1",
")",
"predicted",
"=",
"table",
".",
"lookup",
"(",
"predicted_index",
")",
"outputs",
".",
"update",
"(",
"{",
"PG_CLASSIFICATION_FIRST_LABEL",
":",
"predicted",
"}",
")",
"probabilities_list",
"=",
"tf",
".",
"unstack",
"(",
"probabilities",
",",
"axis",
"=",
"1",
")",
"for",
"class_name",
",",
"p",
"in",
"zip",
"(",
"class_names",
",",
"probabilities_list",
")",
":",
"outputs",
"[",
"class_name",
"]",
"=",
"p",
"else",
":",
"top_n",
"=",
"args",
".",
"top_n",
"# get top k labels and their scores.",
"(",
"top_k_values",
",",
"top_k_indices",
")",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"probabilities",
",",
"k",
"=",
"top_n",
")",
"top_k_labels",
"=",
"table",
".",
"lookup",
"(",
"tf",
".",
"to_int64",
"(",
"top_k_indices",
")",
")",
"# Write the top_k values using 2*top_n columns.",
"num_digits",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"math",
".",
"log",
"(",
"top_n",
",",
"10",
")",
")",
")",
"if",
"num_digits",
"==",
"0",
":",
"num_digits",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"top_n",
")",
":",
"# Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'. This",
"# makes sorting the columns easy.",
"padded_i",
"=",
"str",
"(",
"i",
"+",
"1",
")",
".",
"zfill",
"(",
"num_digits",
")",
"if",
"i",
"==",
"0",
":",
"label_alias",
"=",
"PG_CLASSIFICATION_FIRST_LABEL",
"else",
":",
"label_alias",
"=",
"PG_CLASSIFICATION_LABEL_TEMPLATE",
"%",
"padded_i",
"label_tensor_name",
"=",
"(",
"tf",
".",
"squeeze",
"(",
"tf",
".",
"slice",
"(",
"top_k_labels",
",",
"[",
"0",
",",
"i",
"]",
",",
"[",
"tf",
".",
"shape",
"(",
"top_k_labels",
")",
"[",
"0",
"]",
",",
"1",
"]",
")",
")",
")",
"if",
"i",
"==",
"0",
":",
"score_alias",
"=",
"PG_CLASSIFICATION_FIRST_SCORE",
"else",
":",
"score_alias",
"=",
"PG_CLASSIFICATION_SCORE_TEMPLATE",
"%",
"padded_i",
"score_tensor_name",
"=",
"(",
"tf",
".",
"squeeze",
"(",
"tf",
".",
"slice",
"(",
"top_k_values",
",",
"[",
"0",
",",
"i",
"]",
",",
"[",
"tf",
".",
"shape",
"(",
"top_k_values",
")",
"[",
"0",
"]",
",",
"1",
"]",
")",
")",
")",
"outputs",
".",
"update",
"(",
"{",
"label_alias",
":",
"label_tensor_name",
",",
"score_alias",
":",
"score_tensor_name",
"}",
")",
"else",
":",
"if",
"keep_target",
":",
"outputs",
"[",
"PG_TARGET",
"]",
"=",
"tf",
".",
"squeeze",
"(",
"input_ops",
".",
"features",
"[",
"target_name",
"]",
")",
"scores",
"=",
"model_fn_ops",
".",
"predictions",
"[",
"'scores'",
"]",
"outputs",
"[",
"PG_REGRESSION_PREDICTED_TARGET",
"]",
"=",
"tf",
".",
"squeeze",
"(",
"scores",
")",
"return",
"outputs"
] | Makes the final prediction output layer. | [
"Makes",
"the",
"final",
"prediction",
"output",
"layer",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/task.py#L377-L456 |
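The 2*top_n prediction columns emitted by the loop above use zero-padded aliases; a small standalone illustration of just that padding rule (the real alias strings come from the PG_* constants defined elsewhere in this file, so the names below are placeholders):

import math

top_n = 12
num_digits = int(math.ceil(math.log(top_n, 10)))
if num_digits == 0:
    num_digits = 1
aliases = []
for i in range(top_n):
    padded_i = str(i + 1).zfill(num_digits)           # '01', '02', ..., '12'
    aliases.append('label_%s' % padded_i if i else 'first_label')
# aliases -> ['first_label', 'label_02', 'label_03', ..., 'label_12']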
5,076 | googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/task.py | read_vocab | def read_vocab(args, column_name):
"""Reads a vocab file if it exists.
Args:
args: command line flags
column_name: name of the column that has a vocab file.
Returns:
List of vocab words or [] if the vocab file is not found.
"""
vocab_path = os.path.join(args.analysis,
feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
if not file_io.file_exists(vocab_path):
return []
vocab, _ = feature_transforms.read_vocab_file(vocab_path)
return vocab | python | def read_vocab(args, column_name):
"""Reads a vocab file if it exists.
Args:
args: command line flags
column_name: name of the column that has a vocab file.
Returns:
List of vocab words or [] if the vocab file is not found.
"""
vocab_path = os.path.join(args.analysis,
feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
if not file_io.file_exists(vocab_path):
return []
vocab, _ = feature_transforms.read_vocab_file(vocab_path)
return vocab | [
"def",
"read_vocab",
"(",
"args",
",",
"column_name",
")",
":",
"vocab_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"analysis",
",",
"feature_transforms",
".",
"VOCAB_ANALYSIS_FILE",
"%",
"column_name",
")",
"if",
"not",
"file_io",
".",
"file_exists",
"(",
"vocab_path",
")",
":",
"return",
"[",
"]",
"vocab",
",",
"_",
"=",
"feature_transforms",
".",
"read_vocab_file",
"(",
"vocab_path",
")",
"return",
"vocab"
] | Reads a vocab file if it exists.
Args:
args: command line flags
column_name: name of the column that has a vocab file.
Returns:
List of vocab words or [] if the vocab file is not found. | [
"Reads",
"a",
"vocab",
"file",
"if",
"it",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/task.py#L642-L659 |
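A usage sketch, assuming args.analysis points at the output directory written by the analyze step:

vocab = read_vocab(args, 'target')
if vocab:
    print('classification target with %d classes' % len(vocab))
else:
    print('no vocab file for this column; it is numeric or was not analyzed')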
5,077 | googledatalab/pydatalab | datalab/utils/_utils.py | get_item | def get_item(env, name, default=None):
""" Get an item from a dictionary, handling nested lookups with dotted notation.
Args:
env: the environment (dictionary) to use to look up the name.
name: the name to look up, in dotted notation.
default: the value to return if the name is not found.
Returns:
The result of looking up the name, if found; else the default.
"""
# TODO: handle attributes
for key in name.split('.'):
if isinstance(env, dict) and key in env:
env = env[key]
elif isinstance(env, types.ModuleType) and key in env.__dict__:
env = env.__dict__[key]
else:
return default
return env | python | def get_item(env, name, default=None):
""" Get an item from a dictionary, handling nested lookups with dotted notation.
Args:
env: the environment (dictionary) to use to look up the name.
name: the name to look up, in dotted notation.
default: the value to return if the name is not found.
Returns:
The result of looking up the name, if found; else the default.
"""
# TODO: handle attributes
for key in name.split('.'):
if isinstance(env, dict) and key in env:
env = env[key]
elif isinstance(env, types.ModuleType) and key in env.__dict__:
env = env.__dict__[key]
else:
return default
return env | [
"def",
"get_item",
"(",
"env",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"# TODO: handle attributes",
"for",
"key",
"in",
"name",
".",
"split",
"(",
"'.'",
")",
":",
"if",
"isinstance",
"(",
"env",
",",
"dict",
")",
"and",
"key",
"in",
"env",
":",
"env",
"=",
"env",
"[",
"key",
"]",
"elif",
"isinstance",
"(",
"env",
",",
"types",
".",
"ModuleType",
")",
"and",
"key",
"in",
"env",
".",
"__dict__",
":",
"env",
"=",
"env",
".",
"__dict__",
"[",
"key",
"]",
"else",
":",
"return",
"default",
"return",
"env"
] | Get an item from a dictionary, handling nested lookups with dotted notation.
Args:
env: the environment (dictionary) to use to look up the name.
name: the name to look up, in dotted notation.
default: the value to return if the name is not found.
Returns:
The result of looking up the name, if found; else the default. | [
"Get",
"an",
"item",
"from",
"a",
"dictionary",
"handling",
"nested",
"lookups",
"with",
"dotted",
"notation",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/_utils.py#L41-L60 |
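A quick illustration of the dotted lookup with plain dictionaries (the same walk also descends into module attributes via __dict__):

from datalab.utils._utils import get_item

env = {'config': {'bigquery': {'dialect': 'legacy'}}}
print(get_item(env, 'config.bigquery.dialect'))             # 'legacy'
print(get_item(env, 'config.missing.key', default='n/a'))   # 'n/a'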
5,078 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_predictor.py | predict | def predict(model_dir, images):
"""Local instant prediction."""
results = _tf_predict(model_dir, images)
predicted_and_scores = [(predicted, label_scores[list(labels).index(predicted)])
for predicted, labels, label_scores in results]
return predicted_and_scores | python | def predict(model_dir, images):
"""Local instant prediction."""
results = _tf_predict(model_dir, images)
predicted_and_scores = [(predicted, label_scores[list(labels).index(predicted)])
for predicted, labels, label_scores in results]
return predicted_and_scores | [
"def",
"predict",
"(",
"model_dir",
",",
"images",
")",
":",
"results",
"=",
"_tf_predict",
"(",
"model_dir",
",",
"images",
")",
"predicted_and_scores",
"=",
"[",
"(",
"predicted",
",",
"label_scores",
"[",
"list",
"(",
"labels",
")",
".",
"index",
"(",
"predicted",
")",
"]",
")",
"for",
"predicted",
",",
"labels",
",",
"label_scores",
"in",
"results",
"]",
"return",
"predicted_and_scores"
] | Local instant prediction. | [
"Local",
"instant",
"prediction",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_predictor.py#L58-L64 |
5,079 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_predictor.py | configure_pipeline | def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
"""Configures a dataflow pipeline for batch prediction."""
data = _util.get_sources_from_dataset(p, dataset, 'predict')
if len(dataset.schema) == 2:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'target', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'target_prob', 'type': 'FLOAT'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
else:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
results = (data |
'Load Images' >> beam.ParDo(LoadImagesDoFn()) |
'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20)) |
'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir)) |
'Unbatch' >> beam.ParDo(UnbatchDoFn()) |
'Process Results' >> beam.ParDo(ProcessResultsDoFn()))
if output_csv is not None:
schema_file = output_csv + '.schema.json'
results_save = (results |
'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn()) |
'Write Csv Results' >> beam.io.textio.WriteToText(output_csv,
shard_name_template=''))
(results_save |
'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1) |
'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema)) |
'Write Schema' >> beam.io.textio.WriteToText(schema_file, shard_name_template=''))
if output_bq_table is not None:
# BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'
bq_schema_string = ','.join(x['name'] + ':' + x['type'] for x in output_schema)
sink = beam.io.BigQuerySink(output_bq_table, schema=bq_schema_string,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
results | 'Write BQ Results' >> beam.io.Write(sink) | python | def configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table):
"""Configures a dataflow pipeline for batch prediction."""
data = _util.get_sources_from_dataset(p, dataset, 'predict')
if len(dataset.schema) == 2:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'target', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'target_prob', 'type': 'FLOAT'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
else:
output_schema = [
{'name': 'image_url', 'type': 'STRING'},
{'name': 'predicted', 'type': 'STRING'},
{'name': 'predicted_prob', 'type': 'FLOAT'},
]
results = (data |
'Load Images' >> beam.ParDo(LoadImagesDoFn()) |
'Batch Inputs' >> beam.ParDo(EmitAsBatchDoFn(20)) |
'Batch Predict' >> beam.ParDo(PredictBatchDoFn(model_dir)) |
'Unbatch' >> beam.ParDo(UnbatchDoFn()) |
'Process Results' >> beam.ParDo(ProcessResultsDoFn()))
if output_csv is not None:
schema_file = output_csv + '.schema.json'
results_save = (results |
'Prepare For Output' >> beam.ParDo(MakeCsvLineDoFn()) |
'Write Csv Results' >> beam.io.textio.WriteToText(output_csv,
shard_name_template=''))
(results_save |
'Sample One' >> beam.transforms.combiners.Sample.FixedSizeGlobally(1) |
'Serialize Schema' >> beam.Map(lambda path: json.dumps(output_schema)) |
'Write Schema' >> beam.io.textio.WriteToText(schema_file, shard_name_template=''))
if output_bq_table is not None:
# BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'
bq_schema_string = ','.join(x['name'] + ':' + x['type'] for x in output_schema)
sink = beam.io.BigQuerySink(output_bq_table, schema=bq_schema_string,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
results | 'Write BQ Results' >> beam.io.Write(sink) | [
"def",
"configure_pipeline",
"(",
"p",
",",
"dataset",
",",
"model_dir",
",",
"output_csv",
",",
"output_bq_table",
")",
":",
"data",
"=",
"_util",
".",
"get_sources_from_dataset",
"(",
"p",
",",
"dataset",
",",
"'predict'",
")",
"if",
"len",
"(",
"dataset",
".",
"schema",
")",
"==",
"2",
":",
"output_schema",
"=",
"[",
"{",
"'name'",
":",
"'image_url'",
",",
"'type'",
":",
"'STRING'",
"}",
",",
"{",
"'name'",
":",
"'target'",
",",
"'type'",
":",
"'STRING'",
"}",
",",
"{",
"'name'",
":",
"'predicted'",
",",
"'type'",
":",
"'STRING'",
"}",
",",
"{",
"'name'",
":",
"'target_prob'",
",",
"'type'",
":",
"'FLOAT'",
"}",
",",
"{",
"'name'",
":",
"'predicted_prob'",
",",
"'type'",
":",
"'FLOAT'",
"}",
",",
"]",
"else",
":",
"output_schema",
"=",
"[",
"{",
"'name'",
":",
"'image_url'",
",",
"'type'",
":",
"'STRING'",
"}",
",",
"{",
"'name'",
":",
"'predicted'",
",",
"'type'",
":",
"'STRING'",
"}",
",",
"{",
"'name'",
":",
"'predicted_prob'",
",",
"'type'",
":",
"'FLOAT'",
"}",
",",
"]",
"results",
"=",
"(",
"data",
"|",
"'Load Images'",
">>",
"beam",
".",
"ParDo",
"(",
"LoadImagesDoFn",
"(",
")",
")",
"|",
"'Batch Inputs'",
">>",
"beam",
".",
"ParDo",
"(",
"EmitAsBatchDoFn",
"(",
"20",
")",
")",
"|",
"'Batch Predict'",
">>",
"beam",
".",
"ParDo",
"(",
"PredictBatchDoFn",
"(",
"model_dir",
")",
")",
"|",
"'Unbatch'",
">>",
"beam",
".",
"ParDo",
"(",
"UnbatchDoFn",
"(",
")",
")",
"|",
"'Process Results'",
">>",
"beam",
".",
"ParDo",
"(",
"ProcessResultsDoFn",
"(",
")",
")",
")",
"if",
"output_csv",
"is",
"not",
"None",
":",
"schema_file",
"=",
"output_csv",
"+",
"'.schema.json'",
"results_save",
"=",
"(",
"results",
"|",
"'Prepare For Output'",
">>",
"beam",
".",
"ParDo",
"(",
"MakeCsvLineDoFn",
"(",
")",
")",
"|",
"'Write Csv Results'",
">>",
"beam",
".",
"io",
".",
"textio",
".",
"WriteToText",
"(",
"output_csv",
",",
"shard_name_template",
"=",
"''",
")",
")",
"(",
"results_save",
"|",
"'Sample One'",
">>",
"beam",
".",
"transforms",
".",
"combiners",
".",
"Sample",
".",
"FixedSizeGlobally",
"(",
"1",
")",
"|",
"'Serialize Schema'",
">>",
"beam",
".",
"Map",
"(",
"lambda",
"path",
":",
"json",
".",
"dumps",
"(",
"output_schema",
")",
")",
"|",
"'Write Schema'",
">>",
"beam",
".",
"io",
".",
"textio",
".",
"WriteToText",
"(",
"schema_file",
",",
"shard_name_template",
"=",
"''",
")",
")",
"if",
"output_bq_table",
"is",
"not",
"None",
":",
"# BigQuery sink takes schema in the form of 'field1:type1,field2:type2...'",
"bq_schema_string",
"=",
"','",
".",
"join",
"(",
"x",
"[",
"'name'",
"]",
"+",
"':'",
"+",
"x",
"[",
"'type'",
"]",
"for",
"x",
"in",
"output_schema",
")",
"sink",
"=",
"beam",
".",
"io",
".",
"BigQuerySink",
"(",
"output_bq_table",
",",
"schema",
"=",
"bq_schema_string",
",",
"write_disposition",
"=",
"beam",
".",
"io",
".",
"BigQueryDisposition",
".",
"WRITE_TRUNCATE",
")",
"results",
"|",
"'Write BQ Results'",
">>",
"beam",
".",
"io",
".",
"Write",
"(",
"sink",
")"
] | Configures a dataflow pipeline for batch prediction. | [
"Configures",
"a",
"dataflow",
"pipeline",
"for",
"batch",
"prediction",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_predictor.py#L187-L228 |
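A wiring sketch for the batch-prediction pipeline (the dataset object, bucket paths, output table and runner are placeholders; a CsvDataSet or BigQueryDataSet with a one- or two-column schema is assumed):

import apache_beam as beam

p = beam.Pipeline('DirectRunner')
configure_pipeline(p,
                   dataset=eval_dataset,
                   model_dir='gs://my-bucket/model',
                   output_csv='gs://my-bucket/predict/results.csv',
                   output_bq_table='mydataset.predictions')
p.run()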
5,080 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.sampling_query | def sampling_query(sql, context, fields=None, count=5, sampling=None, udfs=None,
data_sources=None):
"""Returns a sampling Query for the SQL object.
Args:
sql: the SQL statement (string) or Query object to sample.
context: a Context object providing project_id and credentials.
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
udfs: array of UDFs referenced in the SQL.
data_sources: dictionary of federated (external) tables referenced in the SQL.
Returns:
A Query object for sampling the table.
"""
return Query(_sampling.Sampling.sampling_query(sql, fields, count, sampling), context=context,
udfs=udfs, data_sources=data_sources) | python | def sampling_query(sql, context, fields=None, count=5, sampling=None, udfs=None,
data_sources=None):
"""Returns a sampling Query for the SQL object.
Args:
sql: the SQL statement (string) or Query object to sample.
context: a Context object providing project_id and credentials.
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
udfs: array of UDFs referenced in the SQL.
data_sources: dictionary of federated (external) tables referenced in the SQL.
Returns:
A Query object for sampling the table.
"""
return Query(_sampling.Sampling.sampling_query(sql, fields, count, sampling), context=context,
udfs=udfs, data_sources=data_sources) | [
"def",
"sampling_query",
"(",
"sql",
",",
"context",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
",",
"udfs",
"=",
"None",
",",
"data_sources",
"=",
"None",
")",
":",
"return",
"Query",
"(",
"_sampling",
".",
"Sampling",
".",
"sampling_query",
"(",
"sql",
",",
"fields",
",",
"count",
",",
"sampling",
")",
",",
"context",
"=",
"context",
",",
"udfs",
"=",
"udfs",
",",
"data_sources",
"=",
"data_sources",
")"
] | Returns a sampling Query for the SQL object.
Args:
sql: the SQL statement (string) or Query object to sample.
context: a Context object providing project_id and credentials.
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
udfs: array of UDFs referenced in the SQL.
data_sources: dictionary of federated (external) tables referenced in the SQL.
Returns:
A Query object for sampling the table. | [
"Returns",
"a",
"sampling",
"Query",
"for",
"the",
"SQL",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L37-L54 |
5,081 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.results | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
if not use_cache or (self._results is None):
self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
return self._results.results | python | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
if not use_cache or (self._results is None):
self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
return self._results.results | [
"def",
"results",
"(",
"self",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"if",
"not",
"use_cache",
"or",
"(",
"self",
".",
"_results",
"is",
"None",
")",
":",
"self",
".",
"execute",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")",
"return",
"self",
".",
"_results",
".",
"results"
] | Retrieves table of results for the query. May block if the query must be executed first.
Args:
use_cache: whether to use cached results or not. Ignored if append is specified.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed. | [
"Retrieves",
"table",
"of",
"results",
"for",
"the",
"query",
".",
"May",
"block",
"if",
"the",
"query",
"must",
"be",
"executed",
"first",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L209-L229 |
5,082 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.extract | def extract(self, storage_uris, format='csv', csv_delimiter=',', csv_header=True,
compress=False, use_cache=True, dialect=None, billing_tier=None):
"""Exports the query results to GCS.
Args:
storage_uris: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for csv exports, the field delimiter to use (default ',').
csv_header: for csv exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Job object for the export Job if it was completed successfully; else None.
Raises:
An Exception if the query or extract failed.
"""
return self.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier).extract(storage_uris, format=format,
csv_delimiter=csv_delimiter,
csv_header=csv_header,
compress=compress) | python | def extract(self, storage_uris, format='csv', csv_delimiter=',', csv_header=True,
compress=False, use_cache=True, dialect=None, billing_tier=None):
"""Exports the query results to GCS.
Args:
storage_uris: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for csv exports, the field delimiter to use (default ',').
csv_header: for csv exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Job object for the export Job if it was completed successfully; else None.
Raises:
An Exception if the query or extract failed.
"""
return self.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier).extract(storage_uris, format=format,
csv_delimiter=csv_delimiter,
csv_header=csv_header,
compress=compress) | [
"def",
"extract",
"(",
"self",
",",
"storage_uris",
",",
"format",
"=",
"'csv'",
",",
"csv_delimiter",
"=",
"','",
",",
"csv_header",
"=",
"True",
",",
"compress",
"=",
"False",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")",
".",
"extract",
"(",
"storage_uris",
",",
"format",
"=",
"format",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"csv_header",
",",
"compress",
"=",
"compress",
")"
] | Exports the query results to GCS.
Args:
storage_uris: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of 'csv', 'json', or 'avro'
(default 'csv').
csv_delimiter: for csv exports, the field delimiter to use (default ',').
csv_header: for csv exports, whether to include an initial header line (default True).
compress: whether to compress the data on export. Compression is not supported for
AVRO format (default False).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Job object for the export Job if it was completed successfully; else None.
Raises:
An Exception if the query or extract failed. | [
"Exports",
"the",
"query",
"results",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L231-L261 |
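A usage sketch for exporting query results straight to Cloud Storage (bucket and table names are placeholders; a default datalab Context with a billable project is assumed):

import datalab.bigquery as bq

q = bq.Query('SELECT * FROM [mydataset.events] WHERE day = "2017-01-01"')
job = q.extract(['gs://my-bucket/exports/events-*.csv'],
                format='csv', csv_delimiter=',', csv_header=True, compress=True)
# job is the completed export Job; the call blocks on the query itself first.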
5,083 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.to_dataframe | def to_dataframe(self, start_row=0, max_rows=None, use_cache=True, dialect=None,
billing_tier=None):
""" Exports the query results to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Pandas dataframe containing the table data.
"""
return self.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier) \
.to_dataframe(start_row=start_row, max_rows=max_rows) | python | def to_dataframe(self, start_row=0, max_rows=None, use_cache=True, dialect=None,
billing_tier=None):
""" Exports the query results to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Pandas dataframe containing the table data.
"""
return self.results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier) \
.to_dataframe(start_row=start_row, max_rows=max_rows) | [
"def",
"to_dataframe",
"(",
"self",
",",
"start_row",
"=",
"0",
",",
"max_rows",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")",
".",
"to_dataframe",
"(",
"start_row",
"=",
"start_row",
",",
"max_rows",
"=",
"max_rows",
")"
] | Exports the query results to a Pandas dataframe.
Args:
start_row: the row of the table at which to start the export (default 0).
max_rows: an upper limit on the number of rows to export (default None).
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A Pandas dataframe containing the table data. | [
"Exports",
"the",
"query",
"results",
"to",
"a",
"Pandas",
"dataframe",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L303-L323 |
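A usage sketch (requires pandas; the table name is a placeholder):

import datalab.bigquery as bq

df = bq.Query('SELECT year, COUNT(*) AS n FROM [mydataset.events] GROUP BY year') \
       .to_dataframe(max_rows=1000)
print(df.head())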
5,084 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.sample | def sample(self, count=5, fields=None, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of rows for the query.
Args:
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified (default 5).
fields: the list of fields to sample (default None implies all).
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing a sampling of the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return Query.sampling_query(self._sql, self._context, count=count, fields=fields,
sampling=sampling, udfs=self._udfs,
data_sources=self._data_sources).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | python | def sample(self, count=5, fields=None, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of rows for the query.
Args:
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified (default 5).
fields: the list of fields to sample (default None implies all).
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing a sampling of the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return Query.sampling_query(self._sql, self._context, count=count, fields=fields,
sampling=sampling, udfs=self._udfs,
data_sources=self._data_sources).results(use_cache=use_cache,
dialect=dialect,
billing_tier=billing_tier) | [
"def",
"sample",
"(",
"self",
",",
"count",
"=",
"5",
",",
"fields",
"=",
"None",
",",
"sampling",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"Query",
".",
"sampling_query",
"(",
"self",
".",
"_sql",
",",
"self",
".",
"_context",
",",
"count",
"=",
"count",
",",
"fields",
"=",
"fields",
",",
"sampling",
"=",
"sampling",
",",
"udfs",
"=",
"self",
".",
"_udfs",
",",
"data_sources",
"=",
"self",
".",
"_data_sources",
")",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Retrieves a sampling of rows for the query.
Args:
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified (default 5).
fields: the list of fields to sample (default None implies all).
sampling: an optional sampling strategy to apply to the table.
use_cache: whether to use cached results or not (default True).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing a sampling of the result set.
Raises:
Exception if the query could not be executed or query response was malformed. | [
"Retrieves",
"a",
"sampling",
"of",
"rows",
"for",
"the",
"query",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L379-L406 |
5,085 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.execute | def execute(self, table_name=None, table_mode='create', use_cache=True, priority='interactive',
allow_large_results=False, dialect=None, billing_tier=None):
""" Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed.
"""
job = self.execute_async(table_name=table_name, table_mode=table_mode, use_cache=use_cache,
priority=priority, allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier)
self._results = job.wait()
return self._results | python | def execute(self, table_name=None, table_mode='create', use_cache=True, priority='interactive',
allow_large_results=False, dialect=None, billing_tier=None):
""" Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed.
"""
job = self.execute_async(table_name=table_name, table_mode=table_mode, use_cache=use_cache,
priority=priority, allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier)
self._results = job.wait()
return self._results | [
"def",
"execute",
"(",
"self",
",",
"table_name",
"=",
"None",
",",
"table_mode",
"=",
"'create'",
",",
"use_cache",
"=",
"True",
",",
"priority",
"=",
"'interactive'",
",",
"allow_large_results",
"=",
"False",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"job",
"=",
"self",
".",
"execute_async",
"(",
"table_name",
"=",
"table_name",
",",
"table_mode",
"=",
"table_mode",
",",
"use_cache",
"=",
"use_cache",
",",
"priority",
"=",
"priority",
",",
"allow_large_results",
"=",
"allow_large_results",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")",
"self",
".",
"_results",
"=",
"job",
".",
"wait",
"(",
")",
"return",
"self",
".",
"_results"
] | Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed. | [
"Initiate",
"the",
"query",
"blocking",
"until",
"complete",
"and",
"then",
"return",
"the",
"results",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L496-L529 |
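To make the Query.execute record above concrete, a minimal usage sketch for the legacy datalab.bigquery API; the destination dataset and table names are hypothetical and a default project context is assumed:

import datalab.bigquery as bq

# Define a query and run it synchronously, materializing the output into a table.
query = bq.Query('SELECT word, word_count FROM [bigquery-public-data:samples.shakespeare]')
results = query.execute(table_name='my_dataset.shakespeare_copy',  # hypothetical destination
                        table_mode='overwrite',
                        priority='batch',
                        allow_large_results=True)
df = results.to_dataframe()  # the returned QueryResultsTable behaves like a Table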
5,086 | googledatalab/pydatalab | datalab/bigquery/_query.py | Query.to_view | def to_view(self, view_name):
""" Create a View from this Query.
Args:
view_name: the name of the View either as a string or a 3-part tuple
(projectid, datasetid, name).
Returns:
A View for the Query.
"""
# Do the import here to avoid circular dependencies at top-level.
from . import _view
return _view.View(view_name, self._context).create(self._sql) | python | def to_view(self, view_name):
""" Create a View from this Query.
Args:
view_name: the name of the View either as a string or a 3-part tuple
(projectid, datasetid, name).
Returns:
A View for the Query.
"""
# Do the import here to avoid circular dependencies at top-level.
from . import _view
return _view.View(view_name, self._context).create(self._sql) | [
"def",
"to_view",
"(",
"self",
",",
"view_name",
")",
":",
"# Do the import here to avoid circular dependencies at top-level.",
"from",
".",
"import",
"_view",
"return",
"_view",
".",
"View",
"(",
"view_name",
",",
"self",
".",
"_context",
")",
".",
"create",
"(",
"self",
".",
"_sql",
")"
] | Create a View from this Query.
Args:
view_name: the name of the View either as a string or a 3-part tuple
(projectid, datasetid, name).
Returns:
A View for the Query. | [
"Create",
"a",
"View",
"from",
"this",
"Query",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_query.py#L531-L543 |
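Similarly, a small sketch for the to_view record; the view name is hypothetical:

import datalab.bigquery as bq

q = bq.Query('SELECT word FROM [bigquery-public-data:samples.shakespeare] WHERE word_count > 100')
frequent_words = q.to_view('my_dataset.frequent_words')  # defines the query SQL as a BigQuery view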
5,087 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser.format_help | def format_help(self):
"""Override help doc to add cell args. """
if not self._cell_args:
return super(CommandParser, self).format_help()
else:
# Print the standard argparse info, the cell arg block, and then the epilog
# If we don't remove epilog before calling the super, then epilog will
# be printed before the 'Cell args' block.
epilog = self.epilog
self.epilog = None
orig_help = super(CommandParser, self).format_help()
cell_args_help = '\nCell args:\n\n'
for cell_arg, v in six.iteritems(self._cell_args):
required = 'Required' if v['required'] else 'Optional'
cell_args_help += '%s: %s. %s.\n\n' % (cell_arg, required, v['help'])
orig_help += cell_args_help
if epilog:
orig_help += epilog + '\n\n'
return orig_help | python | def format_help(self):
"""Override help doc to add cell args. """
if not self._cell_args:
return super(CommandParser, self).format_help()
else:
# Print the standard argparse info, the cell arg block, and then the epilog
# If we don't remove epilog before calling the super, then epilog will
# be printed before the 'Cell args' block.
epilog = self.epilog
self.epilog = None
orig_help = super(CommandParser, self).format_help()
cell_args_help = '\nCell args:\n\n'
for cell_arg, v in six.iteritems(self._cell_args):
required = 'Required' if v['required'] else 'Optional'
cell_args_help += '%s: %s. %s.\n\n' % (cell_arg, required, v['help'])
orig_help += cell_args_help
if epilog:
orig_help += epilog + '\n\n'
return orig_help | [
"def",
"format_help",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cell_args",
":",
"return",
"super",
"(",
"CommandParser",
",",
"self",
")",
".",
"format_help",
"(",
")",
"else",
":",
"# Print the standard argparse info, the cell arg block, and then the epilog",
"# If we don't remove epilog before calling the super, then epilog will",
"# be printed before the 'Cell args' block.",
"epilog",
"=",
"self",
".",
"epilog",
"self",
".",
"epilog",
"=",
"None",
"orig_help",
"=",
"super",
"(",
"CommandParser",
",",
"self",
")",
".",
"format_help",
"(",
")",
"cell_args_help",
"=",
"'\\nCell args:\\n\\n'",
"for",
"cell_arg",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_cell_args",
")",
":",
"required",
"=",
"'Required'",
"if",
"v",
"[",
"'required'",
"]",
"else",
"'Optional'",
"cell_args_help",
"+=",
"'%s: %s. %s.\\n\\n'",
"%",
"(",
"cell_arg",
",",
"required",
",",
"v",
"[",
"'help'",
"]",
")",
"orig_help",
"+=",
"cell_args_help",
"if",
"epilog",
":",
"orig_help",
"+=",
"epilog",
"+",
"'\\n\\n'",
"return",
"orig_help"
] | Override help doc to add cell args. | [
"Override",
"help",
"doc",
"to",
"add",
"cell",
"args",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L56-L77 |
5,088 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser._get_subparsers | def _get_subparsers(self):
"""Recursively get subparsers."""
subparsers = []
for action in self._actions:
if isinstance(action, argparse._SubParsersAction):
for _, subparser in action.choices.items():
subparsers.append(subparser)
ret = subparsers
for sp in subparsers:
ret += sp._get_subparsers()
return ret | python | def _get_subparsers(self):
"""Recursively get subparsers."""
subparsers = []
for action in self._actions:
if isinstance(action, argparse._SubParsersAction):
for _, subparser in action.choices.items():
subparsers.append(subparser)
ret = subparsers
for sp in subparsers:
ret += sp._get_subparsers()
return ret | [
"def",
"_get_subparsers",
"(",
"self",
")",
":",
"subparsers",
"=",
"[",
"]",
"for",
"action",
"in",
"self",
".",
"_actions",
":",
"if",
"isinstance",
"(",
"action",
",",
"argparse",
".",
"_SubParsersAction",
")",
":",
"for",
"_",
",",
"subparser",
"in",
"action",
".",
"choices",
".",
"items",
"(",
")",
":",
"subparsers",
".",
"append",
"(",
"subparser",
")",
"ret",
"=",
"subparsers",
"for",
"sp",
"in",
"subparsers",
":",
"ret",
"+=",
"sp",
".",
"_get_subparsers",
"(",
")",
"return",
"ret"
] | Recursively get subparsers. | [
"Recursively",
"get",
"subparsers",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L101-L113 |
5,089 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser._get_subparser_line_args | def _get_subparser_line_args(self, subparser_prog):
""" Get line args of a specified subparser by its prog."""
subparsers = self._get_subparsers()
for subparser in subparsers:
if subparser_prog == subparser.prog:
# Found the subparser.
args_to_parse = []
for action in subparser._actions:
if action.option_strings:
for argname in action.option_strings:
if argname.startswith('--'):
args_to_parse.append(argname[2:])
return args_to_parse
return None | python | def _get_subparser_line_args(self, subparser_prog):
""" Get line args of a specified subparser by its prog."""
subparsers = self._get_subparsers()
for subparser in subparsers:
if subparser_prog == subparser.prog:
# Found the subparser.
args_to_parse = []
for action in subparser._actions:
if action.option_strings:
for argname in action.option_strings:
if argname.startswith('--'):
args_to_parse.append(argname[2:])
return args_to_parse
return None | [
"def",
"_get_subparser_line_args",
"(",
"self",
",",
"subparser_prog",
")",
":",
"subparsers",
"=",
"self",
".",
"_get_subparsers",
"(",
")",
"for",
"subparser",
"in",
"subparsers",
":",
"if",
"subparser_prog",
"==",
"subparser",
".",
"prog",
":",
"# Found the subparser.",
"args_to_parse",
"=",
"[",
"]",
"for",
"action",
"in",
"subparser",
".",
"_actions",
":",
"if",
"action",
".",
"option_strings",
":",
"for",
"argname",
"in",
"action",
".",
"option_strings",
":",
"if",
"argname",
".",
"startswith",
"(",
"'--'",
")",
":",
"args_to_parse",
".",
"append",
"(",
"argname",
"[",
"2",
":",
"]",
")",
"return",
"args_to_parse",
"return",
"None"
] | Get line args of a specified subparser by its prog. | [
"Get",
"line",
"args",
"of",
"a",
"specified",
"subparser",
"by",
"its",
"prog",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L115-L130 |
5,090 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser._get_subparser_cell_args | def _get_subparser_cell_args(self, subparser_prog):
""" Get cell args of a specified subparser by its prog."""
subparsers = self._get_subparsers()
for subparser in subparsers:
if subparser_prog == subparser.prog:
return subparser._cell_args
return None | python | def _get_subparser_cell_args(self, subparser_prog):
""" Get cell args of a specified subparser by its prog."""
subparsers = self._get_subparsers()
for subparser in subparsers:
if subparser_prog == subparser.prog:
return subparser._cell_args
return None | [
"def",
"_get_subparser_cell_args",
"(",
"self",
",",
"subparser_prog",
")",
":",
"subparsers",
"=",
"self",
".",
"_get_subparsers",
"(",
")",
"for",
"subparser",
"in",
"subparsers",
":",
"if",
"subparser_prog",
"==",
"subparser",
".",
"prog",
":",
"return",
"subparser",
".",
"_cell_args",
"return",
"None"
] | Get cell args of a specified subparser by its prog. | [
"Get",
"cell",
"args",
"of",
"a",
"specified",
"subparser",
"by",
"its",
"prog",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L132-L140 |
5,091 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser.add_cell_argument | def add_cell_argument(self, name, help, required=False):
""" Add a cell only argument.
Args:
name: name of the argument. No need to start with "-" or "--".
help: the help string of the argument.
required: Whether it is required in cell content.
"""
for action in self._actions:
if action.dest == name:
raise ValueError('Arg "%s" was added by add_argument already.' % name)
self._cell_args[name] = {'required': required, 'help': help} | python | def add_cell_argument(self, name, help, required=False):
""" Add a cell only argument.
Args:
name: name of the argument. No need to start with "-" or "--".
help: the help string of the argument.
required: Whether it is required in cell content.
"""
for action in self._actions:
if action.dest == name:
raise ValueError('Arg "%s" was added by add_argument already.' % name)
self._cell_args[name] = {'required': required, 'help': help} | [
"def",
"add_cell_argument",
"(",
"self",
",",
"name",
",",
"help",
",",
"required",
"=",
"False",
")",
":",
"for",
"action",
"in",
"self",
".",
"_actions",
":",
"if",
"action",
".",
"dest",
"==",
"name",
":",
"raise",
"ValueError",
"(",
"'Arg \"%s\" was added by add_argument already.'",
"%",
"name",
")",
"self",
".",
"_cell_args",
"[",
"name",
"]",
"=",
"{",
"'required'",
":",
"required",
",",
"'help'",
":",
"help",
"}"
] | Add a cell only argument.
Args:
name: name of the argument. No need to start with "-" or "--".
help: the help string of the argument.
required: Whether it is required in cell content. | [
"Add",
"a",
"cell",
"only",
"argument",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L142-L155 |
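To illustrate add_cell_argument, a sketch of how a magic's parser might declare a cell-only argument; the constructor arguments and the subcommand helper are assumptions about how the datalab magics wire up their parsers:

from google.datalab.utils.commands import CommandParser

parser = CommandParser(prog='%mymagic', description='example magic')        # hypothetical magic
create_parser = parser.subcommand('create', 'create an object')             # assumed subparser helper
create_parser.add_argument('--name', help='name of the object to define')   # ordinary line argument
create_parser.add_cell_argument('schema', required=True,
                                help='object schema, supplied in the cell body as YAML/JSON')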
5,092 | googledatalab/pydatalab | google/datalab/utils/commands/_commands.py | CommandParser.parse | def parse(self, line, cell, namespace=None):
"""Parses a line and cell into a dictionary of arguments, expanding variables from a namespace.
For each line parameter beginning with --, it also checks the cell content to see if it exists
there. For example, if "--config1" is a line parameter, it checks whether the cell dict contains a
"config1" item and, if so, uses the cell value. The "config1" item will also be removed from
cell content.
Args:
line: line content.
cell: cell content.
namespace: user namespace. If None, IPython's user namespace is used.
Returns:
A tuple of: 1. parsed config dict. 2. remaining cell after line parameters are extracted.
"""
if namespace is None:
ipy = IPython.get_ipython()
namespace = ipy.user_ns
# Find which subcommand in the line by comparing line with subcommand progs.
# For example, assuming there are 3 subcommands with their progs
# %bq tables
# %bq tables list
# %bq datasets
# and the line is "tables list --dataset proj.myds"
# it will find the second one --- "tables list" because it matches the prog and
# it is the longest.
args = CommandParser.create_args(line, namespace)
# "prog" is an ArgumentParser's path split by namespace, such as '%bq tables list'.
sub_parsers_progs = [x.prog for x in self._get_subparsers()]
matched_progs = []
for prog in sub_parsers_progs:
# Remove the leading magic such as "%bq".
match = prog.split()[1:]
for i in range(len(args)):
if args[i:i + len(match)] == match:
matched_progs.append(prog)
break
matched_prog = None
if matched_progs:
# Get the longest match.
matched_prog = max(matched_progs, key=lambda x: len(x.split()))
# Line args can be provided in cell too. If they are in cell, move them to line
# so we can parse them all together.
line_args = self._get_subparser_line_args(matched_prog)
if line_args:
cell_config = None
try:
cell_config, cell = google.datalab.utils.commands.parse_config_for_selected_keys(
cell, line_args)
except:
# It is okay --- probably because cell is not in yaml or json format.
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg_name in cell_config:
arg_value = cell_config[arg_name]
if arg_value is None:
continue
if '--' + arg_name in args:
raise ValueError('config item "%s" is specified in both cell and line.' % arg_name)
if isinstance(arg_value, bool):
if arg_value:
line += ' --%s' % arg_name
else:
line += ' --%s %s' % (arg_name, str(cell_config[arg_name]))
# Parse args again with the new line.
args = CommandParser.create_args(line, namespace)
args = vars(self.parse_args(args))
# Parse cell args.
cell_config = None
cell_args = self._get_subparser_cell_args(matched_prog)
if cell_args:
try:
cell_config, _ = google.datalab.utils.commands.parse_config_for_selected_keys(
cell, cell_args)
except:
# It is okay --- probably because cell is not in yaml or json format.
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg in cell_args:
if (cell_args[arg]['required'] and
(cell_config is None or cell_config.get(arg, None) is None)):
raise ValueError('Cell config "%s" is required.' % arg)
if cell_config:
args.update(cell_config)
return args, cell | python | def parse(self, line, cell, namespace=None):
"""Parses a line and cell into a dictionary of arguments, expanding variables from a namespace.
For each line parameter beginning with --, it also checks the cell content to see if it exists
there. For example, if "--config1" is a line parameter, it checks whether the cell dict contains a
"config1" item and, if so, uses the cell value. The "config1" item will also be removed from
cell content.
Args:
line: line content.
cell: cell content.
namespace: user namespace. If None, IPython's user namespace is used.
Returns:
A tuple of: 1. parsed config dict. 2. remaining cell after line parameters are extracted.
"""
if namespace is None:
ipy = IPython.get_ipython()
namespace = ipy.user_ns
# Find which subcommand in the line by comparing line with subcommand progs.
# For example, assuming there are 3 subcommands with their progs
# %bq tables
# %bq tables list
# %bq datasets
# and the line is "tables list --dataset proj.myds"
# it will find the second one --- "tables list" because it matches the prog and
# it is the longest.
args = CommandParser.create_args(line, namespace)
# "prog" is an ArgumentParser's path split by namespace, such as '%bq tables list'.
sub_parsers_progs = [x.prog for x in self._get_subparsers()]
matched_progs = []
for prog in sub_parsers_progs:
# Remove the leading magic such as "%bq".
match = prog.split()[1:]
for i in range(len(args)):
if args[i:i + len(match)] == match:
matched_progs.append(prog)
break
matched_prog = None
if matched_progs:
# Get the longest match.
matched_prog = max(matched_progs, key=lambda x: len(x.split()))
# Line args can be provided in cell too. If they are in cell, move them to line
# so we can parse them all together.
line_args = self._get_subparser_line_args(matched_prog)
if line_args:
cell_config = None
try:
cell_config, cell = google.datalab.utils.commands.parse_config_for_selected_keys(
cell, line_args)
except:
# It is okay --- probably because cell is not in yaml or json format.
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg_name in cell_config:
arg_value = cell_config[arg_name]
if arg_value is None:
continue
if '--' + arg_name in args:
raise ValueError('config item "%s" is specified in both cell and line.' % arg_name)
if isinstance(arg_value, bool):
if arg_value:
line += ' --%s' % arg_name
else:
line += ' --%s %s' % (arg_name, str(cell_config[arg_name]))
# Parse args again with the new line.
args = CommandParser.create_args(line, namespace)
args = vars(self.parse_args(args))
# Parse cell args.
cell_config = None
cell_args = self._get_subparser_cell_args(matched_prog)
if cell_args:
try:
cell_config, _ = google.datalab.utils.commands.parse_config_for_selected_keys(
cell, cell_args)
except:
# It is okay --- probably because cell is not in yaml or json format.
pass
if cell_config:
google.datalab.utils.commands.replace_vars(cell_config, namespace)
for arg in cell_args:
if (cell_args[arg]['required'] and
(cell_config is None or cell_config.get(arg, None) is None)):
raise ValueError('Cell config "%s" is required.' % arg)
if cell_config:
args.update(cell_config)
return args, cell | [
"def",
"parse",
"(",
"self",
",",
"line",
",",
"cell",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"ipy",
"=",
"IPython",
".",
"get_ipython",
"(",
")",
"namespace",
"=",
"ipy",
".",
"user_ns",
"# Find which subcommand in the line by comparing line with subcommand progs.",
"# For example, assuming there are 3 subcommands with their progs",
"# %bq tables",
"# %bq tables list",
"# %bq datasets",
"# and the line is \"tables list --dataset proj.myds\"",
"# it will find the second one --- \"tables list\" because it matches the prog and",
"# it is the longest.",
"args",
"=",
"CommandParser",
".",
"create_args",
"(",
"line",
",",
"namespace",
")",
"# \"prog\" is a ArgumentParser's path splitted by namspace, such as '%bq tables list'.",
"sub_parsers_progs",
"=",
"[",
"x",
".",
"prog",
"for",
"x",
"in",
"self",
".",
"_get_subparsers",
"(",
")",
"]",
"matched_progs",
"=",
"[",
"]",
"for",
"prog",
"in",
"sub_parsers_progs",
":",
"# Remove the leading magic such as \"%bq\".",
"match",
"=",
"prog",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
":",
"if",
"args",
"[",
"i",
":",
"i",
"+",
"len",
"(",
"match",
")",
"]",
"==",
"match",
":",
"matched_progs",
".",
"append",
"(",
"prog",
")",
"break",
"matched_prog",
"=",
"None",
"if",
"matched_progs",
":",
"# Get the longest match.",
"matched_prog",
"=",
"max",
"(",
"matched_progs",
",",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
".",
"split",
"(",
")",
")",
")",
"# Line args can be provided in cell too. If they are in cell, move them to line",
"# so we can parse them all together.",
"line_args",
"=",
"self",
".",
"_get_subparser_line_args",
"(",
"matched_prog",
")",
"if",
"line_args",
":",
"cell_config",
"=",
"None",
"try",
":",
"cell_config",
",",
"cell",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config_for_selected_keys",
"(",
"cell",
",",
"line_args",
")",
"except",
":",
"# It is okay --- probably because cell is not in yaml or json format.",
"pass",
"if",
"cell_config",
":",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"replace_vars",
"(",
"cell_config",
",",
"namespace",
")",
"for",
"arg_name",
"in",
"cell_config",
":",
"arg_value",
"=",
"cell_config",
"[",
"arg_name",
"]",
"if",
"arg_value",
"is",
"None",
":",
"continue",
"if",
"'--'",
"+",
"arg_name",
"in",
"args",
":",
"raise",
"ValueError",
"(",
"'config item \"%s\" is specified in both cell and line.'",
"%",
"arg_name",
")",
"if",
"isinstance",
"(",
"arg_value",
",",
"bool",
")",
":",
"if",
"arg_value",
":",
"line",
"+=",
"' --%s'",
"%",
"arg_name",
"else",
":",
"line",
"+=",
"' --%s %s'",
"%",
"(",
"arg_name",
",",
"str",
"(",
"cell_config",
"[",
"arg_name",
"]",
")",
")",
"# Parse args again with the new line.",
"args",
"=",
"CommandParser",
".",
"create_args",
"(",
"line",
",",
"namespace",
")",
"args",
"=",
"vars",
"(",
"self",
".",
"parse_args",
"(",
"args",
")",
")",
"# Parse cell args.",
"cell_config",
"=",
"None",
"cell_args",
"=",
"self",
".",
"_get_subparser_cell_args",
"(",
"matched_prog",
")",
"if",
"cell_args",
":",
"try",
":",
"cell_config",
",",
"_",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config_for_selected_keys",
"(",
"cell",
",",
"cell_args",
")",
"except",
":",
"# It is okay --- probably because cell is not in yaml or json format.",
"pass",
"if",
"cell_config",
":",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"replace_vars",
"(",
"cell_config",
",",
"namespace",
")",
"for",
"arg",
"in",
"cell_args",
":",
"if",
"(",
"cell_args",
"[",
"arg",
"]",
"[",
"'required'",
"]",
"and",
"(",
"cell_config",
"is",
"None",
"or",
"cell_config",
".",
"get",
"(",
"arg",
",",
"None",
")",
"is",
"None",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Cell config \"%s\" is required.'",
"%",
"arg",
")",
"if",
"cell_config",
":",
"args",
".",
"update",
"(",
"cell_config",
")",
"return",
"args",
",",
"cell"
] | Parses a line and cell into a dictionary of arguments, expanding variables from a namespace.
For each line parameters beginning with --, it also checks the cell content and see if it exists
there. For example, if "--config1" is a line parameter, it checks to see if cell dict contains
"config1" item, and if so, use the cell value. The "config1" item will also be removed from
cell content.
Args:
line: line content.
cell: cell content.
namespace: user namespace. If None, IPython's user namespace is used.
Returns:
A tuple of: 1. parsed config dict. 2. remaining cell after line parameters are extracted. | [
"Parses",
"a",
"line",
"and",
"cell",
"into",
"a",
"dictionary",
"of",
"arguments",
"expanding",
"variables",
"from",
"a",
"namespace",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_commands.py#L157-L257 |
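A sketch of running line and cell text through parse, reusing the same hypothetical parser setup as above; the option names and values are illustrative:

from google.datalab.utils.commands import CommandParser

parser = CommandParser(prog='%mymagic', description='example magic')        # hypothetical, as above
create_parser = parser.subcommand('create', 'create an object')             # assumed subparser helper
create_parser.add_argument('--name', help='name of the object to define')
create_parser.add_cell_argument('schema', help='object schema', required=True)

line = 'create --name my_table'
cell = '\n'.join(['schema:', '- name: id', '  type: INTEGER'])
args, remaining_cell = parser.parse(line, cell, namespace={})
# args should hold the parsed line options plus the cell-only 'schema' value;
# remaining_cell is what is left of the cell body after line-style options are extracted.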
5,093 | googledatalab/pydatalab | google/datalab/ml/_summary.py | Summary._glob_events_files | def _glob_events_files(self, paths, recursive):
"""Find all tf events files under a list of paths recursively. """
event_files = []
for path in paths:
dirs = tf.gfile.Glob(path)
dirs = filter(lambda x: tf.gfile.IsDirectory(x), dirs)
for dir in dirs:
if recursive:
dir_files_pair = [(root, filenames) for root, _, filenames in tf.gfile.Walk(dir)]
else:
dir_files_pair = [(dir, tf.gfile.ListDirectory(dir))]
for root, filenames in dir_files_pair:
file_names = fnmatch.filter(filenames, '*.tfevents.*')
file_paths = [os.path.join(root, x) for x in file_names]
file_paths = filter(lambda x: not tf.gfile.IsDirectory(x), file_paths)
event_files += file_paths
return event_files | python | def _glob_events_files(self, paths, recursive):
"""Find all tf events files under a list of paths recursively. """
event_files = []
for path in paths:
dirs = tf.gfile.Glob(path)
dirs = filter(lambda x: tf.gfile.IsDirectory(x), dirs)
for dir in dirs:
if recursive:
dir_files_pair = [(root, filenames) for root, _, filenames in tf.gfile.Walk(dir)]
else:
dir_files_pair = [(dir, tf.gfile.ListDirectory(dir))]
for root, filenames in dir_files_pair:
file_names = fnmatch.filter(filenames, '*.tfevents.*')
file_paths = [os.path.join(root, x) for x in file_names]
file_paths = filter(lambda x: not tf.gfile.IsDirectory(x), file_paths)
event_files += file_paths
return event_files | [
"def",
"_glob_events_files",
"(",
"self",
",",
"paths",
",",
"recursive",
")",
":",
"event_files",
"=",
"[",
"]",
"for",
"path",
"in",
"paths",
":",
"dirs",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"path",
")",
"dirs",
"=",
"filter",
"(",
"lambda",
"x",
":",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"x",
")",
",",
"dirs",
")",
"for",
"dir",
"in",
"dirs",
":",
"if",
"recursive",
":",
"dir_files_pair",
"=",
"[",
"(",
"root",
",",
"filenames",
")",
"for",
"root",
",",
"_",
",",
"filenames",
"in",
"tf",
".",
"gfile",
".",
"Walk",
"(",
"dir",
")",
"]",
"else",
":",
"dir_files_pair",
"=",
"[",
"(",
"dir",
",",
"tf",
".",
"gfile",
".",
"ListDirectory",
"(",
"dir",
")",
")",
"]",
"for",
"root",
",",
"filenames",
"in",
"dir_files_pair",
":",
"file_names",
"=",
"fnmatch",
".",
"filter",
"(",
"filenames",
",",
"'*.tfevents.*'",
")",
"file_paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"x",
")",
"for",
"x",
"in",
"file_names",
"]",
"file_paths",
"=",
"filter",
"(",
"lambda",
"x",
":",
"not",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"x",
")",
",",
"file_paths",
")",
"event_files",
"+=",
"file_paths",
"return",
"event_files"
] | Find all tf events files under a list of paths recursively. | [
"Find",
"all",
"tf",
"events",
"files",
"under",
"a",
"list",
"of",
"paths",
"recursively",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_summary.py#L44-L62 |
5,094 | googledatalab/pydatalab | google/datalab/ml/_summary.py | Summary.list_events | def list_events(self):
"""List all scalar events in the directory.
Returns:
A dictionary. Key is the name of an event. Value is a set of dirs that contain that event.
"""
event_dir_dict = collections.defaultdict(set)
for event_file in self._glob_events_files(self._paths, recursive=True):
dir = os.path.dirname(event_file)
try:
for record in tf_record.tf_record_iterator(event_file):
event = event_pb2.Event.FromString(record)
if event.summary is None or event.summary.value is None:
continue
for value in event.summary.value:
if value.simple_value is None or value.tag is None:
continue
event_dir_dict[value.tag].add(dir)
except tf.errors.DataLossError:
# DataLossError seems to happen sometimes for small logs.
# We want to show good records regardless.
continue
return dict(event_dir_dict) | python | def list_events(self):
"""List all scalar events in the directory.
Returns:
A dictionary. Key is the name of an event. Value is a set of dirs that contain that event.
"""
event_dir_dict = collections.defaultdict(set)
for event_file in self._glob_events_files(self._paths, recursive=True):
dir = os.path.dirname(event_file)
try:
for record in tf_record.tf_record_iterator(event_file):
event = event_pb2.Event.FromString(record)
if event.summary is None or event.summary.value is None:
continue
for value in event.summary.value:
if value.simple_value is None or value.tag is None:
continue
event_dir_dict[value.tag].add(dir)
except tf.errors.DataLossError:
# DataLossError seems to happen sometimes for small logs.
# We want to show good records regardless.
continue
return dict(event_dir_dict) | [
"def",
"list_events",
"(",
"self",
")",
":",
"event_dir_dict",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"for",
"event_file",
"in",
"self",
".",
"_glob_events_files",
"(",
"self",
".",
"_paths",
",",
"recursive",
"=",
"True",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"event_file",
")",
"try",
":",
"for",
"record",
"in",
"tf_record",
".",
"tf_record_iterator",
"(",
"event_file",
")",
":",
"event",
"=",
"event_pb2",
".",
"Event",
".",
"FromString",
"(",
"record",
")",
"if",
"event",
".",
"summary",
"is",
"None",
"or",
"event",
".",
"summary",
".",
"value",
"is",
"None",
":",
"continue",
"for",
"value",
"in",
"event",
".",
"summary",
".",
"value",
":",
"if",
"value",
".",
"simple_value",
"is",
"None",
"or",
"value",
".",
"tag",
"is",
"None",
":",
"continue",
"event_dir_dict",
"[",
"value",
".",
"tag",
"]",
".",
"add",
"(",
"dir",
")",
"except",
"tf",
".",
"errors",
".",
"DataLossError",
":",
"# DataLossError seems to happen sometimes for small logs.",
"# We want to show good records regardless.",
"continue",
"return",
"dict",
"(",
"event_dir_dict",
")"
] | List all scalar events in the directory.
Returns:
A dictionary. Key is the name of an event. Value is a set of dirs that contain that event. | [
"List",
"all",
"scalar",
"events",
"in",
"the",
"directory",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_summary.py#L64-L87 |
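A usage sketch for the Summary helper covered by the two records above; the log directory is hypothetical, and the constructor is assumed to accept one or more event-log paths (local or GCS):

from google.datalab.ml import Summary

summary = Summary('./training_logs')   # e.g. a directory written by a TensorFlow summary writer
events = summary.list_events()         # {tag: set of directories containing that tag}
for tag, dirs in events.items():
    print(tag, sorted(dirs))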
5,095 | googledatalab/pydatalab | datalab/bigquery/_federated_table.py | FederatedTable.from_storage | def from_storage(source, source_format='csv', csv_options=None, ignore_unknown_values=False,
max_bad_records=0, compressed=False, schema=None):
""" Create an external table for a GCS object.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: For CSV files, the options such as quote character and delimiter.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
compressed: whether the data is GZ compressed or not (default False). Note that compressed
data can be used as a federated table but cannot be loaded into a BQ Table.
schema: the schema of the data. This is required for this table to be used as a federated
table or to be loaded using a Table object that itself has no schema (default None).
"""
result = FederatedTable()
# Do some sanity checking and convert some params from friendly form to form used by BQ.
if source_format == 'csv':
result._bq_source_format = 'CSV'
if csv_options is None:
csv_options = _csv_options.CSVOptions() # use defaults
elif source_format == 'json':
if csv_options:
raise Exception('CSV options are not support for JSON tables')
result._bq_source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
result._source = source if isinstance(source, list) else [source]
result._source_format = source_format
result._csv_options = csv_options
result._ignore_unknown_values = ignore_unknown_values
result._max_bad_records = max_bad_records
result._compressed = compressed
result._schema = schema
return result | python | def from_storage(source, source_format='csv', csv_options=None, ignore_unknown_values=False,
max_bad_records=0, compressed=False, schema=None):
""" Create an external table for a GCS object.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: For CSV files, the options such as quote character and delimiter.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
compressed: whether the data is GZ compressed or not (default False). Note that compressed
data can be used as a federated table but cannot be loaded into a BQ Table.
schema: the schema of the data. This is required for this table to be used as a federated
table or to be loaded using a Table object that itself has no schema (default None).
"""
result = FederatedTable()
# Do some sanity checking and convert some params from friendly form to form used by BQ.
if source_format == 'csv':
result._bq_source_format = 'CSV'
if csv_options is None:
csv_options = _csv_options.CSVOptions() # use defaults
elif source_format == 'json':
if csv_options:
raise Exception('CSV options are not support for JSON tables')
result._bq_source_format = 'NEWLINE_DELIMITED_JSON'
else:
raise Exception("Invalid source format %s" % source_format)
result._source = source if isinstance(source, list) else [source]
result._source_format = source_format
result._csv_options = csv_options
result._ignore_unknown_values = ignore_unknown_values
result._max_bad_records = max_bad_records
result._compressed = compressed
result._schema = schema
return result | [
"def",
"from_storage",
"(",
"source",
",",
"source_format",
"=",
"'csv'",
",",
"csv_options",
"=",
"None",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
",",
"compressed",
"=",
"False",
",",
"schema",
"=",
"None",
")",
":",
"result",
"=",
"FederatedTable",
"(",
")",
"# Do some sanity checking and concert some params from friendly form to form used by BQ.",
"if",
"source_format",
"==",
"'csv'",
":",
"result",
".",
"_bq_source_format",
"=",
"'CSV'",
"if",
"csv_options",
"is",
"None",
":",
"csv_options",
"=",
"_csv_options",
".",
"CSVOptions",
"(",
")",
"# use defaults",
"elif",
"source_format",
"==",
"'json'",
":",
"if",
"csv_options",
":",
"raise",
"Exception",
"(",
"'CSV options are not support for JSON tables'",
")",
"result",
".",
"_bq_source_format",
"=",
"'NEWLINE_DELIMITED_JSON'",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid source format %s\"",
"%",
"source_format",
")",
"result",
".",
"_source",
"=",
"source",
"if",
"isinstance",
"(",
"source",
",",
"list",
")",
"else",
"[",
"source",
"]",
"result",
".",
"_source_format",
"=",
"source_format",
"result",
".",
"_csv_options",
"=",
"csv_options",
"result",
".",
"_ignore_unknown_values",
"=",
"ignore_unknown_values",
"result",
".",
"_max_bad_records",
"=",
"max_bad_records",
"result",
".",
"_compressed",
"=",
"compressed",
"result",
".",
"_schema",
"=",
"schema",
"return",
"result"
] | Create an external table for a GCS object.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: For CSV files, the options such as quote character and delimiter.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
compressed: whether the data is GZ compressed or not (default False). Note that compressed
data can be used as a federated table but cannot be loaded into a BQ Table.
schema: the schema of the data. This is required for this table to be used as a federated
table or to be loaded using a Table object that itself has no schema (default None). | [
"Create",
"an",
"external",
"table",
"for",
"a",
"GCS",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_federated_table.py#L24-L64 |
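A sketch of defining a federated (external) table over GCS CSV data with from_storage; the bucket path is hypothetical, my_schema stands in for a Schema object built elsewhere, and the CSVOptions/FederatedTable exports and keyword names are assumptions:

import datalab.bigquery as bq

csv_options = bq.CSVOptions(delimiter=',', skip_leading_rows=1)       # assumed option names
logs = bq.FederatedTable.from_storage('gs://my-bucket/logs/*.csv',    # hypothetical bucket
                                      source_format='csv',
                                      csv_options=csv_options,
                                      max_bad_records=10,
                                      schema=my_schema)               # a bq.Schema built elsewhere
# The object can then be handed to a query through its data sources, e.g.:
# bq.Query('SELECT COUNT(*) FROM logs', data_sources={'logs': logs}).results()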
5,096 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | get_query_parameters | def get_query_parameters(args, cell_body, date_time=datetime.datetime.now()):
"""Extract query parameters from cell body if provided
Also validates the cell body schema using jsonschema to catch errors before sending the http
request. This validation isn't complete, however; it does not validate recursive schemas,
but it acts as a good filter against most simple schemas
Args:
args: arguments passed to the magic cell
cell_body: body of the magic cell
date_time: The timestamp at which the date-time related parameters need to be resolved.
Returns:
Validated object containing query parameters
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)
sql = args['query']
if sql is None:
raise Exception('Cannot extract query parameters in non-query cell')
# Validate query_params
if config:
jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)
config = config or {}
config_parameters = config.get('parameters', [])
return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time) | python | def get_query_parameters(args, cell_body, date_time=datetime.datetime.now()):
"""Extract query parameters from cell body if provided
Also validates the cell body schema using jsonschema to catch errors before sending the http
request. This validation isn't complete, however; it does not validate recursive schemas,
but it acts as a good filter against most simple schemas
Args:
args: arguments passed to the magic cell
cell_body: body of the magic cell
date_time: The timestamp at which the date-time related parameters need to be resolved.
Returns:
Validated object containing query parameters
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env=env, as_dict=False)
sql = args['query']
if sql is None:
raise Exception('Cannot extract query parameters in non-query cell')
# Validate query_params
if config:
jsonschema.validate(config, BigQuerySchema.QUERY_PARAMS_SCHEMA)
config = config or {}
config_parameters = config.get('parameters', [])
return bigquery.Query.get_query_parameters(config_parameters, date_time=date_time) | [
"def",
"get_query_parameters",
"(",
"args",
",",
"cell_body",
",",
"date_time",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
":",
"env",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"config",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"env",
"=",
"env",
",",
"as_dict",
"=",
"False",
")",
"sql",
"=",
"args",
"[",
"'query'",
"]",
"if",
"sql",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Cannot extract query parameters in non-query cell'",
")",
"# Validate query_params",
"if",
"config",
":",
"jsonschema",
".",
"validate",
"(",
"config",
",",
"BigQuerySchema",
".",
"QUERY_PARAMS_SCHEMA",
")",
"config",
"=",
"config",
"or",
"{",
"}",
"config_parameters",
"=",
"config",
".",
"get",
"(",
"'parameters'",
",",
"[",
"]",
")",
"return",
"bigquery",
".",
"Query",
".",
"get_query_parameters",
"(",
"config_parameters",
",",
"date_time",
"=",
"date_time",
")"
] | Extract query parameters from cell body if provided
Also validates the cell body schema using jsonschema to catch errors before sending the http
request. This validation isn't complete, however; it does not validate recursive schemas,
but it acts as a good filter against most simple schemas
Args:
args: arguments passed to the magic cell
cell_body: body of the magic cell
date_time: The timestamp at which the date-time related parameters need to be resolved.
Returns:
Validated object containing query parameters | [
"Extract",
"query",
"parameters",
"from",
"cell",
"body",
"if",
"provided",
"Also",
"validates",
"the",
"cell",
"body",
"schema",
"using",
"jsonschema",
"to",
"catch",
"errors",
"before",
"sending",
"the",
"http",
"request",
".",
"This",
"validation",
"isn",
"t",
"complete",
"however",
";",
"it",
"does",
"not",
"validate",
"recursive",
"schemas",
"but",
"it",
"acts",
"as",
"a",
"good",
"filter",
"against",
"most",
"simple",
"schemas"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L355-L382 |
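The cell body consumed by get_query_parameters is YAML or JSON with a parameters list of name/type/value entries; a hypothetical pair of notebook cells (project, dataset, and values are illustrative):

%%bq query --name daily_logs
SELECT * FROM `my-project.my_dataset.logs` WHERE country = @country

# and then, in a second cell:

%%bq execute --query daily_logs
parameters:
- name: country
  type: STRING
  value: US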
5,097 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _udf_cell | def _udf_cell(args, cell_body):
"""Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
"""
udf_name = args['name']
if not udf_name:
raise Exception('Declaration must be of the form %%bq udf --name <variable name>')
# Parse out parameters, return type, and imports
param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$'
returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$'
import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$'
params = re.findall(param_pattern, cell_body, re.MULTILINE)
return_type = re.findall(returns_pattern, cell_body, re.MULTILINE)
imports = re.findall(import_pattern, cell_body, re.MULTILINE)
if len(return_type) < 1:
raise Exception('UDF return type must be defined using // @returns <type>')
if len(return_type) > 1:
raise Exception('Found more than one return type definition')
return_type = return_type[0]
# Finally build the UDF object
udf = bigquery.UDF(udf_name, cell_body, return_type, params, args['language'], imports)
google.datalab.utils.commands.notebook_environment()[udf_name] = udf | python | def _udf_cell(args, cell_body):
"""Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript.
"""
udf_name = args['name']
if not udf_name:
raise Exception('Declaration must be of the form %%bq udf --name <variable name>')
# Parse out parameters, return type, and imports
param_pattern = r'^\s*\/\/\s*@param\s+([<>\w]+)\s+([<>\w,\s]+)\s*$'
returns_pattern = r'^\s*\/\/\s*@returns\s+([<>\w,\s]+)\s*$'
import_pattern = r'^\s*\/\/\s*@import\s+(\S+)\s*$'
params = re.findall(param_pattern, cell_body, re.MULTILINE)
return_type = re.findall(returns_pattern, cell_body, re.MULTILINE)
imports = re.findall(import_pattern, cell_body, re.MULTILINE)
if len(return_type) < 1:
raise Exception('UDF return type must be defined using // @returns <type>')
if len(return_type) > 1:
raise Exception('Found more than one return type definition')
return_type = return_type[0]
# Finally build the UDF object
udf = bigquery.UDF(udf_name, cell_body, return_type, params, args['language'], imports)
google.datalab.utils.commands.notebook_environment()[udf_name] = udf | [
"def",
"_udf_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"udf_name",
"=",
"args",
"[",
"'name'",
"]",
"if",
"not",
"udf_name",
":",
"raise",
"Exception",
"(",
"'Declaration must be of the form %%bq udf --name <variable name>'",
")",
"# Parse out parameters, return type, and imports",
"param_pattern",
"=",
"r'^\\s*\\/\\/\\s*@param\\s+([<>\\w]+)\\s+([<>\\w,\\s]+)\\s*$'",
"returns_pattern",
"=",
"r'^\\s*\\/\\/\\s*@returns\\s+([<>\\w,\\s]+)\\s*$'",
"import_pattern",
"=",
"r'^\\s*\\/\\/\\s*@import\\s+(\\S+)\\s*$'",
"params",
"=",
"re",
".",
"findall",
"(",
"param_pattern",
",",
"cell_body",
",",
"re",
".",
"MULTILINE",
")",
"return_type",
"=",
"re",
".",
"findall",
"(",
"returns_pattern",
",",
"cell_body",
",",
"re",
".",
"MULTILINE",
")",
"imports",
"=",
"re",
".",
"findall",
"(",
"import_pattern",
",",
"cell_body",
",",
"re",
".",
"MULTILINE",
")",
"if",
"len",
"(",
"return_type",
")",
"<",
"1",
":",
"raise",
"Exception",
"(",
"'UDF return type must be defined using // @returns <type>'",
")",
"if",
"len",
"(",
"return_type",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Found more than one return type definition'",
")",
"return_type",
"=",
"return_type",
"[",
"0",
"]",
"# Finally build the UDF object",
"udf",
"=",
"bigquery",
".",
"UDF",
"(",
"udf_name",
",",
"cell_body",
",",
"return_type",
",",
"params",
",",
"args",
"[",
"'language'",
"]",
",",
"imports",
")",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"udf_name",
"]",
"=",
"udf"
] | Implements the Bigquery udf cell magic for ipython notebooks.
The supported syntax is:
%%bq udf --name <var> --language <lang>
// @param <name> <type>
// @returns <type>
// @import <gcs_path>
<js function>
Args:
args: the optional arguments following '%%bq udf'.
cell_body: the UDF declaration (inputs and outputs) and implementation in javascript. | [
"Implements",
"the",
"Bigquery",
"udf",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L480-L516 |
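A hypothetical %%bq udf cell matching the declaration syntax parsed above; the exact JavaScript body expected after the @param/@returns lines is an assumption:

%%bq udf --name shout --language js
// @param word STRING
// @returns STRING
return word.toUpperCase();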
5,098 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _datasource_cell | def _datasource_cell(args, cell_body):
"""Implements the BigQuery datasource cell magic for ipython notebooks.
The supported syntax is
%%bq datasource --name <var> --paths <url> [--format <CSV|JSON>]
<schema>
Args:
args: the optional arguments following '%%bq datasource'
cell_body: the datasource's schema in json/yaml
"""
name = args['name']
paths = args['paths']
data_format = (args['format'] or 'CSV').lower()
compressed = args['compressed'] or False
# Get the source schema from the cell body
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
# Finally build the datasource object
datasource = bigquery.ExternalDataSource(source=paths, source_format=data_format,
compressed=compressed, schema=schema)
google.datalab.utils.commands.notebook_environment()[name] = datasource | python | def _datasource_cell(args, cell_body):
"""Implements the BigQuery datasource cell magic for ipython notebooks.
The supported syntax is
%%bq datasource --name <var> --paths <url> [--format <CSV|JSON>]
<schema>
Args:
args: the optional arguments following '%%bq datasource'
cell_body: the datasource's schema in json/yaml
"""
name = args['name']
paths = args['paths']
data_format = (args['format'] or 'CSV').lower()
compressed = args['compressed'] or False
# Get the source schema from the cell body
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
# Finally build the datasource object
datasource = bigquery.ExternalDataSource(source=paths, source_format=data_format,
compressed=compressed, schema=schema)
google.datalab.utils.commands.notebook_environment()[name] = datasource | [
"def",
"_datasource_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"paths",
"=",
"args",
"[",
"'paths'",
"]",
"data_format",
"=",
"(",
"args",
"[",
"'format'",
"]",
"or",
"'CSV'",
")",
".",
"lower",
"(",
")",
"compressed",
"=",
"args",
"[",
"'compressed'",
"]",
"or",
"False",
"# Get the source schema from the cell body",
"record",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"jsonschema",
".",
"validate",
"(",
"record",
",",
"BigQuerySchema",
".",
"TABLE_SCHEMA_SCHEMA",
")",
"schema",
"=",
"bigquery",
".",
"Schema",
"(",
"record",
"[",
"'schema'",
"]",
")",
"# Finally build the datasource object",
"datasource",
"=",
"bigquery",
".",
"ExternalDataSource",
"(",
"source",
"=",
"paths",
",",
"source_format",
"=",
"data_format",
",",
"compressed",
"=",
"compressed",
",",
"schema",
"=",
"schema",
")",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"name",
"]",
"=",
"datasource"
] | Implements the BigQuery datasource cell magic for ipython notebooks.
The supported syntax is
%%bq datasource --name <var> --paths <url> [--format <CSV|JSON>]
<schema>
Args:
args: the optional arguments following '%%bq datasource'
cell_body: the datasource's schema in json/yaml | [
"Implements",
"the",
"BigQuery",
"datasource",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L519-L545 |
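A hypothetical %%bq datasource cell for the record above; the GCS path is illustrative, and the body supplies the schema under a schema key, as the code expects:

%%bq datasource --name app_logs --paths gs://my-bucket/logs/*.csv --format csv
schema:
- name: timestamp
  type: TIMESTAMP
- name: severity
  type: STRING
- name: message
  type: STRING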
5,099 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _query_cell | def _query_cell(args, cell_body):
"""Implements the BigQuery cell magic used to build SQL objects.
The supported syntax is:
%%bq query <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq query'.
cell_body: the contents of the cell
"""
name = args['name']
udfs = args['udfs']
datasources = args['datasources']
subqueries = args['subqueries']
# Finally build the query object
query = bigquery.Query(cell_body, env=IPython.get_ipython().user_ns, udfs=udfs,
data_sources=datasources, subqueries=subqueries)
# if no name is specified, execute this query instead of defining it
if name is None:
return query.execute().result()
else:
google.datalab.utils.commands.notebook_environment()[name] = query | python | def _query_cell(args, cell_body):
"""Implements the BigQuery cell magic used to build SQL objects.
The supported syntax is:
%%bq query <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq query'.
cell_body: the contents of the cell
"""
name = args['name']
udfs = args['udfs']
datasources = args['datasources']
subqueries = args['subqueries']
# Finally build the query object
query = bigquery.Query(cell_body, env=IPython.get_ipython().user_ns, udfs=udfs,
data_sources=datasources, subqueries=subqueries)
# if no name is specified, execute this query instead of defining it
if name is None:
return query.execute().result()
else:
google.datalab.utils.commands.notebook_environment()[name] = query | [
"def",
"_query_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"udfs",
"=",
"args",
"[",
"'udfs'",
"]",
"datasources",
"=",
"args",
"[",
"'datasources'",
"]",
"subqueries",
"=",
"args",
"[",
"'subqueries'",
"]",
"# Finally build the query object",
"query",
"=",
"bigquery",
".",
"Query",
"(",
"cell_body",
",",
"env",
"=",
"IPython",
".",
"get_ipython",
"(",
")",
".",
"user_ns",
",",
"udfs",
"=",
"udfs",
",",
"data_sources",
"=",
"datasources",
",",
"subqueries",
"=",
"subqueries",
")",
"# if no name is specified, execute this query instead of defining it",
"if",
"name",
"is",
"None",
":",
"return",
"query",
".",
"execute",
"(",
")",
".",
"result",
"(",
")",
"else",
":",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"[",
"name",
"]",
"=",
"query"
] | Implements the BigQuery cell magic used to build SQL objects.
The supported syntax is:
%%bq query <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq query'.
cell_body: the contents of the cell | [
"Implements",
"the",
"BigQuery",
"cell",
"magic",
"for",
"used",
"to",
"build",
"SQL",
"objects",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L548-L573 |
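Finally, a hypothetical %%bq query cell for the last record; with --name the query object is stored in the notebook namespace for later execution instead of running immediately:

%%bq query --name top_words
SELECT word, SUM(word_count) AS total
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY word
ORDER BY total DESC
LIMIT 10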