| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (string, 1 value) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
4,900 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_util.py | repackage_to_staging | def repackage_to_staging(output_path):
"""Repackage it from local installed location and copy it to GCS."""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/image/classification.
package_root = os.path.join(os.path.dirname(__file__), '../../../')
# We deploy setup.py in the same dir for repackaging purpose.
setup_py = os.path.join(os.path.dirname(__file__), 'setup.py')
staging_package_url = os.path.join(output_path, 'staging', 'image_classification.tar.gz')
ml.package_and_copy(package_root, setup_py, staging_package_url)
return staging_package_url | python | def repackage_to_staging(output_path):
"""Repackage it from local installed location and copy it to GCS."""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/image/classification.
package_root = os.path.join(os.path.dirname(__file__), '../../../')
# We deploy setup.py in the same dir for repackaging purpose.
setup_py = os.path.join(os.path.dirname(__file__), 'setup.py')
staging_package_url = os.path.join(output_path, 'staging', 'image_classification.tar.gz')
ml.package_and_copy(package_root, setup_py, staging_package_url)
return staging_package_url | [
"def",
"repackage_to_staging",
"(",
"output_path",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"# Find the package root. __file__ is under [package_root]/mltoolbox/image/classification.",
"package_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'../../../'",
")",
"# We deploy setup.py in the same dir for repackaging purpose.",
"setup_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'setup.py'",
")",
"staging_package_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_path",
",",
"'staging'",
",",
"'image_classification.tar.gz'",
")",
"ml",
".",
"package_and_copy",
"(",
"package_root",
",",
"setup_py",
",",
"staging_package_url",
")",
"return",
"staging_package_url"
] | Repackage it from local installed location and copy it to GCS. | [
"Repackage",
"it",
"from",
"local",
"installed",
"location",
"and",
"copy",
"it",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_util.py#L257-L268 |
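A minimal usage sketch for `repackage_to_staging` above; the GCS bucket path is hypothetical, and the call assumes the mltoolbox image-classification package and `google.datalab` are installed.

```python
# Hypothetical output location; repackage_to_staging() rebuilds the installed
# package and uploads it to <output_path>/staging/image_classification.tar.gz.
from mltoolbox.image.classification._util import repackage_to_staging

staging_url = repackage_to_staging('gs://my-bucket/training-run-001')
print(staging_url)  # gs://my-bucket/training-run-001/staging/image_classification.tar.gz
```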
4,901 | googledatalab/pydatalab | google/datalab/contrib/pipeline/_pipeline.py | PipelineGenerator.generate_airflow_spec | def generate_airflow_spec(name, pipeline_spec):
""" Gets the airflow python spec for the Pipeline object.
"""
task_definitions = ''
up_steam_statements = ''
parameters = pipeline_spec.get('parameters')
for (task_id, task_details) in sorted(pipeline_spec['tasks'].items()):
task_def = PipelineGenerator._get_operator_definition(task_id, task_details, parameters)
task_definitions = task_definitions + task_def
dependency_def = PipelineGenerator._get_dependency_definition(
task_id, task_details.get('up_stream', []))
up_steam_statements = up_steam_statements + dependency_def
schedule_config = pipeline_spec.get('schedule', {})
default_args = PipelineGenerator._get_default_args(schedule_config,
pipeline_spec.get('emails', {}))
dag_definition = PipelineGenerator._get_dag_definition(
name, schedule_config.get('interval', '@once'), schedule_config.get('catchup', False))
return PipelineGenerator._imports + default_args + dag_definition + task_definitions + \
up_steam_statements | python | def generate_airflow_spec(name, pipeline_spec):
""" Gets the airflow python spec for the Pipeline object.
"""
task_definitions = ''
up_steam_statements = ''
parameters = pipeline_spec.get('parameters')
for (task_id, task_details) in sorted(pipeline_spec['tasks'].items()):
task_def = PipelineGenerator._get_operator_definition(task_id, task_details, parameters)
task_definitions = task_definitions + task_def
dependency_def = PipelineGenerator._get_dependency_definition(
task_id, task_details.get('up_stream', []))
up_steam_statements = up_steam_statements + dependency_def
schedule_config = pipeline_spec.get('schedule', {})
default_args = PipelineGenerator._get_default_args(schedule_config,
pipeline_spec.get('emails', {}))
dag_definition = PipelineGenerator._get_dag_definition(
name, schedule_config.get('interval', '@once'), schedule_config.get('catchup', False))
return PipelineGenerator._imports + default_args + dag_definition + task_definitions + \
up_steam_statements | [
"def",
"generate_airflow_spec",
"(",
"name",
",",
"pipeline_spec",
")",
":",
"task_definitions",
"=",
"''",
"up_steam_statements",
"=",
"''",
"parameters",
"=",
"pipeline_spec",
".",
"get",
"(",
"'parameters'",
")",
"for",
"(",
"task_id",
",",
"task_details",
")",
"in",
"sorted",
"(",
"pipeline_spec",
"[",
"'tasks'",
"]",
".",
"items",
"(",
")",
")",
":",
"task_def",
"=",
"PipelineGenerator",
".",
"_get_operator_definition",
"(",
"task_id",
",",
"task_details",
",",
"parameters",
")",
"task_definitions",
"=",
"task_definitions",
"+",
"task_def",
"dependency_def",
"=",
"PipelineGenerator",
".",
"_get_dependency_definition",
"(",
"task_id",
",",
"task_details",
".",
"get",
"(",
"'up_stream'",
",",
"[",
"]",
")",
")",
"up_steam_statements",
"=",
"up_steam_statements",
"+",
"dependency_def",
"schedule_config",
"=",
"pipeline_spec",
".",
"get",
"(",
"'schedule'",
",",
"{",
"}",
")",
"default_args",
"=",
"PipelineGenerator",
".",
"_get_default_args",
"(",
"schedule_config",
",",
"pipeline_spec",
".",
"get",
"(",
"'emails'",
",",
"{",
"}",
")",
")",
"dag_definition",
"=",
"PipelineGenerator",
".",
"_get_dag_definition",
"(",
"name",
",",
"schedule_config",
".",
"get",
"(",
"'interval'",
",",
"'@once'",
")",
",",
"schedule_config",
".",
"get",
"(",
"'catchup'",
",",
"False",
")",
")",
"return",
"PipelineGenerator",
".",
"_imports",
"+",
"default_args",
"+",
"dag_definition",
"+",
"task_definitions",
"+",
"up_steam_statements"
] | Gets the airflow python spec for the Pipeline object. | [
"Gets",
"the",
"airflow",
"python",
"spec",
"for",
"the",
"Pipeline",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/_pipeline.py#L48-L68 |
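To make the expected input shape concrete, here is a hedged sketch of a `pipeline_spec` dict containing only the keys that `generate_airflow_spec` reads above; the task ids, types, and values are illustrative.

```python
# Illustrative spec shape; only keys consumed by generate_airflow_spec() are shown.
pipeline_spec = {
    'parameters': None,                 # forwarded to each operator definition
    'schedule': {
        'interval': '@daily',           # defaults to '@once' when missing
        'catchup': False,
    },
    'emails': {},                       # folded into the DAG default_args
    'tasks': {
        'extract': {'type': 'pydatalab.bq.extract'},
        'report': {'type': 'Bash', 'up_stream': ['extract']},
    },
}

# airflow_py = PipelineGenerator.generate_airflow_spec('my_pipeline', pipeline_spec)
```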
4,902 | googledatalab/pydatalab | google/datalab/contrib/pipeline/_pipeline.py | PipelineGenerator._get_dependency_definition | def _get_dependency_definition(task_id, dependencies):
""" Internal helper collects all the dependencies of the task, and returns
the Airflow equivalent python syntax for specifying them.
"""
set_upstream_statements = ''
for dependency in dependencies:
set_upstream_statements = set_upstream_statements + \
'{0}.set_upstream({1})'.format(task_id, dependency) + '\n'
return set_upstream_statements | python | def _get_dependency_definition(task_id, dependencies):
""" Internal helper collects all the dependencies of the task, and returns
the Airflow equivalent python syntax for specifying them.
"""
set_upstream_statements = ''
for dependency in dependencies:
set_upstream_statements = set_upstream_statements + \
'{0}.set_upstream({1})'.format(task_id, dependency) + '\n'
return set_upstream_statements | [
"def",
"_get_dependency_definition",
"(",
"task_id",
",",
"dependencies",
")",
":",
"set_upstream_statements",
"=",
"''",
"for",
"dependency",
"in",
"dependencies",
":",
"set_upstream_statements",
"=",
"set_upstream_statements",
"+",
"'{0}.set_upstream({1})'",
".",
"format",
"(",
"task_id",
",",
"dependency",
")",
"+",
"'\\n'",
"return",
"set_upstream_statements"
] | Internal helper collects all the dependencies of the task, and returns
the Airflow equivalent python syntax for specifying them. | [
"Internal",
"helper",
"collects",
"all",
"the",
"dependencies",
"of",
"the",
"task",
"and",
"returns",
"the",
"Airflow",
"equivalent",
"python",
"sytax",
"for",
"specifying",
"them",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/_pipeline.py#L167-L175 |
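A small illustration of the output of the helper above; the task ids are made up, and the call assumes the class is importable from the module path shown in this row.

```python
from google.datalab.contrib.pipeline._pipeline import PipelineGenerator

# Two upstream tasks produce one set_upstream() line each.
print(PipelineGenerator._get_dependency_definition('report', ['extract', 'cleanup']))
# report.set_upstream(extract)
# report.set_upstream(cleanup)
```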
4,903 | googledatalab/pydatalab | google/datalab/contrib/pipeline/_pipeline.py | PipelineGenerator._get_operator_class_name | def _get_operator_class_name(task_detail_type):
""" Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator".
"""
# TODO(rajivpb): Rename this var correctly.
task_type_to_operator_prefix_mapping = {
'pydatalab.bq.execute': ('Execute',
'google.datalab.contrib.bigquery.operators._bq_execute_operator'),
'pydatalab.bq.extract': ('Extract',
'google.datalab.contrib.bigquery.operators._bq_extract_operator'),
'pydatalab.bq.load': ('Load', 'google.datalab.contrib.bigquery.operators._bq_load_operator'),
'Bash': ('Bash', 'airflow.operators.bash_operator')
}
(operator_class_prefix, module) = task_type_to_operator_prefix_mapping.get(
task_detail_type, (None, __name__))
format_string = '{0}Operator'
operator_class_name = format_string.format(operator_class_prefix)
if operator_class_prefix is None:
return format_string.format(task_detail_type), module
return operator_class_name, module | python | def _get_operator_class_name(task_detail_type):
""" Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator".
"""
# TODO(rajivpb): Rename this var correctly.
task_type_to_operator_prefix_mapping = {
'pydatalab.bq.execute': ('Execute',
'google.datalab.contrib.bigquery.operators._bq_execute_operator'),
'pydatalab.bq.extract': ('Extract',
'google.datalab.contrib.bigquery.operators._bq_extract_operator'),
'pydatalab.bq.load': ('Load', 'google.datalab.contrib.bigquery.operators._bq_load_operator'),
'Bash': ('Bash', 'airflow.operators.bash_operator')
}
(operator_class_prefix, module) = task_type_to_operator_prefix_mapping.get(
task_detail_type, (None, __name__))
format_string = '{0}Operator'
operator_class_name = format_string.format(operator_class_prefix)
if operator_class_prefix is None:
return format_string.format(task_detail_type), module
return operator_class_name, module | [
"def",
"_get_operator_class_name",
"(",
"task_detail_type",
")",
":",
"# TODO(rajivpb): Rename this var correctly.",
"task_type_to_operator_prefix_mapping",
"=",
"{",
"'pydatalab.bq.execute'",
":",
"(",
"'Execute'",
",",
"'google.datalab.contrib.bigquery.operators._bq_execute_operator'",
")",
",",
"'pydatalab.bq.extract'",
":",
"(",
"'Extract'",
",",
"'google.datalab.contrib.bigquery.operators._bq_extract_operator'",
")",
",",
"'pydatalab.bq.load'",
":",
"(",
"'Load'",
",",
"'google.datalab.contrib.bigquery.operators._bq_load_operator'",
")",
",",
"'Bash'",
":",
"(",
"'Bash'",
",",
"'airflow.operators.bash_operator'",
")",
"}",
"(",
"operator_class_prefix",
",",
"module",
")",
"=",
"task_type_to_operator_prefix_mapping",
".",
"get",
"(",
"task_detail_type",
",",
"(",
"None",
",",
"__name__",
")",
")",
"format_string",
"=",
"'{0}Operator'",
"operator_class_name",
"=",
"format_string",
".",
"format",
"(",
"operator_class_prefix",
")",
"if",
"operator_class_prefix",
"is",
"None",
":",
"return",
"format_string",
".",
"format",
"(",
"task_detail_type",
")",
",",
"module",
"return",
"operator_class_name",
",",
"module"
] | Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator". | [
"Internal",
"helper",
"gets",
"the",
"name",
"of",
"the",
"Airflow",
"operator",
"class",
".",
"We",
"maintain",
"this",
"in",
"a",
"map",
"so",
"this",
"method",
"really",
"returns",
"the",
"enum",
"name",
"concatenated",
"with",
"the",
"string",
"Operator",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/_pipeline.py#L178-L198 |
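Hedged examples of the lookup above; the results follow directly from the mapping in the code, and the 'Email' task type is hypothetical.

```python
from google.datalab.contrib.pipeline._pipeline import PipelineGenerator

# Known task types resolve to a (class name, module) pair from the map above.
PipelineGenerator._get_operator_class_name('pydatalab.bq.extract')
# ('ExtractOperator', 'google.datalab.contrib.bigquery.operators._bq_extract_operator')

PipelineGenerator._get_operator_class_name('Bash')
# ('BashOperator', 'airflow.operators.bash_operator')

# Unknown types fall back to '<type>Operator' in the defining module.
PipelineGenerator._get_operator_class_name('Email')
# ('EmailOperator', 'google.datalab.contrib.pipeline._pipeline')
```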
4,904 | googledatalab/pydatalab | google/datalab/contrib/pipeline/_pipeline.py | PipelineGenerator._get_operator_param_name_and_values | def _get_operator_param_name_and_values(operator_class_name, task_details):
""" Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents (
i.e. by adding new keys, and removing the existing ones).
"""
# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the
# operator's parameters.
operator_task_details = task_details.copy()
if 'type' in operator_task_details.keys():
del operator_task_details['type']
if 'up_stream' in operator_task_details.keys():
del operator_task_details['up_stream']
# We special-case certain operators if we do some translation of the parameter names. This is
# usually the case when we use syntactic sugar to expose the functionality.
# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via
# getattr() or equivalent. Avoid hard-coding these class-names here.
if (operator_class_name == 'BigQueryOperator'):
return PipelineGenerator._get_bq_execute_params(operator_task_details)
if (operator_class_name == 'BigQueryToCloudStorageOperator'):
return PipelineGenerator._get_bq_extract_params(operator_task_details)
if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'):
return PipelineGenerator._get_bq_load_params(operator_task_details)
return operator_task_details | python | def _get_operator_param_name_and_values(operator_class_name, task_details):
""" Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents (
i.e. by adding new keys, and removing the existing ones).
"""
# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the
# operator's parameters.
operator_task_details = task_details.copy()
if 'type' in operator_task_details.keys():
del operator_task_details['type']
if 'up_stream' in operator_task_details.keys():
del operator_task_details['up_stream']
# We special-case certain operators if we do some translation of the parameter names. This is
# usually the case when we use syntactic sugar to expose the functionality.
# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via
# getattr() or equivalent. Avoid hard-coding these class-names here.
if (operator_class_name == 'BigQueryOperator'):
return PipelineGenerator._get_bq_execute_params(operator_task_details)
if (operator_class_name == 'BigQueryToCloudStorageOperator'):
return PipelineGenerator._get_bq_extract_params(operator_task_details)
if (operator_class_name == 'GoogleCloudStorageToBigQueryOperator'):
return PipelineGenerator._get_bq_load_params(operator_task_details)
return operator_task_details | [
"def",
"_get_operator_param_name_and_values",
"(",
"operator_class_name",
",",
"task_details",
")",
":",
"# We make a clone and then remove 'type' and 'up_stream' since these aren't needed for the",
"# the operator's parameters.",
"operator_task_details",
"=",
"task_details",
".",
"copy",
"(",
")",
"if",
"'type'",
"in",
"operator_task_details",
".",
"keys",
"(",
")",
":",
"del",
"operator_task_details",
"[",
"'type'",
"]",
"if",
"'up_stream'",
"in",
"operator_task_details",
".",
"keys",
"(",
")",
":",
"del",
"operator_task_details",
"[",
"'up_stream'",
"]",
"# We special-case certain operators if we do some translation of the parameter names. This is",
"# usually the case when we use syntactic sugar to expose the functionality.",
"# TODO(rajivpb): It should be possible to make this a lookup from the modules mapping via",
"# getattr() or equivalent. Avoid hard-coding these class-names here.",
"if",
"(",
"operator_class_name",
"==",
"'BigQueryOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_execute_params",
"(",
"operator_task_details",
")",
"if",
"(",
"operator_class_name",
"==",
"'BigQueryToCloudStorageOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_extract_params",
"(",
"operator_task_details",
")",
"if",
"(",
"operator_class_name",
"==",
"'GoogleCloudStorageToBigQueryOperator'",
")",
":",
"return",
"PipelineGenerator",
".",
"_get_bq_load_params",
"(",
"operator_task_details",
")",
"return",
"operator_task_details"
] | Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query string, but we want %%bq users in Datalab
to use 'query'. Hence, a few substitutions that are specific to the Airflow operator need to
be made.
Similarly, the parameter value could come from the notebook's context. All that happens
here.
Returns:
Dict containing _only_ the keys and values that are required in Airflow operator definition.
This requires substituting existing keys in the dictionary with their Airflow equivalents (
i.e. by adding new keys, and removing the existing ones). | [
"Internal",
"helper",
"gets",
"the",
"name",
"of",
"the",
"python",
"parameter",
"for",
"the",
"Airflow",
"operator",
"class",
".",
"In",
"some",
"cases",
"we",
"do",
"not",
"expose",
"the",
"airflow",
"parameter",
"name",
"in",
"its",
"native",
"form",
"but",
"choose",
"to",
"expose",
"a",
"name",
"that",
"s",
"more",
"standard",
"for",
"Datalab",
"or",
"one",
"that",
"s",
"more",
"friendly",
".",
"For",
"example",
"Airflow",
"s",
"BigQueryOperator",
"uses",
"bql",
"for",
"the",
"query",
"string",
"but",
"we",
"want",
"%%bq",
"users",
"in",
"Datalab",
"to",
"use",
"query",
".",
"Hence",
"a",
"few",
"substitutions",
"that",
"are",
"specific",
"to",
"the",
"Airflow",
"operator",
"need",
"to",
"be",
"made",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/pipeline/_pipeline.py#L201-L236 |
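A hedged example of the default path through the helper above for an operator that needs no name translation; the task details are illustrative.

```python
from google.datalab.contrib.pipeline._pipeline import PipelineGenerator

# 'type' and 'up_stream' are bookkeeping keys, not operator parameters, so they
# are dropped; everything else passes through unchanged.
task_details = {'type': 'Bash', 'up_stream': ['extract'], 'bash_command': 'date'}
PipelineGenerator._get_operator_param_name_and_values('BashOperator', task_details)
# {'bash_command': 'date'}
```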
4,905 | googledatalab/pydatalab | google/datalab/ml/_dataset.py | BigQueryDataSet.sample | def sample(self, n):
"""Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
incur cost.
Args:
n: number of sampled counts. Note that the number of counts returned is approximated.
Returns:
A dataframe containing sampled data.
Raises:
Exception if n is larger than number of rows.
"""
total = bq.Query('select count(*) from %s' %
self._get_source()).execute().result()[0].values()[0]
if n > total:
raise ValueError('sample larger than population')
sampling = bq.Sampling.random(percent=n * 100.0 / float(total))
if self._query is not None:
source = self._query
else:
source = 'SELECT * FROM `%s`' % self._table
sample = bq.Query(source).execute(sampling=sampling).result()
df = sample.to_dataframe()
return df | python | def sample(self, n):
"""Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
incur cost.
Args:
n: number of sampled counts. Note that the number of counts returned is approximated.
Returns:
A dataframe containing sampled data.
Raises:
Exception if n is larger than number of rows.
"""
total = bq.Query('select count(*) from %s' %
self._get_source()).execute().result()[0].values()[0]
if n > total:
raise ValueError('sample larger than population')
sampling = bq.Sampling.random(percent=n * 100.0 / float(total))
if self._query is not None:
source = self._query
else:
source = 'SELECT * FROM `%s`' % self._table
sample = bq.Query(source).execute(sampling=sampling).result()
df = sample.to_dataframe()
return df | [
"def",
"sample",
"(",
"self",
",",
"n",
")",
":",
"total",
"=",
"bq",
".",
"Query",
"(",
"'select count(*) from %s'",
"%",
"self",
".",
"_get_source",
"(",
")",
")",
".",
"execute",
"(",
")",
".",
"result",
"(",
")",
"[",
"0",
"]",
".",
"values",
"(",
")",
"[",
"0",
"]",
"if",
"n",
">",
"total",
":",
"raise",
"ValueError",
"(",
"'sample larger than population'",
")",
"sampling",
"=",
"bq",
".",
"Sampling",
".",
"random",
"(",
"percent",
"=",
"n",
"*",
"100.0",
"/",
"float",
"(",
"total",
")",
")",
"if",
"self",
".",
"_query",
"is",
"not",
"None",
":",
"source",
"=",
"self",
".",
"_query",
"else",
":",
"source",
"=",
"'SELECT * FROM `%s`'",
"%",
"self",
".",
"_table",
"sample",
"=",
"bq",
".",
"Query",
"(",
"source",
")",
".",
"execute",
"(",
"sampling",
"=",
"sampling",
")",
".",
"result",
"(",
")",
"df",
"=",
"sample",
".",
"to_dataframe",
"(",
")",
"return",
"df"
] | Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will
incur cost.
Args:
n: number of sampled counts. Note that the number of counts returned is approximated.
Returns:
A dataframe containing sampled data.
Raises:
Exception if n is larger than number of rows. | [
"Samples",
"data",
"into",
"a",
"Pandas",
"DataFrame",
".",
"Note",
"that",
"it",
"calls",
"BigQuery",
"so",
"it",
"will",
"incur",
"cost",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_dataset.py#L196-L218 |
4,906 | googledatalab/pydatalab | google/datalab/ml/_dataset.py | TransformedDataSet.size | def size(self):
"""The number of instances in the data. If the underlying data source changes,
it may be outdated.
"""
import tensorflow as tf
if self._size is None:
self._size = 0
options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
for tfexample_file in self.files:
self._size += sum(1 for x
in tf.python_io.tf_record_iterator(tfexample_file, options=options))
return self._size | python | def size(self):
"""The number of instances in the data. If the underlying data source changes,
it may be outdated.
"""
import tensorflow as tf
if self._size is None:
self._size = 0
options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
for tfexample_file in self.files:
self._size += sum(1 for x
in tf.python_io.tf_record_iterator(tfexample_file, options=options))
return self._size | [
"def",
"size",
"(",
"self",
")",
":",
"import",
"tensorflow",
"as",
"tf",
"if",
"self",
".",
"_size",
"is",
"None",
":",
"self",
".",
"_size",
"=",
"0",
"options",
"=",
"tf",
".",
"python_io",
".",
"TFRecordOptions",
"(",
"tf",
".",
"python_io",
".",
"TFRecordCompressionType",
".",
"GZIP",
")",
"for",
"tfexample_file",
"in",
"self",
".",
"files",
":",
"self",
".",
"_size",
"+=",
"sum",
"(",
"1",
"for",
"x",
"in",
"tf",
".",
"python_io",
".",
"tf_record_iterator",
"(",
"tfexample_file",
",",
"options",
"=",
"options",
")",
")",
"return",
"self",
".",
"_size"
] | The number of instances in the data. If the underlying data source changes,
it may be outdated. | [
"The",
"number",
"of",
"instances",
"in",
"the",
"data",
".",
"If",
"the",
"underlying",
"data",
"source",
"changes",
"it",
"may",
"be",
"outdated",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_dataset.py#L252-L265 |
4,907 | googledatalab/pydatalab | google/datalab/stackdriver/monitoring/_group.py | Groups.list | def list(self, pattern='*'):
"""Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters.
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)] | python | def list(self, pattern='*'):
"""Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters.
"""
if self._group_dict is None:
self._group_dict = collections.OrderedDict(
(group.id, group) for group in self._client.list_groups())
return [group for group in self._group_dict.values()
if fnmatch.fnmatch(group.display_name, pattern)] | [
"def",
"list",
"(",
"self",
",",
"pattern",
"=",
"'*'",
")",
":",
"if",
"self",
".",
"_group_dict",
"is",
"None",
":",
"self",
".",
"_group_dict",
"=",
"collections",
".",
"OrderedDict",
"(",
"(",
"group",
".",
"id",
",",
"group",
")",
"for",
"group",
"in",
"self",
".",
"_client",
".",
"list_groups",
"(",
")",
")",
"return",
"[",
"group",
"for",
"group",
"in",
"self",
".",
"_group_dict",
".",
"values",
"(",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"group",
".",
"display_name",
",",
"pattern",
")",
"]"
] | Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters. | [
"Returns",
"a",
"list",
"of",
"groups",
"that",
"match",
"the",
"filters",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/monitoring/_group.py#L45-L61 |
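The display-name filter above is plain `fnmatch`, so it behaves like shell globbing; a standalone illustration with made-up group names:

```python
import fnmatch

names = ['Production frontend', 'Production backend', 'Staging backend']

# 'Production*' keeps anything whose display name starts with 'Production'.
print([n for n in names if fnmatch.fnmatch(n, 'Production*')])
# ['Production frontend', 'Production backend']

# '*backend' keeps anything ending in 'backend'.
print([n for n in names if fnmatch.fnmatch(n, '*backend')])
# ['Production backend', 'Staging backend']
```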
4,908 | googledatalab/pydatalab | google/datalab/stackdriver/monitoring/_group.py | Groups.as_dataframe | def as_dataframe(self, pattern='*', max_rows=None):
"""Creates a pandas dataframe from the groups that match the filters.
Args:
pattern: An optional pattern to further filter the groups. This can
include Unix shell-style wildcards. E.g. ``"Production *"``,
``"*-backend"``.
max_rows: The maximum number of groups to return. If None, return all.
Returns:
A pandas dataframe containing matching groups.
"""
data = []
for i, group in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
parent = self._group_dict.get(group.parent_id)
parent_display_name = '' if parent is None else parent.display_name
data.append([
group.id, group.display_name, group.parent_id,
parent_display_name, group.is_cluster, group.filter])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS) | python | def as_dataframe(self, pattern='*', max_rows=None):
"""Creates a pandas dataframe from the groups that match the filters.
Args:
pattern: An optional pattern to further filter the groups. This can
include Unix shell-style wildcards. E.g. ``"Production *"``,
``"*-backend"``.
max_rows: The maximum number of groups to return. If None, return all.
Returns:
A pandas dataframe containing matching groups.
"""
data = []
for i, group in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
parent = self._group_dict.get(group.parent_id)
parent_display_name = '' if parent is None else parent.display_name
data.append([
group.id, group.display_name, group.parent_id,
parent_display_name, group.is_cluster, group.filter])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS) | [
"def",
"as_dataframe",
"(",
"self",
",",
"pattern",
"=",
"'*'",
",",
"max_rows",
"=",
"None",
")",
":",
"data",
"=",
"[",
"]",
"for",
"i",
",",
"group",
"in",
"enumerate",
"(",
"self",
".",
"list",
"(",
"pattern",
")",
")",
":",
"if",
"max_rows",
"is",
"not",
"None",
"and",
"i",
">=",
"max_rows",
":",
"break",
"parent",
"=",
"self",
".",
"_group_dict",
".",
"get",
"(",
"group",
".",
"parent_id",
")",
"parent_display_name",
"=",
"''",
"if",
"parent",
"is",
"None",
"else",
"parent",
".",
"display_name",
"data",
".",
"append",
"(",
"[",
"group",
".",
"id",
",",
"group",
".",
"display_name",
",",
"group",
".",
"parent_id",
",",
"parent_display_name",
",",
"group",
".",
"is_cluster",
",",
"group",
".",
"filter",
"]",
")",
"return",
"pandas",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"self",
".",
"_DISPLAY_HEADERS",
")"
] | Creates a pandas dataframe from the groups that match the filters.
Args:
pattern: An optional pattern to further filter the groups. This can
include Unix shell-style wildcards. E.g. ``"Production *"``,
``"*-backend"``.
max_rows: The maximum number of groups to return. If None, return all.
Returns:
A pandas dataframe containing matching groups. | [
"Creates",
"a",
"pandas",
"dataframe",
"from",
"the",
"groups",
"that",
"match",
"the",
"filters",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/stackdriver/monitoring/_group.py#L63-L85 |
4,909 | googledatalab/pydatalab | datalab/data/_sql_statement.py | SqlStatement._find_recursive_dependencies | def _find_recursive_dependencies(sql, values, code, resolved_vars, resolving_vars=None):
""" Recursive helper method for expanding variables including transitive dependencies.
Placeholders in SQL are represented as $<name>. If '$' must appear within
the SQL statement literally, then it can be escaped as '$$'.
Args:
sql: the raw SQL statement with named placeholders.
values: the user-supplied dictionary of name/value pairs to use for placeholder values.
code: an array of referenced UDFs found during expansion.
resolved_vars: a ref parameter for the variable references completely resolved so far.
resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see
a dependency again that is in this set we know we have a circular reference.
Returns:
The formatted SQL statement with placeholders replaced with their values.
Raises:
Exception if a placeholder was found in the SQL statement, but did not
have a corresponding argument value.
"""
# Get the set of $var references in this SQL.
dependencies = SqlStatement._get_dependencies(sql)
for dependency in dependencies:
# Now we check each dependency. If it is in complete - i.e., we have an expansion
# for it already - we just continue.
if dependency in resolved_vars:
continue
# Look it up in our resolution namespace dictionary.
dep = datalab.utils.get_item(values, dependency)
# If it is a SQL module, get the main/last query from the module, so users can refer
# to $module. Useful especially if final query in module has no DEFINE QUERY <name> part.
if isinstance(dep, types.ModuleType):
dep = _utils.get_default_query_from_module(dep)
# If we can't resolve the $name, give up.
if dep is None:
raise Exception("Unsatisfied dependency $%s" % dependency)
# If it is a SqlStatement, it may have its own $ references in turn; check to make
# sure we don't have circular references, and if not, recursively expand it and add
# it to the set of complete dependencies.
if isinstance(dep, SqlStatement):
if resolving_vars is None:
resolving_vars = []
elif dependency in resolving_vars:
# Circular dependency
raise Exception("Circular dependency in $%s" % dependency)
resolving_vars.append(dependency)
SqlStatement._find_recursive_dependencies(dep._sql, values, code, resolved_vars,
resolving_vars)
resolving_vars.pop()
resolved_vars[dependency] = SqlStatement(dep._sql)
else:
resolved_vars[dependency] = dep | python | def _find_recursive_dependencies(sql, values, code, resolved_vars, resolving_vars=None):
""" Recursive helper method for expanding variables including transitive dependencies.
Placeholders in SQL are represented as $<name>. If '$' must appear within
the SQL statement literally, then it can be escaped as '$$'.
Args:
sql: the raw SQL statement with named placeholders.
values: the user-supplied dictionary of name/value pairs to use for placeholder values.
code: an array of referenced UDFs found during expansion.
resolved_vars: a ref parameter for the variable references completely resolved so far.
resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see
a dependency again that is in this set we know we have a circular reference.
Returns:
The formatted SQL statement with placeholders replaced with their values.
Raises:
Exception if a placeholder was found in the SQL statement, but did not
have a corresponding argument value.
"""
# Get the set of $var references in this SQL.
dependencies = SqlStatement._get_dependencies(sql)
for dependency in dependencies:
# Now we check each dependency. If it is in complete - i.e., we have an expansion
# for it already - we just continue.
if dependency in resolved_vars:
continue
# Look it up in our resolution namespace dictionary.
dep = datalab.utils.get_item(values, dependency)
# If it is a SQL module, get the main/last query from the module, so users can refer
# to $module. Useful especially if final query in module has no DEFINE QUERY <name> part.
if isinstance(dep, types.ModuleType):
dep = _utils.get_default_query_from_module(dep)
# If we can't resolve the $name, give up.
if dep is None:
raise Exception("Unsatisfied dependency $%s" % dependency)
# If it is a SqlStatement, it may have its own $ references in turn; check to make
# sure we don't have circular references, and if not, recursively expand it and add
# it to the set of complete dependencies.
if isinstance(dep, SqlStatement):
if resolving_vars is None:
resolving_vars = []
elif dependency in resolving_vars:
# Circular dependency
raise Exception("Circular dependency in $%s" % dependency)
resolving_vars.append(dependency)
SqlStatement._find_recursive_dependencies(dep._sql, values, code, resolved_vars,
resolving_vars)
resolving_vars.pop()
resolved_vars[dependency] = SqlStatement(dep._sql)
else:
resolved_vars[dependency] = dep | [
"def",
"_find_recursive_dependencies",
"(",
"sql",
",",
"values",
",",
"code",
",",
"resolved_vars",
",",
"resolving_vars",
"=",
"None",
")",
":",
"# Get the set of $var references in this SQL.",
"dependencies",
"=",
"SqlStatement",
".",
"_get_dependencies",
"(",
"sql",
")",
"for",
"dependency",
"in",
"dependencies",
":",
"# Now we check each dependency. If it is in complete - i.e., we have an expansion",
"# for it already - we just continue.",
"if",
"dependency",
"in",
"resolved_vars",
":",
"continue",
"# Look it up in our resolution namespace dictionary.",
"dep",
"=",
"datalab",
".",
"utils",
".",
"get_item",
"(",
"values",
",",
"dependency",
")",
"# If it is a SQL module, get the main/last query from the module, so users can refer",
"# to $module. Useful especially if final query in module has no DEFINE QUERY <name> part.",
"if",
"isinstance",
"(",
"dep",
",",
"types",
".",
"ModuleType",
")",
":",
"dep",
"=",
"_utils",
".",
"get_default_query_from_module",
"(",
"dep",
")",
"# If we can't resolve the $name, give up.",
"if",
"dep",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Unsatisfied dependency $%s\"",
"%",
"dependency",
")",
"# If it is a SqlStatement, it may have its own $ references in turn; check to make",
"# sure we don't have circular references, and if not, recursively expand it and add",
"# it to the set of complete dependencies.",
"if",
"isinstance",
"(",
"dep",
",",
"SqlStatement",
")",
":",
"if",
"resolving_vars",
"is",
"None",
":",
"resolving_vars",
"=",
"[",
"]",
"elif",
"dependency",
"in",
"resolving_vars",
":",
"# Circular dependency",
"raise",
"Exception",
"(",
"\"Circular dependency in $%s\"",
"%",
"dependency",
")",
"resolving_vars",
".",
"append",
"(",
"dependency",
")",
"SqlStatement",
".",
"_find_recursive_dependencies",
"(",
"dep",
".",
"_sql",
",",
"values",
",",
"code",
",",
"resolved_vars",
",",
"resolving_vars",
")",
"resolving_vars",
".",
"pop",
"(",
")",
"resolved_vars",
"[",
"dependency",
"]",
"=",
"SqlStatement",
"(",
"dep",
".",
"_sql",
")",
"else",
":",
"resolved_vars",
"[",
"dependency",
"]",
"=",
"dep"
] | Recursive helper method for expanding variables including transitive dependencies.
Placeholders in SQL are represented as $<name>. If '$' must appear within
the SQL statement literally, then it can be escaped as '$$'.
Args:
sql: the raw SQL statement with named placeholders.
values: the user-supplied dictionary of name/value pairs to use for placeholder values.
code: an array of referenced UDFs found during expansion.
resolved_vars: a ref parameter for the variable references completely resolved so far.
resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see
a dependency again that is in this set we know we have a circular reference.
Returns:
The formatted SQL statement with placeholders replaced with their values.
Raises:
Exception if a placeholder was found in the SQL statement, but did not
have a corresponding argument value. | [
"Recursive",
"helper",
"method",
"for",
"expanding",
"variables",
"including",
"transitive",
"dependencies",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_statement.py#L69-L120 |
4,910 | googledatalab/pydatalab | datalab/data/_sql_statement.py | SqlStatement.format | def format(sql, args=None):
""" Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure.
"""
resolved_vars = {}
code = []
SqlStatement._find_recursive_dependencies(sql, args, code=code,
resolved_vars=resolved_vars)
# Rebuild the SQL string, substituting just '$' for escaped $ occurrences,
# variable references substituted with their values, or literal text copied
# over as-is.
parts = []
for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):
if escape:
parts.append('$')
elif placeholder:
variable = placeholder[1:]
try:
value = resolved_vars[variable]
except KeyError as e:
raise Exception('Invalid sql. Unable to substitute $%s.' % e.args[0])
if isinstance(value, types.ModuleType):
value = _utils.get_default_query_from_module(value)
if isinstance(value, SqlStatement):
sql = value.format(value._sql, resolved_vars)
value = '(%s)' % sql
elif '_repr_sql_' in dir(value):
# pylint: disable=protected-access
value = value._repr_sql_()
elif isinstance(value, basestring):
value = SqlStatement._escape_string(value)
elif isinstance(value, list) or isinstance(value, tuple):
if isinstance(value, tuple):
value = list(value)
expansion = '('
for v in value:
if len(expansion) > 1:
expansion += ', '
if isinstance(v, basestring):
expansion += SqlStatement._escape_string(v)
else:
expansion += str(v)
expansion += ')'
value = expansion
else:
value = str(value)
parts.append(value)
elif literal:
parts.append(literal)
expanded = ''.join(parts)
return expanded | python | def format(sql, args=None):
""" Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure.
"""
resolved_vars = {}
code = []
SqlStatement._find_recursive_dependencies(sql, args, code=code,
resolved_vars=resolved_vars)
# Rebuild the SQL string, substituting just '$' for escaped $ occurrences,
# variable references substituted with their values, or literal text copied
# over as-is.
parts = []
for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):
if escape:
parts.append('$')
elif placeholder:
variable = placeholder[1:]
try:
value = resolved_vars[variable]
except KeyError as e:
raise Exception('Invalid sql. Unable to substitute $%s.' % e.args[0])
if isinstance(value, types.ModuleType):
value = _utils.get_default_query_from_module(value)
if isinstance(value, SqlStatement):
sql = value.format(value._sql, resolved_vars)
value = '(%s)' % sql
elif '_repr_sql_' in dir(value):
# pylint: disable=protected-access
value = value._repr_sql_()
elif isinstance(value, basestring):
value = SqlStatement._escape_string(value)
elif isinstance(value, list) or isinstance(value, tuple):
if isinstance(value, tuple):
value = list(value)
expansion = '('
for v in value:
if len(expansion) > 1:
expansion += ', '
if isinstance(v, basestring):
expansion += SqlStatement._escape_string(v)
else:
expansion += str(v)
expansion += ')'
value = expansion
else:
value = str(value)
parts.append(value)
elif literal:
parts.append(literal)
expanded = ''.join(parts)
return expanded | [
"def",
"format",
"(",
"sql",
",",
"args",
"=",
"None",
")",
":",
"resolved_vars",
"=",
"{",
"}",
"code",
"=",
"[",
"]",
"SqlStatement",
".",
"_find_recursive_dependencies",
"(",
"sql",
",",
"args",
",",
"code",
"=",
"code",
",",
"resolved_vars",
"=",
"resolved_vars",
")",
"# Rebuild the SQL string, substituting just '$' for escaped $ occurrences,",
"# variable references substituted with their values, or literal text copied",
"# over as-is.",
"parts",
"=",
"[",
"]",
"for",
"(",
"escape",
",",
"placeholder",
",",
"_",
",",
"literal",
")",
"in",
"SqlStatement",
".",
"_get_tokens",
"(",
"sql",
")",
":",
"if",
"escape",
":",
"parts",
".",
"append",
"(",
"'$'",
")",
"elif",
"placeholder",
":",
"variable",
"=",
"placeholder",
"[",
"1",
":",
"]",
"try",
":",
"value",
"=",
"resolved_vars",
"[",
"variable",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"'Invalid sql. Unable to substitute $%s.'",
"%",
"e",
".",
"args",
"[",
"0",
"]",
")",
"if",
"isinstance",
"(",
"value",
",",
"types",
".",
"ModuleType",
")",
":",
"value",
"=",
"_utils",
".",
"get_default_query_from_module",
"(",
"value",
")",
"if",
"isinstance",
"(",
"value",
",",
"SqlStatement",
")",
":",
"sql",
"=",
"value",
".",
"format",
"(",
"value",
".",
"_sql",
",",
"resolved_vars",
")",
"value",
"=",
"'(%s)'",
"%",
"sql",
"elif",
"'_repr_sql_'",
"in",
"dir",
"(",
"value",
")",
":",
"# pylint: disable=protected-access",
"value",
"=",
"value",
".",
"_repr_sql_",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"SqlStatement",
".",
"_escape_string",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
"or",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"tuple",
")",
":",
"value",
"=",
"list",
"(",
"value",
")",
"expansion",
"=",
"'('",
"for",
"v",
"in",
"value",
":",
"if",
"len",
"(",
"expansion",
")",
">",
"1",
":",
"expansion",
"+=",
"', '",
"if",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"expansion",
"+=",
"SqlStatement",
".",
"_escape_string",
"(",
"v",
")",
"else",
":",
"expansion",
"+=",
"str",
"(",
"v",
")",
"expansion",
"+=",
"')'",
"value",
"=",
"expansion",
"else",
":",
"value",
"=",
"str",
"(",
"value",
")",
"parts",
".",
"append",
"(",
"value",
")",
"elif",
"literal",
":",
"parts",
".",
"append",
"(",
"literal",
")",
"expanded",
"=",
"''",
".",
"join",
"(",
"parts",
")",
"return",
"expanded"
] | Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure. | [
"Resolve",
"variable",
"references",
"in",
"a",
"query",
"within",
"an",
"environment",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_statement.py#L127-L193 |
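A hedged sketch of placeholder expansion as described in the docstring above; the import uses the module path shown in this row, and the exact quoting of string values depends on `_escape_string`.

```python
from datalab.data._sql_statement import SqlStatement

# $name references resolve from the args dict, lists expand to a parenthesized
# group, and '$$' escapes a literal '$'.
sql = 'SELECT * FROM logs WHERE state = $state AND id IN $ids  -- costs $$5'
print(SqlStatement.format(sql, {'state': 'CA', 'ids': [1, 2, 3]}))
# roughly: SELECT * FROM logs WHERE state = 'CA' AND id IN (1, 2, 3)  -- costs $5
```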
4,911 | googledatalab/pydatalab | datalab/data/_sql_statement.py | SqlStatement._get_dependencies | def _get_dependencies(sql):
""" Return the list of variables referenced in this SQL. """
dependencies = []
for (_, placeholder, dollar, _) in SqlStatement._get_tokens(sql):
if placeholder:
variable = placeholder[1:]
if variable not in dependencies:
dependencies.append(variable)
elif dollar:
raise Exception('Invalid sql; $ with no following $ or identifier: %s.' % sql)
return dependencies | python | def _get_dependencies(sql):
""" Return the list of variables referenced in this SQL. """
dependencies = []
for (_, placeholder, dollar, _) in SqlStatement._get_tokens(sql):
if placeholder:
variable = placeholder[1:]
if variable not in dependencies:
dependencies.append(variable)
elif dollar:
raise Exception('Invalid sql; $ with no following $ or identifier: %s.' % sql)
return dependencies | [
"def",
"_get_dependencies",
"(",
"sql",
")",
":",
"dependencies",
"=",
"[",
"]",
"for",
"(",
"_",
",",
"placeholder",
",",
"dollar",
",",
"_",
")",
"in",
"SqlStatement",
".",
"_get_tokens",
"(",
"sql",
")",
":",
"if",
"placeholder",
":",
"variable",
"=",
"placeholder",
"[",
"1",
":",
"]",
"if",
"variable",
"not",
"in",
"dependencies",
":",
"dependencies",
".",
"append",
"(",
"variable",
")",
"elif",
"dollar",
":",
"raise",
"Exception",
"(",
"'Invalid sql; $ with no following $ or identifier: %s.'",
"%",
"sql",
")",
"return",
"dependencies"
] | Return the list of variables referenced in this SQL. | [
"Return",
"the",
"list",
"of",
"variables",
"referenced",
"in",
"this",
"SQL",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_statement.py#L202-L212 |
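A small illustration of the dependency scan above; it collects the distinct `$name` references in order of first appearance.

```python
from datalab.data._sql_statement import SqlStatement

SqlStatement._get_dependencies('SELECT * FROM $table WHERE cost > $limit AND id IN $ids')
# ['table', 'limit', 'ids']
```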
4,912 | googledatalab/pydatalab | datalab/utils/commands/_modules.py | pymodule | def pymodule(line, cell=None):
"""Creates and subsequently auto-imports a python module.
"""
parser = _commands.CommandParser.create('pymodule')
parser.add_argument('-n', '--name',
help='the name of the python module to create and import')
parser.set_defaults(func=_pymodule_cell)
return _utils.handle_magic_line(line, cell, parser) | python | def pymodule(line, cell=None):
"""Creates and subsequently auto-imports a python module.
"""
parser = _commands.CommandParser.create('pymodule')
parser.add_argument('-n', '--name',
help='the name of the python module to create and import')
parser.set_defaults(func=_pymodule_cell)
return _utils.handle_magic_line(line, cell, parser) | [
"def",
"pymodule",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"parser",
"=",
"_commands",
".",
"CommandParser",
".",
"create",
"(",
"'pymodule'",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--name'",
",",
"help",
"=",
"'the name of the python module to create and import'",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"_pymodule_cell",
")",
"return",
"_utils",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"parser",
")"
] | Creates and subsequently auto-imports a python module. | [
"Creates",
"and",
"subsequently",
"auto",
"-",
"imports",
"a",
"python",
"module",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/utils/commands/_modules.py#L31-L38 |
4,913 | googledatalab/pydatalab | google/datalab/utils/_utils.py | compare_datetimes | def compare_datetimes(d1, d2):
""" Compares two datetimes safely, whether they are timezone-naive or timezone-aware.
If either datetime is naive it is converted to an aware datetime assuming UTC.
Args:
d1: first datetime.
d2: second datetime.
Returns:
-1 if d1 < d2, 0 if they are the same, or +1 if d1 > d2.
"""
if d1.tzinfo is None or d1.tzinfo.utcoffset(d1) is None:
d1 = d1.replace(tzinfo=pytz.UTC)
if d2.tzinfo is None or d2.tzinfo.utcoffset(d2) is None:
d2 = d2.replace(tzinfo=pytz.UTC)
if d1 < d2:
return -1
elif d1 > d2:
return 1
return 0 | python | def compare_datetimes(d1, d2):
""" Compares two datetimes safely, whether they are timezone-naive or timezone-aware.
If either datetime is naive it is converted to an aware datetime assuming UTC.
Args:
d1: first datetime.
d2: second datetime.
Returns:
-1 if d1 < d2, 0 if they are the same, or +1 if d1 > d2.
"""
if d1.tzinfo is None or d1.tzinfo.utcoffset(d1) is None:
d1 = d1.replace(tzinfo=pytz.UTC)
if d2.tzinfo is None or d2.tzinfo.utcoffset(d2) is None:
d2 = d2.replace(tzinfo=pytz.UTC)
if d1 < d2:
return -1
elif d1 > d2:
return 1
return 0 | [
"def",
"compare_datetimes",
"(",
"d1",
",",
"d2",
")",
":",
"if",
"d1",
".",
"tzinfo",
"is",
"None",
"or",
"d1",
".",
"tzinfo",
".",
"utcoffset",
"(",
"d1",
")",
"is",
"None",
":",
"d1",
"=",
"d1",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"UTC",
")",
"if",
"d2",
".",
"tzinfo",
"is",
"None",
"or",
"d2",
".",
"tzinfo",
".",
"utcoffset",
"(",
"d2",
")",
"is",
"None",
":",
"d2",
"=",
"d2",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"UTC",
")",
"if",
"d1",
"<",
"d2",
":",
"return",
"-",
"1",
"elif",
"d1",
">",
"d2",
":",
"return",
"1",
"return",
"0"
] | Compares two datetimes safely, whether they are timezone-naive or timezone-aware.
If either datetime is naive it is converted to an aware datetime assuming UTC.
Args:
d1: first datetime.
d2: second datetime.
Returns:
-1 if d1 < d2, 0 if they are the same, or +1 if d1 > d2. | [
"Compares",
"two",
"datetimes",
"safely",
"whether",
"they",
"are",
"timezone",
"-",
"naive",
"or",
"timezone",
"-",
"aware",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L75-L95 |
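A standalone sketch of the problem this helper solves: Python refuses to compare naive and aware datetimes directly, while `compare_datetimes` assumes UTC for naive values. The import path follows the module shown in this row.

```python
import datetime

import pytz

from google.datalab.utils._utils import compare_datetimes

naive = datetime.datetime(2024, 1, 1, 12, 0)                   # no tzinfo
aware = datetime.datetime(2024, 1, 1, 13, 0, tzinfo=pytz.UTC)  # UTC-aware

try:
    naive < aware
except TypeError as err:
    print('direct comparison fails:', err)

# The helper treats the naive value as UTC, so 12:00 < 13:00 gives -1.
print(compare_datetimes(naive, aware))  # -1
```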
4,914 | googledatalab/pydatalab | google/datalab/utils/_utils.py | pick_unused_port | def pick_unused_port():
""" get an unused port on the VM.
Returns:
An unused port.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port | python | def pick_unused_port():
""" get an unused port on the VM.
Returns:
An unused port.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
addr, port = s.getsockname()
s.close()
return port | [
"def",
"pick_unused_port",
"(",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"s",
".",
"bind",
"(",
"(",
"'localhost'",
",",
"0",
")",
")",
"addr",
",",
"port",
"=",
"s",
".",
"getsockname",
"(",
")",
"s",
".",
"close",
"(",
")",
"return",
"port"
] | get an unused port on the VM.
Returns:
An unused port. | [
"get",
"an",
"unused",
"port",
"on",
"the",
"VM",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L98-L108 |
4,915 | googledatalab/pydatalab | google/datalab/utils/_utils.py | is_http_running_on | def is_http_running_on(port):
""" Check if an http server runs on a given port.
Args:
port: The port to check.
Returns:
True if it is used by an http server. False otherwise.
"""
try:
conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
conn.connect()
conn.close()
return True
except Exception:
return False | python | def is_http_running_on(port):
""" Check if an http server runs on a given port.
Args:
port: The port to check.
Returns:
True if it is used by an http server. False otherwise.
"""
try:
conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
conn.connect()
conn.close()
return True
except Exception:
return False | [
"def",
"is_http_running_on",
"(",
"port",
")",
":",
"try",
":",
"conn",
"=",
"httplib",
".",
"HTTPConnection",
"(",
"'127.0.0.1:'",
"+",
"str",
"(",
"port",
")",
")",
"conn",
".",
"connect",
"(",
")",
"conn",
".",
"close",
"(",
")",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] | Check if an http server runs on a given port.
Args:
port: The port to check.
Returns:
True if it is used by an http server. False otherwise. | [
"Check",
"if",
"an",
"http",
"server",
"runs",
"on",
"a",
"given",
"port",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L111-L125 |
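A brief usage sketch combining the two helpers above: reserve a free local port and confirm that nothing is already serving HTTP on it. The import path follows the module shown in this row.

```python
from google.datalab.utils._utils import is_http_running_on, pick_unused_port

port = pick_unused_port()
# Nothing is listening on a freshly picked port, so this prints False.
print(port, is_http_running_on(port))
```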
4,916 | googledatalab/pydatalab | google/datalab/utils/_utils.py | save_project_id | def save_project_id(project_id):
""" Save project id to config file.
Args:
project_id: the project_id to save.
"""
# Try gcloud first. If gcloud fails (probably because it does not exist), then
# write to a config file.
try:
subprocess.call(['gcloud', 'config', 'set', 'project', project_id])
except:
config_file = os.path.join(get_config_dir(), 'config.json')
config = {}
if os.path.exists(config_file):
with open(config_file) as f:
config = json.loads(f.read())
config['project_id'] = project_id
with open(config_file, 'w') as f:
f.write(json.dumps(config)) | python | def save_project_id(project_id):
""" Save project id to config file.
Args:
project_id: the project_id to save.
"""
# Try gcloud first. If gcloud fails (probably because it does not exist), then
# write to a config file.
try:
subprocess.call(['gcloud', 'config', 'set', 'project', project_id])
except:
config_file = os.path.join(get_config_dir(), 'config.json')
config = {}
if os.path.exists(config_file):
with open(config_file) as f:
config = json.loads(f.read())
config['project_id'] = project_id
with open(config_file, 'w') as f:
f.write(json.dumps(config)) | [
"def",
"save_project_id",
"(",
"project_id",
")",
":",
"# Try gcloud first. If gcloud fails (probably because it does not exist), then",
"# write to a config file.",
"try",
":",
"subprocess",
".",
"call",
"(",
"[",
"'gcloud'",
",",
"'config'",
",",
"'set'",
",",
"'project'",
",",
"project_id",
"]",
")",
"except",
":",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_config_dir",
"(",
")",
",",
"'config.json'",
")",
"config",
"=",
"{",
"}",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"config_file",
")",
":",
"with",
"open",
"(",
"config_file",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"config",
"[",
"'project_id'",
"]",
"=",
"project_id",
"with",
"open",
"(",
"config_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"config",
")",
")"
] | Save project id to config file.
Args:
project_id: the project_id to save. | [
"Save",
"project",
"id",
"to",
"config",
"file",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L222-L240 |
4,917 | googledatalab/pydatalab | google/datalab/utils/_utils.py | get_default_project_id | def get_default_project_id():
""" Get default project id from config or environment var.
Returns: the project id if available, or None.
"""
# Try getting default project id from gcloud. If it fails try config.json.
try:
proc = subprocess.Popen(['gcloud', 'config', 'list', '--format', 'value(core.project)'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
value = stdout.strip()
if proc.poll() == 0 and value:
if isinstance(value, six.string_types):
return value
else:
# Hope it's a utf-8 string encoded in bytes. Otherwise an exception will
# be thrown and config.json will be checked.
return value.decode()
except:
pass
config_file = os.path.join(get_config_dir(), 'config.json')
if os.path.exists(config_file):
with open(config_file) as f:
config = json.loads(f.read())
if 'project_id' in config and config['project_id']:
return str(config['project_id'])
if os.getenv('PROJECT_ID') is not None:
return os.getenv('PROJECT_ID')
return None | python | def get_default_project_id():
""" Get default project id from config or environment var.
Returns: the project id if available, or None.
"""
# Try getting default project id from gcloud. If it fails try config.json.
try:
proc = subprocess.Popen(['gcloud', 'config', 'list', '--format', 'value(core.project)'],
stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
value = stdout.strip()
if proc.poll() == 0 and value:
if isinstance(value, six.string_types):
return value
else:
# Hope it's a utf-8 string encoded in bytes. Otherwise an exception will
# be thrown and config.json will be checked.
return value.decode()
except:
pass
config_file = os.path.join(get_config_dir(), 'config.json')
if os.path.exists(config_file):
with open(config_file) as f:
config = json.loads(f.read())
if 'project_id' in config and config['project_id']:
return str(config['project_id'])
if os.getenv('PROJECT_ID') is not None:
return os.getenv('PROJECT_ID')
return None | [
"def",
"get_default_project_id",
"(",
")",
":",
"# Try getting default project id from gcloud. If it fails try config.json.",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'gcloud'",
",",
"'config'",
",",
"'list'",
",",
"'--format'",
",",
"'value(core.project)'",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"_",
"=",
"proc",
".",
"communicate",
"(",
")",
"value",
"=",
"stdout",
".",
"strip",
"(",
")",
"if",
"proc",
".",
"poll",
"(",
")",
"==",
"0",
"and",
"value",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"return",
"value",
"else",
":",
"# Hope it's a utf-8 string encoded in bytes. Otherwise an exception will",
"# be thrown and config.json will be checked.",
"return",
"value",
".",
"decode",
"(",
")",
"except",
":",
"pass",
"config_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_config_dir",
"(",
")",
",",
"'config.json'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"config_file",
")",
":",
"with",
"open",
"(",
"config_file",
")",
"as",
"f",
":",
"config",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"if",
"'project_id'",
"in",
"config",
"and",
"config",
"[",
"'project_id'",
"]",
":",
"return",
"str",
"(",
"config",
"[",
"'project_id'",
"]",
")",
"if",
"os",
".",
"getenv",
"(",
"'PROJECT_ID'",
")",
"is",
"not",
"None",
":",
"return",
"os",
".",
"getenv",
"(",
"'PROJECT_ID'",
")",
"return",
"None"
] | Get default project id from config or environment var.
Returns: the project id if available, or None. | [
"Get",
"default",
"project",
"id",
"from",
"config",
"or",
"environment",
"var",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L243-L273 |
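A short sketch of the lookup order documented above (gcloud, then config.json, then the PROJECT_ID environment variable); the import path is assumed to mirror the record's file path:

    from google.datalab.utils._utils import get_default_project_id

    project = get_default_project_id()
    print(project if project else 'no default project configured')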
4,918 | googledatalab/pydatalab | google/datalab/utils/_utils.py | _construct_context_for_args | def _construct_context_for_args(args):
"""Construct a new Context for the parsed arguments.
Args:
args: the dictionary of magic arguments.
Returns:
A new Context based on the current default context, but with any explicitly
specified arguments overriding the default's config.
"""
global_default_context = google.datalab.Context.default()
config = {}
for key in global_default_context.config:
config[key] = global_default_context.config[key]
billing_tier_arg = args.get('billing', None)
if billing_tier_arg:
config['bigquery_billing_tier'] = billing_tier_arg
return google.datalab.Context(
project_id=global_default_context.project_id,
credentials=global_default_context.credentials,
config=config) | python | def _construct_context_for_args(args):
"""Construct a new Context for the parsed arguments.
Args:
args: the dictionary of magic arguments.
Returns:
A new Context based on the current default context, but with any explicitly
specified arguments overriding the default's config.
"""
global_default_context = google.datalab.Context.default()
config = {}
for key in global_default_context.config:
config[key] = global_default_context.config[key]
billing_tier_arg = args.get('billing', None)
if billing_tier_arg:
config['bigquery_billing_tier'] = billing_tier_arg
return google.datalab.Context(
project_id=global_default_context.project_id,
credentials=global_default_context.credentials,
config=config) | [
"def",
"_construct_context_for_args",
"(",
"args",
")",
":",
"global_default_context",
"=",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"config",
"=",
"{",
"}",
"for",
"key",
"in",
"global_default_context",
".",
"config",
":",
"config",
"[",
"key",
"]",
"=",
"global_default_context",
".",
"config",
"[",
"key",
"]",
"billing_tier_arg",
"=",
"args",
".",
"get",
"(",
"'billing'",
",",
"None",
")",
"if",
"billing_tier_arg",
":",
"config",
"[",
"'bigquery_billing_tier'",
"]",
"=",
"billing_tier_arg",
"return",
"google",
".",
"datalab",
".",
"Context",
"(",
"project_id",
"=",
"global_default_context",
".",
"project_id",
",",
"credentials",
"=",
"global_default_context",
".",
"credentials",
",",
"config",
"=",
"config",
")"
] | Construct a new Context for the parsed arguments.
Args:
args: the dictionary of magic arguments.
Returns:
A new Context based on the current default context, but with any explicitly
specified arguments overriding the default's config. | [
"Construct",
"a",
"new",
"Context",
"for",
"the",
"parsed",
"arguments",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L276-L297 |
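An illustrative call for the private helper above; it assumes a default Context can be created (credentials and a project are configured) and the args dict is a placeholder:

    # Only the 'billing' key is read; it overrides bigquery_billing_tier in the copied config.
    args = {'billing': 2}
    context = _construct_context_for_args(args)
    assert context.config['bigquery_billing_tier'] == 2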
4,919 | googledatalab/pydatalab | google/datalab/utils/_utils.py | python_portable_string | def python_portable_string(string, encoding='utf-8'):
"""Converts bytes into a string type.
Valid string types are returned without modification. So in Python 2, type str
and unicode are not converted.
In Python 3, type bytes is converted to type str (unicode)
"""
if isinstance(string, six.string_types):
return string
if six.PY3:
return string.decode(encoding)
raise ValueError('Unsupported type %s' % str(type(string))) | python | def python_portable_string(string, encoding='utf-8'):
"""Converts bytes into a string type.
Valid string types are returned without modification. So in Python 2, type str
and unicode are not converted.
In Python 3, type bytes is converted to type str (unicode)
"""
if isinstance(string, six.string_types):
return string
if six.PY3:
return string.decode(encoding)
raise ValueError('Unsupported type %s' % str(type(string))) | [
"def",
"python_portable_string",
"(",
"string",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"string_types",
")",
":",
"return",
"string",
"if",
"six",
".",
"PY3",
":",
"return",
"string",
".",
"decode",
"(",
"encoding",
")",
"raise",
"ValueError",
"(",
"'Unsupported type %s'",
"%",
"str",
"(",
"type",
"(",
"string",
")",
")",
")"
] | Converts bytes into a string type.
Valid string types are returned without modification. So in Python 2, type str
and unicode are not converted.
In Python 3, type bytes is converted to type str (unicode) | [
"Converts",
"bytes",
"into",
"a",
"string",
"type",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L300-L314 |
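A Python 3 behaviour sketch for python_portable_string (under Python 2 a byte string is already a str and is returned unchanged):

    assert python_portable_string(b'caf\xc3\xa9') == u'caf\xe9'   # bytes are decoded as utf-8
    assert python_portable_string('already text') == 'already text'  # str passes through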
4,920 | googledatalab/pydatalab | datalab/storage/commands/_storage.py | _storage_list_buckets | def _storage_list_buckets(project, pattern):
""" List all storage buckets that match a pattern. """
data = [{'Bucket': 'gs://' + bucket.name, 'Created': bucket.metadata.created_on}
for bucket in datalab.storage.Buckets(project_id=project)
if fnmatch.fnmatch(bucket.name, pattern)]
return datalab.utils.commands.render_dictionary(data, ['Bucket', 'Created']) | python | def _storage_list_buckets(project, pattern):
""" List all storage buckets that match a pattern. """
data = [{'Bucket': 'gs://' + bucket.name, 'Created': bucket.metadata.created_on}
for bucket in datalab.storage.Buckets(project_id=project)
if fnmatch.fnmatch(bucket.name, pattern)]
return datalab.utils.commands.render_dictionary(data, ['Bucket', 'Created']) | [
"def",
"_storage_list_buckets",
"(",
"project",
",",
"pattern",
")",
":",
"data",
"=",
"[",
"{",
"'Bucket'",
":",
"'gs://'",
"+",
"bucket",
".",
"name",
",",
"'Created'",
":",
"bucket",
".",
"metadata",
".",
"created_on",
"}",
"for",
"bucket",
"in",
"datalab",
".",
"storage",
".",
"Buckets",
"(",
"project_id",
"=",
"project",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"bucket",
".",
"name",
",",
"pattern",
")",
"]",
"return",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'Bucket'",
",",
"'Created'",
"]",
")"
] | List all storage buckets that match a pattern. | [
"List",
"all",
"storage",
"buckets",
"that",
"match",
"a",
"pattern",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/commands/_storage.py#L276-L281 |
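A hypothetical call for the listing helper above; it needs datalab credentials, assumes the function is imported from the commands module, and the pattern uses fnmatch-style wildcards:

    # Renders a table of buckets in the project whose names start with 'data-'.
    _storage_list_buckets('my-sample-project', 'data-*')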
4,921 | googledatalab/pydatalab | datalab/storage/commands/_storage.py | _storage_list_keys | def _storage_list_keys(bucket, pattern):
""" List all storage keys in a specified bucket that match a pattern. """
data = [{'Name': item.metadata.name,
'Type': item.metadata.content_type,
'Size': item.metadata.size,
'Updated': item.metadata.updated_on}
for item in _storage_get_keys(bucket, pattern)]
return datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated']) | python | def _storage_list_keys(bucket, pattern):
""" List all storage keys in a specified bucket that match a pattern. """
data = [{'Name': item.metadata.name,
'Type': item.metadata.content_type,
'Size': item.metadata.size,
'Updated': item.metadata.updated_on}
for item in _storage_get_keys(bucket, pattern)]
return datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated']) | [
"def",
"_storage_list_keys",
"(",
"bucket",
",",
"pattern",
")",
":",
"data",
"=",
"[",
"{",
"'Name'",
":",
"item",
".",
"metadata",
".",
"name",
",",
"'Type'",
":",
"item",
".",
"metadata",
".",
"content_type",
",",
"'Size'",
":",
"item",
".",
"metadata",
".",
"size",
",",
"'Updated'",
":",
"item",
".",
"metadata",
".",
"updated_on",
"}",
"for",
"item",
"in",
"_storage_get_keys",
"(",
"bucket",
",",
"pattern",
")",
"]",
"return",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'Name'",
",",
"'Type'",
",",
"'Size'",
",",
"'Updated'",
"]",
")"
] | List all storage keys in a specified bucket that match a pattern. | [
"List",
"all",
"storage",
"keys",
"in",
"a",
"specified",
"bucket",
"that",
"match",
"a",
"pattern",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/commands/_storage.py#L294-L301 |
4,922 | googledatalab/pydatalab | google/datalab/bigquery/_api.py | Api.tables_list | def tables_list(self, dataset_name, max_results=0, page_token=None):
"""Issues a request to retrieve a list of tables.
Args:
dataset_name: the name of the dataset to enumerate.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT +\
(Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials) | python | def tables_list(self, dataset_name, max_results=0, page_token=None):
"""Issues a request to retrieve a list of tables.
Args:
dataset_name: the name of the dataset to enumerate.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT +\
(Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials) | [
"def",
"tables_list",
"(",
"self",
",",
"dataset_name",
",",
"max_results",
"=",
"0",
",",
"page_token",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"(",
"dataset_name",
".",
"project_id",
",",
"dataset_name",
".",
"dataset_id",
",",
"''",
",",
"''",
")",
")",
"args",
"=",
"{",
"}",
"if",
"max_results",
"!=",
"0",
":",
"args",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"page_token",
"is",
"not",
"None",
":",
"args",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"credentials",
")"
] | Issues a request to retrieve a list of tables.
Args:
dataset_name: the name of the dataset to enumerate.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"a",
"list",
"of",
"tables",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_api.py#L354-L375 |
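A paging sketch for Api.tables_list; `api` and `dataset_name` are assumed to be an authenticated Api instance and a dataset name object, and the 'tables'/'nextPageToken' keys follow the BigQuery tables.list response format:

    page_token = None
    while True:
        response = api.tables_list(dataset_name, max_results=50, page_token=page_token)
        for table in response.get('tables', []):
            print(table['id'])
        page_token = response.get('nextPageToken')
        if not page_token:
            break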
4,923 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | _bag_of_words | def _bag_of_words(x):
"""Computes bag of words weights
Note the return type is a float sparse tensor, not an int sparse tensor. This
is so that the output types match tfidf, and any downstream transformation
in tf layers during training can be applied to both.
"""
def _bow(x):
"""Comptue BOW weights.
As tf layer's sum combiner is used, the weights can be just ones. Tokens are
not summed together here.
"""
return tf.SparseTensor(
indices=x.indices,
values=tf.to_float(tf.ones_like(x.values)),
dense_shape=x.dense_shape)
return _bow(x) | python | def _bag_of_words(x):
"""Computes bag of words weights
Note the return type is a float sparse tensor, not an int sparse tensor. This
is so that the output types match tfidf, and any downstream transformation
in tf layers during training can be applied to both.
"""
def _bow(x):
"""Comptue BOW weights.
As tf layer's sum combiner is used, the weights can be just ones. Tokens are
not summed together here.
"""
return tf.SparseTensor(
indices=x.indices,
values=tf.to_float(tf.ones_like(x.values)),
dense_shape=x.dense_shape)
return _bow(x) | [
"def",
"_bag_of_words",
"(",
"x",
")",
":",
"def",
"_bow",
"(",
"x",
")",
":",
"\"\"\"Comptue BOW weights.\n\n As tf layer's sum combiner is used, the weights can be just ones. Tokens are\n not summed together here.\n \"\"\"",
"return",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"x",
".",
"indices",
",",
"values",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"ones_like",
"(",
"x",
".",
"values",
")",
")",
",",
"dense_shape",
"=",
"x",
".",
"dense_shape",
")",
"return",
"_bow",
"(",
"x",
")"
] | Computes bag of words weights
Note the return type is a float sparse tensor, not an int sparse tensor. This
is so that the output types match tfidf, and any downstream transformation
in tf layers during training can be applied to both. | [
"Computes",
"bag",
"of",
"words",
"weights"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L203-L221 |
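A small TensorFlow 1.x sketch of the transform above: whatever integer ids sit in the sparse tensor, the output keeps the same indices but carries float 1.0 weights:

    import tensorflow as tf

    ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                          values=[3, 7, 7],
                          dense_shape=[2, 2])
    weights = _bag_of_words(ids)  # assumes the function is imported from the trainer module
    with tf.Session() as sess:
        print(sess.run(weights.values))  # [1. 1. 1.]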
4,924 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | csv_header_and_defaults | def csv_header_and_defaults(features, schema, stats, keep_target):
"""Gets csv header and default lists."""
target_name = get_target_name(features)
if keep_target and not target_name:
raise ValueError('Cannot find target transform')
csv_header = []
record_defaults = []
for col in schema:
if not keep_target and col['name'] == target_name:
continue
# Note that numerical key columns do not have a stats entry, hence the use
# of get(col['name'], {})
csv_header.append(col['name'])
if col['type'].lower() == INTEGER_SCHEMA:
dtype = tf.int64
default = int(stats['column_stats'].get(col['name'], {}).get('mean', 0))
elif col['type'].lower() == FLOAT_SCHEMA:
dtype = tf.float32
default = float(stats['column_stats'].get(col['name'], {}).get('mean', 0.0))
else:
dtype = tf.string
default = ''
record_defaults.append(tf.constant([default], dtype=dtype))
return csv_header, record_defaults | python | def csv_header_and_defaults(features, schema, stats, keep_target):
"""Gets csv header and default lists."""
target_name = get_target_name(features)
if keep_target and not target_name:
raise ValueError('Cannot find target transform')
csv_header = []
record_defaults = []
for col in schema:
if not keep_target and col['name'] == target_name:
continue
# Note that numerical key columns do not have a stats entry, hence the use
# of get(col['name'], {})
csv_header.append(col['name'])
if col['type'].lower() == INTEGER_SCHEMA:
dtype = tf.int64
default = int(stats['column_stats'].get(col['name'], {}).get('mean', 0))
elif col['type'].lower() == FLOAT_SCHEMA:
dtype = tf.float32
default = float(stats['column_stats'].get(col['name'], {}).get('mean', 0.0))
else:
dtype = tf.string
default = ''
record_defaults.append(tf.constant([default], dtype=dtype))
return csv_header, record_defaults | [
"def",
"csv_header_and_defaults",
"(",
"features",
",",
"schema",
",",
"stats",
",",
"keep_target",
")",
":",
"target_name",
"=",
"get_target_name",
"(",
"features",
")",
"if",
"keep_target",
"and",
"not",
"target_name",
":",
"raise",
"ValueError",
"(",
"'Cannot find target transform'",
")",
"csv_header",
"=",
"[",
"]",
"record_defaults",
"=",
"[",
"]",
"for",
"col",
"in",
"schema",
":",
"if",
"not",
"keep_target",
"and",
"col",
"[",
"'name'",
"]",
"==",
"target_name",
":",
"continue",
"# Note that numerical key columns do not have a stats entry, hence the use",
"# of get(col['name'], {})",
"csv_header",
".",
"append",
"(",
"col",
"[",
"'name'",
"]",
")",
"if",
"col",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"==",
"INTEGER_SCHEMA",
":",
"dtype",
"=",
"tf",
".",
"int64",
"default",
"=",
"int",
"(",
"stats",
"[",
"'column_stats'",
"]",
".",
"get",
"(",
"col",
"[",
"'name'",
"]",
",",
"{",
"}",
")",
".",
"get",
"(",
"'mean'",
",",
"0",
")",
")",
"elif",
"col",
"[",
"'type'",
"]",
".",
"lower",
"(",
")",
"==",
"FLOAT_SCHEMA",
":",
"dtype",
"=",
"tf",
".",
"float32",
"default",
"=",
"float",
"(",
"stats",
"[",
"'column_stats'",
"]",
".",
"get",
"(",
"col",
"[",
"'name'",
"]",
",",
"{",
"}",
")",
".",
"get",
"(",
"'mean'",
",",
"0.0",
")",
")",
"else",
":",
"dtype",
"=",
"tf",
".",
"string",
"default",
"=",
"''",
"record_defaults",
".",
"append",
"(",
"tf",
".",
"constant",
"(",
"[",
"default",
"]",
",",
"dtype",
"=",
"dtype",
")",
")",
"return",
"csv_header",
",",
"record_defaults"
] | Gets csv header and default lists. | [
"Gets",
"csv",
"header",
"and",
"default",
"lists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L503-L531 |
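An illustrative call; the schema/stats/features dictionaries are hypothetical but shaped like the analysis-step outputs this module reads, and the 'target'/'identity' transform names plus the lowercase values of INTEGER_SCHEMA/FLOAT_SCHEMA are assumptions:

    schema = [{'name': 'label', 'type': 'STRING'},
              {'name': 'age', 'type': 'INTEGER'},
              {'name': 'weight', 'type': 'FLOAT'}]
    stats = {'column_stats': {'age': {'mean': 38}, 'weight': {'mean': 72.5}}}
    features = {'label': {'transform': 'target'},     # assumed target transform name
                'age': {'transform': 'identity'},
                'weight': {'transform': 'identity'}}
    header, defaults = csv_header_and_defaults(features, schema, stats, keep_target=False)
    # With the target column dropped, header should be ['age', 'weight'] and the defaults are the
    # column means as tf.constant values (int64 38 and float32 72.5).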
4,925 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | build_csv_serving_tensors_for_transform_step | def build_csv_serving_tensors_for_transform_step(analysis_path,
features,
schema,
stats,
keep_target):
"""Builds a serving function starting from raw csv.
This should only be used by transform.py (the transform step), and the
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model.
"""
csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target)
placeholder = tf.placeholder(dtype=tf.string, shape=(None,),
name='csv_input_placeholder')
tensors = tf.decode_csv(placeholder, record_defaults)
raw_features = dict(zip(csv_header, tensors))
transform_fn = make_preprocessing_fn(analysis_path, features, keep_target)
transformed_tensors = transform_fn(raw_features)
transformed_features = {}
# Expand the dims of non-sparse tensors
for k, v in six.iteritems(transformed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
transformed_features[k] = v
return input_fn_utils.InputFnOps(
transformed_features, None, {"csv_example": placeholder}) | python | def build_csv_serving_tensors_for_transform_step(analysis_path,
features,
schema,
stats,
keep_target):
"""Builds a serving function starting from raw csv.
This should only be used by transform.py (the transform step).
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model.
"""
csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target)
placeholder = tf.placeholder(dtype=tf.string, shape=(None,),
name='csv_input_placeholder')
tensors = tf.decode_csv(placeholder, record_defaults)
raw_features = dict(zip(csv_header, tensors))
transform_fn = make_preprocessing_fn(analysis_path, features, keep_target)
transformed_tensors = transform_fn(raw_features)
transformed_features = {}
# Expand the dims of non-sparse tensors
for k, v in six.iteritems(transformed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
transformed_features[k] = v
return input_fn_utils.InputFnOps(
transformed_features, None, {"csv_example": placeholder}) | [
"def",
"build_csv_serving_tensors_for_transform_step",
"(",
"analysis_path",
",",
"features",
",",
"schema",
",",
"stats",
",",
"keep_target",
")",
":",
"csv_header",
",",
"record_defaults",
"=",
"csv_header_and_defaults",
"(",
"features",
",",
"schema",
",",
"stats",
",",
"keep_target",
")",
"placeholder",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"string",
",",
"shape",
"=",
"(",
"None",
",",
")",
",",
"name",
"=",
"'csv_input_placeholder'",
")",
"tensors",
"=",
"tf",
".",
"decode_csv",
"(",
"placeholder",
",",
"record_defaults",
")",
"raw_features",
"=",
"dict",
"(",
"zip",
"(",
"csv_header",
",",
"tensors",
")",
")",
"transform_fn",
"=",
"make_preprocessing_fn",
"(",
"analysis_path",
",",
"features",
",",
"keep_target",
")",
"transformed_tensors",
"=",
"transform_fn",
"(",
"raw_features",
")",
"transformed_features",
"=",
"{",
"}",
"# Expand the dims of non-sparse tensors",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"transformed_tensors",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"tf",
".",
"Tensor",
")",
"and",
"v",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"1",
":",
"transformed_features",
"[",
"k",
"]",
"=",
"tf",
".",
"expand_dims",
"(",
"v",
",",
"-",
"1",
")",
"else",
":",
"transformed_features",
"[",
"k",
"]",
"=",
"v",
"return",
"input_fn_utils",
".",
"InputFnOps",
"(",
"transformed_features",
",",
"None",
",",
"{",
"\"csv_example\"",
":",
"placeholder",
"}",
")"
] | Builds a serving function starting from raw csv.
This should only be used by transform.py (the transform step).
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model. | [
"Builds",
"a",
"serving",
"function",
"starting",
"from",
"raw",
"csv",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L534-L567 |
4,926 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | build_csv_serving_tensors_for_training_step | def build_csv_serving_tensors_for_training_step(analysis_path,
features,
schema,
stats,
keep_target):
"""Builds a serving function starting from raw csv, used at model export time.
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model and then a fully connected net is attached to
the 2048 long image embedding.
"""
transformed_features, _, placeholder_dict = build_csv_serving_tensors_for_transform_step(
analysis_path=analysis_path,
features=features,
schema=schema,
stats=stats,
keep_target=keep_target)
transformed_features = image_feature_engineering(
features=features,
feature_tensors_dict=transformed_features)
return input_fn_utils.InputFnOps(
transformed_features, None, placeholder_dict) | python | def build_csv_serving_tensors_for_training_step(analysis_path,
features,
schema,
stats,
keep_target):
"""Builds a serving function starting from raw csv, used at model export time.
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model and then a fully connected net is attached to
the 2048 long image embedding.
"""
transformed_features, _, placeholder_dict = build_csv_serving_tensors_for_transform_step(
analysis_path=analysis_path,
features=features,
schema=schema,
stats=stats,
keep_target=keep_target)
transformed_features = image_feature_engineering(
features=features,
feature_tensors_dict=transformed_features)
return input_fn_utils.InputFnOps(
transformed_features, None, placeholder_dict) | [
"def",
"build_csv_serving_tensors_for_training_step",
"(",
"analysis_path",
",",
"features",
",",
"schema",
",",
"stats",
",",
"keep_target",
")",
":",
"transformed_features",
",",
"_",
",",
"placeholder_dict",
"=",
"build_csv_serving_tensors_for_transform_step",
"(",
"analysis_path",
"=",
"analysis_path",
",",
"features",
"=",
"features",
",",
"schema",
"=",
"schema",
",",
"stats",
"=",
"stats",
",",
"keep_target",
"=",
"keep_target",
")",
"transformed_features",
"=",
"image_feature_engineering",
"(",
"features",
"=",
"features",
",",
"feature_tensors_dict",
"=",
"transformed_features",
")",
"return",
"input_fn_utils",
".",
"InputFnOps",
"(",
"transformed_features",
",",
"None",
",",
"placeholder_dict",
")"
] | Builds a serving function starting from raw csv, used at model export time.
For image columns, the image should be a base64 string encoding the image.
The output of this function will transform that image to a 2048 long vector
using the inception model and then a fully connected net is attached to
the 2048 long image embedding. | [
"Builds",
"a",
"serving",
"function",
"starting",
"from",
"raw",
"csv",
"used",
"at",
"model",
"export",
"time",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L570-L595 |
4,927 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | build_csv_transforming_training_input_fn | def build_csv_transforming_training_input_fn(schema,
features,
stats,
analysis_output_dir,
raw_data_file_pattern,
training_batch_size,
num_epochs=None,
randomize_input=False,
min_after_dequeue=1,
reader_num_threads=1,
allow_smaller_final_batch=True):
"""Creates training input_fn that reads raw csv data and applies transforms.
Args:
schema: schema list
features: features dict
stats: stats dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads raw csv training data and
applies transforms.
"""
def raw_training_input_fn():
"""Training input function that reads raw data and applies transforms."""
if isinstance(raw_data_file_pattern, six.string_types):
filepath_list = [raw_data_file_pattern]
else:
filepath_list = raw_data_file_pattern
files = []
for path in filepath_list:
files.extend(file_io.get_matching_files(path))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=randomize_input)
csv_id, csv_lines = tf.TextLineReader().read_up_to(filename_queue, training_batch_size)
queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
if randomize_input:
_, batch_csv_lines = tf.train.shuffle_batch(
tensors=[csv_id, csv_lines],
batch_size=training_batch_size,
capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
_, batch_csv_lines = tf.train.batch(
tensors=[csv_id, csv_lines],
batch_size=training_batch_size,
capacity=queue_capacity,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target=True)
parsed_tensors = tf.decode_csv(batch_csv_lines, record_defaults, name='csv_to_tensors')
raw_features = dict(zip(csv_header, parsed_tensors))
transform_fn = make_preprocessing_fn(analysis_output_dir, features, keep_target=True)
transformed_tensors = transform_fn(raw_features)
# Expand the dims of non-sparse tensors. This is needed by tf.learn.
transformed_features = {}
for k, v in six.iteritems(transformed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
transformed_features[k] = v
# image_feature_engineering does not need to be called as images are not
# supported in raw csv for training.
# Remove the target tensor, and return it directly
target_name = get_target_name(features)
if not target_name or target_name not in transformed_features:
raise ValueError('Cannot find target transform in features')
transformed_target = transformed_features.pop(target_name)
return transformed_features, transformed_target
return raw_training_input_fn | python | def build_csv_transforming_training_input_fn(schema,
features,
stats,
analysis_output_dir,
raw_data_file_pattern,
training_batch_size,
num_epochs=None,
randomize_input=False,
min_after_dequeue=1,
reader_num_threads=1,
allow_smaller_final_batch=True):
"""Creates training input_fn that reads raw csv data and applies transforms.
Args:
schema: schema list
features: features dict
stats: stats dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads raw csv training data and
applies transforms.
"""
def raw_training_input_fn():
"""Training input function that reads raw data and applies transforms."""
if isinstance(raw_data_file_pattern, six.string_types):
filepath_list = [raw_data_file_pattern]
else:
filepath_list = raw_data_file_pattern
files = []
for path in filepath_list:
files.extend(file_io.get_matching_files(path))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=randomize_input)
csv_id, csv_lines = tf.TextLineReader().read_up_to(filename_queue, training_batch_size)
queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
if randomize_input:
_, batch_csv_lines = tf.train.shuffle_batch(
tensors=[csv_id, csv_lines],
batch_size=training_batch_size,
capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
_, batch_csv_lines = tf.train.batch(
tensors=[csv_id, csv_lines],
batch_size=training_batch_size,
capacity=queue_capacity,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target=True)
parsed_tensors = tf.decode_csv(batch_csv_lines, record_defaults, name='csv_to_tensors')
raw_features = dict(zip(csv_header, parsed_tensors))
transform_fn = make_preprocessing_fn(analysis_output_dir, features, keep_target=True)
transformed_tensors = transform_fn(raw_features)
# Expand the dims of non-sparse tensors. This is needed by tf.learn.
transformed_features = {}
for k, v in six.iteritems(transformed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
transformed_features[k] = v
# image_feature_engineering does not need to be called as images are not
# supported in raw csv for training.
# Remove the target tensor, and return it directly
target_name = get_target_name(features)
if not target_name or target_name not in transformed_features:
raise ValueError('Cannot find target transform in features')
transformed_target = transformed_features.pop(target_name)
return transformed_features, transformed_target
return raw_training_input_fn | [
"def",
"build_csv_transforming_training_input_fn",
"(",
"schema",
",",
"features",
",",
"stats",
",",
"analysis_output_dir",
",",
"raw_data_file_pattern",
",",
"training_batch_size",
",",
"num_epochs",
"=",
"None",
",",
"randomize_input",
"=",
"False",
",",
"min_after_dequeue",
"=",
"1",
",",
"reader_num_threads",
"=",
"1",
",",
"allow_smaller_final_batch",
"=",
"True",
")",
":",
"def",
"raw_training_input_fn",
"(",
")",
":",
"\"\"\"Training input function that reads raw data and applies transforms.\"\"\"",
"if",
"isinstance",
"(",
"raw_data_file_pattern",
",",
"six",
".",
"string_types",
")",
":",
"filepath_list",
"=",
"[",
"raw_data_file_pattern",
"]",
"else",
":",
"filepath_list",
"=",
"raw_data_file_pattern",
"files",
"=",
"[",
"]",
"for",
"path",
"in",
"filepath_list",
":",
"files",
".",
"extend",
"(",
"file_io",
".",
"get_matching_files",
"(",
"path",
")",
")",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"files",
",",
"num_epochs",
"=",
"num_epochs",
",",
"shuffle",
"=",
"randomize_input",
")",
"csv_id",
",",
"csv_lines",
"=",
"tf",
".",
"TextLineReader",
"(",
")",
".",
"read_up_to",
"(",
"filename_queue",
",",
"training_batch_size",
")",
"queue_capacity",
"=",
"(",
"reader_num_threads",
"+",
"3",
")",
"*",
"training_batch_size",
"+",
"min_after_dequeue",
"if",
"randomize_input",
":",
"_",
",",
"batch_csv_lines",
"=",
"tf",
".",
"train",
".",
"shuffle_batch",
"(",
"tensors",
"=",
"[",
"csv_id",
",",
"csv_lines",
"]",
",",
"batch_size",
"=",
"training_batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"min_after_dequeue",
"=",
"min_after_dequeue",
",",
"enqueue_many",
"=",
"True",
",",
"num_threads",
"=",
"reader_num_threads",
",",
"allow_smaller_final_batch",
"=",
"allow_smaller_final_batch",
")",
"else",
":",
"_",
",",
"batch_csv_lines",
"=",
"tf",
".",
"train",
".",
"batch",
"(",
"tensors",
"=",
"[",
"csv_id",
",",
"csv_lines",
"]",
",",
"batch_size",
"=",
"training_batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"enqueue_many",
"=",
"True",
",",
"num_threads",
"=",
"reader_num_threads",
",",
"allow_smaller_final_batch",
"=",
"allow_smaller_final_batch",
")",
"csv_header",
",",
"record_defaults",
"=",
"csv_header_and_defaults",
"(",
"features",
",",
"schema",
",",
"stats",
",",
"keep_target",
"=",
"True",
")",
"parsed_tensors",
"=",
"tf",
".",
"decode_csv",
"(",
"batch_csv_lines",
",",
"record_defaults",
",",
"name",
"=",
"'csv_to_tensors'",
")",
"raw_features",
"=",
"dict",
"(",
"zip",
"(",
"csv_header",
",",
"parsed_tensors",
")",
")",
"transform_fn",
"=",
"make_preprocessing_fn",
"(",
"analysis_output_dir",
",",
"features",
",",
"keep_target",
"=",
"True",
")",
"transformed_tensors",
"=",
"transform_fn",
"(",
"raw_features",
")",
"# Expand the dims of non-sparse tensors. This is needed by tf.learn.",
"transformed_features",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"transformed_tensors",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"tf",
".",
"Tensor",
")",
"and",
"v",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"1",
":",
"transformed_features",
"[",
"k",
"]",
"=",
"tf",
".",
"expand_dims",
"(",
"v",
",",
"-",
"1",
")",
"else",
":",
"transformed_features",
"[",
"k",
"]",
"=",
"v",
"# image_feature_engineering does not need to be called as images are not",
"# supported in raw csv for training.",
"# Remove the target tensor, and return it directly",
"target_name",
"=",
"get_target_name",
"(",
"features",
")",
"if",
"not",
"target_name",
"or",
"target_name",
"not",
"in",
"transformed_features",
":",
"raise",
"ValueError",
"(",
"'Cannot find target transform in features'",
")",
"transformed_target",
"=",
"transformed_features",
".",
"pop",
"(",
"target_name",
")",
"return",
"transformed_features",
",",
"transformed_target",
"return",
"raw_training_input_fn"
] | Creates training input_fn that reads raw csv data and applies transforms.
Args:
schema: schema list
features: features dict
stats: stats dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads raw csv training data and
applies transforms. | [
"Creates",
"training",
"input_fn",
"that",
"reads",
"raw",
"csv",
"data",
"and",
"applies",
"transforms",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L598-L698 |
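A wiring sketch for the builder above; the paths and hyperparameters are placeholders, and schema/features/stats are assumed to be the analysis outputs loaded elsewhere:

    train_input_fn = build_csv_transforming_training_input_fn(
        schema=schema,
        features=features,
        stats=stats,
        analysis_output_dir='gs://my-bucket/analysis',
        raw_data_file_pattern='gs://my-bucket/train-*.csv',
        training_batch_size=64,
        num_epochs=None,          # read forever; training length is controlled by max_steps
        randomize_input=True,
        min_after_dequeue=1000)
    # e.g. estimator.fit(input_fn=train_input_fn, max_steps=10000) with a tf.contrib.learn estimator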
4,928 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | build_tfexample_transfored_training_input_fn | def build_tfexample_transfored_training_input_fn(schema,
features,
analysis_output_dir,
raw_data_file_pattern,
training_batch_size,
num_epochs=None,
randomize_input=False,
min_after_dequeue=1,
reader_num_threads=1,
allow_smaller_final_batch=True):
"""Creates training input_fn that reads transformed tf.example files.
Args:
schema: schema list
features: features dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads transformed data in tf record
files of tf.example.
"""
def transformed_training_input_fn():
"""Training input function that reads transformed data."""
if isinstance(raw_data_file_pattern, six.string_types):
filepath_list = [raw_data_file_pattern]
else:
filepath_list = raw_data_file_pattern
files = []
for path in filepath_list:
files.extend(file_io.get_matching_files(path))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=randomize_input)
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
ex_id, ex_str = tf.TFRecordReader(options=options).read_up_to(
filename_queue, training_batch_size)
queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
if randomize_input:
_, batch_ex_str = tf.train.shuffle_batch(
tensors=[ex_id, ex_str],
batch_size=training_batch_size,
capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
_, batch_ex_str = tf.train.batch(
tensors=[ex_id, ex_str],
batch_size=training_batch_size,
capacity=queue_capacity,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
feature_spec = {}
feature_info = get_transformed_feature_info(features, schema)
for name, info in six.iteritems(feature_info):
if info['size'] is None:
feature_spec[name] = tf.VarLenFeature(dtype=info['dtype'])
else:
feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype'])
parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)
# Expand the dims of non-sparse tensors. This is needed by tf.learn.
transformed_features = {}
for k, v in six.iteritems(parsed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
# Sparse tensor
transformed_features[k] = v
transformed_features = image_feature_engineering(
features=features,
feature_tensors_dict=transformed_features)
# Remove the target tensor, and return it directly
target_name = get_target_name(features)
if not target_name or target_name not in transformed_features:
raise ValueError('Cannot find target transform in features')
transformed_target = transformed_features.pop(target_name)
return transformed_features, transformed_target
return transformed_training_input_fn | python | def build_tfexample_transfored_training_input_fn(schema,
features,
analysis_output_dir,
raw_data_file_pattern,
training_batch_size,
num_epochs=None,
randomize_input=False,
min_after_dequeue=1,
reader_num_threads=1,
allow_smaller_final_batch=True):
"""Creates training input_fn that reads transformed tf.example files.
Args:
schema: schema list
features: features dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads transformed data in tf record
files of tf.example.
"""
def transformed_training_input_fn():
"""Training input function that reads transformed data."""
if isinstance(raw_data_file_pattern, six.string_types):
filepath_list = [raw_data_file_pattern]
else:
filepath_list = raw_data_file_pattern
files = []
for path in filepath_list:
files.extend(file_io.get_matching_files(path))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=randomize_input)
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
ex_id, ex_str = tf.TFRecordReader(options=options).read_up_to(
filename_queue, training_batch_size)
queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
if randomize_input:
_, batch_ex_str = tf.train.shuffle_batch(
tensors=[ex_id, ex_str],
batch_size=training_batch_size,
capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
else:
_, batch_ex_str = tf.train.batch(
tensors=[ex_id, ex_str],
batch_size=training_batch_size,
capacity=queue_capacity,
enqueue_many=True,
num_threads=reader_num_threads,
allow_smaller_final_batch=allow_smaller_final_batch)
feature_spec = {}
feature_info = get_transformed_feature_info(features, schema)
for name, info in six.iteritems(feature_info):
if info['size'] is None:
feature_spec[name] = tf.VarLenFeature(dtype=info['dtype'])
else:
feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype'])
parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)
# Expand the dims of non-sparse tensors. This is needed by tf.learn.
transformed_features = {}
for k, v in six.iteritems(parsed_tensors):
if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
transformed_features[k] = tf.expand_dims(v, -1)
else:
# Sparse tensor
transformed_features[k] = v
transformed_features = image_feature_engineering(
features=features,
feature_tensors_dict=transformed_features)
# Remove the target tensor, and return it directly
target_name = get_target_name(features)
if not target_name or target_name not in transformed_features:
raise ValueError('Cannot find target transform in features')
transformed_target = transformed_features.pop(target_name)
return transformed_features, transformed_target
return transformed_training_input_fn | [
"def",
"build_tfexample_transfored_training_input_fn",
"(",
"schema",
",",
"features",
",",
"analysis_output_dir",
",",
"raw_data_file_pattern",
",",
"training_batch_size",
",",
"num_epochs",
"=",
"None",
",",
"randomize_input",
"=",
"False",
",",
"min_after_dequeue",
"=",
"1",
",",
"reader_num_threads",
"=",
"1",
",",
"allow_smaller_final_batch",
"=",
"True",
")",
":",
"def",
"transformed_training_input_fn",
"(",
")",
":",
"\"\"\"Training input function that reads transformed data.\"\"\"",
"if",
"isinstance",
"(",
"raw_data_file_pattern",
",",
"six",
".",
"string_types",
")",
":",
"filepath_list",
"=",
"[",
"raw_data_file_pattern",
"]",
"else",
":",
"filepath_list",
"=",
"raw_data_file_pattern",
"files",
"=",
"[",
"]",
"for",
"path",
"in",
"filepath_list",
":",
"files",
".",
"extend",
"(",
"file_io",
".",
"get_matching_files",
"(",
"path",
")",
")",
"filename_queue",
"=",
"tf",
".",
"train",
".",
"string_input_producer",
"(",
"files",
",",
"num_epochs",
"=",
"num_epochs",
",",
"shuffle",
"=",
"randomize_input",
")",
"options",
"=",
"tf",
".",
"python_io",
".",
"TFRecordOptions",
"(",
"compression_type",
"=",
"tf",
".",
"python_io",
".",
"TFRecordCompressionType",
".",
"GZIP",
")",
"ex_id",
",",
"ex_str",
"=",
"tf",
".",
"TFRecordReader",
"(",
"options",
"=",
"options",
")",
".",
"read_up_to",
"(",
"filename_queue",
",",
"training_batch_size",
")",
"queue_capacity",
"=",
"(",
"reader_num_threads",
"+",
"3",
")",
"*",
"training_batch_size",
"+",
"min_after_dequeue",
"if",
"randomize_input",
":",
"_",
",",
"batch_ex_str",
"=",
"tf",
".",
"train",
".",
"shuffle_batch",
"(",
"tensors",
"=",
"[",
"ex_id",
",",
"ex_str",
"]",
",",
"batch_size",
"=",
"training_batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"min_after_dequeue",
"=",
"min_after_dequeue",
",",
"enqueue_many",
"=",
"True",
",",
"num_threads",
"=",
"reader_num_threads",
",",
"allow_smaller_final_batch",
"=",
"allow_smaller_final_batch",
")",
"else",
":",
"_",
",",
"batch_ex_str",
"=",
"tf",
".",
"train",
".",
"batch",
"(",
"tensors",
"=",
"[",
"ex_id",
",",
"ex_str",
"]",
",",
"batch_size",
"=",
"training_batch_size",
",",
"capacity",
"=",
"queue_capacity",
",",
"enqueue_many",
"=",
"True",
",",
"num_threads",
"=",
"reader_num_threads",
",",
"allow_smaller_final_batch",
"=",
"allow_smaller_final_batch",
")",
"feature_spec",
"=",
"{",
"}",
"feature_info",
"=",
"get_transformed_feature_info",
"(",
"features",
",",
"schema",
")",
"for",
"name",
",",
"info",
"in",
"six",
".",
"iteritems",
"(",
"feature_info",
")",
":",
"if",
"info",
"[",
"'size'",
"]",
"is",
"None",
":",
"feature_spec",
"[",
"name",
"]",
"=",
"tf",
".",
"VarLenFeature",
"(",
"dtype",
"=",
"info",
"[",
"'dtype'",
"]",
")",
"else",
":",
"feature_spec",
"[",
"name",
"]",
"=",
"tf",
".",
"FixedLenFeature",
"(",
"shape",
"=",
"[",
"info",
"[",
"'size'",
"]",
"]",
",",
"dtype",
"=",
"info",
"[",
"'dtype'",
"]",
")",
"parsed_tensors",
"=",
"tf",
".",
"parse_example",
"(",
"batch_ex_str",
",",
"feature_spec",
")",
"# Expand the dims of non-sparse tensors. This is needed by tf.learn.",
"transformed_features",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"parsed_tensors",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"tf",
".",
"Tensor",
")",
"and",
"v",
".",
"get_shape",
"(",
")",
".",
"ndims",
"==",
"1",
":",
"transformed_features",
"[",
"k",
"]",
"=",
"tf",
".",
"expand_dims",
"(",
"v",
",",
"-",
"1",
")",
"else",
":",
"# Sparse tensor",
"transformed_features",
"[",
"k",
"]",
"=",
"v",
"transformed_features",
"=",
"image_feature_engineering",
"(",
"features",
"=",
"features",
",",
"feature_tensors_dict",
"=",
"transformed_features",
")",
"# Remove the target tensor, and return it directly",
"target_name",
"=",
"get_target_name",
"(",
"features",
")",
"if",
"not",
"target_name",
"or",
"target_name",
"not",
"in",
"transformed_features",
":",
"raise",
"ValueError",
"(",
"'Cannot find target transform in features'",
")",
"transformed_target",
"=",
"transformed_features",
".",
"pop",
"(",
"target_name",
")",
"return",
"transformed_features",
",",
"transformed_target",
"return",
"transformed_training_input_fn"
] | Creates training input_fn that reads transformed tf.example files.
Args:
schema: schema list
features: features dict
analysis_output_dir: output folder from analysis
raw_data_file_pattern: file path, or list of files
training_batch_size: An int specifying the batch size to use.
num_epochs: number of epochs to read from the files. Use None to read forever.
randomize_input: If true, the input rows are read out of order. This
randomness is limited by the min_after_dequeue value.
min_after_dequeue: Minimum number of elements in the reading queue after a
dequeue, used to ensure a level of mixing of elements. Only used if
randomize_input is True.
reader_num_threads: The number of threads enqueuing data.
allow_smaller_final_batch: If false, fractional batches at the end of
training or evaluation are not used.
Returns:
An input_fn suitable for training that reads transformed data in tf record
files of tf.example. | [
"Creates",
"training",
"input_fn",
"that",
"reads",
"transformed",
"tf",
".",
"example",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L701-L806 |
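The same wiring as the csv variant, but pointing at gzip-compressed tf.Example records produced by the transform step; note this builder takes no stats argument. File names are placeholders:

    eval_input_fn = build_tfexample_transfored_training_input_fn(
        schema=schema,
        features=features,
        analysis_output_dir='gs://my-bucket/analysis',
        raw_data_file_pattern='gs://my-bucket/features_eval-*.tfrecord.gz',
        training_batch_size=64,
        num_epochs=1,
        randomize_input=False)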
4,929 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | image_feature_engineering | def image_feature_engineering(features, feature_tensors_dict):
"""Add a hidden layer on image features.
Args:
features: features dict
feature_tensors_dict: dict of feature-name: tensor
"""
engineered_features = {}
for name, feature_tensor in six.iteritems(feature_tensors_dict):
if name in features and features[name]['transform'] == IMAGE_TRANSFORM:
with tf.name_scope(name, 'Wx_plus_b'):
hidden = tf.contrib.layers.fully_connected(
feature_tensor,
IMAGE_HIDDEN_TENSOR_SIZE)
engineered_features[name] = hidden
else:
engineered_features[name] = feature_tensor
return engineered_features | python | def image_feature_engineering(features, feature_tensors_dict):
"""Add a hidden layer on image features.
Args:
features: features dict
feature_tensors_dict: dict of feature-name: tensor
"""
engineered_features = {}
for name, feature_tensor in six.iteritems(feature_tensors_dict):
if name in features and features[name]['transform'] == IMAGE_TRANSFORM:
with tf.name_scope(name, 'Wx_plus_b'):
hidden = tf.contrib.layers.fully_connected(
feature_tensor,
IMAGE_HIDDEN_TENSOR_SIZE)
engineered_features[name] = hidden
else:
engineered_features[name] = feature_tensor
return engineered_features | [
"def",
"image_feature_engineering",
"(",
"features",
",",
"feature_tensors_dict",
")",
":",
"engineered_features",
"=",
"{",
"}",
"for",
"name",
",",
"feature_tensor",
"in",
"six",
".",
"iteritems",
"(",
"feature_tensors_dict",
")",
":",
"if",
"name",
"in",
"features",
"and",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"==",
"IMAGE_TRANSFORM",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"'Wx_plus_b'",
")",
":",
"hidden",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"fully_connected",
"(",
"feature_tensor",
",",
"IMAGE_HIDDEN_TENSOR_SIZE",
")",
"engineered_features",
"[",
"name",
"]",
"=",
"hidden",
"else",
":",
"engineered_features",
"[",
"name",
"]",
"=",
"feature_tensor",
"return",
"engineered_features"
] | Add a hidden layer on image features.
Args:
features: features dict
feature_tensors_dict: dict of feature-name: tensor | [
"Add",
"a",
"hidden",
"layer",
"on",
"image",
"features",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L809-L826 |
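A graph-mode sketch for the helper above; 'image_to_vec' is assumed to be the value of IMAGE_TRANSFORM, and the 2048 width matches the inception embedding mentioned elsewhere in this module:

    import tensorflow as tf

    features = {'photo': {'transform': 'image_to_vec'},   # assumed transform name
                'age': {'transform': 'identity'}}
    tensors = {'photo': tf.placeholder(tf.float32, [None, 2048]),
               'age': tf.placeholder(tf.float32, [None, 1])}
    out = image_feature_engineering(features, tensors)
    # out['photo'] is a fully connected layer of width IMAGE_HIDDEN_TENSOR_SIZE; out['age'] is untouched.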
4,930 | googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py | read_vocab_file | def read_vocab_file(file_path):
"""Reads a vocab file to memeory.
Args:
file_path: Each line of the vocab is in the form "token,example_count"
Returns:
Two lists, one for the vocab, and one for just the example counts.
"""
with file_io.FileIO(file_path, 'r') as f:
vocab_pd = pd.read_csv(
f,
header=None,
names=['vocab', 'count'],
dtype=str, # Prevent pd from converting numerical categories.
na_filter=False) # Prevent pd from converting 'NA' to a NaN.
vocab = vocab_pd['vocab'].tolist()
ex_count = vocab_pd['count'].astype(int).tolist()
return vocab, ex_count | python | def read_vocab_file(file_path):
"""Reads a vocab file to memeory.
Args:
file_path: Each line of the vocab is in the form "token,example_count"
Returns:
Two lists, one for the vocab, and one for just the example counts.
"""
with file_io.FileIO(file_path, 'r') as f:
vocab_pd = pd.read_csv(
f,
header=None,
names=['vocab', 'count'],
dtype=str, # Prevent pd from converting numerical categories.
na_filter=False) # Prevent pd from converting 'NA' to a NaN.
vocab = vocab_pd['vocab'].tolist()
ex_count = vocab_pd['count'].astype(int).tolist()
return vocab, ex_count | [
"def",
"read_vocab_file",
"(",
"file_path",
")",
":",
"with",
"file_io",
".",
"FileIO",
"(",
"file_path",
",",
"'r'",
")",
"as",
"f",
":",
"vocab_pd",
"=",
"pd",
".",
"read_csv",
"(",
"f",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"'vocab'",
",",
"'count'",
"]",
",",
"dtype",
"=",
"str",
",",
"# Prevent pd from converting numerical categories.",
"na_filter",
"=",
"False",
")",
"# Prevent pd from converting 'NA' to a NaN.",
"vocab",
"=",
"vocab_pd",
"[",
"'vocab'",
"]",
".",
"tolist",
"(",
")",
"ex_count",
"=",
"vocab_pd",
"[",
"'count'",
"]",
".",
"astype",
"(",
"int",
")",
".",
"tolist",
"(",
")",
"return",
"vocab",
",",
"ex_count"
] | Reads a vocab file to memory.
Args:
file_path: Each line of the vocab is in the form "token,example_count"
Returns:
Two lists, one for the vocab, and one for just the example counts. | [
"Reads",
"a",
"vocab",
"file",
"to",
"memeory",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/tensorflow/trainer/feature_transforms.py#L837-L857 |
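A local-file sketch for read_vocab_file (GCS paths work the same way since file_io is used underneath); the file path is a placeholder:

    with open('/tmp/vocab.csv', 'w') as f:
        f.write('red,10\nblue,7\nNA,3\n')  # 'NA' stays a literal token because na_filter=False
    vocab, counts = read_vocab_file('/tmp/vocab.csv')
    # vocab == ['red', 'blue', 'NA'], counts == [10, 7, 3]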
4,931 | googledatalab/pydatalab | google/datalab/bigquery/_external_data_source.py | ExternalDataSource._to_query_json | def _to_query_json(self):
""" Return the table as a dictionary to be used as JSON in a query job. """
json = {
'compression': 'GZIP' if self._compressed else 'NONE',
'ignoreUnknownValues': self._ignore_unknown_values,
'maxBadRecords': self._max_bad_records,
'sourceFormat': self._bq_source_format,
'sourceUris': self._source,
}
if self._source_format == 'csv' and self._csv_options:
json['csvOptions'] = {}
json['csvOptions'].update(self._csv_options._to_query_json())
if self._schema:
json['schema'] = {'fields': self._schema._bq_schema}
return json | python | def _to_query_json(self):
""" Return the table as a dictionary to be used as JSON in a query job. """
json = {
'compression': 'GZIP' if self._compressed else 'NONE',
'ignoreUnknownValues': self._ignore_unknown_values,
'maxBadRecords': self._max_bad_records,
'sourceFormat': self._bq_source_format,
'sourceUris': self._source,
}
if self._source_format == 'csv' and self._csv_options:
json['csvOptions'] = {}
json['csvOptions'].update(self._csv_options._to_query_json())
if self._schema:
json['schema'] = {'fields': self._schema._bq_schema}
return json | [
"def",
"_to_query_json",
"(",
"self",
")",
":",
"json",
"=",
"{",
"'compression'",
":",
"'GZIP'",
"if",
"self",
".",
"_compressed",
"else",
"'NONE'",
",",
"'ignoreUnknownValues'",
":",
"self",
".",
"_ignore_unknown_values",
",",
"'maxBadRecords'",
":",
"self",
".",
"_max_bad_records",
",",
"'sourceFormat'",
":",
"self",
".",
"_bq_source_format",
",",
"'sourceUris'",
":",
"self",
".",
"_source",
",",
"}",
"if",
"self",
".",
"_source_format",
"==",
"'csv'",
"and",
"self",
".",
"_csv_options",
":",
"json",
"[",
"'csvOptions'",
"]",
"=",
"{",
"}",
"json",
"[",
"'csvOptions'",
"]",
".",
"update",
"(",
"self",
".",
"_csv_options",
".",
"_to_query_json",
"(",
")",
")",
"if",
"self",
".",
"_schema",
":",
"json",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"self",
".",
"_schema",
".",
"_bq_schema",
"}",
"return",
"json"
] | Return the table as a dictionary to be used as JSON in a query job. | [
"Return",
"the",
"table",
"as",
"a",
"dictionary",
"to",
"be",
"used",
"as",
"JSON",
"in",
"a",
"query",
"job",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_external_data_source.py#L70-L84 |
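For reference, a hypothetical example of the dictionary shape this method produces for a gzip-compressed CSV source; the keys mirror the code above, while every value (bucket path, schema field, csv option) is invented for illustration.

import json

external_source_json = {
    'compression': 'GZIP',
    'ignoreUnknownValues': True,
    'maxBadRecords': 0,
    'sourceFormat': 'CSV',
    'sourceUris': ['gs://my-bucket/data/*.csv'],
    # Present only for CSV sources that carry csv options.
    'csvOptions': {'skipLeadingRows': 1},
    # Present only when an explicit schema was supplied.
    'schema': {'fields': [{'name': 'id', 'type': 'INTEGER'}]},
}
print(json.dumps(external_source_json, indent=2, sort_keys=True))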
4,932 | googledatalab/pydatalab | google/datalab/kernel/__init__.py | load_ipython_extension | def load_ipython_extension(shell):
"""
Called when the extension is loaded.
Args:
shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
"""
# Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request.
def _request(self, uri, method="GET", body=None, headers=None,
redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
return _orig_request(self, uri, method=method, body=body, headers=headers,
redirections=redirections, connection_type=connection_type)
_httplib2.Http.request = _request
# Similarly for the requests library.
def _init_session(self):
_orig_init(self)
self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'
_requests.Session.__init__ = _init_session
# Be more tolerant with magics. If the user specified a cell magic that doesn't
# exist and an empty cell body but a line magic with that name exists, run that
# instead. Conversely, if the user specified a line magic that doesn't exist but
# a cell magic exists with that name, run the cell magic with an empty body.
def _run_line_magic(self, magic_name, line):
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
if cm:
return _run_cell_magic(self, magic_name, line, None)
return _orig_run_line_magic(self, magic_name, line)
def _run_cell_magic(self, magic_name, line, cell):
if cell is None or len(cell) == 0 or cell.isspace():
fn = self.find_line_magic(magic_name)
if fn:
return _orig_run_line_magic(self, magic_name, line)
# IPython will complain if cell is empty string but not if it is None
cell = None
return _orig_run_cell_magic(self, magic_name, line, cell)
_shell.InteractiveShell.run_cell_magic = _run_cell_magic
_shell.InteractiveShell.run_line_magic = _run_line_magic
# Define global 'project_id' and 'set_project_id' functions to manage the default project ID. We
# do this conditionally in a try/catch # to avoid the call to Context.default() when running tests
# which mock IPython.get_ipython().
def _get_project_id():
try:
return google.datalab.Context.default().project_id
except Exception:
return None
def _set_project_id(project_id):
context = google.datalab.Context.default()
context.set_project_id(project_id)
try:
from datalab.context import Context as _old_context
_old_context.default().set_project_id(project_id)
except ImportError:
# If the old library is not loaded, then we don't have to do anything
pass
try:
if 'datalab_project_id' not in _IPython.get_ipython().user_ns:
_IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id
_IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id
except TypeError:
pass | python | def load_ipython_extension(shell):
"""
Called when the extension is loaded.
Args:
shell - (NotebookWebApplication): handle to the Notebook interactive shell instance.
"""
# Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request.
def _request(self, uri, method="GET", body=None, headers=None,
redirections=_httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
if headers is None:
headers = {}
headers['user-agent'] = 'GoogleCloudDataLab/1.0'
return _orig_request(self, uri, method=method, body=body, headers=headers,
redirections=redirections, connection_type=connection_type)
_httplib2.Http.request = _request
# Similarly for the requests library.
def _init_session(self):
_orig_init(self)
self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'
_requests.Session.__init__ = _init_session
# Be more tolerant with magics. If the user specified a cell magic that doesn't
# exist and an empty cell body but a line magic with that name exists, run that
# instead. Conversely, if the user specified a line magic that doesn't exist but
# a cell magic exists with that name, run the cell magic with an empty body.
def _run_line_magic(self, magic_name, line):
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
if cm:
return _run_cell_magic(self, magic_name, line, None)
return _orig_run_line_magic(self, magic_name, line)
def _run_cell_magic(self, magic_name, line, cell):
if cell is None or len(cell) == 0 or cell.isspace():
fn = self.find_line_magic(magic_name)
if fn:
return _orig_run_line_magic(self, magic_name, line)
# IPython will complain if cell is empty string but not if it is None
cell = None
return _orig_run_cell_magic(self, magic_name, line, cell)
_shell.InteractiveShell.run_cell_magic = _run_cell_magic
_shell.InteractiveShell.run_line_magic = _run_line_magic
# Define global 'project_id' and 'set_project_id' functions to manage the default project ID. We
# do this conditionally in a try/catch # to avoid the call to Context.default() when running tests
# which mock IPython.get_ipython().
def _get_project_id():
try:
return google.datalab.Context.default().project_id
except Exception:
return None
def _set_project_id(project_id):
context = google.datalab.Context.default()
context.set_project_id(project_id)
try:
from datalab.context import Context as _old_context
_old_context.default().set_project_id(project_id)
except ImportError:
# If the old library is not loaded, then we don't have to do anything
pass
try:
if 'datalab_project_id' not in _IPython.get_ipython().user_ns:
_IPython.get_ipython().user_ns['datalab_project_id'] = _get_project_id
_IPython.get_ipython().user_ns['set_datalab_project_id'] = _set_project_id
except TypeError:
pass | [
"def",
"load_ipython_extension",
"(",
"shell",
")",
":",
"# Inject our user agent on all requests by monkey-patching a wrapper around httplib2.Http.request.",
"def",
"_request",
"(",
"self",
",",
"uri",
",",
"method",
"=",
"\"GET\"",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"redirections",
"=",
"_httplib2",
".",
"DEFAULT_MAX_REDIRECTS",
",",
"connection_type",
"=",
"None",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"{",
"}",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"return",
"_orig_request",
"(",
"self",
",",
"uri",
",",
"method",
"=",
"method",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"headers",
",",
"redirections",
"=",
"redirections",
",",
"connection_type",
"=",
"connection_type",
")",
"_httplib2",
".",
"Http",
".",
"request",
"=",
"_request",
"# Similarly for the requests library.",
"def",
"_init_session",
"(",
"self",
")",
":",
"_orig_init",
"(",
"self",
")",
"self",
".",
"headers",
"[",
"'User-Agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"_requests",
".",
"Session",
".",
"__init__",
"=",
"_init_session",
"# Be more tolerant with magics. If the user specified a cell magic that doesn't",
"# exist and an empty cell body but a line magic with that name exists, run that",
"# instead. Conversely, if the user specified a line magic that doesn't exist but",
"# a cell magic exists with that name, run the cell magic with an empty body.",
"def",
"_run_line_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
")",
":",
"fn",
"=",
"self",
".",
"find_line_magic",
"(",
"magic_name",
")",
"if",
"fn",
"is",
"None",
":",
"cm",
"=",
"self",
".",
"find_cell_magic",
"(",
"magic_name",
")",
"if",
"cm",
":",
"return",
"_run_cell_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
",",
"None",
")",
"return",
"_orig_run_line_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
")",
"def",
"_run_cell_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
",",
"cell",
")",
":",
"if",
"cell",
"is",
"None",
"or",
"len",
"(",
"cell",
")",
"==",
"0",
"or",
"cell",
".",
"isspace",
"(",
")",
":",
"fn",
"=",
"self",
".",
"find_line_magic",
"(",
"magic_name",
")",
"if",
"fn",
":",
"return",
"_orig_run_line_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
")",
"# IPython will complain if cell is empty string but not if it is None",
"cell",
"=",
"None",
"return",
"_orig_run_cell_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
",",
"cell",
")",
"_shell",
".",
"InteractiveShell",
".",
"run_cell_magic",
"=",
"_run_cell_magic",
"_shell",
".",
"InteractiveShell",
".",
"run_line_magic",
"=",
"_run_line_magic",
"# Define global 'project_id' and 'set_project_id' functions to manage the default project ID. We",
"# do this conditionally in a try/catch # to avoid the call to Context.default() when running tests",
"# which mock IPython.get_ipython().",
"def",
"_get_project_id",
"(",
")",
":",
"try",
":",
"return",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
".",
"project_id",
"except",
"Exception",
":",
"return",
"None",
"def",
"_set_project_id",
"(",
"project_id",
")",
":",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"context",
".",
"set_project_id",
"(",
"project_id",
")",
"try",
":",
"from",
"datalab",
".",
"context",
"import",
"Context",
"as",
"_old_context",
"_old_context",
".",
"default",
"(",
")",
".",
"set_project_id",
"(",
"project_id",
")",
"except",
"ImportError",
":",
"# If the old library is not loaded, then we don't have to do anything",
"pass",
"try",
":",
"if",
"'datalab_project_id'",
"not",
"in",
"_IPython",
".",
"get_ipython",
"(",
")",
".",
"user_ns",
":",
"_IPython",
".",
"get_ipython",
"(",
")",
".",
"user_ns",
"[",
"'datalab_project_id'",
"]",
"=",
"_get_project_id",
"_IPython",
".",
"get_ipython",
"(",
")",
".",
"user_ns",
"[",
"'set_datalab_project_id'",
"]",
"=",
"_set_project_id",
"except",
"TypeError",
":",
"pass"
] | Called when the extension is loaded.
Args:
shell - (NotebookWebApplication): handle to the Notebook interactive shell instance. | [
"Called",
"when",
"the",
"extension",
"is",
"loaded",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/kernel/__init__.py#L44-L122 |
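A reduced sketch of just the requests half of the user-agent injection shown above; it monkey-patches requests.Session globally, so try it in a throwaway interpreter rather than a long-lived kernel.

import requests

_orig_init = requests.Session.__init__

def _init_session(self):
    _orig_init(self)
    self.headers['User-Agent'] = 'GoogleCloudDataLab/1.0'

requests.Session.__init__ = _init_session

session = requests.Session()
print(session.headers['User-Agent'])  # GoogleCloudDataLab/1.0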
4,933 | googledatalab/pydatalab | datalab/data/_sql_module.py | SqlModule._get_sql_args | def _get_sql_args(parser, args=None):
""" Parse a set of %%sql arguments or get the default value of the arguments.
Args:
parser: the argument parser to use.
args: the argument flags. May be a string or a list. If omitted the empty string is used so
we can get the default values for the arguments. These are all used to override the
arg parser. Alternatively args may be a dictionary, in which case it overrides the
default values from the arg parser.
Returns:
A dictionary of argument names and values.
"""
overrides = None
if args is None:
tokens = []
elif isinstance(args, basestring):
command_line = ' '.join(args.split('\n'))
tokens = shlex.split(command_line)
elif isinstance(args, dict):
overrides = args
tokens = []
else:
tokens = args
args = {} if parser is None else vars(parser.parse_args(tokens))
if overrides:
args.update(overrides)
# Don't return any args that are None as we don't want to expand to 'None'
return {arg: value for arg, value in args.items() if value is not None} | python | def _get_sql_args(parser, args=None):
""" Parse a set of %%sql arguments or get the default value of the arguments.
Args:
parser: the argument parser to use.
args: the argument flags. May be a string or a list. If omitted the empty string is used so
we can get the default values for the arguments. These are all used to override the
arg parser. Alternatively args may be a dictionary, in which case it overrides the
default values from the arg parser.
Returns:
A dictionary of argument names and values.
"""
overrides = None
if args is None:
tokens = []
elif isinstance(args, basestring):
command_line = ' '.join(args.split('\n'))
tokens = shlex.split(command_line)
elif isinstance(args, dict):
overrides = args
tokens = []
else:
tokens = args
args = {} if parser is None else vars(parser.parse_args(tokens))
if overrides:
args.update(overrides)
# Don't return any args that are None as we don't want to expand to 'None'
return {arg: value for arg, value in args.items() if value is not None} | [
"def",
"_get_sql_args",
"(",
"parser",
",",
"args",
"=",
"None",
")",
":",
"overrides",
"=",
"None",
"if",
"args",
"is",
"None",
":",
"tokens",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"args",
",",
"basestring",
")",
":",
"command_line",
"=",
"' '",
".",
"join",
"(",
"args",
".",
"split",
"(",
"'\\n'",
")",
")",
"tokens",
"=",
"shlex",
".",
"split",
"(",
"command_line",
")",
"elif",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"overrides",
"=",
"args",
"tokens",
"=",
"[",
"]",
"else",
":",
"tokens",
"=",
"args",
"args",
"=",
"{",
"}",
"if",
"parser",
"is",
"None",
"else",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"tokens",
")",
")",
"if",
"overrides",
":",
"args",
".",
"update",
"(",
"overrides",
")",
"# Don't return any args that are None as we don't want to expand to 'None'",
"return",
"{",
"arg",
":",
"value",
"for",
"arg",
",",
"value",
"in",
"args",
".",
"items",
"(",
")",
"if",
"value",
"is",
"not",
"None",
"}"
] | Parse a set of %%sql arguments or get the default value of the arguments.
Args:
parser: the argument parser to use.
args: the argument flags. May be a string or a list. If omitted the empty string is used so
we can get the default values for the arguments. These are all used to override the
arg parser. Alternatively args may be a dictionary, in which case it overrides the
default values from the arg parser.
Returns:
A dictionary of argument names and values. | [
"Parse",
"a",
"set",
"of",
"%%sql",
"arguments",
"or",
"get",
"the",
"default",
"value",
"of",
"the",
"arguments",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_module.py#L33-L62 |
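A standalone sketch of the same argument handling, with a hand-built argparse parser standing in for the one a %%sql module would define: newlines are joined into one command line, the string is shlex-split, and None values are dropped so they never expand into the literal string 'None'.

import argparse
import shlex

parser = argparse.ArgumentParser()
parser.add_argument('--limit', type=int, default=10)
parser.add_argument('--name', default=None)

command_line = ' '.join('--limit 5\n--name daily'.split('\n'))
tokens = shlex.split(command_line)
args = vars(parser.parse_args(tokens))

# Drop Nones so they never appear as 'None' in the expanded SQL.
args = {arg: value for arg, value in args.items() if value is not None}
print(args)  # {'limit': 5, 'name': 'daily'}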
4,934 | googledatalab/pydatalab | datalab/data/_sql_module.py | SqlModule.get_sql_statement_with_environment | def get_sql_statement_with_environment(item, args=None):
""" Given a SQLStatement, string or module plus command line args or a dictionary,
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use.
"""
if isinstance(item, basestring):
item = _sql_statement.SqlStatement(item)
elif not isinstance(item, _sql_statement.SqlStatement):
item = SqlModule.get_default_query_from_module(item)
if not item:
raise Exception('Expected a SQL statement or module but got %s' % str(item))
env = {}
if item.module:
env.update(item.module.__dict__)
parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)
if parser:
args = SqlModule._get_sql_args(parser, args=args)
else:
args = None
if isinstance(args, dict):
env.update(args)
return item, env | python | def get_sql_statement_with_environment(item, args=None):
""" Given a SQLStatement, string or module plus command line args or a dictionary,
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use.
"""
if isinstance(item, basestring):
item = _sql_statement.SqlStatement(item)
elif not isinstance(item, _sql_statement.SqlStatement):
item = SqlModule.get_default_query_from_module(item)
if not item:
raise Exception('Expected a SQL statement or module but got %s' % str(item))
env = {}
if item.module:
env.update(item.module.__dict__)
parser = env.get(_utils._SQL_MODULE_ARGPARSE, None)
if parser:
args = SqlModule._get_sql_args(parser, args=args)
else:
args = None
if isinstance(args, dict):
env.update(args)
return item, env | [
"def",
"get_sql_statement_with_environment",
"(",
"item",
",",
"args",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"basestring",
")",
":",
"item",
"=",
"_sql_statement",
".",
"SqlStatement",
"(",
"item",
")",
"elif",
"not",
"isinstance",
"(",
"item",
",",
"_sql_statement",
".",
"SqlStatement",
")",
":",
"item",
"=",
"SqlModule",
".",
"get_default_query_from_module",
"(",
"item",
")",
"if",
"not",
"item",
":",
"raise",
"Exception",
"(",
"'Expected a SQL statement or module but got %s'",
"%",
"str",
"(",
"item",
")",
")",
"env",
"=",
"{",
"}",
"if",
"item",
".",
"module",
":",
"env",
".",
"update",
"(",
"item",
".",
"module",
".",
"__dict__",
")",
"parser",
"=",
"env",
".",
"get",
"(",
"_utils",
".",
"_SQL_MODULE_ARGPARSE",
",",
"None",
")",
"if",
"parser",
":",
"args",
"=",
"SqlModule",
".",
"_get_sql_args",
"(",
"parser",
",",
"args",
"=",
"args",
")",
"else",
":",
"args",
"=",
"None",
"if",
"isinstance",
"(",
"args",
",",
"dict",
")",
":",
"env",
".",
"update",
"(",
"args",
")",
"return",
"item",
",",
"env"
] | Given a SQLStatement, string or module plus command line args or a dictionary,
return a SqlStatement and final dictionary for variable resolution.
Args:
item: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values.
Returns:
A SqlStatement for the query or module, plus a dictionary of variable values to use. | [
"Given",
"a",
"SQLStatement",
"string",
"or",
"module",
"plus",
"command",
"line",
"args",
"or",
"a",
"dictionary",
"return",
"a",
"SqlStatement",
"and",
"final",
"dictionary",
"for",
"variable",
"resolution",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_module.py#L77-L107 |
4,935 | googledatalab/pydatalab | datalab/data/_sql_module.py | SqlModule.expand | def expand(sql, args=None):
""" Expand a SqlStatement, query string or SqlModule with a set of arguments.
Args:
sql: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values. If a string, it is
passed to the argument parser for the SqlModule associated with the SqlStatement or
SqlModule. If a dictionary, it is used to override any default arguments from the
argument parser. If the sql argument is a string then args must be None or a dictionary
as in this case there is no associated argument parser.
Returns:
The expanded SQL, list of referenced scripts, and list of referenced external tables.
"""
sql, args = SqlModule.get_sql_statement_with_environment(sql, args)
return _sql_statement.SqlStatement.format(sql._sql, args) | python | def expand(sql, args=None):
""" Expand a SqlStatement, query string or SqlModule with a set of arguments.
Args:
sql: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values. If a string, it is
passed to the argument parser for the SqlModule associated with the SqlStatement or
SqlModule. If a dictionary, it is used to override any default arguments from the
argument parser. If the sql argument is a string then args must be None or a dictionary
as in this case there is no associated argument parser.
Returns:
The expanded SQL, list of referenced scripts, and list of referenced external tables.
"""
sql, args = SqlModule.get_sql_statement_with_environment(sql, args)
return _sql_statement.SqlStatement.format(sql._sql, args) | [
"def",
"expand",
"(",
"sql",
",",
"args",
"=",
"None",
")",
":",
"sql",
",",
"args",
"=",
"SqlModule",
".",
"get_sql_statement_with_environment",
"(",
"sql",
",",
"args",
")",
"return",
"_sql_statement",
".",
"SqlStatement",
".",
"format",
"(",
"sql",
".",
"_sql",
",",
"args",
")"
] | Expand a SqlStatement, query string or SqlModule with a set of arguments.
Args:
sql: a SqlStatement, %%sql module, or string containing a query.
args: a string of command line arguments or a dictionary of values. If a string, it is
passed to the argument parser for the SqlModule associated with the SqlStatement or
SqlModule. If a dictionary, it is used to override any default arguments from the
argument parser. If the sql argument is a string then args must be None or a dictionary
as in this case there is no associated argument parser.
Returns:
The expanded SQL, list of referenced scripts, and list of referenced external tables. | [
"Expand",
"a",
"SqlStatement",
"query",
"string",
"or",
"SqlModule",
"with",
"a",
"set",
"of",
"arguments",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/data/_sql_module.py#L110-L124 |
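A hedged usage sketch, assuming the legacy datalab package is installed; the import path follows the record above, while the $name placeholder syntax and the sample query are assumptions about SqlStatement's templating rather than something shown in this record.

from datalab.data._sql_module import SqlModule

# Hypothetical query and variables; expand() substitutes args into the SQL.
sql = 'SELECT * FROM my_dataset.logs WHERE severity = $severity LIMIT $limit'
print(SqlModule.expand(sql, {'severity': 'ERROR', 'limit': 100}))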
4,936 | googledatalab/pydatalab | google/datalab/bigquery/_utils.py | parse_dataset_name | def parse_dataset_name(name, project_id=None):
"""Parses a dataset name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
Returns:
A DatasetName named tuple for the dataset.
Raises:
Exception: raised if the name doesn't match the expected formats or a project_id was
specified that does not match that in the name.
"""
_project_id = _dataset_id = None
if isinstance(name, basestring):
# Try to parse as absolute name first.
m = re.match(_ABS_DATASET_NAME_PATTERN, name, re.IGNORECASE)
if m is not None:
_project_id, _dataset_id = m.groups()
else:
# Next try to match as a relative name implicitly scoped within current project.
m = re.match(_REL_DATASET_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_dataset_id = groups[0]
elif isinstance(name, dict):
try:
_dataset_id = name['dataset_id']
_project_id = name['project_id']
except KeyError:
pass
else:
# Try treat as an array or tuple
if len(name) == 2:
# Treat as a tuple or array.
_project_id, _dataset_id = name
elif len(name) == 1:
_dataset_id = name[0]
if not _dataset_id:
raise Exception('Invalid dataset name: ' + str(name))
if not _project_id:
_project_id = project_id
return DatasetName(_project_id, _dataset_id) | python | def parse_dataset_name(name, project_id=None):
"""Parses a dataset name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
Returns:
A DatasetName named tuple for the dataset.
Raises:
Exception: raised if the name doesn't match the expected formats or a project_id was
specified that does not match that in the name.
"""
_project_id = _dataset_id = None
if isinstance(name, basestring):
# Try to parse as absolute name first.
m = re.match(_ABS_DATASET_NAME_PATTERN, name, re.IGNORECASE)
if m is not None:
_project_id, _dataset_id = m.groups()
else:
# Next try to match as a relative name implicitly scoped within current project.
m = re.match(_REL_DATASET_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_dataset_id = groups[0]
elif isinstance(name, dict):
try:
_dataset_id = name['dataset_id']
_project_id = name['project_id']
except KeyError:
pass
else:
# Try treat as an array or tuple
if len(name) == 2:
# Treat as a tuple or array.
_project_id, _dataset_id = name
elif len(name) == 1:
_dataset_id = name[0]
if not _dataset_id:
raise Exception('Invalid dataset name: ' + str(name))
if not _project_id:
_project_id = project_id
return DatasetName(_project_id, _dataset_id) | [
"def",
"parse_dataset_name",
"(",
"name",
",",
"project_id",
"=",
"None",
")",
":",
"_project_id",
"=",
"_dataset_id",
"=",
"None",
"if",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"# Try to parse as absolute name first.",
"m",
"=",
"re",
".",
"match",
"(",
"_ABS_DATASET_NAME_PATTERN",
",",
"name",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"m",
"is",
"not",
"None",
":",
"_project_id",
",",
"_dataset_id",
"=",
"m",
".",
"groups",
"(",
")",
"else",
":",
"# Next try to match as a relative name implicitly scoped within current project.",
"m",
"=",
"re",
".",
"match",
"(",
"_REL_DATASET_NAME_PATTERN",
",",
"name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"groups",
"=",
"m",
".",
"groups",
"(",
")",
"_dataset_id",
"=",
"groups",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"name",
",",
"dict",
")",
":",
"try",
":",
"_dataset_id",
"=",
"name",
"[",
"'dataset_id'",
"]",
"_project_id",
"=",
"name",
"[",
"'project_id'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"# Try treat as an array or tuple",
"if",
"len",
"(",
"name",
")",
"==",
"2",
":",
"# Treat as a tuple or array.",
"_project_id",
",",
"_dataset_id",
"=",
"name",
"elif",
"len",
"(",
"name",
")",
"==",
"1",
":",
"_dataset_id",
"=",
"name",
"[",
"0",
"]",
"if",
"not",
"_dataset_id",
":",
"raise",
"Exception",
"(",
"'Invalid dataset name: '",
"+",
"str",
"(",
"name",
")",
")",
"if",
"not",
"_project_id",
":",
"_project_id",
"=",
"project_id",
"return",
"DatasetName",
"(",
"_project_id",
",",
"_dataset_id",
")"
] | Parses a dataset name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
Returns:
A DatasetName named tuple for the dataset.
Raises:
Exception: raised if the name doesn't match the expected formats or a project_id was
specified that does not match that in the name. | [
"Parses",
"a",
"dataset",
"name",
"into",
"its",
"individual",
"parts",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_utils.py#L58-L102 |
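A hedged usage sketch, assuming the google-datalab package is installed. The dict and tuple forms are handled explicitly by the code above; the bare-string relative form additionally relies on _REL_DATASET_NAME_PATTERN accepting a plain dataset id, which is likely but not shown in this record.

from google.datalab.bigquery._utils import parse_dataset_name

# Dict form: keys must be 'project_id' and 'dataset_id'.
print(parse_dataset_name({'project_id': 'my-project', 'dataset_id': 'my_dataset'}))

# Tuple form: (project_id, dataset_id).
print(parse_dataset_name(('my-project', 'my_dataset')))

# Relative string form, scoped by the project_id argument.
print(parse_dataset_name('my_dataset', project_id='my-project'))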
4,937 | googledatalab/pydatalab | google/datalab/bigquery/_utils.py | parse_table_name | def parse_table_name(name, project_id=None, dataset_id=None):
"""Parses a table name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
dataset_id: the expected dataset ID. If the name does not contain a dataset ID,
this will be used; if the name does contain a dataset ID and it does not match
this, an exception will be thrown.
Returns:
A TableName named tuple consisting of the full name and individual name parts.
Raises:
Exception: raised if the name doesn't match the expected formats, or a project_id and/or
dataset_id was provided that does not match that in the name.
"""
_project_id = _dataset_id = _table_id = _decorator = None
if isinstance(name, basestring):
# Try to parse as absolute name first.
m = re.match(_ABS_TABLE_NAME_PATTERN, name, re.IGNORECASE)
if m is not None:
_project_id, _dataset_id, _table_id, _decorator = m.groups()
else:
# Next try to match as a relative name implicitly scoped within current project.
m = re.match(_REL_TABLE_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_project_id, _dataset_id, _table_id, _decorator =\
project_id, groups[0], groups[1], groups[2]
else:
# Finally try to match as a table name only.
m = re.match(_TABLE_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_project_id, _dataset_id, _table_id, _decorator =\
project_id, dataset_id, groups[0], groups[1]
elif isinstance(name, dict):
try:
_table_id = name['table_id']
_dataset_id = name['dataset_id']
_project_id = name['project_id']
except KeyError:
pass
else:
# Try treat as an array or tuple
if len(name) == 4:
_project_id, _dataset_id, _table_id, _decorator = name
elif len(name) == 3:
_project_id, _dataset_id, _table_id = name
elif len(name) == 2:
_dataset_id, _table_id = name
if not _table_id:
raise Exception('Invalid table name: ' + str(name))
if not _project_id:
_project_id = project_id
if not _dataset_id:
_dataset_id = dataset_id
if not _decorator:
_decorator = ''
return TableName(_project_id, _dataset_id, _table_id, _decorator) | python | def parse_table_name(name, project_id=None, dataset_id=None):
"""Parses a table name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
dataset_id: the expected dataset ID. If the name does not contain a dataset ID,
this will be used; if the name does contain a dataset ID and it does not match
this, an exception will be thrown.
Returns:
A TableName named tuple consisting of the full name and individual name parts.
Raises:
Exception: raised if the name doesn't match the expected formats, or a project_id and/or
dataset_id was provided that does not match that in the name.
"""
_project_id = _dataset_id = _table_id = _decorator = None
if isinstance(name, basestring):
# Try to parse as absolute name first.
m = re.match(_ABS_TABLE_NAME_PATTERN, name, re.IGNORECASE)
if m is not None:
_project_id, _dataset_id, _table_id, _decorator = m.groups()
else:
# Next try to match as a relative name implicitly scoped within current project.
m = re.match(_REL_TABLE_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_project_id, _dataset_id, _table_id, _decorator =\
project_id, groups[0], groups[1], groups[2]
else:
# Finally try to match as a table name only.
m = re.match(_TABLE_NAME_PATTERN, name)
if m is not None:
groups = m.groups()
_project_id, _dataset_id, _table_id, _decorator =\
project_id, dataset_id, groups[0], groups[1]
elif isinstance(name, dict):
try:
_table_id = name['table_id']
_dataset_id = name['dataset_id']
_project_id = name['project_id']
except KeyError:
pass
else:
# Try treat as an array or tuple
if len(name) == 4:
_project_id, _dataset_id, _table_id, _decorator = name
elif len(name) == 3:
_project_id, _dataset_id, _table_id = name
elif len(name) == 2:
_dataset_id, _table_id = name
if not _table_id:
raise Exception('Invalid table name: ' + str(name))
if not _project_id:
_project_id = project_id
if not _dataset_id:
_dataset_id = dataset_id
if not _decorator:
_decorator = ''
return TableName(_project_id, _dataset_id, _table_id, _decorator) | [
"def",
"parse_table_name",
"(",
"name",
",",
"project_id",
"=",
"None",
",",
"dataset_id",
"=",
"None",
")",
":",
"_project_id",
"=",
"_dataset_id",
"=",
"_table_id",
"=",
"_decorator",
"=",
"None",
"if",
"isinstance",
"(",
"name",
",",
"basestring",
")",
":",
"# Try to parse as absolute name first.",
"m",
"=",
"re",
".",
"match",
"(",
"_ABS_TABLE_NAME_PATTERN",
",",
"name",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"m",
"is",
"not",
"None",
":",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
",",
"_decorator",
"=",
"m",
".",
"groups",
"(",
")",
"else",
":",
"# Next try to match as a relative name implicitly scoped within current project.",
"m",
"=",
"re",
".",
"match",
"(",
"_REL_TABLE_NAME_PATTERN",
",",
"name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"groups",
"=",
"m",
".",
"groups",
"(",
")",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
",",
"_decorator",
"=",
"project_id",
",",
"groups",
"[",
"0",
"]",
",",
"groups",
"[",
"1",
"]",
",",
"groups",
"[",
"2",
"]",
"else",
":",
"# Finally try to match as a table name only.",
"m",
"=",
"re",
".",
"match",
"(",
"_TABLE_NAME_PATTERN",
",",
"name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"groups",
"=",
"m",
".",
"groups",
"(",
")",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
",",
"_decorator",
"=",
"project_id",
",",
"dataset_id",
",",
"groups",
"[",
"0",
"]",
",",
"groups",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"name",
",",
"dict",
")",
":",
"try",
":",
"_table_id",
"=",
"name",
"[",
"'table_id'",
"]",
"_dataset_id",
"=",
"name",
"[",
"'dataset_id'",
"]",
"_project_id",
"=",
"name",
"[",
"'project_id'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"# Try treat as an array or tuple",
"if",
"len",
"(",
"name",
")",
"==",
"4",
":",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
",",
"_decorator",
"=",
"name",
"elif",
"len",
"(",
"name",
")",
"==",
"3",
":",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
"=",
"name",
"elif",
"len",
"(",
"name",
")",
"==",
"2",
":",
"_dataset_id",
",",
"_table_id",
"=",
"name",
"if",
"not",
"_table_id",
":",
"raise",
"Exception",
"(",
"'Invalid table name: '",
"+",
"str",
"(",
"name",
")",
")",
"if",
"not",
"_project_id",
":",
"_project_id",
"=",
"project_id",
"if",
"not",
"_dataset_id",
":",
"_dataset_id",
"=",
"dataset_id",
"if",
"not",
"_decorator",
":",
"_decorator",
"=",
"''",
"return",
"TableName",
"(",
"_project_id",
",",
"_dataset_id",
",",
"_table_id",
",",
"_decorator",
")"
] | Parses a table name into its individual parts.
Args:
name: the name to parse, or a tuple, dictionary or array containing the parts.
project_id: the expected project ID. If the name does not contain a project ID,
this will be used; if the name does contain a project ID and it does not match
this, an exception will be thrown.
dataset_id: the expected dataset ID. If the name does not contain a dataset ID,
this will be used; if the name does contain a dataset ID and it does not match
this, an exception will be thrown.
Returns:
A TableName named tuple consisting of the full name and individual name parts.
Raises:
Exception: raised if the name doesn't match the expected formats, or a project_id and/or
dataset_id was provided that does not match that in the name. | [
"Parses",
"a",
"table",
"name",
"into",
"its",
"individual",
"parts",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_utils.py#L105-L166 |
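A hedged usage sketch in the same spirit, again assuming the package is installed; only the dict and tuple paths exercised below are fully visible in the code above, and the '@1234567890' snapshot decorator is an invented value.

from google.datalab.bigquery._utils import parse_table_name

# Dict form with explicit project, dataset and table ids.
print(parse_table_name({'project_id': 'p', 'dataset_id': 'd', 'table_id': 't'}))

# 4-tuple form: (project_id, dataset_id, table_id, decorator).
print(parse_table_name(('p', 'd', 't', '@1234567890')))

# 2-tuple form: (dataset_id, table_id), with the project supplied separately.
print(parse_table_name(('d', 't'), project_id='p'))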
4,938 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer._make_text_predict_fn | def _make_text_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME text explainer. """
def _predict_fn(perturbed_text):
predict_input = []
for x in perturbed_text:
instance_copy = dict(instance)
instance_copy[column_to_explain] = x
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(self._model_dir, predict_input,
self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn | python | def _make_text_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME text explainer. """
def _predict_fn(perturbed_text):
predict_input = []
for x in perturbed_text:
instance_copy = dict(instance)
instance_copy[column_to_explain] = x
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(self._model_dir, predict_input,
self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn | [
"def",
"_make_text_predict_fn",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_to_explain",
")",
":",
"def",
"_predict_fn",
"(",
"perturbed_text",
")",
":",
"predict_input",
"=",
"[",
"]",
"for",
"x",
"in",
"perturbed_text",
":",
"instance_copy",
"=",
"dict",
"(",
"instance",
")",
"instance_copy",
"[",
"column_to_explain",
"]",
"=",
"x",
"predict_input",
".",
"append",
"(",
"instance_copy",
")",
"df",
"=",
"_local_predict",
".",
"get_prediction_results",
"(",
"self",
".",
"_model_dir",
",",
"predict_input",
",",
"self",
".",
"_headers",
",",
"with_source",
"=",
"False",
")",
"probs",
"=",
"_local_predict",
".",
"get_probs_for_labels",
"(",
"labels",
",",
"df",
")",
"return",
"np",
".",
"asarray",
"(",
"probs",
")",
"return",
"_predict_fn"
] | Create a predict_fn that can be used by LIME text explainer. | [
"Create",
"a",
"predict_fn",
"that",
"can",
"be",
"used",
"by",
"LIME",
"text",
"explainer",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L56-L71 |
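The contract such a predict_fn has to satisfy for LIME is narrow: take a batch of perturbed inputs and return an (n_samples, n_labels) numpy array of class probabilities. The toy two-label scorer below is invented purely to show that shape; it does not call the MLWorkbench model.

import numpy as np

def _toy_predict_fn(perturbed_text):
    # Two labels assumed: index 0 = 'negative', index 1 = 'positive'.
    probs = []
    for text in perturbed_text:
        p_positive = min(1.0, 0.1 * text.lower().count('good'))
        probs.append([1.0 - p_positive, p_positive])
    return np.asarray(probs)

print(_toy_predict_fn(['good good movie', 'bad movie']).shape)  # (2, 2)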
4,939 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer._make_image_predict_fn | def _make_image_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME image explainer. """
def _predict_fn(perturbed_image):
predict_input = []
for x in perturbed_image:
instance_copy = dict(instance)
instance_copy[column_to_explain] = Image.fromarray(x)
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(
self._model_dir, predict_input, self._headers,
img_cols=self._image_columns, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn | python | def _make_image_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME image explainer. """
def _predict_fn(perturbed_image):
predict_input = []
for x in perturbed_image:
instance_copy = dict(instance)
instance_copy[column_to_explain] = Image.fromarray(x)
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(
self._model_dir, predict_input, self._headers,
img_cols=self._image_columns, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn | [
"def",
"_make_image_predict_fn",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_to_explain",
")",
":",
"def",
"_predict_fn",
"(",
"perturbed_image",
")",
":",
"predict_input",
"=",
"[",
"]",
"for",
"x",
"in",
"perturbed_image",
":",
"instance_copy",
"=",
"dict",
"(",
"instance",
")",
"instance_copy",
"[",
"column_to_explain",
"]",
"=",
"Image",
".",
"fromarray",
"(",
"x",
")",
"predict_input",
".",
"append",
"(",
"instance_copy",
")",
"df",
"=",
"_local_predict",
".",
"get_prediction_results",
"(",
"self",
".",
"_model_dir",
",",
"predict_input",
",",
"self",
".",
"_headers",
",",
"img_cols",
"=",
"self",
".",
"_image_columns",
",",
"with_source",
"=",
"False",
")",
"probs",
"=",
"_local_predict",
".",
"get_probs_for_labels",
"(",
"labels",
",",
"df",
")",
"return",
"np",
".",
"asarray",
"(",
"probs",
")",
"return",
"_predict_fn"
] | Create a predict_fn that can be used by LIME image explainer. | [
"Create",
"a",
"predict_fn",
"that",
"can",
"be",
"used",
"by",
"LIME",
"image",
"explainer",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L73-L90 |
4,940 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer._get_unique_categories | def _get_unique_categories(self, df):
"""Get all categories for each categorical columns from training data."""
categories = []
for col in self._categorical_columns:
categocial = pd.Categorical(df[col])
col_categories = list(map(str, categocial.categories))
col_categories.append('_UNKNOWN')
categories.append(col_categories)
return categories | python | def _get_unique_categories(self, df):
"""Get all categories for each categorical columns from training data."""
categories = []
for col in self._categorical_columns:
categocial = pd.Categorical(df[col])
col_categories = list(map(str, categocial.categories))
col_categories.append('_UNKNOWN')
categories.append(col_categories)
return categories | [
"def",
"_get_unique_categories",
"(",
"self",
",",
"df",
")",
":",
"categories",
"=",
"[",
"]",
"for",
"col",
"in",
"self",
".",
"_categorical_columns",
":",
"categocial",
"=",
"pd",
".",
"Categorical",
"(",
"df",
"[",
"col",
"]",
")",
"col_categories",
"=",
"list",
"(",
"map",
"(",
"str",
",",
"categocial",
".",
"categories",
")",
")",
"col_categories",
".",
"append",
"(",
"'_UNKNOWN'",
")",
"categories",
".",
"append",
"(",
"col_categories",
")",
"return",
"categories"
] | Get all categories for each categorical columns from training data. | [
"Get",
"all",
"categories",
"for",
"each",
"categorical",
"columns",
"from",
"training",
"data",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L92-L101 |
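A standalone sketch of the category extraction above, using pandas directly on an invented column.

import pandas as pd

df = pd.DataFrame({'color': ['red', 'blue', 'red', 'green']})

categorical = pd.Categorical(df['color'])
col_categories = list(map(str, categorical.categories))
col_categories.append('_UNKNOWN')   # bucket for values unseen at training time

print(col_categories)  # ['blue', 'green', 'red', '_UNKNOWN']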
4,941 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer._preprocess_data_for_tabular_explain | def _preprocess_data_for_tabular_explain(self, df, categories):
"""Get preprocessed training set in numpy array, and categorical names from raw training data.
LIME tabular explainer requires a training set to know the distribution of numeric and
categorical values. The training set has to be numpy arrays, with all categorical values
converted to indices. It also requires list of names for each category.
"""
df = df.copy()
# Remove non tabular columns (text, image).
for col in list(df.columns):
if col not in (self._categorical_columns + self._numeric_columns):
del df[col]
# Convert categorical values into indices.
for col_name, col_categories in zip(self._categorical_columns, categories):
df[col_name] = df[col_name].apply(
lambda x: col_categories.index(str(x)) if str(x) in col_categories
else len(col_categories) - 1)
# Make sure numeric values are really numeric
for numeric_col in self._numeric_columns:
df[numeric_col] = df[numeric_col].apply(lambda x: float(x))
return df.as_matrix(self._categorical_columns + self._numeric_columns) | python | def _preprocess_data_for_tabular_explain(self, df, categories):
"""Get preprocessed training set in numpy array, and categorical names from raw training data.
LIME tabular explainer requires a training set to know the distribution of numeric and
categorical values. The training set has to be numpy arrays, with all categorical values
converted to indices. It also requires list of names for each category.
"""
df = df.copy()
# Remove non tabular columns (text, image).
for col in list(df.columns):
if col not in (self._categorical_columns + self._numeric_columns):
del df[col]
# Convert categorical values into indices.
for col_name, col_categories in zip(self._categorical_columns, categories):
df[col_name] = df[col_name].apply(
lambda x: col_categories.index(str(x)) if str(x) in col_categories
else len(col_categories) - 1)
# Make sure numeric values are really numeric
for numeric_col in self._numeric_columns:
df[numeric_col] = df[numeric_col].apply(lambda x: float(x))
return df.as_matrix(self._categorical_columns + self._numeric_columns) | [
"def",
"_preprocess_data_for_tabular_explain",
"(",
"self",
",",
"df",
",",
"categories",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"# Remove non tabular columns (text, image).",
"for",
"col",
"in",
"list",
"(",
"df",
".",
"columns",
")",
":",
"if",
"col",
"not",
"in",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
":",
"del",
"df",
"[",
"col",
"]",
"# Convert categorical values into indices.",
"for",
"col_name",
",",
"col_categories",
"in",
"zip",
"(",
"self",
".",
"_categorical_columns",
",",
"categories",
")",
":",
"df",
"[",
"col_name",
"]",
"=",
"df",
"[",
"col_name",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"col_categories",
".",
"index",
"(",
"str",
"(",
"x",
")",
")",
"if",
"str",
"(",
"x",
")",
"in",
"col_categories",
"else",
"len",
"(",
"col_categories",
")",
"-",
"1",
")",
"# Make sure numeric values are really numeric",
"for",
"numeric_col",
"in",
"self",
".",
"_numeric_columns",
":",
"df",
"[",
"numeric_col",
"]",
"=",
"df",
"[",
"numeric_col",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"float",
"(",
"x",
")",
")",
"return",
"df",
".",
"as_matrix",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")"
] | Get preprocessed training set in numpy array, and categorical names from raw training data.
LIME tabular explainer requires a training set to know the distribution of numeric and
categorical values. The training set has to be numpy arrays, with all categorical values
converted to indices. It also requires list of names for each category. | [
"Get",
"preprocessed",
"training",
"set",
"in",
"numpy",
"array",
"and",
"categorical",
"names",
"from",
"raw",
"training",
"data",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L103-L128 |
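A standalone sketch of the index conversion above for one categorical and one numeric column; unseen values fall into the trailing '_UNKNOWN' slot. It uses DataFrame.values where the original uses the older as_matrix, and the sample rows are invented.

import pandas as pd

col_categories = ['blue', 'green', 'red', '_UNKNOWN']
df = pd.DataFrame({'color': ['red', 'purple'], 'size': ['1.5', '2']})

# Categorical values become indices; anything unseen maps to '_UNKNOWN'.
df['color'] = df['color'].apply(
    lambda x: col_categories.index(str(x)) if str(x) in col_categories
    else len(col_categories) - 1)

# Numeric values arrive as strings from CSV, so force them to float.
df['size'] = df['size'].apply(lambda x: float(x))

print(df.values)  # [[2.  1.5], [3.  2. ]]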
4,942 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer._make_tabular_predict_fn | def _make_tabular_predict_fn(self, labels, instance, categories):
"""Create a predict_fn that can be used by LIME tabular explainer. """
def _predict_fn(np_instance):
df = pd.DataFrame(
np_instance,
columns=(self._categorical_columns + self._numeric_columns))
# Convert categorical indices back to categories.
for col_name, col_categories in zip(self._categorical_columns, categories):
df[col_name] = df[col_name].apply(lambda x: col_categories[int(x)])
# Add columns that do not exist in the perturbed data,
# such as key, text, and image data.
for col_name in self._headers:
if col_name not in (self._categorical_columns + self._numeric_columns):
df[col_name] = instance[col_name]
r = _local_predict.get_prediction_results(
self._model_dir, df, self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, r)
probs = np.asarray(probs)
return probs
return _predict_fn | python | def _make_tabular_predict_fn(self, labels, instance, categories):
"""Create a predict_fn that can be used by LIME tabular explainer. """
def _predict_fn(np_instance):
df = pd.DataFrame(
np_instance,
columns=(self._categorical_columns + self._numeric_columns))
# Convert categorical indices back to categories.
for col_name, col_categories in zip(self._categorical_columns, categories):
df[col_name] = df[col_name].apply(lambda x: col_categories[int(x)])
# Add columns that do not exist in the perturbed data,
# such as key, text, and image data.
for col_name in self._headers:
if col_name not in (self._categorical_columns + self._numeric_columns):
df[col_name] = instance[col_name]
r = _local_predict.get_prediction_results(
self._model_dir, df, self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, r)
probs = np.asarray(probs)
return probs
return _predict_fn | [
"def",
"_make_tabular_predict_fn",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"categories",
")",
":",
"def",
"_predict_fn",
"(",
"np_instance",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"np_instance",
",",
"columns",
"=",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
")",
"# Convert categorical indices back to categories.",
"for",
"col_name",
",",
"col_categories",
"in",
"zip",
"(",
"self",
".",
"_categorical_columns",
",",
"categories",
")",
":",
"df",
"[",
"col_name",
"]",
"=",
"df",
"[",
"col_name",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"col_categories",
"[",
"int",
"(",
"x",
")",
"]",
")",
"# Add columns that do not exist in the perturbed data,",
"# such as key, text, and image data.",
"for",
"col_name",
"in",
"self",
".",
"_headers",
":",
"if",
"col_name",
"not",
"in",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
":",
"df",
"[",
"col_name",
"]",
"=",
"instance",
"[",
"col_name",
"]",
"r",
"=",
"_local_predict",
".",
"get_prediction_results",
"(",
"self",
".",
"_model_dir",
",",
"df",
",",
"self",
".",
"_headers",
",",
"with_source",
"=",
"False",
")",
"probs",
"=",
"_local_predict",
".",
"get_probs_for_labels",
"(",
"labels",
",",
"r",
")",
"probs",
"=",
"np",
".",
"asarray",
"(",
"probs",
")",
"return",
"probs",
"return",
"_predict_fn"
] | Create a predict_fn that can be used by LIME tabular explainer. | [
"Create",
"a",
"predict_fn",
"that",
"can",
"be",
"used",
"by",
"LIME",
"tabular",
"explainer",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L130-L155 |
4,943 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer.explain_tabular | def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):
"""Explain categorical and numeric features for a prediction.
It analyzes the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
"""
from lime.lime_tabular import LimeTabularExplainer
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
categories = self._get_unique_categories(trainset)
np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
prediction_df = pd.DataFrame([instance])
prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)
explainer = LimeTabularExplainer(
np_trainset,
feature_names=(self._categorical_columns + self._numeric_columns),
class_names=labels,
categorical_features=range(len(categories)),
categorical_names={i: v for i, v in enumerate(categories)},
kernel_width=kernel_width)
exp = explainer.explain_instance(
prediction_instance[0],
predict_fn,
num_features=num_features,
labels=range(len(labels)))
return exp | python | def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):
"""Explain categorical and numeric features for a prediction.
It analyzes the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
"""
from lime.lime_tabular import LimeTabularExplainer
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
categories = self._get_unique_categories(trainset)
np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
prediction_df = pd.DataFrame([instance])
prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)
explainer = LimeTabularExplainer(
np_trainset,
feature_names=(self._categorical_columns + self._numeric_columns),
class_names=labels,
categorical_features=range(len(categories)),
categorical_names={i: v for i, v in enumerate(categories)},
kernel_width=kernel_width)
exp = explainer.explain_instance(
prediction_instance[0],
predict_fn,
num_features=num_features,
labels=range(len(labels)))
return exp | [
"def",
"explain_tabular",
"(",
"self",
",",
"trainset",
",",
"labels",
",",
"instance",
",",
"num_features",
"=",
"5",
",",
"kernel_width",
"=",
"3",
")",
":",
"from",
"lime",
".",
"lime_tabular",
"import",
"LimeTabularExplainer",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"categories",
"=",
"self",
".",
"_get_unique_categories",
"(",
"trainset",
")",
"np_trainset",
"=",
"self",
".",
"_preprocess_data_for_tabular_explain",
"(",
"trainset",
",",
"categories",
")",
"predict_fn",
"=",
"self",
".",
"_make_tabular_predict_fn",
"(",
"labels",
",",
"instance",
",",
"categories",
")",
"prediction_df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"instance",
"]",
")",
"prediction_instance",
"=",
"self",
".",
"_preprocess_data_for_tabular_explain",
"(",
"prediction_df",
",",
"categories",
")",
"explainer",
"=",
"LimeTabularExplainer",
"(",
"np_trainset",
",",
"feature_names",
"=",
"(",
"self",
".",
"_categorical_columns",
"+",
"self",
".",
"_numeric_columns",
")",
",",
"class_names",
"=",
"labels",
",",
"categorical_features",
"=",
"range",
"(",
"len",
"(",
"categories",
")",
")",
",",
"categorical_names",
"=",
"{",
"i",
":",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"categories",
")",
"}",
",",
"kernel_width",
"=",
"kernel_width",
")",
"exp",
"=",
"explainer",
".",
"explain_instance",
"(",
"prediction_instance",
"[",
"0",
"]",
",",
"predict_fn",
",",
"num_features",
"=",
"num_features",
",",
"labels",
"=",
"range",
"(",
"len",
"(",
"labels",
")",
")",
")",
"return",
"exp"
] | Explain categorical and numeric features for a prediction.
It analyzes the prediction by LIME, and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation. | [
"Explain",
"categorical",
"and",
"numeric",
"features",
"for",
"a",
"prediction",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L157-L199 |
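A hedged end-to-end usage sketch: it assumes an MLWorkbench model has already been trained into ./model, that ./train.csv matches the training schema, that the PredictionExplainer constructor takes the model directory (its __init__ is not shown in this record), and that the two label names exist in the model. show_in_notebook comes from LIME's Explanation object.

import pandas as pd

from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./model')      # assumed constructor argument
trainset = pd.read_csv('./train.csv')           # raw training features for LIME

exp = explainer.explain_tabular(
    trainset,
    labels=['low', 'high'],                     # labels assumed to exist in the model
    instance='key1,3.5,red,some free text',     # a csv line matching the model input
    num_features=5)
exp.show_in_notebook()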
4,944 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer.explain_text | def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):
"""Explain a text field of a prediction.
It analyzes the prediction with LIME and returns a report of which words are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which text column to explain. Can be None if there is only one text column
in the model input.
num_features: maximum number of words (features) to analyze. Passed to
LIME LimeTextExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeTextExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given text column is not found in model input or column_name is None
but there are multiple text columns in model input.
"""
from lime.lime_text import LimeTextExplainer
if len(self._text_columns) > 1 and not column_name:
raise ValueError('There are multiple text columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._text_columns:
raise ValueError('Specified column_name "%s" not found in the model input.'
% column_name)
text_column_name = column_name if column_name else self._text_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)
explainer = LimeTextExplainer(class_names=labels)
exp = explainer.explain_instance(
instance[text_column_name], predict_fn, labels=range(len(labels)),
num_features=num_features, num_samples=num_samples)
return exp | python | def explain_text(self, labels, instance, column_name=None, num_features=10, num_samples=5000):
"""Explain a text field of a prediction.
It analyzes the prediction with LIME and returns a report of which words are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which text column to explain. Can be None if there is only one text column
in the model input.
num_features: maximum number of words (features) to analyze. Passed to
LIME LimeTextExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeTextExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given text column is not found in model input or column_name is None
but there are multiple text columns in model input.
"""
from lime.lime_text import LimeTextExplainer
if len(self._text_columns) > 1 and not column_name:
raise ValueError('There are multiple text columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._text_columns:
raise ValueError('Specified column_name "%s" not found in the model input.'
% column_name)
text_column_name = column_name if column_name else self._text_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
predict_fn = self._make_text_predict_fn(labels, instance, text_column_name)
explainer = LimeTextExplainer(class_names=labels)
exp = explainer.explain_instance(
instance[text_column_name], predict_fn, labels=range(len(labels)),
num_features=num_features, num_samples=num_samples)
return exp | [
"def",
"explain_text",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_name",
"=",
"None",
",",
"num_features",
"=",
"10",
",",
"num_samples",
"=",
"5000",
")",
":",
"from",
"lime",
".",
"lime_text",
"import",
"LimeTextExplainer",
"if",
"len",
"(",
"self",
".",
"_text_columns",
")",
">",
"1",
"and",
"not",
"column_name",
":",
"raise",
"ValueError",
"(",
"'There are multiple text columns in the input of the model. '",
"+",
"'Please specify \"column_name\".'",
")",
"elif",
"column_name",
"and",
"column_name",
"not",
"in",
"self",
".",
"_text_columns",
":",
"raise",
"ValueError",
"(",
"'Specified column_name \"%s\" not found in the model input.'",
"%",
"column_name",
")",
"text_column_name",
"=",
"column_name",
"if",
"column_name",
"else",
"self",
".",
"_text_columns",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"predict_fn",
"=",
"self",
".",
"_make_text_predict_fn",
"(",
"labels",
",",
"instance",
",",
"text_column_name",
")",
"explainer",
"=",
"LimeTextExplainer",
"(",
"class_names",
"=",
"labels",
")",
"exp",
"=",
"explainer",
".",
"explain_instance",
"(",
"instance",
"[",
"text_column_name",
"]",
",",
"predict_fn",
",",
"labels",
"=",
"range",
"(",
"len",
"(",
"labels",
")",
")",
",",
"num_features",
"=",
"num_features",
",",
"num_samples",
"=",
"num_samples",
")",
"return",
"exp"
] | Explain a text field of a prediction.
It analyzes the prediction with LIME and returns a report of which words are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which text column to explain. Can be None if there is only one text column
in the model input.
num_features: maximum number of words (features) to analyze. Passed to
LIME LimeTextExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeTextExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given text column is not found in model input or column_name is None
but there are multiple text columns in model input. | [
"Explain",
"a",
"text",
"field",
"of",
"a",
"prediction",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L201-L244 |
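A hedged sketch of calling explain_text on a model with a single text column; the model directory, csv headers, and review text are placeholders.

from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./model')  # assumed constructor signature
# column_name may be omitted because the model is assumed to have exactly one text column.
exp = explainer.explain_text(['negative', 'positive'], '7,great movie with a weak ending',
                             num_features=10, num_samples=5000)
print(exp.as_list(label=1))  # most impactful words for the 'positive' label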
4,945 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer.explain_image | def explain_image(self, labels, instance, column_name=None, num_features=100000,
num_samples=300, batch_size=200, hide_color=0):
"""Explain an image of a prediction.
It analyzes the prediction with LIME and returns a report of which areas of the image are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which image column to explain. Can be None if there is only one image column
in the model input.
num_features: maximum number of areas (features) to analyze. Passed to
LIME LimeImageExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeImageExplainer directly.
batch_size: size of batches passed to predict_fn. Passed to
LIME LimeImageExplainer directly.
hide_color: the color used to perturb images. Passed to
LIME LimeImageExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given image column is not found in model input or column_name is None
but there are multiple image columns in model input.
"""
from lime.lime_image import LimeImageExplainer
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.'
% column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
predict_fn = self._make_image_predict_fn(labels, instance, image_column_name)
explainer = LimeImageExplainer()
with file_io.FileIO(instance[image_column_name], 'rb') as fi:
im = Image.open(fi)
im.thumbnail((299, 299), Image.ANTIALIAS)
rgb_im = np.asarray(im.convert('RGB'))
exp = explainer.explain_instance(
rgb_im, predict_fn, labels=range(len(labels)), top_labels=None,
hide_color=hide_color, num_features=num_features,
num_samples=num_samples, batch_size=batch_size)
return exp | python | def explain_image(self, labels, instance, column_name=None, num_features=100000,
num_samples=300, batch_size=200, hide_color=0):
"""Explain an image of a prediction.
It analyzes the prediction with LIME and returns a report of which areas of the image are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which image column to explain. Can be None if there is only one image column
in the model input.
num_features: maximum number of areas (features) to analyze. Passed to
LIME LimeImageExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeImageExplainer directly.
batch_size: size of batches passed to predict_fn. Passed to
LIME LimeImageExplainer directly.
hide_color: the color used to perturb images. Passed to
LIME LimeImageExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given image column is not found in model input or column_name is None
but there are multiple image columns in model input.
"""
from lime.lime_image import LimeImageExplainer
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.'
% column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
predict_fn = self._make_image_predict_fn(labels, instance, image_column_name)
explainer = LimeImageExplainer()
with file_io.FileIO(instance[image_column_name], 'rb') as fi:
im = Image.open(fi)
im.thumbnail((299, 299), Image.ANTIALIAS)
rgb_im = np.asarray(im.convert('RGB'))
exp = explainer.explain_instance(
rgb_im, predict_fn, labels=range(len(labels)), top_labels=None,
hide_color=hide_color, num_features=num_features,
num_samples=num_samples, batch_size=batch_size)
return exp | [
"def",
"explain_image",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_name",
"=",
"None",
",",
"num_features",
"=",
"100000",
",",
"num_samples",
"=",
"300",
",",
"batch_size",
"=",
"200",
",",
"hide_color",
"=",
"0",
")",
":",
"from",
"lime",
".",
"lime_image",
"import",
"LimeImageExplainer",
"if",
"len",
"(",
"self",
".",
"_image_columns",
")",
">",
"1",
"and",
"not",
"column_name",
":",
"raise",
"ValueError",
"(",
"'There are multiple image columns in the input of the model. '",
"+",
"'Please specify \"column_name\".'",
")",
"elif",
"column_name",
"and",
"column_name",
"not",
"in",
"self",
".",
"_image_columns",
":",
"raise",
"ValueError",
"(",
"'Specified column_name \"%s\" not found in the model input.'",
"%",
"column_name",
")",
"image_column_name",
"=",
"column_name",
"if",
"column_name",
"else",
"self",
".",
"_image_columns",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"predict_fn",
"=",
"self",
".",
"_make_image_predict_fn",
"(",
"labels",
",",
"instance",
",",
"image_column_name",
")",
"explainer",
"=",
"LimeImageExplainer",
"(",
")",
"with",
"file_io",
".",
"FileIO",
"(",
"instance",
"[",
"image_column_name",
"]",
",",
"'rb'",
")",
"as",
"fi",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"fi",
")",
"im",
".",
"thumbnail",
"(",
"(",
"299",
",",
"299",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"rgb_im",
"=",
"np",
".",
"asarray",
"(",
"im",
".",
"convert",
"(",
"'RGB'",
")",
")",
"exp",
"=",
"explainer",
".",
"explain_instance",
"(",
"rgb_im",
",",
"predict_fn",
",",
"labels",
"=",
"range",
"(",
"len",
"(",
"labels",
")",
")",
",",
"top_labels",
"=",
"None",
",",
"hide_color",
"=",
"hide_color",
",",
"num_features",
"=",
"num_features",
",",
"num_samples",
"=",
"num_samples",
",",
"batch_size",
"=",
"batch_size",
")",
"return",
"exp"
] | Explain an image of a prediction.
It analyzes the prediction with LIME and returns a report of which areas of the image are most impactful
in contributing to certain labels.
Args:
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: which image column to explain. Can be None if there is only one image column
in the model input.
num_features: maximum number of areas (features) to analyze. Passed to
LIME LimeImageExplainer directly.
num_samples: size of the neighborhood to learn the linear model. Passed to
LIME LimeImageExplainer directly.
batch_size: size of batches passed to predict_fn. Passed to
LIME LimeImageExplainer directly.
hide_color: the color used to perturb images. Passed to
LIME LimeImageExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
Throws:
ValueError if the given image column is not found in model input or column_name is None
but there are multiple image columns in model input. | [
"Explain",
"an",
"image",
"of",
"a",
"prediction",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L246-L299 |
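A sketch of explain_image plus rendering the mask from the returned LIME explanation; the model directory, instance dict, and image path are placeholders.

from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./model')  # assumed constructor signature
instance = {'key': '1', 'image_url': 'gs://my-bucket/flower.jpg'}  # hypothetical headers
exp = explainer.explain_image(['daisy', 'tulip'], instance, column_name='image_url',
                              num_samples=300, batch_size=200)
image, mask = exp.get_image_and_mask(0, positive_only=True, num_features=5)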
4,946 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_prediction_explainer.py | PredictionExplainer.probe_image | def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
top_percent=10):
""" Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. For example, if 10,
only the top 10% most impactful pixels will be shown and the rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label.
"""
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.' %
column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
image_path = instance[image_column_name]
with file_io.FileIO(image_path, 'rb') as fi:
im = Image.open(fi)
resized_image = im.resize((299, 299))
# Produce a list of scaled images, create instances (csv lines) from these images.
step = 1. / num_scaled_images
scales = np.arange(0.0, 1.0, step) + step
csv_lines = []
for s in scales:
pixels = (np.asarray(resized_image) * s).astype('uint8')
scaled_image = Image.fromarray(pixels)
buf = io.BytesIO()
scaled_image.save(buf, "JPEG")
encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
instance_copy = dict(instance)
instance_copy[image_column_name] = encoded_image
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
writer.writerow(instance_copy)
csv_lines.append(buf.getvalue())
integrated_gradients_images = []
for label in labels:
# Send to tf model to get gradients.
grads = self._image_gradients(csv_lines, label, image_column_name)
integrated_grads = resized_image * np.average(grads, axis=0)
# Gray scale the grads by removing color dimension.
# abs() is for getting the most impactful pixels regardless positive or negative.
grayed = np.average(abs(integrated_grads), axis=2)
grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
# Only show the most impactful pixels.
p = np.percentile(grayed, 100 - top_percent)
viz_window = np.where(grayed > p, 1, 0)
vis = resized_image * viz_window
im_vis = Image.fromarray(np.uint8(vis))
integrated_gradients_images.append(im_vis)
return resized_image, integrated_gradients_images | python | def probe_image(self, labels, instance, column_name=None, num_scaled_images=50,
top_percent=10):
""" Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. For example, if 10,
only the top 10% most impactful pixels will be shown and the rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label.
"""
if len(self._image_columns) > 1 and not column_name:
raise ValueError('There are multiple image columns in the input of the model. ' +
'Please specify "column_name".')
elif column_name and column_name not in self._image_columns:
raise ValueError('Specified column_name "%s" not found in the model input.' %
column_name)
image_column_name = column_name if column_name else self._image_columns[0]
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
image_path = instance[image_column_name]
with file_io.FileIO(image_path, 'rb') as fi:
im = Image.open(fi)
resized_image = im.resize((299, 299))
# Produce a list of scaled images, create instances (csv lines) from these images.
step = 1. / num_scaled_images
scales = np.arange(0.0, 1.0, step) + step
csv_lines = []
for s in scales:
pixels = (np.asarray(resized_image) * s).astype('uint8')
scaled_image = Image.fromarray(pixels)
buf = io.BytesIO()
scaled_image.save(buf, "JPEG")
encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii')
instance_copy = dict(instance)
instance_copy[image_column_name] = encoded_image
buf = six.StringIO()
writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='')
writer.writerow(instance_copy)
csv_lines.append(buf.getvalue())
integrated_gradients_images = []
for label in labels:
# Send to tf model to get gradients.
grads = self._image_gradients(csv_lines, label, image_column_name)
integrated_grads = resized_image * np.average(grads, axis=0)
# Gray scale the grads by removing color dimension.
# abs() is for getting the most impactful pixels regardless positive or negative.
grayed = np.average(abs(integrated_grads), axis=2)
grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0])
# Only show the most impactful pixels.
p = np.percentile(grayed, 100 - top_percent)
viz_window = np.where(grayed > p, 1, 0)
vis = resized_image * viz_window
im_vis = Image.fromarray(np.uint8(vis))
integrated_gradients_images.append(im_vis)
return resized_image, integrated_gradients_images | [
"def",
"probe_image",
"(",
"self",
",",
"labels",
",",
"instance",
",",
"column_name",
"=",
"None",
",",
"num_scaled_images",
"=",
"50",
",",
"top_percent",
"=",
"10",
")",
":",
"if",
"len",
"(",
"self",
".",
"_image_columns",
")",
">",
"1",
"and",
"not",
"column_name",
":",
"raise",
"ValueError",
"(",
"'There are multiple image columns in the input of the model. '",
"+",
"'Please specify \"column_name\".'",
")",
"elif",
"column_name",
"and",
"column_name",
"not",
"in",
"self",
".",
"_image_columns",
":",
"raise",
"ValueError",
"(",
"'Specified column_name \"%s\" not found in the model input.'",
"%",
"column_name",
")",
"image_column_name",
"=",
"column_name",
"if",
"column_name",
"else",
"self",
".",
"_image_columns",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"instance",
",",
"six",
".",
"string_types",
")",
":",
"instance",
"=",
"next",
"(",
"csv",
".",
"DictReader",
"(",
"[",
"instance",
"]",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
")",
")",
"image_path",
"=",
"instance",
"[",
"image_column_name",
"]",
"with",
"file_io",
".",
"FileIO",
"(",
"image_path",
",",
"'rb'",
")",
"as",
"fi",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"fi",
")",
"resized_image",
"=",
"im",
".",
"resize",
"(",
"(",
"299",
",",
"299",
")",
")",
"# Produce a list of scaled images, create instances (csv lines) from these images.",
"step",
"=",
"1.",
"/",
"num_scaled_images",
"scales",
"=",
"np",
".",
"arange",
"(",
"0.0",
",",
"1.0",
",",
"step",
")",
"+",
"step",
"csv_lines",
"=",
"[",
"]",
"for",
"s",
"in",
"scales",
":",
"pixels",
"=",
"(",
"np",
".",
"asarray",
"(",
"resized_image",
")",
"*",
"s",
")",
".",
"astype",
"(",
"'uint8'",
")",
"scaled_image",
"=",
"Image",
".",
"fromarray",
"(",
"pixels",
")",
"buf",
"=",
"io",
".",
"BytesIO",
"(",
")",
"scaled_image",
".",
"save",
"(",
"buf",
",",
"\"JPEG\"",
")",
"encoded_image",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"buf",
".",
"getvalue",
"(",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
"instance_copy",
"=",
"dict",
"(",
"instance",
")",
"instance_copy",
"[",
"image_column_name",
"]",
"=",
"encoded_image",
"buf",
"=",
"six",
".",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"buf",
",",
"fieldnames",
"=",
"self",
".",
"_headers",
",",
"lineterminator",
"=",
"''",
")",
"writer",
".",
"writerow",
"(",
"instance_copy",
")",
"csv_lines",
".",
"append",
"(",
"buf",
".",
"getvalue",
"(",
")",
")",
"integrated_gradients_images",
"=",
"[",
"]",
"for",
"label",
"in",
"labels",
":",
"# Send to tf model to get gradients.",
"grads",
"=",
"self",
".",
"_image_gradients",
"(",
"csv_lines",
",",
"label",
",",
"image_column_name",
")",
"integrated_grads",
"=",
"resized_image",
"*",
"np",
".",
"average",
"(",
"grads",
",",
"axis",
"=",
"0",
")",
"# Gray scale the grads by removing color dimension.",
"# abs() is for getting the most impactful pixels regardless positive or negative.",
"grayed",
"=",
"np",
".",
"average",
"(",
"abs",
"(",
"integrated_grads",
")",
",",
"axis",
"=",
"2",
")",
"grayed",
"=",
"np",
".",
"transpose",
"(",
"[",
"grayed",
",",
"grayed",
",",
"grayed",
"]",
",",
"axes",
"=",
"[",
"1",
",",
"2",
",",
"0",
"]",
")",
"# Only show the most impactful pixels.",
"p",
"=",
"np",
".",
"percentile",
"(",
"grayed",
",",
"100",
"-",
"top_percent",
")",
"viz_window",
"=",
"np",
".",
"where",
"(",
"grayed",
">",
"p",
",",
"1",
",",
"0",
")",
"vis",
"=",
"resized_image",
"*",
"viz_window",
"im_vis",
"=",
"Image",
".",
"fromarray",
"(",
"np",
".",
"uint8",
"(",
"vis",
")",
")",
"integrated_gradients_images",
".",
"append",
"(",
"im_vis",
")",
"return",
"resized_image",
",",
"integrated_gradients_images"
] | Get pixel importance of the image.
It performs pixel sensitivity analysis by showing only the most important pixels to a
certain label in the image. It uses integrated gradients to measure the
importance of each pixel.
Args:
labels: labels to compute gradients from.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
column_name: the name of the image column to probe. If there is only one image
column it can be None.
num_scaled_images: Number of scaled images to get grads from. For example, if 10,
the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce
10 images for grads computation.
top_percent: The percentile of pixels to show only. For example, if 10,
only the top 10% most impactful pixels will be shown and the rest of the pixels will be black.
Returns:
A tuple. First is the resized original image (299x299x3). Second is a list of
the visualization with same size that highlights the most important pixels, one
per each label. | [
"Get",
"pixel",
"importance",
"of",
"the",
"image",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L334-L414 |
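A sketch showing how probe_image's return values could be saved for inspection; the model directory, labels, and image URL are placeholders.

from google.datalab.contrib.mlworkbench._prediction_explainer import PredictionExplainer

explainer = PredictionExplainer('./model')  # assumed constructor signature
instance = {'key': '1', 'image_url': 'gs://my-bucket/flower.jpg'}
resized, overlays = explainer.probe_image(['daisy', 'tulip'], instance,
                                          column_name='image_url', top_percent=10)
resized.save('original.png')              # the 299x299 resized original
overlays[0].save('daisy_top_pixels.png')  # top-10% pixels for the first label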
4,947 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | Models.get_model_details | def get_model_details(self, model_name):
"""Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
return self._api.projects().models().get(name=full_name).execute() | python | def get_model_details(self, model_name):
"""Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
return self._api.projects().models().get(name=full_name).execute() | [
"def",
"get_model_details",
"(",
"self",
",",
"model_name",
")",
":",
"full_name",
"=",
"model_name",
"if",
"not",
"model_name",
".",
"startswith",
"(",
"'projects/'",
")",
":",
"full_name",
"=",
"(",
"'projects/%s/models/%s'",
"%",
"(",
"self",
".",
"_project_id",
",",
"model_name",
")",
")",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"get",
"(",
"name",
"=",
"full_name",
")",
".",
"execute",
"(",
")"
] | Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details. | [
"Get",
"details",
"of",
"the",
"specified",
"model",
"from",
"CloudML",
"Service",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L53-L64 |
4,948 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | Models.create | def create(self, model_name):
"""Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns information of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
Exception if the model creation failed.
"""
body = {'name': model_name}
parent = 'projects/' + self._project_id
# Model creation is instant. If anything goes wrong, Exception will be thrown.
return self._api.projects().models().create(body=body, parent=parent).execute() | python | def create(self, model_name):
"""Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns information of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
Exception if the model creation failed.
"""
body = {'name': model_name}
parent = 'projects/' + self._project_id
# Model creation is instant. If anything goes wrong, Exception will be thrown.
return self._api.projects().models().create(body=body, parent=parent).execute() | [
"def",
"create",
"(",
"self",
",",
"model_name",
")",
":",
"body",
"=",
"{",
"'name'",
":",
"model_name",
"}",
"parent",
"=",
"'projects/'",
"+",
"self",
".",
"_project_id",
"# Model creation is instant. If anything goes wrong, Exception will be thrown.",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"parent",
")",
".",
"execute",
"(",
")"
] | Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns information of the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
Exception if the model creation failed.
"Create",
"a",
"model",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L66-L80 |
4,949 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | Models.list | def list(self, count=10):
"""List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
data = []
# Add range(count) to the loop so it will stop either when it reaches count, or when iteration
# on self is exhausted. "self" is iterable (see __iter__() method).
for _, model in zip(range(count), self.get_iterator()):
element = {'name': model['name']}
if 'defaultVersion' in model:
version_short_name = model['defaultVersion']['name'].split('/')[-1]
element['defaultVersion'] = version_short_name
data.append(element)
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'defaultVersion'])) | python | def list(self, count=10):
"""List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
data = []
# Add range(count) to the loop so it will stop either when it reaches count, or when iteration
# on self is exhausted. "self" is iterable (see __iter__() method).
for _, model in zip(range(count), self.get_iterator()):
element = {'name': model['name']}
if 'defaultVersion' in model:
version_short_name = model['defaultVersion']['name'].split('/')[-1]
element['defaultVersion'] = version_short_name
data.append(element)
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'defaultVersion'])) | [
"def",
"list",
"(",
"self",
",",
"count",
"=",
"10",
")",
":",
"import",
"IPython",
"data",
"=",
"[",
"]",
"# Add range(count) to loop so it will stop either it reaches count, or iteration",
"# on self is exhausted. \"self\" is iterable (see __iter__() method).",
"for",
"_",
",",
"model",
"in",
"zip",
"(",
"range",
"(",
"count",
")",
",",
"self",
".",
"get_iterator",
"(",
")",
")",
":",
"element",
"=",
"{",
"'name'",
":",
"model",
"[",
"'name'",
"]",
"}",
"if",
"'defaultVersion'",
"in",
"model",
":",
"version_short_name",
"=",
"model",
"[",
"'defaultVersion'",
"]",
"[",
"'name'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"element",
"[",
"'defaultVersion'",
"]",
"=",
"version_short_name",
"data",
".",
"append",
"(",
"element",
")",
"IPython",
".",
"display",
".",
"display",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'name'",
",",
"'defaultVersion'",
"]",
")",
")"
] | List models under the current project in a table view.
Args:
count: upper limit of the number of models to list.
Raises:
Exception if it is called in a non-IPython environment. | [
"List",
"models",
"under",
"the",
"current",
"project",
"in",
"a",
"table",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L97-L117 |
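Taken together, the Models helpers above support a small workflow like this sketch; it assumes Models is exported from google.datalab.ml and defaults to the current project, and 'iris' is a placeholder model name.

from google.datalab.ml import Models  # assumed package-level export

models = Models()                           # assumed to default to the current project
models.create('iris')                       # creation is instant; errors raise an exception
details = models.get_model_details('iris')  # accepts a short or full model name
print(details['name'])
models.list(count=10)                       # renders a table when run in IPython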
4,950 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.get_version_details | def get_version_details(self, version_name):
"""Get details of a version.
Args:
version_name: the name of the version in short form, such as "v1".
Returns: a dictionary containing the version details.
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
return self._api.projects().models().versions().get(name=name).execute() | python | def get_version_details(self, version_name):
"""Get details of a version.
Args:
version_name: the name of the version in short form, such as "v1".
Returns: a dictionary containing the version details.
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
return self._api.projects().models().versions().get(name=name).execute() | [
"def",
"get_version_details",
"(",
"self",
",",
"version_name",
")",
":",
"name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"return",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"get",
"(",
"name",
"=",
"name",
")",
".",
"execute",
"(",
")"
] | Get details of a version.
Args:
version_name: the name of the version in short form, such as "v1".
Returns: a dictionary containing the version details. | [
"Get",
"details",
"of",
"a",
"version",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L166-L174 |
4,951 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.deploy | def deploy(self, version_name, path, runtime_version=None):
"""Deploy a model version to the cloud.
Args:
version_name: the name of the version in short form, such as "v1".
path: the Google Cloud Storage path (gs://...) which contains the model files.
runtime_version: the ML Engine runtime version as a string, example '1.2'.
See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
for a list of runtimes. If None, the ML Engine service will pick one.
Raises: Exception if the path is invalid or does not contain expected files.
Exception if the service returns invalid response.
"""
if not path.startswith('gs://'):
raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')
# If there is no "export.meta" or"saved_model.pb" under path but there is
# path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not \
datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or \
datalab.storage.Object.from_url(os.path.join(path, 'model',
'saved_model.pb')).exists():
path = os.path.join(path, 'model')
else:
print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')
body = {'name': self._model_name}
parent = 'projects/' + self._project_id
try:
self._api.projects().models().create(body=body, parent=parent).execute()
except:
# Trying to create an already existing model gets an error. Ignore it.
pass
body = {
'name': version_name,
'deployment_uri': path,
}
if runtime_version:
body['runtime_version'] = runtime_version
response = self._api.projects().models().versions().create(
body=body, parent=self._full_model_name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | python | def deploy(self, version_name, path, runtime_version=None):
"""Deploy a model version to the cloud.
Args:
version_name: the name of the version in short form, such as "v1".
path: the Google Cloud Storage path (gs://...) which contains the model files.
runtime_version: the ML Engine runtime version as a string, example '1.2'.
See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
for a list of runtimes. If None, the ML Engine service will pick one.
Raises: Exception if the path is invalid or does not contain expected files.
Exception if the service returns invalid response.
"""
if not path.startswith('gs://'):
raise Exception('Invalid path. Only Google Cloud Storage path (gs://...) is accepted.')
# If there is no "export.meta" or"saved_model.pb" under path but there is
# path/model/export.meta or path/model/saved_model.pb, then append /model to the path.
if not datalab.storage.Object.from_url(os.path.join(path, 'export.meta')).exists() and not \
datalab.storage.Object.from_url(os.path.join(path, 'saved_model.pb')).exists():
if datalab.storage.Object.from_url(os.path.join(path, 'model', 'export.meta')).exists() or \
datalab.storage.Object.from_url(os.path.join(path, 'model',
'saved_model.pb')).exists():
path = os.path.join(path, 'model')
else:
print('Cannot find export.meta or saved_model.pb, but continue with deployment anyway.')
body = {'name': self._model_name}
parent = 'projects/' + self._project_id
try:
self._api.projects().models().create(body=body, parent=parent).execute()
except:
# Trying to create an already existing model gets an error. Ignore it.
pass
body = {
'name': version_name,
'deployment_uri': path,
}
if runtime_version:
body['runtime_version'] = runtime_version
response = self._api.projects().models().versions().create(
body=body, parent=self._full_model_name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | [
"def",
"deploy",
"(",
"self",
",",
"version_name",
",",
"path",
",",
"runtime_version",
"=",
"None",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"raise",
"Exception",
"(",
"'Invalid path. Only Google Cloud Storage path (gs://...) is accepted.'",
")",
"# If there is no \"export.meta\" or\"saved_model.pb\" under path but there is",
"# path/model/export.meta or path/model/saved_model.pb, then append /model to the path.",
"if",
"not",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'export.meta'",
")",
")",
".",
"exists",
"(",
")",
"and",
"not",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'saved_model.pb'",
")",
")",
".",
"exists",
"(",
")",
":",
"if",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
",",
"'export.meta'",
")",
")",
".",
"exists",
"(",
")",
"or",
"datalab",
".",
"storage",
".",
"Object",
".",
"from_url",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
",",
"'saved_model.pb'",
")",
")",
".",
"exists",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'model'",
")",
"else",
":",
"print",
"(",
"'Cannot find export.meta or saved_model.pb, but continue with deployment anyway.'",
")",
"body",
"=",
"{",
"'name'",
":",
"self",
".",
"_model_name",
"}",
"parent",
"=",
"'projects/'",
"+",
"self",
".",
"_project_id",
"try",
":",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"parent",
")",
".",
"execute",
"(",
")",
"except",
":",
"# Trying to create an already existing model gets an error. Ignore it.",
"pass",
"body",
"=",
"{",
"'name'",
":",
"version_name",
",",
"'deployment_uri'",
":",
"path",
",",
"}",
"if",
"runtime_version",
":",
"body",
"[",
"'runtime_version'",
"]",
"=",
"runtime_version",
"response",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"create",
"(",
"body",
"=",
"body",
",",
"parent",
"=",
"self",
".",
"_full_model_name",
")",
".",
"execute",
"(",
")",
"if",
"'name'",
"not",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. \"name\" is not found.'",
")",
"_util",
".",
"wait_for_long_running_operation",
"(",
"response",
"[",
"'name'",
"]",
")"
] | Deploy a model version to the cloud.
Args:
version_name: the name of the version in short form, such as "v1".
path: the Google Cloud Storage path (gs://...) which contains the model files.
runtime_version: the ML Engine runtime version as a string, example '1.2'.
See https://cloud.google.com/ml-engine/docs/concepts/runtime-version-list
for a list of runtimes. If None, the ML Engine service will pick one.
Raises: Exception if the path is invalid or does not contain expected files.
Exception if the service returns invalid response. | [
"Deploy",
"a",
"model",
"version",
"to",
"the",
"cloud",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L176-L222 |
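A hedged sketch of deploying a model version from a GCS export; the ModelVersions constructor, bucket path, version name, and runtime version are assumptions.

from google.datalab.ml import ModelVersions  # assumed package-level export

versions = ModelVersions('iris')  # assumed constructor taking the model name
# The path should contain export.meta or saved_model.pb, or a model/ subdirectory with one.
versions.deploy('v1', 'gs://my-bucket/training/model', runtime_version='1.2')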
4,952 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.delete | def delete(self, version_name):
"""Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
response = self._api.projects().models().versions().delete(name=name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | python | def delete(self, version_name):
"""Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
"""
name = ('%s/versions/%s' % (self._full_model_name, version_name))
response = self._api.projects().models().versions().delete(name=name).execute()
if 'name' not in response:
raise Exception('Invalid response from service. "name" is not found.')
_util.wait_for_long_running_operation(response['name']) | [
"def",
"delete",
"(",
"self",
",",
"version_name",
")",
":",
"name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"response",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"models",
"(",
")",
".",
"versions",
"(",
")",
".",
"delete",
"(",
"name",
"=",
"name",
")",
".",
"execute",
"(",
")",
"if",
"'name'",
"not",
"in",
"response",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. \"name\" is not found.'",
")",
"_util",
".",
"wait_for_long_running_operation",
"(",
"response",
"[",
"'name'",
"]",
")"
] | Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1". | [
"Delete",
"a",
"version",
"of",
"model",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L224-L234 |
4,953 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.predict | def predict(self, version_name, data):
"""Get prediction results from features instances.
Args:
version_name: the name of the version used for prediction.
data: typically a list of instance to be submitted for prediction. The format of the
instance depends on the model. For example, structured data model may require
a csv line for each instance.
Note that online prediction only works on models that take one placeholder value,
such as a string encoding a csv line.
Returns:
A list of prediction results for given instances. Each element is a dictionary representing
output mapping from the graph.
An example:
[{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
{"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
"""
full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))
request = self._api.projects().predict(body={'instances': data},
name=full_version_name)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
result = request.execute()
if 'predictions' not in result:
raise Exception('Invalid response from service. Cannot find "predictions" in response.')
return result['predictions'] | python | def predict(self, version_name, data):
"""Get prediction results from features instances.
Args:
version_name: the name of the version used for prediction.
data: typically a list of instance to be submitted for prediction. The format of the
instance depends on the model. For example, structured data model may require
a csv line for each instance.
Note that online prediction only works on models that take one placeholder value,
such as a string encoding a csv line.
Returns:
A list of prediction results for given instances. Each element is a dictionary representing
output mapping from the graph.
An example:
[{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
{"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}]
"""
full_version_name = ('%s/versions/%s' % (self._full_model_name, version_name))
request = self._api.projects().predict(body={'instances': data},
name=full_version_name)
request.headers['user-agent'] = 'GoogleCloudDataLab/1.0'
result = request.execute()
if 'predictions' not in result:
raise Exception('Invalid response from service. Cannot find "predictions" in response.')
return result['predictions'] | [
"def",
"predict",
"(",
"self",
",",
"version_name",
",",
"data",
")",
":",
"full_version_name",
"=",
"(",
"'%s/versions/%s'",
"%",
"(",
"self",
".",
"_full_model_name",
",",
"version_name",
")",
")",
"request",
"=",
"self",
".",
"_api",
".",
"projects",
"(",
")",
".",
"predict",
"(",
"body",
"=",
"{",
"'instances'",
":",
"data",
"}",
",",
"name",
"=",
"full_version_name",
")",
"request",
".",
"headers",
"[",
"'user-agent'",
"]",
"=",
"'GoogleCloudDataLab/1.0'",
"result",
"=",
"request",
".",
"execute",
"(",
")",
"if",
"'predictions'",
"not",
"in",
"result",
":",
"raise",
"Exception",
"(",
"'Invalid response from service. Cannot find \"predictions\" in response.'",
")",
"return",
"result",
"[",
"'predictions'",
"]"
] | Get prediction results from feature instances.
Args:
version_name: the name of the version used for prediction.
data: typically a list of instances to be submitted for prediction. The format of the
instance depends on the model. For example, structured data model may require
a csv line for each instance.
Note that online prediction only works on models that take one placeholder value,
such as a string encoding a csv line.
Returns:
A list of prediction results for given instances. Each element is a dictionary representing
output mapping from the graph.
An example:
[{"predictions": 1, "score": [0.00078, 0.71406, 0.28515]},
{"predictions": 1, "score": [0.00244, 0.99634, 0.00121]}] | [
"Get",
"prediction",
"results",
"from",
"features",
"instances",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L236-L261 |
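A sketch of online prediction with csv-line instances; the model and version names and the csv schema are hypothetical.

from google.datalab.ml import ModelVersions  # assumed package-level export

versions = ModelVersions('iris')  # assumed constructor taking the model name
# Each instance is one csv line, since this model takes a single string placeholder.
results = versions.predict('v1', ['5.1,3.5,1.4,0.2', '6.2,3.4,5.4,2.3'])
for r in results:
    print(r['predictions'], r['score'])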
4,954 | googledatalab/pydatalab | google/datalab/ml/_cloud_models.py | ModelVersions.list | def list(self):
"""List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
# "self" is iterable (see __iter__() method).
data = [{'name': version['name'].split()[-1],
'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
for version in self.get_iterator()]
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime'])) | python | def list(self):
"""List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment.
"""
import IPython
# "self" is iterable (see __iter__() method).
data = [{'name': version['name'].split()[-1],
'deploymentUri': version['deploymentUri'], 'createTime': version['createTime']}
for version in self.get_iterator()]
IPython.display.display(
datalab.utils.commands.render_dictionary(data, ['name', 'deploymentUri', 'createTime'])) | [
"def",
"list",
"(",
"self",
")",
":",
"import",
"IPython",
"# \"self\" is iterable (see __iter__() method).",
"data",
"=",
"[",
"{",
"'name'",
":",
"version",
"[",
"'name'",
"]",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
",",
"'deploymentUri'",
":",
"version",
"[",
"'deploymentUri'",
"]",
",",
"'createTime'",
":",
"version",
"[",
"'createTime'",
"]",
"}",
"for",
"version",
"in",
"self",
".",
"get_iterator",
"(",
")",
"]",
"IPython",
".",
"display",
".",
"display",
"(",
"datalab",
".",
"utils",
".",
"commands",
".",
"render_dictionary",
"(",
"data",
",",
"[",
"'name'",
",",
"'deploymentUri'",
",",
"'createTime'",
"]",
")",
")"
] | List versions under the current model in a table view.
Raises:
Exception if it is called in a non-IPython environment. | [
"List",
"versions",
"under",
"the",
"current",
"model",
"in",
"a",
"table",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L273-L286 |
4,955 | googledatalab/pydatalab | solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py | create_feature_map | def create_feature_map(features, feature_indices, output_dir):
"""Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)]
"""
feature_map = []
for name, info in feature_indices:
transform_name = features[name]['transform']
source_column = features[name]['source_column']
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
feature_map.append((info['index_start'], name))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, _ = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
for i, word in enumerate(vocab):
if transform_name == ONE_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
elif transform_name == MULTI_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
elif transform_name == IMAGE_TRANSFORM:
for i in range(info['size']):
feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
return feature_map | python | def create_feature_map(features, feature_indices, output_dir):
"""Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)]
"""
feature_map = []
for name, info in feature_indices:
transform_name = features[name]['transform']
source_column = features[name]['source_column']
if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
feature_map.append((info['index_start'], name))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, _ = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
for i, word in enumerate(vocab):
if transform_name == ONE_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s=%s' % (source_column, word)))
elif transform_name == MULTI_HOT_TRANSFORM:
feature_map.append((info['index_start'] + i, '%s has "%s"' % (source_column, word)))
elif transform_name == IMAGE_TRANSFORM:
for i in range(info['size']):
feature_map.append((info['index_start'] + i, '%s image feature %d' % (source_column, i)))
return feature_map | [
"def",
"create_feature_map",
"(",
"features",
",",
"feature_indices",
",",
"output_dir",
")",
":",
"feature_map",
"=",
"[",
"]",
"for",
"name",
",",
"info",
"in",
"feature_indices",
":",
"transform_name",
"=",
"features",
"[",
"name",
"]",
"[",
"'transform'",
"]",
"source_column",
"=",
"features",
"[",
"name",
"]",
"[",
"'source_column'",
"]",
"if",
"transform_name",
"in",
"[",
"IDENTITY_TRANSFORM",
",",
"SCALE_TRANSFORM",
"]",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
",",
"name",
")",
")",
"elif",
"transform_name",
"in",
"[",
"ONE_HOT_TRANSFORM",
",",
"MULTI_HOT_TRANSFORM",
"]",
":",
"vocab",
",",
"_",
"=",
"read_vocab_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"VOCAB_ANALYSIS_FILE",
"%",
"source_column",
")",
")",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"vocab",
")",
":",
"if",
"transform_name",
"==",
"ONE_HOT_TRANSFORM",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s=%s'",
"%",
"(",
"source_column",
",",
"word",
")",
")",
")",
"elif",
"transform_name",
"==",
"MULTI_HOT_TRANSFORM",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s has \"%s\"'",
"%",
"(",
"source_column",
",",
"word",
")",
")",
")",
"elif",
"transform_name",
"==",
"IMAGE_TRANSFORM",
":",
"for",
"i",
"in",
"range",
"(",
"info",
"[",
"'size'",
"]",
")",
":",
"feature_map",
".",
"append",
"(",
"(",
"info",
"[",
"'index_start'",
"]",
"+",
"i",
",",
"'%s image feature %d'",
"%",
"(",
"source_column",
",",
"i",
")",
")",
")",
"return",
"feature_map"
] | Returns feature_map about the transformed features.
feature_map includes information such as:
1, cat1=0
2, cat1=1
3, numeric1
...
Returns:
List in the form
[(index, feature_description)] | [
"Returns",
"feature_map",
"about",
"the",
"transformed",
"features",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py#L447-L477 |
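An illustrative call to create_feature_map; the import path, features dict, index layout, and analysis directory mirror the structure the function expects, but every concrete value is made up, and the output directory is assumed to already contain the vocabulary files written by the analysis step.

from trainer import feature_transforms as ft  # assumed import of this trainer module

features = {
    'cat1': {'transform': ft.ONE_HOT_TRANSFORM, 'source_column': 'cat1'},
    'num1': {'transform': ft.SCALE_TRANSFORM, 'source_column': 'num1'},
}
# (name, info) pairs; index_start/size describe each feature's slot in the transformed vector.
feature_indices = [('cat1', {'index_start': 0, 'size': 3}),
                   ('num1', {'index_start': 3, 'size': 1})]
feature_map = ft.create_feature_map(features, feature_indices, './analysis_output')
# e.g. [(0, 'cat1=red'), (1, 'cat1=blue'), (2, 'cat1=green'), (3, 'num1')]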
4,956 | googledatalab/pydatalab | datalab/bigquery/_view.py | View.create | def create(self, query):
""" Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False.
"""
if isinstance(query, _query.Query):
query = query.sql
try:
response = self._table._api.tables_insert(self._table.name, query=query)
except Exception as e:
raise e
if 'selfLink' in response:
return self
raise Exception("View %s could not be created as it already exists" % str(self)) | python | def create(self, query):
""" Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False.
"""
if isinstance(query, _query.Query):
query = query.sql
try:
response = self._table._api.tables_insert(self._table.name, query=query)
except Exception as e:
raise e
if 'selfLink' in response:
return self
raise Exception("View %s could not be created as it already exists" % str(self)) | [
"def",
"create",
"(",
"self",
",",
"query",
")",
":",
"if",
"isinstance",
"(",
"query",
",",
"_query",
".",
"Query",
")",
":",
"query",
"=",
"query",
".",
"sql",
"try",
":",
"response",
"=",
"self",
".",
"_table",
".",
"_api",
".",
"tables_insert",
"(",
"self",
".",
"_table",
".",
"name",
",",
"query",
"=",
"query",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'selfLink'",
"in",
"response",
":",
"return",
"self",
"raise",
"Exception",
"(",
"\"View %s could not be created as it already exists\"",
"%",
"str",
"(",
"self",
")",
")"
] | Creates the view with the specified query.
Args:
query: the query to use for the View; either a string containing a SQL query or
a Query object.
Returns:
The View instance.
Raises:
Exception if the view couldn't be created or already exists and overwrite was False. | [
"Creates",
"the",
"view",
"with",
"the",
"specified",
"query",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L91-L110 |
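A short sketch of creating a view from a SQL string; the dataset, view, and table names are placeholders, and the View constructor is assumed to take a 'dataset.view' name.

import datalab.bigquery as bq

view = bq.View('mydataset.sample_view')  # assumed constructor form
view.create('SELECT name, value FROM [mydataset.source_table] WHERE value > 0')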
4,957 | googledatalab/pydatalab | datalab/bigquery/_view.py | View.sample | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed.
"""
return self._table.sample(fields=fields, count=count, sampling=sampling, use_cache=use_cache,
dialect=dialect, billing_tier=billing_tier) | python | def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None,
billing_tier=None):
"""Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed.
"""
return self._table.sample(fields=fields, count=count, sampling=sampling, use_cache=use_cache,
dialect=dialect, billing_tier=billing_tier) | [
"def",
"sample",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_table",
".",
"sample",
"(",
"fields",
"=",
"fields",
",",
"count",
"=",
"count",
",",
"sampling",
"=",
"sampling",
",",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Retrieves a sampling of data from the view.
Args:
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the view.
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable object containing the resulting data.
Raises:
Exception if the sample query could not be executed or the query response was malformed. | [
"Retrieves",
"a",
"sampling",
"of",
"data",
"from",
"the",
"view",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L112-L136 |
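A short sketch of `View.sample` from the entry above, reusing the hypothetical `view` object from the previous sketch; per the docstring, the return value is a QueryResultsTable.

```python
# Sample a handful of rows from the view (hypothetical view object, assumed to exist).
sample = view.sample(fields=['order_id', 'total'], count=10, dialect='standard')
```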
4,958 | googledatalab/pydatalab | datalab/bigquery/_view.py | View.update | def update(self, friendly_name=None, description=None, query=None):
""" Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View.
"""
self._table._load_info()
if query is not None:
if isinstance(query, _query.Query):
query = query.sql
self._table._info['view'] = {'query': query}
self._table.update(friendly_name=friendly_name, description=description) | python | def update(self, friendly_name=None, description=None, query=None):
""" Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View.
"""
self._table._load_info()
if query is not None:
if isinstance(query, _query.Query):
query = query.sql
self._table._info['view'] = {'query': query}
self._table.update(friendly_name=friendly_name, description=description) | [
"def",
"update",
"(",
"self",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"query",
"=",
"None",
")",
":",
"self",
".",
"_table",
".",
"_load_info",
"(",
")",
"if",
"query",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"query",
",",
"_query",
".",
"Query",
")",
":",
"query",
"=",
"query",
".",
"sql",
"self",
".",
"_table",
".",
"_info",
"[",
"'view'",
"]",
"=",
"{",
"'query'",
":",
"query",
"}",
"self",
".",
"_table",
".",
"update",
"(",
"friendly_name",
"=",
"friendly_name",
",",
"description",
"=",
"description",
")"
] | Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View. | [
"Selectively",
"updates",
"View",
"information",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L149-L164 |
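A sketch of `View.update` from the entry above; only non-None arguments are applied, so metadata and the underlying query can be changed independently. The `view` object is again assumed.

```python
# Update metadata only; the view's query is left untouched.
view.update(friendly_name='Daily orders', description='Aggregated daily order totals')
# Replace the underlying query only.
view.update(query='SELECT order_id, total, ts FROM mydataset.orders')
```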
4,959 | googledatalab/pydatalab | datalab/bigquery/_view.py | View.results | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return self._materialization.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier) | python | def results(self, use_cache=True, dialect=None, billing_tier=None):
"""Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed.
"""
return self._materialization.results(use_cache=use_cache, dialect=dialect,
billing_tier=billing_tier) | [
"def",
"results",
"(",
"self",
",",
"use_cache",
"=",
"True",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_materialization",
".",
"results",
"(",
"use_cache",
"=",
"use_cache",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Materialize the view synchronously.
If you require more control over the execution, use execute() or execute_async().
Args:
use_cache: whether to use cached results or not.
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryResultsTable containing the result set.
Raises:
Exception if the query could not be executed or query response was malformed. | [
"Materialize",
"the",
"view",
"synchronously",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L166-L187 |
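A sketch of `View.results` from the entry above (hypothetical `view` object); it materializes the view synchronously and returns a QueryResultsTable.

```python
table = view.results(use_cache=False, dialect='standard')
```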
4,960 | googledatalab/pydatalab | datalab/bigquery/_view.py | View.execute_async | def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='high',
allow_large_results=False, dialect=None, billing_tier=None):
"""Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized.
"""
return self._materialization.execute_async(table_name=table_name, table_mode=table_mode,
use_cache=use_cache, priority=priority,
allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier) | python | def execute_async(self, table_name=None, table_mode='create', use_cache=True, priority='high',
allow_large_results=False, dialect=None, billing_tier=None):
"""Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized.
"""
return self._materialization.execute_async(table_name=table_name, table_mode=table_mode,
use_cache=use_cache, priority=priority,
allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier) | [
"def",
"execute_async",
"(",
"self",
",",
"table_name",
"=",
"None",
",",
"table_mode",
"=",
"'create'",
",",
"use_cache",
"=",
"True",
",",
"priority",
"=",
"'high'",
",",
"allow_large_results",
"=",
"False",
",",
"dialect",
"=",
"None",
",",
"billing_tier",
"=",
"None",
")",
":",
"return",
"self",
".",
"_materialization",
".",
"execute_async",
"(",
"table_name",
"=",
"table_name",
",",
"table_mode",
"=",
"table_mode",
",",
"use_cache",
"=",
"use_cache",
",",
"priority",
"=",
"priority",
",",
"allow_large_results",
"=",
"allow_large_results",
",",
"dialect",
"=",
"dialect",
",",
"billing_tier",
"=",
"billing_tier",
")"
] | Materialize the View asynchronously.
Args:
table_name: the result table name; if None, then a temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'low' or 'high' (default). Note that 'high' is more expensive, but is
better suited to exploratory analysis.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A QueryJob for the materialization
Raises:
Exception (KeyError) if View could not be materialized. | [
"Materialize",
"the",
"View",
"asynchronously",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_view.py#L189-L219 |
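A sketch of `View.execute_async` from the entry above. The `QueryJob.wait()` call is an assumption about the returned job object and is not shown in this entry.

```python
job = view.execute_async(table_name='mydataset.daily_orders_snapshot',
                         table_mode='overwrite',
                         allow_large_results=True)
job.wait()  # assumed QueryJob helper; blocks until the materialization finishes
```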
4,961 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | get_notebook_item | def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return google.datalab.utils.get_item(env, name) | python | def get_notebook_item(name):
""" Get an item from the IPython environment. """
env = notebook_environment()
return google.datalab.utils.get_item(env, name) | [
"def",
"get_notebook_item",
"(",
"name",
")",
":",
"env",
"=",
"notebook_environment",
"(",
")",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"get_item",
"(",
"env",
",",
"name",
")"
] | Get an item from the IPython environment. | [
"Get",
"an",
"item",
"from",
"the",
"IPython",
"environment",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L50-L53 |
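A sketch of `get_notebook_item` from the entry above; it only makes sense inside an IPython/Datalab session where the named variable exists in the notebook environment.

```python
# Fetch a variable named 'my_query' that was defined earlier in the notebook.
my_query = get_notebook_item('my_query')
```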
4,962 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_list_of_dicts | def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | python | def _get_data_from_list_of_dicts(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of dicts. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
rows = [{'c': [{'v': row[c]} if c in row else {} for c in fields]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | [
"def",
"_get_data_from_list_of_dicts",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"gen",
"=",
"source",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"if",
"count",
">=",
"0",
"else",
"source",
"rows",
"=",
"[",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"c",
"]",
"}",
"if",
"c",
"in",
"row",
"else",
"{",
"}",
"for",
"c",
"in",
"fields",
"]",
"}",
"for",
"row",
"in",
"gen",
"]",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles lists of dicts. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"lists",
"of",
"dicts",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L144-L151 |
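A sketch of the module-private helper `_get_data_from_list_of_dicts` from the entry above; schema inference is delegated to `google.datalab.bigquery.Schema.from_data`, which is not shown here.

```python
rows = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
data, total = _get_data_from_list_of_dicts(rows, fields='*')
# data is a Google-Charts-style payload: {'cols': [...], 'rows': [{'c': [{'v': ...}, ...]}, ...]}
# total == 2 (the length of the source list)
```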
4,963 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_list_of_lists | def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | python | def _get_data_from_list_of_lists(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles lists of lists. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
gen = source[first_row:first_row + count] if count >= 0 else source
cols = [schema.find(name) for name in fields]
rows = [{'c': [{'v': row[i]} for i in cols]} for row in gen]
return {'cols': _get_cols(fields, schema), 'rows': rows}, len(source) | [
"def",
"_get_data_from_list_of_lists",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"gen",
"=",
"source",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"if",
"count",
">=",
"0",
"else",
"source",
"cols",
"=",
"[",
"schema",
".",
"find",
"(",
"name",
")",
"for",
"name",
"in",
"fields",
"]",
"rows",
"=",
"[",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"i",
"]",
"}",
"for",
"i",
"in",
"cols",
"]",
"}",
"for",
"row",
"in",
"gen",
"]",
"return",
"{",
"'cols'",
":",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles lists of lists. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"lists",
"of",
"lists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L154-L162 |
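A sketch of `_get_data_from_list_of_lists` from the entry above. How the inferred schema names the positional columns depends on `Schema.from_data`, which is not part of this entry, so treat the call below as a calling-convention illustration only.

```python
rows = [['a', 1], ['b', 2]]
data, total = _get_data_from_list_of_lists(rows, fields='*')
# Each source row becomes {'c': [{'v': 'a'}, {'v': 1}]}-style cells, ordered by the schema.
```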
4,964 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | _get_data_from_dataframe | def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) | python | def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) | [
"def",
"_get_data_from_dataframe",
"(",
"source",
",",
"fields",
"=",
"'*'",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"-",
"1",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Schema",
".",
"from_data",
"(",
"source",
")",
"fields",
"=",
"get_field_list",
"(",
"fields",
",",
"schema",
")",
"rows",
"=",
"[",
"]",
"if",
"count",
"<",
"0",
":",
"count",
"=",
"len",
"(",
"source",
".",
"index",
")",
"df_slice",
"=",
"source",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"[",
"first_row",
":",
"first_row",
"+",
"count",
"]",
"for",
"index",
",",
"data_frame_row",
"in",
"df_slice",
".",
"iterrows",
"(",
")",
":",
"row",
"=",
"data_frame_row",
".",
"to_dict",
"(",
")",
"for",
"key",
"in",
"list",
"(",
"row",
".",
"keys",
"(",
")",
")",
":",
"val",
"=",
"row",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"pandas",
".",
"Timestamp",
")",
":",
"row",
"[",
"key",
"]",
"=",
"val",
".",
"to_pydatetime",
"(",
")",
"rows",
".",
"append",
"(",
"{",
"'c'",
":",
"[",
"{",
"'v'",
":",
"row",
"[",
"c",
"]",
"}",
"if",
"c",
"in",
"row",
"else",
"{",
"}",
"for",
"c",
"in",
"fields",
"]",
"}",
")",
"cols",
"=",
"_get_cols",
"(",
"fields",
",",
"schema",
")",
"return",
"{",
"'cols'",
":",
"cols",
",",
"'rows'",
":",
"rows",
"}",
",",
"len",
"(",
"source",
")"
] | Helper function for _get_data that handles Pandas DataFrames. | [
"Helper",
"function",
"for",
"_get_data",
"that",
"handles",
"Pandas",
"DataFrames",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L165-L183 |
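A sketch of `_get_data_from_dataframe` from the entry above; note that pandas Timestamps are converted to plain datetime objects before being placed in the row payload.

```python
import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'value': [1, 2]})
data, total = _get_data_from_dataframe(df, fields='*', first_row=0, count=-1)
# total == 2; data['rows'][0]['c'] holds the first row's cell values in schema order
```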
4,965 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | parse_config_for_selected_keys | def parse_config_for_selected_keys(content, keys):
""" Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string.
"""
config_items = {key: None for key in keys}
if not content:
return config_items, content
stripped = content.strip()
if len(stripped) == 0:
return {}, None
elif stripped[0] == '{':
config = json.loads(content)
else:
config = yaml.load(content)
if not isinstance(config, dict):
raise ValueError('Invalid config.')
for key in keys:
config_items[key] = config.pop(key, None)
if not config:
return config_items, None
if stripped[0] == '{':
content_out = json.dumps(config, indent=4)
else:
content_out = yaml.dump(config, default_flow_style=False)
return config_items, content_out | python | def parse_config_for_selected_keys(content, keys):
""" Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string.
"""
config_items = {key: None for key in keys}
if not content:
return config_items, content
stripped = content.strip()
if len(stripped) == 0:
return {}, None
elif stripped[0] == '{':
config = json.loads(content)
else:
config = yaml.load(content)
if not isinstance(config, dict):
raise ValueError('Invalid config.')
for key in keys:
config_items[key] = config.pop(key, None)
if not config:
return config_items, None
if stripped[0] == '{':
content_out = json.dumps(config, indent=4)
else:
content_out = yaml.dump(config, default_flow_style=False)
return config_items, content_out | [
"def",
"parse_config_for_selected_keys",
"(",
"content",
",",
"keys",
")",
":",
"config_items",
"=",
"{",
"key",
":",
"None",
"for",
"key",
"in",
"keys",
"}",
"if",
"not",
"content",
":",
"return",
"config_items",
",",
"content",
"stripped",
"=",
"content",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"stripped",
")",
"==",
"0",
":",
"return",
"{",
"}",
",",
"None",
"elif",
"stripped",
"[",
"0",
"]",
"==",
"'{'",
":",
"config",
"=",
"json",
".",
"loads",
"(",
"content",
")",
"else",
":",
"config",
"=",
"yaml",
".",
"load",
"(",
"content",
")",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid config.'",
")",
"for",
"key",
"in",
"keys",
":",
"config_items",
"[",
"key",
"]",
"=",
"config",
".",
"pop",
"(",
"key",
",",
"None",
")",
"if",
"not",
"config",
":",
"return",
"config_items",
",",
"None",
"if",
"stripped",
"[",
"0",
"]",
"==",
"'{'",
":",
"content_out",
"=",
"json",
".",
"dumps",
"(",
"config",
",",
"indent",
"=",
"4",
")",
"else",
":",
"content_out",
"=",
"yaml",
".",
"dump",
"(",
"config",
",",
"default_flow_style",
"=",
"False",
")",
"return",
"config_items",
",",
"content_out"
] | Parse a config from a magic cell body for selected config keys.
For example, if 'content' is:
config_item1: value1
config_item2: value2
config_item3: value3
and 'keys' are: [config_item1, config_item3]
The results will be a tuple of
1. The parsed config items (dict): {config_item1: value1, config_item3: value3}
2. The remaining content (string): config_item2: value2
Args:
content: the input content. A string. It has to be a yaml or JSON string.
keys: a list of keys to retrieve from content. Note that it only checks top level keys
in the dict.
Returns:
A tuple. First is the parsed config including only selected keys. Second is
the remaining content.
Raises:
Exception if the content is not a valid yaml or JSON string. | [
"Parse",
"a",
"config",
"from",
"a",
"magic",
"cell",
"body",
"for",
"selected",
"config",
"keys",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L341-L393 |
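A sketch of `parse_config_for_selected_keys` from the entry above, following the example in its own docstring.

```python
content = """
config_item1: value1
config_item2: value2
config_item3: value3
"""
selected, remaining = parse_config_for_selected_keys(content, ['config_item1', 'config_item3'])
# selected  -> {'config_item1': 'value1', 'config_item3': 'value3'}
# remaining -> YAML text containing only 'config_item2: value2'
```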
4,966 | googledatalab/pydatalab | google/datalab/utils/commands/_utils.py | chart_html | def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
controls_html, defaults, ids = parse_control_options(controls)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
control_ids.extend(ids),
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=google.datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=google.datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count) | python | def chart_html(driver_name, chart_type, source, chart_options=None, fields='*', refresh_interval=0,
refresh_data=None, control_defaults=None, control_ids=None, schema=None):
""" Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart.
"""
div_id = _html.Html.next_id()
controls_html = ''
if control_defaults is None:
control_defaults = {}
if control_ids is None:
control_ids = []
if chart_options is not None and 'variables' in chart_options:
controls = chart_options['variables']
del chart_options['variables'] # Just to make sure GCharts doesn't see them.
controls_html, defaults, ids = parse_control_options(controls)
# We augment what we are passed so that in principle we can have controls that are
# shared by charts as well as controls that are specific to a chart.
control_defaults.update(defaults)
control_ids.extend(ids),
_HTML_TEMPLATE = """
<div class="bqgc-container">
{controls}
<div class="bqgc {extra_class}" id="{id}">
</div>
</div>
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting',
'datalab/element!{id}',
'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'
],
function(charts, dom, events) {{
charts.render(
'{driver}',
dom,
events,
'{chart_type}',
{control_ids},
{data},
{options},
{refresh_data},
{refresh_interval},
{total_rows});
}}
);
</script>
"""
count = 25 if chart_type == 'paged_table' else -1
data, total_count = get_data(source, fields, control_defaults, 0, count, schema)
if refresh_data is None:
if isinstance(source, basestring):
source_index = get_data_source_index(source)
refresh_data = {'source_index': source_index, 'name': source_index}
else:
refresh_data = {'name': 'raw data'}
refresh_data['fields'] = fields
# TODO(gram): check if we need to augment env with user_ns
return _HTML_TEMPLATE \
.format(driver=driver_name,
controls=controls_html,
id=div_id,
chart_type=chart_type,
extra_class=" bqgc-controlled" if len(controls_html) else '',
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder),
options=json.dumps(chart_options, cls=google.datalab.utils.JSONEncoder),
refresh_data=json.dumps(refresh_data, cls=google.datalab.utils.JSONEncoder),
refresh_interval=refresh_interval,
control_ids=str(control_ids),
total_rows=total_count) | [
"def",
"chart_html",
"(",
"driver_name",
",",
"chart_type",
",",
"source",
",",
"chart_options",
"=",
"None",
",",
"fields",
"=",
"'*'",
",",
"refresh_interval",
"=",
"0",
",",
"refresh_data",
"=",
"None",
",",
"control_defaults",
"=",
"None",
",",
"control_ids",
"=",
"None",
",",
"schema",
"=",
"None",
")",
":",
"div_id",
"=",
"_html",
".",
"Html",
".",
"next_id",
"(",
")",
"controls_html",
"=",
"''",
"if",
"control_defaults",
"is",
"None",
":",
"control_defaults",
"=",
"{",
"}",
"if",
"control_ids",
"is",
"None",
":",
"control_ids",
"=",
"[",
"]",
"if",
"chart_options",
"is",
"not",
"None",
"and",
"'variables'",
"in",
"chart_options",
":",
"controls",
"=",
"chart_options",
"[",
"'variables'",
"]",
"del",
"chart_options",
"[",
"'variables'",
"]",
"# Just to make sure GCharts doesn't see them.",
"controls_html",
",",
"defaults",
",",
"ids",
"=",
"parse_control_options",
"(",
"controls",
")",
"# We augment what we are passed so that in principle we can have controls that are",
"# shared by charts as well as controls that are specific to a chart.",
"control_defaults",
".",
"update",
"(",
"defaults",
")",
"control_ids",
".",
"extend",
"(",
"ids",
")",
",",
"_HTML_TEMPLATE",
"=",
"\"\"\"\n <div class=\"bqgc-container\">\n {controls}\n <div class=\"bqgc {extra_class}\" id=\"{id}\">\n </div>\n </div>\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting',\n 'datalab/element!{id}',\n 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'\n ],\n function(charts, dom, events) {{\n charts.render(\n '{driver}',\n dom,\n events,\n '{chart_type}',\n {control_ids},\n {data},\n {options},\n {refresh_data},\n {refresh_interval},\n {total_rows});\n }}\n );\n </script>\n \"\"\"",
"count",
"=",
"25",
"if",
"chart_type",
"==",
"'paged_table'",
"else",
"-",
"1",
"data",
",",
"total_count",
"=",
"get_data",
"(",
"source",
",",
"fields",
",",
"control_defaults",
",",
"0",
",",
"count",
",",
"schema",
")",
"if",
"refresh_data",
"is",
"None",
":",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source_index",
"=",
"get_data_source_index",
"(",
"source",
")",
"refresh_data",
"=",
"{",
"'source_index'",
":",
"source_index",
",",
"'name'",
":",
"source_index",
"}",
"else",
":",
"refresh_data",
"=",
"{",
"'name'",
":",
"'raw data'",
"}",
"refresh_data",
"[",
"'fields'",
"]",
"=",
"fields",
"# TODO(gram): check if we need to augment env with user_ns",
"return",
"_HTML_TEMPLATE",
".",
"format",
"(",
"driver",
"=",
"driver_name",
",",
"controls",
"=",
"controls_html",
",",
"id",
"=",
"div_id",
",",
"chart_type",
"=",
"chart_type",
",",
"extra_class",
"=",
"\" bqgc-controlled\"",
"if",
"len",
"(",
"controls_html",
")",
"else",
"''",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"options",
"=",
"json",
".",
"dumps",
"(",
"chart_options",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"refresh_data",
"=",
"json",
".",
"dumps",
"(",
"refresh_data",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
",",
"refresh_interval",
"=",
"refresh_interval",
",",
"control_ids",
"=",
"str",
"(",
"control_ids",
")",
",",
"total_rows",
"=",
"total_count",
")"
] | Return HTML for a chart.
Args:
driver_name: the name of the chart driver. Currently we support 'plotly' or 'gcharts'.
chart_type: string specifying type of chart.
source: the data source for the chart. Can be actual data (e.g. list) or the name of
a data source (e.g. the name of a query module).
chart_options: a dictionary of options for the chart. Can contain a 'controls' entry
specifying controls. Other entries are passed as JSON to Google Charts.
fields: the fields to chart. Can be '*' for all fields (only sensible if the columns are
ordered; e.g. a Query or list of lists, but not a list of dictionaries); otherwise a
string containing a comma-separated list of field names.
refresh_interval: a time in seconds after which the chart data will be refreshed. 0 if the
chart should not be refreshed (i.e. the data is static).
refresh_data: if the source is a list or other raw data, this is a YAML string containing
metadata needed to support calls to refresh (get_chart_data).
control_defaults: the default variable values for controls that are shared across charts
including this one.
control_ids: the DIV IDs for controls that are shared across charts including this one.
schema: an optional schema for the data; if not supplied one will be inferred.
Returns:
A string containing the HTML for the chart. | [
"Return",
"HTML",
"for",
"a",
"chart",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/commands/_utils.py#L614-L727 |
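A sketch of `chart_html` from the entry above, rendering a small in-memory list as a paged table; the returned string is the HTML/JS snippet a Datalab notebook would display. The exact field-list handling lives in helpers not shown in this entry.

```python
html = chart_html('gcharts', 'paged_table',
                  source=[{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}],
                  chart_options={}, fields='name,value')
```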
4,967 | googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.default | def default(fields=None, count=5):
"""Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling.
"""
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count) | python | def default(fields=None, count=5):
"""Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling.
"""
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count) | [
"def",
"default",
"(",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"return",
"lambda",
"sql",
":",
"'SELECT %s FROM (%s) LIMIT %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"count",
")"
] | Provides a simple default sampling strategy which limits the result set by a count.
Args:
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get a random sampling. | [
"Provides",
"a",
"simple",
"default",
"sampling",
"strategy",
"which",
"limits",
"the",
"result",
"set",
"by",
"a",
"count",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L44-L54 |
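A sketch of `Sampling.default` from the entry above; the exact projection string comes from the private `_create_projection` helper (not shown), so '*' is assumed for the no-fields case.

```python
sampler = Sampling.default(count=10)
sampler('SELECT * FROM mydataset.mytable')
# -> 'SELECT * FROM (SELECT * FROM mydataset.mytable) LIMIT 10'   (assuming a '*' projection)
```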
4,968 | googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.sorted | def sorted(field_name, ascending=True, fields=None, count=5):
"""Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows.
"""
if field_name is None:
raise Exception('Sort field must be specified')
direction = '' if ascending else ' DESC'
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,
direction, count) | python | def sorted(field_name, ascending=True, fields=None, count=5):
"""Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows.
"""
if field_name is None:
raise Exception('Sort field must be specified')
direction = '' if ascending else ' DESC'
projection = Sampling._create_projection(fields)
return lambda sql: 'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d' % (projection, sql, field_name,
direction, count) | [
"def",
"sorted",
"(",
"field_name",
",",
"ascending",
"=",
"True",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Sort field must be specified'",
")",
"direction",
"=",
"''",
"if",
"ascending",
"else",
"' DESC'",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"return",
"lambda",
"sql",
":",
"'SELECT %s FROM (%s) ORDER BY %s%s LIMIT %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"field_name",
",",
"direction",
",",
"count",
")"
] | Provides a sampling strategy that picks from an ordered set of rows.
Args:
field_name: the name of the field to sort the rows by.
ascending: whether to sort in ascending direction or not.
fields: an optional list of field names to retrieve.
count: optional number of rows to limit the sampled results to.
Returns:
A sampling function that can be applied to get the initial few rows. | [
"Provides",
"a",
"sampling",
"strategy",
"that",
"picks",
"from",
"an",
"ordered",
"set",
"of",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L57-L73 |
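A sketch of `Sampling.sorted` from the entry above, again assuming a '*' projection when no fields are given.

```python
sampler = Sampling.sorted('created_at', ascending=False, count=5)
sampler('SELECT * FROM mydataset.events')
# -> 'SELECT * FROM (SELECT * FROM mydataset.events) ORDER BY created_at DESC LIMIT 5'
```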
4,969 | googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.hashed | def hashed(field_name, percent, fields=None, count=0):
"""Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
"""
if field_name is None:
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \
(projection, sql, field_name, percent)
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _hashed_sampling | python | def hashed(field_name, percent, fields=None, count=0):
"""Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling.
"""
if field_name is None:
raise Exception('Hash field must be specified')
def _hashed_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d' % \
(projection, sql, field_name, percent)
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _hashed_sampling | [
"def",
"hashed",
"(",
"field_name",
",",
"percent",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"0",
")",
":",
"if",
"field_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Hash field must be specified'",
")",
"def",
"_hashed_sampling",
"(",
"sql",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"sql",
"=",
"'SELECT %s FROM (%s) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(%s AS STRING))), 100) < %d'",
"%",
"(",
"projection",
",",
"sql",
",",
"field_name",
",",
"percent",
")",
"if",
"count",
"!=",
"0",
":",
"sql",
"=",
"'%s LIMIT %d'",
"%",
"(",
"sql",
",",
"count",
")",
"return",
"sql",
"return",
"_hashed_sampling"
] | Provides a sampling strategy based on hashing and selecting a percentage of data.
Args:
field_name: the name of the field to hash.
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: optional maximum count of rows to pick.
Returns:
A sampling function that can be applied to get a hash-based sampling. | [
"Provides",
"a",
"sampling",
"strategy",
"based",
"on",
"hashing",
"and",
"selecting",
"a",
"percentage",
"of",
"data",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L76-L97 |
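A sketch of `Sampling.hashed` from the entry above; roughly `percent`% of rows are kept by hashing the key field, with an optional LIMIT appended when `count` is non-zero. A '*' projection is assumed.

```python
sampler = Sampling.hashed('user_id', percent=10, count=1000)
sampler('SELECT * FROM mydataset.events')
# -> 'SELECT * FROM (SELECT * FROM mydataset.events) WHERE MOD(ABS(FARM_FINGERPRINT(CAST(user_id AS STRING))), 100) < 10 LIMIT 1000'
```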
4,970 | googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling.random | def random(percent, fields=None, count=0):
"""Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized.
"""
def _random_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE rand() < %f' % (projection, sql, (float(percent) / 100.0))
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _random_sampling | python | def random(percent, fields=None, count=0):
"""Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized.
"""
def _random_sampling(sql):
projection = Sampling._create_projection(fields)
sql = 'SELECT %s FROM (%s) WHERE rand() < %f' % (projection, sql, (float(percent) / 100.0))
if count != 0:
sql = '%s LIMIT %d' % (sql, count)
return sql
return _random_sampling | [
"def",
"random",
"(",
"percent",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"0",
")",
":",
"def",
"_random_sampling",
"(",
"sql",
")",
":",
"projection",
"=",
"Sampling",
".",
"_create_projection",
"(",
"fields",
")",
"sql",
"=",
"'SELECT %s FROM (%s) WHERE rand() < %f'",
"%",
"(",
"projection",
",",
"sql",
",",
"(",
"float",
"(",
"percent",
")",
"/",
"100.0",
")",
")",
"if",
"count",
"!=",
"0",
":",
"sql",
"=",
"'%s LIMIT %d'",
"%",
"(",
"sql",
",",
"count",
")",
"return",
"sql",
"return",
"_random_sampling"
] | Provides a sampling strategy that picks a semi-random set of rows.
Args:
percent: the percentage of the resulting hashes to select.
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to (default 0, i.e. no limit).
Returns:
A sampling function that can be applied to get some random rows. In order for this to
provide a good random sample percent should be chosen to be ~count/#rows where #rows
is the number of rows in the object (query, view or table) being sampled.
The rows will be returned in order; i.e. the order itself is not randomized. | [
"Provides",
"a",
"sampling",
"strategy",
"that",
"picks",
"a",
"semi",
"-",
"random",
"set",
"of",
"rows",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L100-L119 |
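A sketch of `Sampling.random` from the entry above; with `count=0` no LIMIT clause is added, and the percentage is converted to a `rand()` threshold. A '*' projection is assumed.

```python
sampler = Sampling.random(percent=1, count=0)
sampler('SELECT * FROM mydataset.events')
# -> 'SELECT * FROM (SELECT * FROM mydataset.events) WHERE rand() < 0.010000'
```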
4,971 | googledatalab/pydatalab | google/datalab/bigquery/_sampling.py | Sampling._auto | def _auto(method, fields, count, percent, key_field, ascending):
"""Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed
"""
if method == 'limit':
return Sampling.default(fields=fields, count=count)
elif method == 'random':
return Sampling.random(fields=fields, percent=percent, count=count)
elif method == 'hashed':
return Sampling.hashed(fields=fields, field_name=key_field, percent=percent, count=count)
elif method == 'sorted':
return Sampling.sorted(fields=fields, field_name=key_field, ascending=ascending, count=count)
else:
raise Exception('Unsupported sampling method: %s' % method) | python | def _auto(method, fields, count, percent, key_field, ascending):
"""Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed
"""
if method == 'limit':
return Sampling.default(fields=fields, count=count)
elif method == 'random':
return Sampling.random(fields=fields, percent=percent, count=count)
elif method == 'hashed':
return Sampling.hashed(fields=fields, field_name=key_field, percent=percent, count=count)
elif method == 'sorted':
return Sampling.sorted(fields=fields, field_name=key_field, ascending=ascending, count=count)
else:
raise Exception('Unsupported sampling method: %s' % method) | [
"def",
"_auto",
"(",
"method",
",",
"fields",
",",
"count",
",",
"percent",
",",
"key_field",
",",
"ascending",
")",
":",
"if",
"method",
"==",
"'limit'",
":",
"return",
"Sampling",
".",
"default",
"(",
"fields",
"=",
"fields",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'random'",
":",
"return",
"Sampling",
".",
"random",
"(",
"fields",
"=",
"fields",
",",
"percent",
"=",
"percent",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'hashed'",
":",
"return",
"Sampling",
".",
"hashed",
"(",
"fields",
"=",
"fields",
",",
"field_name",
"=",
"key_field",
",",
"percent",
"=",
"percent",
",",
"count",
"=",
"count",
")",
"elif",
"method",
"==",
"'sorted'",
":",
"return",
"Sampling",
".",
"sorted",
"(",
"fields",
"=",
"fields",
",",
"field_name",
"=",
"key_field",
",",
"ascending",
"=",
"ascending",
",",
"count",
"=",
"count",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unsupported sampling method: %s'",
"%",
"method",
")"
] | Construct a sampling function according to the provided sampling technique, provided all
its needed fields are passed as arguments
Args:
method: one of the supported sampling methods: {limit,random,hashed,sorted}
fields: an optional list of field names to retrieve.
count: maximum number of rows to limit the sampled results to.
percent: the percentage of the resulting hashes to select if using hashed sampling
key_field: the name of the field to sort the rows by or use for hashing
ascending: whether to sort in ascending direction or not.
Returns:
A sampling function using the provided arguments
Raises:
Exception if an unsupported method name is passed | [
"Construct",
"a",
"sampling",
"function",
"according",
"to",
"the",
"provided",
"sampling",
"technique",
"provided",
"all",
"its",
"needed",
"fields",
"are",
"passed",
"as",
"arguments"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_sampling.py#L122-L147 |
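A hedged usage sketch (not from the source): `_auto` simply dispatches to one of the `Sampling` factory methods, each of which is assumed here to return a callable that rewrites a SQL string; the field, table, and count values below are illustrative.

# Build a hashed sampler keyed on 'id' and apply it to a query (sketch only).
sampling_fn = Sampling._auto(method='hashed', fields=['id', 'value'], count=1000,
                             percent=10, key_field='id', ascending=True)
sampled_sql = sampling_fn('SELECT id, value FROM my_dataset.my_table')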
4,972 | googledatalab/pydatalab | google/datalab/bigquery/_csv_options.py | CSVOptions._to_query_json | def _to_query_json(self):
""" Return the options as a dictionary to be used as JSON in a query job. """
return {
'quote': self._quote,
'fieldDelimiter': self._delimiter,
'encoding': self._encoding.upper(),
'skipLeadingRows': self._skip_leading_rows,
'allowQuotedNewlines': self._allow_quoted_newlines,
'allowJaggedRows': self._allow_jagged_rows
} | python | def _to_query_json(self):
""" Return the options as a dictionary to be used as JSON in a query job. """
return {
'quote': self._quote,
'fieldDelimiter': self._delimiter,
'encoding': self._encoding.upper(),
'skipLeadingRows': self._skip_leading_rows,
'allowQuotedNewlines': self._allow_quoted_newlines,
'allowJaggedRows': self._allow_jagged_rows
} | [
"def",
"_to_query_json",
"(",
"self",
")",
":",
"return",
"{",
"'quote'",
":",
"self",
".",
"_quote",
",",
"'fieldDelimiter'",
":",
"self",
".",
"_delimiter",
",",
"'encoding'",
":",
"self",
".",
"_encoding",
".",
"upper",
"(",
")",
",",
"'skipLeadingRows'",
":",
"self",
".",
"_skip_leading_rows",
",",
"'allowQuotedNewlines'",
":",
"self",
".",
"_allow_quoted_newlines",
",",
"'allowJaggedRows'",
":",
"self",
".",
"_allow_jagged_rows",
"}"
] | Return the options as a dictionary to be used as JSON in a query job. | [
"Return",
"the",
"options",
"as",
"a",
"dictionary",
"to",
"be",
"used",
"as",
"JSON",
"in",
"a",
"query",
"job",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_csv_options.py#L75-L84 |
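For illustration, a sketch of where the returned dictionary fits in an external-table configuration; the `CSVOptions` constructor keywords are an assumption that mirrors the private attributes read above.

# Assumed constructor keywords; the resulting dict plugs into a federated-table config.
options = CSVOptions(delimiter=',', skip_leading_rows=1, encoding='utf-8')
external_config = {
  'sourceFormat': 'CSV',
  'csvOptions': options._to_query_json(),  # e.g. {'quote': '"', 'fieldDelimiter': ',', ...}
}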
4,973 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.jobs_insert_load | def jobs_insert_load(self, source, table_name, append=False, overwrite=False, create=False,
source_format='CSV', field_delimiter=',', allow_jagged_rows=False,
allow_quoted_newlines=False, encoding='UTF-8', ignore_unknown_values=False,
max_bad_records=0, quote='"', skip_leading_rows=0):
""" Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(source, basestring):
source = [source]
write_disposition = 'WRITE_EMPTY'
if overwrite:
write_disposition = 'WRITE_TRUNCATE'
if append:
write_disposition = 'WRITE_APPEND'
data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': source,
'destinationTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
},
'createDisposition': 'CREATE_IF_NEEDED' if create else 'CREATE_NEVER',
'writeDisposition': write_disposition,
'sourceFormat': source_format,
'ignoreUnknownValues': ignore_unknown_values,
'maxBadRecords': max_bad_records,
}
}
}
if source_format == 'CSV':
load_config = data['configuration']['load']
load_config.update({
'fieldDelimiter': field_delimiter,
'allowJaggedRows': allow_jagged_rows,
'allowQuotedNewlines': allow_quoted_newlines,
'quote': quote,
'encoding': encoding,
'skipLeadingRows': skip_leading_rows
})
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def jobs_insert_load(self, source, table_name, append=False, overwrite=False, create=False,
source_format='CSV', field_delimiter=',', allow_jagged_rows=False,
allow_quoted_newlines=False, encoding='UTF-8', ignore_unknown_values=False,
max_bad_records=0, quote='"', skip_leading_rows=0):
""" Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(source, basestring):
source = [source]
write_disposition = 'WRITE_EMPTY'
if overwrite:
write_disposition = 'WRITE_TRUNCATE'
if append:
write_disposition = 'WRITE_APPEND'
data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': source,
'destinationTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
},
'createDisposition': 'CREATE_IF_NEEDED' if create else 'CREATE_NEVER',
'writeDisposition': write_disposition,
'sourceFormat': source_format,
'ignoreUnknownValues': ignore_unknown_values,
'maxBadRecords': max_bad_records,
}
}
}
if source_format == 'CSV':
load_config = data['configuration']['load']
load_config.update({
'fieldDelimiter': field_delimiter,
'allowJaggedRows': allow_jagged_rows,
'allowQuotedNewlines': allow_quoted_newlines,
'quote': quote,
'encoding': encoding,
'skipLeadingRows': skip_leading_rows
})
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"jobs_insert_load",
"(",
"self",
",",
"source",
",",
"table_name",
",",
"append",
"=",
"False",
",",
"overwrite",
"=",
"False",
",",
"create",
"=",
"False",
",",
"source_format",
"=",
"'CSV'",
",",
"field_delimiter",
"=",
"','",
",",
"allow_jagged_rows",
"=",
"False",
",",
"allow_quoted_newlines",
"=",
"False",
",",
"encoding",
"=",
"'UTF-8'",
",",
"ignore_unknown_values",
"=",
"False",
",",
"max_bad_records",
"=",
"0",
",",
"quote",
"=",
"'\"'",
",",
"skip_leading_rows",
"=",
"0",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"''",
")",
")",
"if",
"isinstance",
"(",
"source",
",",
"basestring",
")",
":",
"source",
"=",
"[",
"source",
"]",
"write_disposition",
"=",
"'WRITE_EMPTY'",
"if",
"overwrite",
":",
"write_disposition",
"=",
"'WRITE_TRUNCATE'",
"if",
"append",
":",
"write_disposition",
"=",
"'WRITE_APPEND'",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#job'",
",",
"'configuration'",
":",
"{",
"'load'",
":",
"{",
"'sourceUris'",
":",
"source",
",",
"'destinationTable'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
"}",
",",
"'createDisposition'",
":",
"'CREATE_IF_NEEDED'",
"if",
"create",
"else",
"'CREATE_NEVER'",
",",
"'writeDisposition'",
":",
"write_disposition",
",",
"'sourceFormat'",
":",
"source_format",
",",
"'ignoreUnknownValues'",
":",
"ignore_unknown_values",
",",
"'maxBadRecords'",
":",
"max_bad_records",
",",
"}",
"}",
"}",
"if",
"source_format",
"==",
"'CSV'",
":",
"load_config",
"=",
"data",
"[",
"'configuration'",
"]",
"[",
"'load'",
"]",
"load_config",
".",
"update",
"(",
"{",
"'fieldDelimiter'",
":",
"field_delimiter",
",",
"'allowJaggedRows'",
":",
"allow_jagged_rows",
",",
"'allowQuotedNewlines'",
":",
"allow_quoted_newlines",
",",
"'quote'",
":",
"quote",
",",
"'encoding'",
":",
"encoding",
",",
"'skipLeadingRows'",
":",
"skip_leading_rows",
"}",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to load data from GCS to a BQ table
Args:
source: the URL of the source bucket(s). Can include wildcards, and can be a single
string argument or a list.
table_name: a tuple representing the full name of the destination table.
append: if True append onto existing table contents.
overwrite: if True overwrite existing table contents.
create: if True, create the table if it doesn't exist
source_format: the format of the data; default 'CSV'. Other options are DATASTORE_BACKUP
or NEWLINE_DELIMITED_JSON.
field_delimiter: The separator for fields in a CSV file. BigQuery converts the string to
ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data
as raw binary (default ',').
allow_jagged_rows: If True, accept rows in CSV files that are missing trailing optional
columns; the missing values are treated as nulls (default False).
allow_quoted_newlines: If True, allow quoted data sections in CSV files that contain newline
characters (default False).
encoding: The character encoding of the data, either 'UTF-8' (the default) or 'ISO-8859-1'.
ignore_unknown_values: If True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: The maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
quote: The value used to quote data sections in a CSV file; default '"'. If your data does
not contain quoted sections, set the property value to an empty string. If your data
contains quoted newline characters, you must also enable allow_quoted_newlines.
skip_leading_rows: A number of rows at the top of a CSV file to skip (default 0).
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"load",
"data",
"from",
"GCS",
"to",
"a",
"BQ",
"table"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L50-L123 |
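A hedged usage sketch: `api` stands for an authenticated `Api` instance and `TableName` for any tuple-like object exposing `project_id`, `dataset_id`, and `table_id`, which is all the method reads; the bucket and table names are made up.

# Load sharded CSV files from GCS into a table, creating/overwriting it as needed.
table = TableName(project_id='my-project', dataset_id='my_dataset', table_id='events')
response = api.jobs_insert_load(['gs://my-bucket/events-*.csv'], table,
                                overwrite=True, create=True,
                                skip_leading_rows=1, max_bad_records=10)
job_id = response['jobReference']['jobId']  # standard BigQuery job resource field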
4,974 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.jobs_get | def jobs_get(self, job_id, project_id=None):
"""Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def jobs_get(self, job_id, project_id=None):
"""Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._JOBS_PATH % (project_id, job_id))
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"jobs_get",
"(",
"self",
",",
"job_id",
",",
"project_id",
"=",
"None",
")",
":",
"if",
"project_id",
"is",
"None",
":",
"project_id",
"=",
"self",
".",
"_project_id",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"project_id",
",",
"job_id",
")",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a job.
Args:
job_id: the id of the job
project_id: the project id to use to fetch the results; use None for the default project.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"job",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L239-L253 |
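Continuing the load sketch above, the job can be polled until BigQuery marks it done; the field names follow the public job resource, and this loop is illustrative rather than the library's own polling code.

import time

# Poll the load job submitted earlier until its status reaches DONE.
while True:
  job = api.jobs_get(job_id)
  if job['status']['state'] == 'DONE':
    break
  time.sleep(5)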
4,975 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_insert | def datasets_insert(self, dataset_name, friendly_name=None, description=None):
"""Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': dataset_name.project_id,
'datasetId': dataset_name.dataset_id
},
}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def datasets_insert(self, dataset_name, friendly_name=None, description=None):
"""Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, ''))
data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': dataset_name.project_id,
'datasetId': dataset_name.dataset_id
},
}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"datasets_insert",
"(",
"self",
",",
"dataset_name",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"(",
"dataset_name",
".",
"project_id",
",",
"''",
")",
")",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#dataset'",
",",
"'datasetReference'",
":",
"{",
"'projectId'",
":",
"dataset_name",
".",
"project_id",
",",
"'datasetId'",
":",
"dataset_name",
".",
"dataset_id",
"}",
",",
"}",
"if",
"friendly_name",
":",
"data",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"description",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"create",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L255-L279 |
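A sketch with an assumed `DatasetName` tuple; only `project_id` and `dataset_id` are read by the method, and the names are illustrative.

# Create a scratch dataset with a friendly name and description.
dataset = DatasetName(project_id='my-project', dataset_id='staging')
api.datasets_insert(dataset, friendly_name='Staging', description='Scratch tables')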
4,976 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_delete | def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) | python | def datasets_delete(self, dataset_name, delete_contents):
"""Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
args = {}
if delete_contents:
args['deleteContents'] = True
return datalab.utils.Http.request(url, method='DELETE', args=args,
credentials=self._credentials, raw_response=True) | [
"def",
"datasets_delete",
"(",
"self",
",",
"dataset_name",
",",
"delete_contents",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"args",
"=",
"{",
"}",
"if",
"delete_contents",
":",
"args",
"[",
"'deleteContents'",
"]",
"=",
"True",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'DELETE'",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"raw_response",
"=",
"True",
")"
] | Issues a request to delete a dataset.
Args:
dataset_name: the name of the dataset to delete.
delete_contents: if True, any tables in the dataset will be deleted. If False and the
dataset is non-empty an exception will be raised.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"delete",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L281-L298 |
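A sketch using the same placeholder types as above; with `delete_contents=False` the API rejects the call for a non-empty dataset.

# Remove an obsolete dataset together with all of its tables.
old_dataset = DatasetName(project_id='my-project', dataset_id='scratch_2016')
api.datasets_delete(old_dataset, delete_contents=True)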
4,977 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_update | def datasets_update(self, dataset_name, dataset_info):
"""Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, method='PUT', data=dataset_info,
credentials=self._credentials) | python | def datasets_update(self, dataset_name, dataset_info):
"""Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, method='PUT', data=dataset_info,
credentials=self._credentials) | [
"def",
"datasets_update",
"(",
"self",
",",
"dataset_name",
",",
"dataset_info",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'PUT'",
",",
"data",
"=",
"dataset_info",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Updates the Dataset info.
Args:
dataset_name: the name of the dataset to update as a tuple of components.
dataset_info: the Dataset resource with updated fields. | [
"Updates",
"the",
"Dataset",
"info",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L300-L309 |
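The PUT semantics imply a read-modify-write cycle; a sketch using the same placeholders:

# Fetch the dataset resource, patch a field, and write the whole resource back.
info = api.datasets_get(dataset)
info['description'] = 'Curated staging tables'
api.datasets_update(dataset, info)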
4,978 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_get | def datasets_get(self, dataset_name):
"""Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def datasets_get(self, dataset_name):
"""Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"datasets_get",
"(",
"self",
",",
"dataset_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"dataset_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a dataset.
Args:
dataset_name: the name of the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"dataset",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L311-L322 |
4,979 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.datasets_list | def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | python | def datasets_list(self, project_id=None, max_results=0, page_token=None):
"""Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
if project_id is None:
project_id = self._project_id
url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, ''))
args = {}
if max_results != 0:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | [
"def",
"datasets_list",
"(",
"self",
",",
"project_id",
"=",
"None",
",",
"max_results",
"=",
"0",
",",
"page_token",
"=",
"None",
")",
":",
"if",
"project_id",
"is",
"None",
":",
"project_id",
"=",
"self",
".",
"_project_id",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_DATASETS_PATH",
"%",
"(",
"project_id",
",",
"''",
")",
")",
"args",
"=",
"{",
"}",
"if",
"max_results",
"!=",
"0",
":",
"args",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"page_token",
"is",
"not",
"None",
":",
"args",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to list the datasets in the project.
Args:
project_id: the project id to use to fetch the results; use None for the default project.
max_results: an optional maximum number of tables to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"list",
"the",
"datasets",
"in",
"the",
"project",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L324-L346 |
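A paging sketch: thread `nextPageToken` (the standard BigQuery list-response field) back into the next call until it disappears.

# Collect every dataset in the project across all result pages.
datasets, token = [], None
while True:
  page = api.datasets_list(project_id='my-project', max_results=100, page_token=token)
  datasets.extend(page.get('datasets', []))
  token = page.get('nextPageToken')
  if not token:
    break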
4,980 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tables_get | def tables_get(self, table_name):
"""Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | python | def tables_get(self, table_name):
"""Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, credentials=self._credentials) | [
"def",
"tables_get",
"(",
"self",
",",
"table_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to retrieve information about a table.
Args:
table_name: a tuple representing the full name of the table.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"retrieve",
"information",
"about",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L348-L359 |
4,981 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tables_insert | def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
description=None):
"""Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + \
(Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def tables_insert(self, table_name, schema=None, query=None, friendly_name=None,
description=None):
"""Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + \
(Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id
}
}
if schema:
data['schema'] = {'fields': schema}
if query:
data['view'] = {'query': query}
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"tables_insert",
"(",
"self",
",",
"table_name",
",",
"schema",
"=",
"None",
",",
"query",
"=",
"None",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"table_name",
".",
"dataset_id",
",",
"''",
",",
"''",
")",
")",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#table'",
",",
"'tableReference'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
"}",
"}",
"if",
"schema",
":",
"data",
"[",
"'schema'",
"]",
"=",
"{",
"'fields'",
":",
"schema",
"}",
"if",
"query",
":",
"data",
"[",
"'view'",
"]",
"=",
"{",
"'query'",
":",
"query",
"}",
"if",
"friendly_name",
":",
"data",
"[",
"'friendlyName'",
"]",
"=",
"friendly_name",
"if",
"description",
":",
"data",
"[",
"'description'",
"]",
"=",
"description",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to create a table or view in the specified dataset with the specified id.
A schema must be provided to create a Table, or a query must be provided to create a View.
Args:
table_name: the name of the table as a tuple of components.
schema: the schema, if this is a Table creation.
query: the query, if this is a View creation.
friendly_name: an optional friendly name.
description: an optional description.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"create",
"a",
"table",
"or",
"view",
"in",
"the",
"specified",
"dataset",
"with",
"the",
"specified",
"id",
".",
"A",
"schema",
"must",
"be",
"provided",
"to",
"create",
"a",
"Table",
"or",
"a",
"query",
"must",
"be",
"provided",
"to",
"create",
"a",
"View",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L384-L420 |
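A sketch reusing the `table` placeholder from the load example: one call with a schema creates a table, one with a query creates a view; the field list and SQL are illustrative.

# Create a table from an explicit schema, then a view over it.
schema = [
  {'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
  {'name': 'payload', 'type': 'STRING', 'mode': 'NULLABLE'},
]
api.tables_insert(table, schema=schema, description='Raw events')
view = TableName(project_id='my-project', dataset_id='my_dataset', table_id='events_view')
api.tables_insert(view, query='SELECT id FROM my_dataset.events')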
4,982 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tabledata_insert_all | def tabledata_insert_all(self, table_name, rows):
"""Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + "/insertAll"
data = {
'kind': 'bigquery#tableDataInsertAllRequest',
'rows': rows
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def tabledata_insert_all(self, table_name, rows):
"""Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name) + "/insertAll"
data = {
'kind': 'bigquery#tableDataInsertAllRequest',
'rows': rows
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"tabledata_insert_all",
"(",
"self",
",",
"table_name",
",",
"rows",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"+",
"\"/insertAll\"",
"data",
"=",
"{",
"'kind'",
":",
"'bigquery#tableDataInsertAllRequest'",
",",
"'rows'",
":",
"rows",
"}",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"insert",
"data",
"into",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L422-L440 |
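Because `rows` is passed straight through as the request's `rows` field, each entry presumably needs to follow the insertAll row shape already (an `insertId` plus a `json` payload) — an inference from the request body above, not a documented guarantee. A sketch:

# Stream two rows into the table and check for per-row errors.
rows = [
  {'insertId': 'row-1', 'json': {'id': 1, 'payload': 'hello'}},
  {'insertId': 'row-2', 'json': {'id': 2, 'payload': 'world'}},
]
result = api.tabledata_insert_all(table, rows)
errors = result.get('insertErrors', [])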
4,983 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.tabledata_list | def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
""" Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
args = {}
if start_index:
args['startIndex'] = start_index
if max_results:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | python | def tabledata_list(self, table_name, start_index=None, max_results=None, page_token=None):
""" Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLEDATA_PATH % table_name)
args = {}
if start_index:
args['startIndex'] = start_index
if max_results:
args['maxResults'] = max_results
if page_token is not None:
args['pageToken'] = page_token
return datalab.utils.Http.request(url, args=args, credentials=self._credentials) | [
"def",
"tabledata_list",
"(",
"self",
",",
"table_name",
",",
"start_index",
"=",
"None",
",",
"max_results",
"=",
"None",
",",
"page_token",
"=",
"None",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLEDATA_PATH",
"%",
"table_name",
")",
"args",
"=",
"{",
"}",
"if",
"start_index",
":",
"args",
"[",
"'startIndex'",
"]",
"=",
"start_index",
"if",
"max_results",
":",
"args",
"[",
"'maxResults'",
"]",
"=",
"max_results",
"if",
"page_token",
"is",
"not",
"None",
":",
"args",
"[",
"'pageToken'",
"]",
"=",
"page_token",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"args",
"=",
"args",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Retrieves the contents of a table.
Args:
table_name: the name of the table as a tuple of components.
start_index: the index of the row at which to start retrieval.
max_results: an optional maximum number of rows to retrieve.
page_token: an optional token to continue the retrieval.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Retrieves",
"the",
"contents",
"of",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L442-L463 |
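A sketch reading one page of rows; cell values come back in BigQuery's `f`/`v` row encoding.

# Fetch the first 50 rows and flatten each row's cells into a plain list of values.
page = api.tabledata_list(table, start_index=0, max_results=50)
for row in page.get('rows', []):
  values = [cell.get('v') for cell in row['f']]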
4,984 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_delete | def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | python | def table_delete(self, table_name):
"""Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='DELETE', credentials=self._credentials,
raw_response=True) | [
"def",
"table_delete",
"(",
"self",
",",
"table_name",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'DELETE'",
",",
"credentials",
"=",
"self",
".",
"_credentials",
",",
"raw_response",
"=",
"True",
")"
] | Issues a request to delete a table.
Args:
table_name: the name of the table as a tuple of components.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Issues",
"a",
"request",
"to",
"delete",
"a",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L465-L477 |
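A one-line sketch using the same placeholders as the earlier examples:

# Drop the table created in the load sketch above.
api.table_delete(table)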
4,985 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_extract | def table_extract(self, table_name, destination, format='CSV', compress=True,
field_delimiter=',', print_header=True):
"""Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(destination, basestring):
destination = [destination]
data = {
# 'projectId': table_name.project_id, # Code sample shows this but it is not in job
# reference spec. Filed as b/19235843
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id,
},
'compression': 'GZIP' if compress else 'NONE',
'fieldDelimiter': field_delimiter,
'printHeader': print_header,
'destinationUris': destination,
'destinationFormat': format,
}
}
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | python | def table_extract(self, table_name, destination, format='CSV', compress=True,
field_delimiter=',', print_header=True):
"""Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
"""
url = Api._ENDPOINT + (Api._JOBS_PATH % (table_name.project_id, ''))
if isinstance(destination, basestring):
destination = [destination]
data = {
# 'projectId': table_name.project_id, # Code sample shows this but it is not in job
# reference spec. Filed as b/19235843
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': table_name.project_id,
'datasetId': table_name.dataset_id,
'tableId': table_name.table_id,
},
'compression': 'GZIP' if compress else 'NONE',
'fieldDelimiter': field_delimiter,
'printHeader': print_header,
'destinationUris': destination,
'destinationFormat': format,
}
}
}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials) | [
"def",
"table_extract",
"(",
"self",
",",
"table_name",
",",
"destination",
",",
"format",
"=",
"'CSV'",
",",
"compress",
"=",
"True",
",",
"field_delimiter",
"=",
"','",
",",
"print_header",
"=",
"True",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_JOBS_PATH",
"%",
"(",
"table_name",
".",
"project_id",
",",
"''",
")",
")",
"if",
"isinstance",
"(",
"destination",
",",
"basestring",
")",
":",
"destination",
"=",
"[",
"destination",
"]",
"data",
"=",
"{",
"# 'projectId': table_name.project_id, # Code sample shows this but it is not in job",
"# reference spec. Filed as b/19235843",
"'kind'",
":",
"'bigquery#job'",
",",
"'configuration'",
":",
"{",
"'extract'",
":",
"{",
"'sourceTable'",
":",
"{",
"'projectId'",
":",
"table_name",
".",
"project_id",
",",
"'datasetId'",
":",
"table_name",
".",
"dataset_id",
",",
"'tableId'",
":",
"table_name",
".",
"table_id",
",",
"}",
",",
"'compression'",
":",
"'GZIP'",
"if",
"compress",
"else",
"'NONE'",
",",
"'fieldDelimiter'",
":",
"field_delimiter",
",",
"'printHeader'",
":",
"print_header",
",",
"'destinationUris'",
":",
"destination",
",",
"'destinationFormat'",
":",
"format",
",",
"}",
"}",
"}",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Exports the table to GCS.
Args:
table_name: the name of the table as a tuple of components.
destination: the destination URI(s). Can be a single URI or a list.
format: the format to use for the exported data; one of CSV, NEWLINE_DELIMITED_JSON or AVRO.
Defaults to CSV.
compress: whether to compress the data on export. Compression is not supported for
AVRO format. Defaults to True.
field_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
print_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | [
"Exports",
"the",
"table",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L479-L519 |
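A sketch exporting to sharded, compressed CSV in GCS; the wildcard destination path is illustrative.

# Export the table as gzip-compressed CSV shards with a header row.
api.table_extract(table, 'gs://my-bucket/exports/events-*.csv.gz',
                  format='CSV', compress=True, print_header=True)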
4,986 | googledatalab/pydatalab | datalab/bigquery/_api.py | Api.table_update | def table_update(self, table_name, table_info):
"""Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='PUT', data=table_info,
credentials=self._credentials) | python | def table_update(self, table_name, table_info):
"""Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields.
"""
url = Api._ENDPOINT + (Api._TABLES_PATH % table_name)
return datalab.utils.Http.request(url, method='PUT', data=table_info,
credentials=self._credentials) | [
"def",
"table_update",
"(",
"self",
",",
"table_name",
",",
"table_info",
")",
":",
"url",
"=",
"Api",
".",
"_ENDPOINT",
"+",
"(",
"Api",
".",
"_TABLES_PATH",
"%",
"table_name",
")",
"return",
"datalab",
".",
"utils",
".",
"Http",
".",
"request",
"(",
"url",
",",
"method",
"=",
"'PUT'",
",",
"data",
"=",
"table_info",
",",
"credentials",
"=",
"self",
".",
"_credentials",
")"
] | Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields. | [
"Updates",
"the",
"Table",
"info",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L521-L530 |
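The same read-modify-write pattern as `datasets_update`; here a sketch setting an expiration time (the timestamp value is illustrative).

# Fetch the table resource, set an expiration timestamp (ms since epoch), and PUT it back.
info = api.tables_get(table)
info['expirationTime'] = 1735689600000
api.table_update(table, info)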
4,987 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_archive.py | extract_archive | def extract_archive(archive_path, dest):
"""Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to
"""
# Make the dest folder if it does not exist
if not os.path.isdir(dest):
os.makedirs(dest)
try:
tmpfolder = None
if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
raise ValueError('archive path %s is not a file' % archive_path)
if archive_path.startswith('gs://'):
# Copy the file to a local temp folder
tmpfolder = tempfile.mkdtemp()
cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
_shell_process.run_and_monitor(cmd_args, os.getpid())
archive_path = os.path.join(tmpfolder, os.path.basename(archive_path))
if archive_path.lower().endswith('.tar.gz'):
flags = '-xzf'
elif archive_path.lower().endswith('.tar'):
flags = '-xf'
else:
raise ValueError('Only tar.gz or tar files are supported.')
cmd_args = ['tar', flags, archive_path, '-C', dest]
_shell_process.run_and_monitor(cmd_args, os.getpid())
finally:
if tmpfolder:
shutil.rmtree(tmpfolder) | python | def extract_archive(archive_path, dest):
"""Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to
"""
# Make the dest folder if it does not exist
if not os.path.isdir(dest):
os.makedirs(dest)
try:
tmpfolder = None
if (not tf.gfile.Exists(archive_path)) or tf.gfile.IsDirectory(archive_path):
raise ValueError('archive path %s is not a file' % archive_path)
if archive_path.startswith('gs://'):
# Copy the file to a local temp folder
tmpfolder = tempfile.mkdtemp()
cmd_args = ['gsutil', 'cp', archive_path, tmpfolder]
_shell_process.run_and_monitor(cmd_args, os.getpid())
archive_path = os.path.join(tmpfolder, os.path.basename(archive_path))
if archive_path.lower().endswith('.tar.gz'):
flags = '-xzf'
elif archive_path.lower().endswith('.tar'):
flags = '-xf'
else:
raise ValueError('Only tar.gz or tar files are supported.')
cmd_args = ['tar', flags, archive_path, '-C', dest]
_shell_process.run_and_monitor(cmd_args, os.getpid())
finally:
if tmpfolder:
shutil.rmtree(tmpfolder) | [
"def",
"extract_archive",
"(",
"archive_path",
",",
"dest",
")",
":",
"# Make the dest folder if it does not exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest",
")",
":",
"os",
".",
"makedirs",
"(",
"dest",
")",
"try",
":",
"tmpfolder",
"=",
"None",
"if",
"(",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"archive_path",
")",
")",
"or",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"archive_path",
")",
":",
"raise",
"ValueError",
"(",
"'archive path %s is not a file'",
"%",
"archive_path",
")",
"if",
"archive_path",
".",
"startswith",
"(",
"'gs://'",
")",
":",
"# Copy the file to a local temp folder",
"tmpfolder",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"cmd_args",
"=",
"[",
"'gsutil'",
",",
"'cp'",
",",
"archive_path",
",",
"tmpfolder",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"archive_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmpfolder",
",",
"os",
".",
"path",
".",
"name",
"(",
"archive_path",
")",
")",
"if",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar.gz'",
")",
":",
"flags",
"=",
"'-xzf'",
"elif",
"archive_path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.tar'",
")",
":",
"flags",
"=",
"'-xf'",
"else",
":",
"raise",
"ValueError",
"(",
"'Only tar.gz or tar.Z files are supported.'",
")",
"cmd_args",
"=",
"[",
"'tar'",
",",
"flags",
",",
"archive_path",
",",
"'-C'",
",",
"dest",
"]",
"_shell_process",
".",
"run_and_monitor",
"(",
"cmd_args",
",",
"os",
".",
"getpid",
"(",
")",
")",
"finally",
":",
"if",
"tmpfolder",
":",
"shutil",
".",
"rmtree",
"(",
"tmpfolder",
")"
] | Extract a local or GCS archive file to a folder.
Args:
archive_path: local or gcs path to a *.tar.gz or *.tar file
dest: local folder the archive will be extracted to | [
"Extract",
"a",
"local",
"or",
"GCS",
"archive",
"file",
"to",
"a",
"folder",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_archive.py#L27-L62 |
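A one-call sketch; the GCS path and destination folder are made up.

# Download and unpack an exported model tarball into a local working directory.
extract_archive('gs://my-bucket/models/model.tar.gz', '/tmp/model')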
4,988 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_cloud.py | Cloud.preprocess | def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results) | python | def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results) | [
"def",
"preprocess",
"(",
"train_dataset",
",",
"output_dir",
",",
"eval_dataset",
",",
"checkpoint",
",",
"pipeline_option",
")",
":",
"import",
"apache_beam",
"as",
"beam",
"import",
"google",
".",
"datalab",
".",
"utils",
"from",
".",
"import",
"_preprocess",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"job_name",
"=",
"(",
"'preprocess-image-classification-'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d-%H%M%S'",
")",
")",
"staging_package_url",
"=",
"_util",
".",
"repackage_to_staging",
"(",
"output_dir",
")",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"# suppress DataFlow warnings about wheel package as extra package.",
"original_level",
"=",
"logging",
".",
"getLogger",
"(",
")",
".",
"getEffectiveLevel",
"(",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"logging",
".",
"ERROR",
")",
"try",
":",
"# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.",
"# Remove when the issue is fixed and new version of DataFlow is included in Datalab.",
"extra_packages",
"=",
"[",
"staging_package_url",
",",
"_TF_GS_URL",
",",
"_PROTOBUF_GS_URL",
"]",
"local_packages",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"tmpdir",
",",
"os",
".",
"path",
".",
"basename",
"(",
"p",
")",
")",
"for",
"p",
"in",
"extra_packages",
"]",
"for",
"source",
",",
"dest",
"in",
"zip",
"(",
"extra_packages",
",",
"local_packages",
")",
":",
"file_io",
".",
"copy",
"(",
"source",
",",
"dest",
",",
"overwrite",
"=",
"True",
")",
"options",
"=",
"{",
"'staging_location'",
":",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'tmp'",
",",
"'staging'",
")",
",",
"'temp_location'",
":",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'tmp'",
")",
",",
"'job_name'",
":",
"job_name",
",",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"'extra_packages'",
":",
"local_packages",
",",
"'teardown_policy'",
":",
"'TEARDOWN_ALWAYS'",
",",
"'no_save_main_session'",
":",
"True",
"}",
"if",
"pipeline_option",
"is",
"not",
"None",
":",
"options",
".",
"update",
"(",
"pipeline_option",
")",
"opts",
"=",
"beam",
".",
"pipeline",
".",
"PipelineOptions",
"(",
"flags",
"=",
"[",
"]",
",",
"*",
"*",
"options",
")",
"p",
"=",
"beam",
".",
"Pipeline",
"(",
"'DataflowRunner'",
",",
"options",
"=",
"opts",
")",
"_preprocess",
".",
"configure_pipeline",
"(",
"p",
",",
"train_dataset",
",",
"eval_dataset",
",",
"checkpoint",
",",
"output_dir",
",",
"job_name",
")",
"job_results",
"=",
"p",
".",
"run",
"(",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmpdir",
")",
"logging",
".",
"getLogger",
"(",
")",
".",
"setLevel",
"(",
"original_level",
")",
"if",
"(",
"_util",
".",
"is_in_IPython",
"(",
")",
")",
":",
"import",
"IPython",
"dataflow_url",
"=",
"'https://console.developers.google.com/dataflow?project=%s'",
"%",
"_util",
".",
"default_project",
"(",
")",
"html",
"=",
"'Job \"%s\" submitted.'",
"%",
"job_name",
"html",
"+=",
"'<p>Click <a href=\"%s\" target=\"_blank\">here</a> to track preprocessing job. <br/>'",
"%",
"dataflow_url",
"IPython",
".",
"display",
".",
"display_html",
"(",
"html",
",",
"raw",
"=",
"True",
")",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"DataflowJob",
"(",
"job_results",
")"
] | Preprocess data in Cloud with DataFlow. | [
"Preprocess",
"data",
"in",
"Cloud",
"with",
"DataFlow",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_cloud.py#L40-L96 |
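A minimal usage sketch for the preprocess entry point in the row above. The argument order (train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option) is taken from this row's tokens; exposing it as Cloud.preprocess, the CsvDataSet helper, and the GCS paths are assumptions or placeholders. Passing checkpoint=None falls back to the package's default checkpoint, per the code above.

import google.datalab.ml as ml
from mltoolbox.image.classification import _cloud

# Datasets of image URLs plus labels; file patterns and schema are placeholders.
train_set = ml.CsvDataSet('gs://my-bucket/train.csv', schema='image_url:STRING,label:STRING')
eval_set = ml.CsvDataSet('gs://my-bucket/eval.csv', schema='image_url:STRING,label:STRING')

# Submits the Dataflow preprocessing job and returns a DataflowJob wrapper.
job = _cloud.Cloud.preprocess(train_set, 'gs://my-bucket/preproc-out', eval_set, None, None)

If preprocess turns out to be a module-level function rather than a Cloud static method, the call would be _cloud.preprocess(...) with the same arguments.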
4,989 | googledatalab/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_cloud.py | Cloud.train | def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
"""Train model in the cloud with CloudML trainer service."""
import google.datalab.ml as ml
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
staging_package_url = _util.repackage_to_staging(output_dir)
job_args = {
'input_dir': input_dir,
'max_steps': max_steps,
'batch_size': batch_size,
'checkpoint': checkpoint
}
job_request = {
'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox.image.classification.task',
'job_dir': output_dir,
'args': job_args
}
job_request.update(dict(cloud_train_config._asdict()))
job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_id)
if (_util.is_in_IPython()):
import IPython
log_url_query_strings = {
'project': _util.default_project(),
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
return job | python | def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
"""Train model in the cloud with CloudML trainer service."""
import google.datalab.ml as ml
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
staging_package_url = _util.repackage_to_staging(output_dir)
job_args = {
'input_dir': input_dir,
'max_steps': max_steps,
'batch_size': batch_size,
'checkpoint': checkpoint
}
job_request = {
'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox.image.classification.task',
'job_dir': output_dir,
'args': job_args
}
job_request.update(dict(cloud_train_config._asdict()))
job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_id)
if (_util.is_in_IPython()):
import IPython
log_url_query_strings = {
'project': _util.default_project(),
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
return job | [
"def",
"train",
"(",
"input_dir",
",",
"batch_size",
",",
"max_steps",
",",
"output_dir",
",",
"checkpoint",
",",
"cloud_train_config",
")",
":",
"import",
"google",
".",
"datalab",
".",
"ml",
"as",
"ml",
"if",
"checkpoint",
"is",
"None",
":",
"checkpoint",
"=",
"_util",
".",
"_DEFAULT_CHECKPOINT_GSURL",
"staging_package_url",
"=",
"_util",
".",
"repackage_to_staging",
"(",
"output_dir",
")",
"job_args",
"=",
"{",
"'input_dir'",
":",
"input_dir",
",",
"'max_steps'",
":",
"max_steps",
",",
"'batch_size'",
":",
"batch_size",
",",
"'checkpoint'",
":",
"checkpoint",
"}",
"job_request",
"=",
"{",
"'package_uris'",
":",
"[",
"staging_package_url",
",",
"_TF_GS_URL",
",",
"_PROTOBUF_GS_URL",
"]",
",",
"'python_module'",
":",
"'mltoolbox.image.classification.task'",
",",
"'job_dir'",
":",
"output_dir",
",",
"'args'",
":",
"job_args",
"}",
"job_request",
".",
"update",
"(",
"dict",
"(",
"cloud_train_config",
".",
"_asdict",
"(",
")",
")",
")",
"job_id",
"=",
"'image_classification_train_'",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%y%m%d_%H%M%S'",
")",
"job",
"=",
"ml",
".",
"Job",
".",
"submit_training",
"(",
"job_request",
",",
"job_id",
")",
"if",
"(",
"_util",
".",
"is_in_IPython",
"(",
")",
")",
":",
"import",
"IPython",
"log_url_query_strings",
"=",
"{",
"'project'",
":",
"_util",
".",
"default_project",
"(",
")",
",",
"'resource'",
":",
"'ml.googleapis.com/job_id/'",
"+",
"job",
".",
"info",
"[",
"'jobId'",
"]",
"}",
"log_url",
"=",
"'https://console.developers.google.com/logs/viewer?'",
"+",
"urllib",
".",
"urlencode",
"(",
"log_url_query_strings",
")",
"html",
"=",
"'Job \"%s\" submitted.'",
"%",
"job",
".",
"info",
"[",
"'jobId'",
"]",
"html",
"+=",
"'<p>Click <a href=\"%s\" target=\"_blank\">here</a> to view cloud log. <br/>'",
"%",
"log_url",
"IPython",
".",
"display",
".",
"display_html",
"(",
"html",
",",
"raw",
"=",
"True",
")",
"return",
"job"
] | Train model in the cloud with CloudML trainer service. | [
"Train",
"model",
"in",
"the",
"cloud",
"with",
"CloudML",
"trainer",
"service",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/image_classification/mltoolbox/image/classification/_cloud.py#L99-L132 |
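A usage sketch for Cloud.train above. The parameter names match this row; CloudTrainingConfig and its field names are assumptions about google.datalab.ml, and all GCS paths are placeholders. input_dir should point at the output of the preprocessing job shown earlier.

import google.datalab.ml as ml
from mltoolbox.image.classification import _cloud

# Cloud ML Engine settings; the config class and its fields are assumptions.
config = ml.CloudTrainingConfig(region='us-central1', scale_tier='BASIC')

job = _cloud.Cloud.train(input_dir='gs://my-bucket/preproc-out',   # placeholder
                         batch_size=64,
                         max_steps=2000,
                         output_dir='gs://my-bucket/model',        # placeholder
                         checkpoint=None,                          # package default checkpoint
                         cloud_train_config=config)
print(job.info['jobId'])   # the code above reads the job id the same way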
4,990 | googledatalab/pydatalab | google/datalab/bigquery/_query.py | Query.from_table | def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_())) | python | def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_())) | [
"def",
"from_table",
"(",
"table",
",",
"fields",
"=",
"None",
")",
":",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"'*'",
"elif",
"isinstance",
"(",
"fields",
",",
"list",
")",
":",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"return",
"Query",
"(",
"'SELECT %s FROM %s'",
"%",
"(",
"fields",
",",
"table",
".",
"_repr_sql_",
"(",
")",
")",
")"
] | Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table. | [
"Return",
"a",
"Query",
"for",
"the",
"given",
"Table",
"object"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L105-L120 |
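A short sketch of Query.from_table as documented above, assuming Table and Query are exported from the google.datalab.bigquery package; the table name is a placeholder.

import google.datalab.bigquery as bq

table = bq.Table('my_dataset.my_table')                      # placeholder name
q_all = bq.Query.from_table(table)                           # SELECT * FROM <table>
q_some = bq.Query.from_table(table, fields=['id', 'name'])   # list of field names
# fields can also be a single string that is injected after SELECT:
q_expr = bq.Query.from_table(table, fields='COUNT(*) AS n')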
4,991 | googledatalab/pydatalab | google/datalab/bigquery/_query.py | Query._expanded_sql | def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. for example. the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql | python | def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. for example. the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql | [
"def",
"_expanded_sql",
"(",
"self",
",",
"sampling",
"=",
"None",
")",
":",
"# use lists to preserve the order of subqueries, bigquery will not like listing subqueries",
"# out of order if they depend on each other. for example. the following will be rejected:",
"# WITH q2 as (SELECT * FROM q1),",
"# q1 as (SELECT * FROM mytable),",
"# SELECT * FROM q2",
"# so when we're getting the dependencies, use recursion into a list to maintain the order",
"udfs",
"=",
"[",
"]",
"subqueries",
"=",
"[",
"]",
"expanded_sql",
"=",
"''",
"def",
"_recurse_subqueries",
"(",
"query",
")",
":",
"\"\"\"Recursively scan subqueries and add their pieces to global scope udfs and subqueries\n \"\"\"",
"if",
"query",
".",
"_subqueries",
":",
"for",
"subquery",
"in",
"query",
".",
"_subqueries",
":",
"_recurse_subqueries",
"(",
"subquery",
"[",
"1",
"]",
")",
"subqueries",
".",
"extend",
"(",
"[",
"s",
"for",
"s",
"in",
"query",
".",
"_subqueries",
"if",
"s",
"not",
"in",
"subqueries",
"]",
")",
"if",
"query",
".",
"_udfs",
":",
"# query._udfs is a list of (name, UDF) tuples; we just want the UDF.",
"udfs",
".",
"extend",
"(",
"[",
"u",
"[",
"1",
"]",
"for",
"u",
"in",
"query",
".",
"_udfs",
"if",
"u",
"[",
"1",
"]",
"not",
"in",
"udfs",
"]",
")",
"_recurse_subqueries",
"(",
"self",
")",
"if",
"udfs",
":",
"expanded_sql",
"+=",
"'\\n'",
".",
"join",
"(",
"[",
"udf",
".",
"_expanded_sql",
"(",
")",
"for",
"udf",
"in",
"udfs",
"]",
")",
"expanded_sql",
"+=",
"'\\n'",
"def",
"_indent_query",
"(",
"subquery",
")",
":",
"return",
"' '",
"+",
"subquery",
".",
"_sql",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
"if",
"subqueries",
":",
"expanded_sql",
"+=",
"'WITH '",
"+",
"'\\n),\\n'",
".",
"join",
"(",
"[",
"'%s AS (\\n%s'",
"%",
"(",
"sq",
"[",
"0",
"]",
",",
"_indent_query",
"(",
"sq",
"[",
"1",
"]",
")",
")",
"for",
"sq",
"in",
"subqueries",
"]",
")",
"expanded_sql",
"+=",
"'\\n)\\n\\n'",
"expanded_sql",
"+=",
"sampling",
"(",
"self",
".",
"_sql",
")",
"if",
"sampling",
"else",
"self",
".",
"_sql",
"return",
"expanded_sql"
] | Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object | [
"Get",
"the",
"expanded",
"SQL",
"of",
"this",
"object",
"including",
"all",
"subqueries",
"UDFs",
"and",
"external",
"datasources"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L122-L167 |
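An illustration, using plain strings only, of the shape of SQL that _expanded_sql produces when subqueries are present: each dependency is emitted as an earlier WITH clause, so a subquery never references one defined after it. That is the ordering concern called out in the comments above.

expanded = (
    'WITH q1 AS (\n'
    '  SELECT * FROM mytable\n'
    '),\n'
    'q2 AS (\n'
    '  SELECT * FROM q1\n'
    ')\n\n'
    'SELECT * FROM q2'
)
print(expanded)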
4,992 | googledatalab/pydatalab | google/datalab/contrib/mlworkbench/_shell_process.py | run_and_monitor | def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
""" Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start.
"""
monitor_process = None
try:
p = subprocess.Popen(args,
cwd=cwd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pids_to_kill = [p.pid]
script = ('import %s;%s._wait_and_kill(%s, %s)' %
(__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
while p.poll() is None:
line = p.stdout.readline()
if not six.PY2:
line = line.decode()
if std_out_filter_fn is None or std_out_filter_fn(line):
sys.stdout.write(line)
# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.
finally:
if monitor_process:
monitor_process.kill() | python | def run_and_monitor(args, pid_to_wait, std_out_filter_fn=None, cwd=None):
""" Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start.
"""
monitor_process = None
try:
p = subprocess.Popen(args,
cwd=cwd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pids_to_kill = [p.pid]
script = ('import %s;%s._wait_and_kill(%s, %s)' %
(__name__, __name__, str(pid_to_wait), str(pids_to_kill)))
monitor_process = subprocess.Popen(['python', '-c', script], env=os.environ)
while p.poll() is None:
line = p.stdout.readline()
if not six.PY2:
line = line.decode()
if std_out_filter_fn is None or std_out_filter_fn(line):
sys.stdout.write(line)
# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.
finally:
if monitor_process:
monitor_process.kill() | [
"def",
"run_and_monitor",
"(",
"args",
",",
"pid_to_wait",
",",
"std_out_filter_fn",
"=",
"None",
",",
"cwd",
"=",
"None",
")",
":",
"monitor_process",
"=",
"None",
"try",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"cwd",
"=",
"cwd",
",",
"env",
"=",
"os",
".",
"environ",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"pids_to_kill",
"=",
"[",
"p",
".",
"pid",
"]",
"script",
"=",
"(",
"'import %s;%s._wait_and_kill(%s, %s)'",
"%",
"(",
"__name__",
",",
"__name__",
",",
"str",
"(",
"pid_to_wait",
")",
",",
"str",
"(",
"pids_to_kill",
")",
")",
")",
"monitor_process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'python'",
",",
"'-c'",
",",
"script",
"]",
",",
"env",
"=",
"os",
".",
"environ",
")",
"while",
"p",
".",
"poll",
"(",
")",
"is",
"None",
":",
"line",
"=",
"p",
".",
"stdout",
".",
"readline",
"(",
")",
"if",
"not",
"six",
".",
"PY2",
":",
"line",
"=",
"line",
".",
"decode",
"(",
")",
"if",
"std_out_filter_fn",
"is",
"None",
"or",
"std_out_filter_fn",
"(",
"line",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
")",
"# Cannot do sys.stdout.flush(). It appears that too many flush() calls will hang browser.",
"finally",
":",
"if",
"monitor_process",
":",
"monitor_process",
".",
"kill",
"(",
")"
] | Start a process, and have it depend on another specified process.
Args:
args: the args of the process to start and monitor.
pid_to_wait: the process to wait on. If the process ends, also kill the started process.
std_out_filter_fn: a filter function which takes a string content from the stdout of the
started process, and returns True if the string should be redirected to console stdout.
cwd: the current working directory for the process to start. | [
"Start",
"a",
"process",
"and",
"have",
"it",
"depend",
"on",
"another",
"specified",
"process",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_shell_process.py#L43-L77 |
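A usage sketch for run_and_monitor above, using only the documented signature: the child process is killed if the process identified by pid_to_wait exits, and only stdout lines accepted by the filter are echoed. The command and the filter function are illustrative.

import os
from google.datalab.contrib.mlworkbench import _shell_process

def warnings_and_errors(line):
    # Only these lines get echoed to the console.
    return 'WARNING' in line or 'ERROR' in line

# Tie the child's lifetime to the current (e.g. notebook kernel) process.
_shell_process.run_and_monitor(['python', '-c', 'print("child done")'],
                               pid_to_wait=os.getpid(),
                               std_out_filter_fn=warnings_and_errors)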
4,993 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.created_on | def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp) | python | def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"created_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'creationTime'",
")",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The creation timestamp. | [
"The",
"creation",
"timestamp",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L56-L59 |
4,994 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.expires_on | def expires_on(self):
"""The timestamp for when the table will expire, or None if unknown."""
timestamp = self._info.get('expirationTime', None)
if timestamp is None:
return None
return _parser.Parser.parse_timestamp(timestamp) | python | def expires_on(self):
"""The timestamp for when the table will expire, or None if unknown."""
timestamp = self._info.get('expirationTime', None)
if timestamp is None:
return None
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"expires_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'expirationTime'",
",",
"None",
")",
"if",
"timestamp",
"is",
"None",
":",
"return",
"None",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The timestamp for when the table will expire, or None if unknown. | [
"The",
"timestamp",
"for",
"when",
"the",
"table",
"will",
"expire",
"or",
"None",
"if",
"unknown",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L67-L72 |
4,995 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | TableMetadata.modified_on | def modified_on(self):
"""The timestamp for when the table was last modified."""
timestamp = self._info.get('lastModifiedTime')
return _parser.Parser.parse_timestamp(timestamp) | python | def modified_on(self):
"""The timestamp for when the table was last modified."""
timestamp = self._info.get('lastModifiedTime')
return _parser.Parser.parse_timestamp(timestamp) | [
"def",
"modified_on",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"_info",
".",
"get",
"(",
"'lastModifiedTime'",
")",
"return",
"_parser",
".",
"Parser",
".",
"parse_timestamp",
"(",
"timestamp",
")"
] | The timestamp for when the table was last modified. | [
"The",
"timestamp",
"for",
"when",
"the",
"table",
"was",
"last",
"modified",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L80-L83 |
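A sketch covering the three timestamp properties above (created_on, expires_on, modified_on). It assumes a Table exposes its TableMetadata through a .metadata property; that property name is an assumption about the public API, and the table name is a placeholder.

import google.datalab.bigquery as bq

meta = bq.Table('my_dataset.my_table').metadata   # .metadata is assumed
print('created: ', meta.created_on)
print('modified:', meta.modified_on)
print('expires: ', meta.expires_on)               # None if no expiration is set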
4,996 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table._load_info | def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e | python | def _load_info(self):
"""Loads metadata about this table."""
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e | [
"def",
"_load_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Loads metadata about this table. | [
"Loads",
"metadata",
"about",
"this",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L161-L167 |
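A standalone sketch of the lazy-load pattern _load_info implements above: fetch the metadata once, cache it on the instance, and reuse it afterwards. This is an illustration only, not the datalab class itself.

class LazyInfo(object):
    def __init__(self, fetch):
        self._fetch = fetch   # callable that performs the API request
        self._info = None

    def info(self):
        if self._info is None:          # only call the API on first access
            self._info = self._fetch()
        return self._info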
4,997 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.exists | def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True | python | def exists(self):
"""Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table.
"""
try:
info = self._api.tables_get(self._name_parts)
except google.datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
self._info = info
return True | [
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"info",
"=",
"self",
".",
"_api",
".",
"tables_get",
"(",
"self",
".",
"_name_parts",
")",
"except",
"google",
".",
"datalab",
".",
"utils",
".",
"RequestException",
"as",
"e",
":",
"if",
"e",
".",
"status",
"==",
"404",
":",
"return",
"False",
"raise",
"e",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"self",
".",
"_info",
"=",
"info",
"return",
"True"
] | Checks if the table exists.
Returns:
True if the table exists; False otherwise.
Raises:
Exception if there was an error requesting information about the table. | [
"Checks",
"if",
"the",
"table",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L181-L198 |
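A sketch of Table.exists as documented above: it returns False on a 404 and re-raises any other request error. The table name is a placeholder.

import google.datalab.bigquery as bq

t = bq.Table('my_dataset.my_table')     # placeholder name
if not t.exists():
    print('table does not exist yet')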
4,998 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.delete | def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists() | python | def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists() | [
"def",
"delete",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"table_delete",
"(",
"self",
".",
"_name_parts",
")",
"except",
"google",
".",
"datalab",
".",
"utils",
".",
"RequestException",
":",
"# TODO(gram): May want to check the error reasons here and if it is not",
"# because the file didn't exist, return an error.",
"pass",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"return",
"not",
"self",
".",
"exists",
"(",
")"
] | Delete the table.
Returns:
True if the Table no longer exists; False otherwise. | [
"Delete",
"the",
"table",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L209-L223 |
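A sketch of Table.delete per the docstring above: it does not raise if the table was already gone, and it reports whether the table still exists afterwards.

import google.datalab.bigquery as bq

t = bq.Table('my_dataset.temp_results')   # placeholder name
deleted = t.delete()
print('deleted' if deleted else 'table still exists')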
4,999 | googledatalab/pydatalab | google/datalab/bigquery/_table.py | Table.create | def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and truncate was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name) | python | def create(self, schema, overwrite=False):
""" Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and truncate was False.
"""
if overwrite and self.exists():
self.delete()
if not isinstance(schema, _schema.Schema):
# Convert to a Schema object
schema = _schema.Schema(schema)
try:
response = self._api.tables_insert(self._name_parts, schema=schema._bq_schema)
except Exception as e:
raise e
if 'selfLink' in response:
self._schema = schema
return self
raise Exception("Table %s could not be created as it already exists" % self._full_name) | [
"def",
"create",
"(",
"self",
",",
"schema",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"overwrite",
"and",
"self",
".",
"exists",
"(",
")",
":",
"self",
".",
"delete",
"(",
")",
"if",
"not",
"isinstance",
"(",
"schema",
",",
"_schema",
".",
"Schema",
")",
":",
"# Convert to a Schema object",
"schema",
"=",
"_schema",
".",
"Schema",
"(",
"schema",
")",
"try",
":",
"response",
"=",
"self",
".",
"_api",
".",
"tables_insert",
"(",
"self",
".",
"_name_parts",
",",
"schema",
"=",
"schema",
".",
"_bq_schema",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"'selfLink'",
"in",
"response",
":",
"self",
".",
"_schema",
"=",
"schema",
"return",
"self",
"raise",
"Exception",
"(",
"\"Table %s could not be created as it already exists\"",
"%",
"self",
".",
"_full_name",
")"
] | Create the table with the specified schema.
Args:
schema: the schema to use to create the table. Should be a list of dictionaries, each
containing at least a pair of entries, 'name' and 'type'.
See https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
overwrite: if True, delete the table first if it exists. If False and the table exists,
creation will fail and raise an Exception.
Returns:
The Table instance.
Raises:
Exception if the table couldn't be created or already exists and truncate was False. | [
"Create",
"the",
"table",
"with",
"the",
"specified",
"schema",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L225-L251 |
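A sketch of Table.create per the signature above: the schema is a list of dicts with at least 'name' and 'type' entries, and overwrite=True deletes any existing table first. Because create returns the Table instance, the call can be chained; the dataset/table name is a placeholder.

import google.datalab.bigquery as bq

schema = [
    {'name': 'id', 'type': 'INTEGER'},
    {'name': 'payload', 'type': 'STRING'},
]
t = bq.Table('my_dataset.events').create(schema, overwrite=True)   # placeholder name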