id (int32, 0 to 252k) | repo (string, len 7 to 55) | path (string, len 4 to 127) | func_name (string, len 1 to 88) | original_string (string, len 75 to 19.8k) | language (string, 1 class) | code (string, len 75 to 19.8k) | code_tokens (sequence) | docstring (string, len 3 to 17.3k) | docstring_tokens (sequence) | sha (string, len 40) | url (string, len 87 to 242) |
---|---|---|---|---|---|---|---|---|---|---|---|
5,100 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _get_table | def _get_table(name):
""" Given a variable or table name, get a Table if it exists.
Args:
name: the name of the Table or a variable referencing the Table.
Returns:
The Table, if found.
"""
# If name is a variable referencing a table, use that.
item = google.datalab.utils.commands.get_notebook_item(name)
if isinstance(item, bigquery.Table):
return item
# Else treat this as a BQ table name and return the (cached) table if it exists.
try:
return _existing_table_cache[name]
except KeyError:
table = bigquery.Table(name)
if table.exists():
_existing_table_cache[name] = table
return table
return None | python | def _get_table(name):
""" Given a variable or table name, get a Table if it exists.
Args:
name: the name of the Table or a variable referencing the Table.
Returns:
The Table, if found.
"""
# If name is a variable referencing a table, use that.
item = google.datalab.utils.commands.get_notebook_item(name)
if isinstance(item, bigquery.Table):
return item
# Else treat this as a BQ table name and return the (cached) table if it exists.
try:
return _existing_table_cache[name]
except KeyError:
table = bigquery.Table(name)
if table.exists():
_existing_table_cache[name] = table
return table
return None | [
"def",
"_get_table",
"(",
"name",
")",
":",
"# If name is a variable referencing a table, use that.",
"item",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"name",
")",
"if",
"isinstance",
"(",
"item",
",",
"bigquery",
".",
"Table",
")",
":",
"return",
"item",
"# Else treat this as a BQ table name and return the (cached) table if it exists.",
"try",
":",
"return",
"_existing_table_cache",
"[",
"name",
"]",
"except",
"KeyError",
":",
"table",
"=",
"bigquery",
".",
"Table",
"(",
"name",
")",
"if",
"table",
".",
"exists",
"(",
")",
":",
"_existing_table_cache",
"[",
"name",
"]",
"=",
"table",
"return",
"table",
"return",
"None"
] | Given a variable or table name, get a Table if it exists.
Args:
name: the name of the Table or a variable referencing the Table.
Returns:
The Table, if found. | [
"Given",
"a",
"variable",
"or",
"table",
"name",
"get",
"a",
"Table",
"if",
"it",
"exists",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L622-L642 |
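The lookup in `_get_table` above falls back to a module-level `_existing_table_cache` dict and only populates it once `Table.exists()` confirms the table. A minimal standalone sketch of that check-then-cache pattern; the `_FakeTable` class and `_KNOWN` set are stand-ins for `bigquery.Table` and the real existence check, only the caching pattern itself is taken from the code above.

```python
# Standalone sketch of the try/except-KeyError caching used by _get_table.
_existing_cache = {}
_KNOWN = {'proj.dataset.users'}   # pretend these tables exist in BigQuery


class _FakeTable(object):
    def __init__(self, name):
        self.name = name

    def exists(self):
        return self.name in _KNOWN


def get_cached_table(name):
    try:
        return _existing_cache[name]
    except KeyError:
        table = _FakeTable(name)
        if table.exists():
            _existing_cache[name] = table
            return table
    return None


print(get_cached_table('proj.dataset.users'))    # cached on first hit
print(get_cached_table('proj.dataset.missing'))  # None, never cached
```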
5,101 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _render_list | def _render_list(data):
""" Helper to render a list of objects as an HTML list object. """
return IPython.core.display.HTML(google.datalab.utils.commands.HtmlBuilder.render_list(data)) | python | def _render_list(data):
""" Helper to render a list of objects as an HTML list object. """
return IPython.core.display.HTML(google.datalab.utils.commands.HtmlBuilder.render_list(data)) | [
"def",
"_render_list",
"(",
"data",
")",
":",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"HtmlBuilder",
".",
"render_list",
"(",
"data",
")",
")"
] | Helper to render a list of objects as an HTML list object. | [
"Helper",
"to",
"render",
"a",
"list",
"of",
"objects",
"as",
"an",
"HTML",
"list",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L645-L647 |
5,102 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _dataset_line | def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) | python | def _dataset_line(args):
"""Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
context = google.datalab.Context.default()
if args['project']:
context = google.datalab.Context(args['project'], context.credentials)
return _render_list([str(dataset) for dataset in bigquery.Datasets(context)
if fnmatch.fnmatch(str(dataset), filter_)])
elif args['command'] == 'create':
try:
bigquery.Dataset(args['name']).create(friendly_name=args['friendly'])
except Exception as e:
print('Failed to create dataset %s: %s' % (args['name'], e))
elif args['command'] == 'delete':
try:
bigquery.Dataset(args['name']).delete()
except Exception as e:
print('Failed to delete dataset %s: %s' % (args['name'], e)) | [
"def",
"_dataset_line",
"(",
"args",
")",
":",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'list'",
":",
"filter_",
"=",
"args",
"[",
"'filter'",
"]",
"if",
"args",
"[",
"'filter'",
"]",
"else",
"'*'",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"if",
"args",
"[",
"'project'",
"]",
":",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
"(",
"args",
"[",
"'project'",
"]",
",",
"context",
".",
"credentials",
")",
"return",
"_render_list",
"(",
"[",
"str",
"(",
"dataset",
")",
"for",
"dataset",
"in",
"bigquery",
".",
"Datasets",
"(",
"context",
")",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"str",
"(",
"dataset",
")",
",",
"filter_",
")",
"]",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'create'",
":",
"try",
":",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"friendly_name",
"=",
"args",
"[",
"'friendly'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'delete'",
":",
"try",
":",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete dataset %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")"
] | Implements the BigQuery dataset magic subcommand used to operate on datasets
The supported syntax is:
%bq datasets <command> <args>
Commands:
{list, create, delete}
Args:
args: the optional arguments following '%bq datasets command'. | [
"Implements",
"the",
"BigQuery",
"dataset",
"magic",
"subcommand",
"used",
"to",
"operate",
"on",
"datasets"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L650-L680 |
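For the `list` command, `_dataset_line` applies the `filter` argument (default `*`) as a shell-style glob over the string form of each dataset. A self-contained illustration of that `fnmatch` filtering; the dataset names are made up, the real values come from `bigquery.Datasets(context)`.

```python
import fnmatch

datasets = ['myproj:sales_2023', 'myproj:sales_2024', 'myproj:staging']
filter_ = 'myproj:sales_*'

# Keep only datasets whose string form matches the glob, as in _dataset_line.
print([d for d in datasets if fnmatch.fnmatch(d, filter_)])
# ['myproj:sales_2023', 'myproj:sales_2024']
```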
5,103 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _table_cell | def _table_cell(args, cell_body):
"""Implements the BigQuery table magic subcommand used to operate on tables
The supported syntax is:
%%bq tables <command> <args>
Commands:
{list, create, delete, describe, view}
Args:
args: the optional arguments following '%%bq tables command'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The HTML rendering for the table of datasets.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
if args['dataset']:
if args['project'] is None:
datasets = [bigquery.Dataset(args['dataset'])]
else:
context = google.datalab.Context(args['project'],
google.datalab.Context.default().credentials)
datasets = [bigquery.Dataset(args['dataset'], context)]
else:
default_context = google.datalab.Context.default()
context = google.datalab.Context(default_context.project_id, default_context.credentials)
if args['project']:
context.set_project_id(args['project'])
datasets = bigquery.Datasets(context)
tables = []
for dataset in datasets:
tables.extend([table.full_name
for table in dataset if fnmatch.fnmatch(table.full_name, filter_)])
return _render_list(tables)
elif args['command'] == 'create':
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e))
elif args['command'] == 'describe':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
html = _repr_html_table_schema(table.schema)
return IPython.core.display.HTML(html)
elif args['command'] == 'delete':
try:
bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e))
elif args['command'] == 'view':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
return table | python | def _table_cell(args, cell_body):
"""Implements the BigQuery table magic subcommand used to operate on tables
The supported syntax is:
%%bq tables <command> <args>
Commands:
{list, create, delete, describe, view}
Args:
args: the optional arguments following '%%bq tables command'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The HTML rendering for the table of datasets.
"""
if args['command'] == 'list':
filter_ = args['filter'] if args['filter'] else '*'
if args['dataset']:
if args['project'] is None:
datasets = [bigquery.Dataset(args['dataset'])]
else:
context = google.datalab.Context(args['project'],
google.datalab.Context.default().credentials)
datasets = [bigquery.Dataset(args['dataset'], context)]
else:
default_context = google.datalab.Context.default()
context = google.datalab.Context(default_context.project_id, default_context.credentials)
if args['project']:
context.set_project_id(args['project'])
datasets = bigquery.Datasets(context)
tables = []
for dataset in datasets:
tables.extend([table.full_name
for table in dataset if fnmatch.fnmatch(table.full_name, filter_)])
return _render_list(tables)
elif args['command'] == 'create':
if cell_body is None:
print('Failed to create %s: no schema specified' % args['name'])
else:
try:
record = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment(), as_dict=False)
jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(record['schema'])
bigquery.Table(args['name']).create(schema=schema, overwrite=args['overwrite'])
except Exception as e:
print('Failed to create table %s: %s' % (args['name'], e))
elif args['command'] == 'describe':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
html = _repr_html_table_schema(table.schema)
return IPython.core.display.HTML(html)
elif args['command'] == 'delete':
try:
bigquery.Table(args['name']).delete()
except Exception as e:
print('Failed to delete table %s: %s' % (args['name'], e))
elif args['command'] == 'view':
name = args['name']
table = _get_table(name)
if not table:
raise Exception('Could not find table %s' % name)
return table | [
"def",
"_table_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"if",
"args",
"[",
"'command'",
"]",
"==",
"'list'",
":",
"filter_",
"=",
"args",
"[",
"'filter'",
"]",
"if",
"args",
"[",
"'filter'",
"]",
"else",
"'*'",
"if",
"args",
"[",
"'dataset'",
"]",
":",
"if",
"args",
"[",
"'project'",
"]",
"is",
"None",
":",
"datasets",
"=",
"[",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'dataset'",
"]",
")",
"]",
"else",
":",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
"(",
"args",
"[",
"'project'",
"]",
",",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
".",
"credentials",
")",
"datasets",
"=",
"[",
"bigquery",
".",
"Dataset",
"(",
"args",
"[",
"'dataset'",
"]",
",",
"context",
")",
"]",
"else",
":",
"default_context",
"=",
"google",
".",
"datalab",
".",
"Context",
".",
"default",
"(",
")",
"context",
"=",
"google",
".",
"datalab",
".",
"Context",
"(",
"default_context",
".",
"project_id",
",",
"default_context",
".",
"credentials",
")",
"if",
"args",
"[",
"'project'",
"]",
":",
"context",
".",
"set_project_id",
"(",
"args",
"[",
"'project'",
"]",
")",
"datasets",
"=",
"bigquery",
".",
"Datasets",
"(",
"context",
")",
"tables",
"=",
"[",
"]",
"for",
"dataset",
"in",
"datasets",
":",
"tables",
".",
"extend",
"(",
"[",
"table",
".",
"full_name",
"for",
"table",
"in",
"dataset",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"table",
".",
"full_name",
",",
"filter_",
")",
"]",
")",
"return",
"_render_list",
"(",
"tables",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'create'",
":",
"if",
"cell_body",
"is",
"None",
":",
"print",
"(",
"'Failed to create %s: no schema specified'",
"%",
"args",
"[",
"'name'",
"]",
")",
"else",
":",
"try",
":",
"record",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
",",
"as_dict",
"=",
"False",
")",
"jsonschema",
".",
"validate",
"(",
"record",
",",
"BigQuerySchema",
".",
"TABLE_SCHEMA_SCHEMA",
")",
"schema",
"=",
"bigquery",
".",
"Schema",
"(",
"record",
"[",
"'schema'",
"]",
")",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"create",
"(",
"schema",
"=",
"schema",
",",
"overwrite",
"=",
"args",
"[",
"'overwrite'",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to create table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'describe'",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"not",
"table",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"name",
")",
"html",
"=",
"_repr_html_table_schema",
"(",
"table",
".",
"schema",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'delete'",
":",
"try",
":",
"bigquery",
".",
"Table",
"(",
"args",
"[",
"'name'",
"]",
")",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Failed to delete table %s: %s'",
"%",
"(",
"args",
"[",
"'name'",
"]",
",",
"e",
")",
")",
"elif",
"args",
"[",
"'command'",
"]",
"==",
"'view'",
":",
"name",
"=",
"args",
"[",
"'name'",
"]",
"table",
"=",
"_get_table",
"(",
"name",
")",
"if",
"not",
"table",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"name",
")",
"return",
"table"
] | Implements the BigQuery table magic subcommand used to operate on tables
The supported syntax is:
%%bq tables <command> <args>
Commands:
{list, create, delete, describe, view}
Args:
args: the optional arguments following '%%bq tables command'.
cell_body: optional contents of the cell interpreted as SQL, YAML or JSON.
Returns:
The HTML rendering for the table of datasets. | [
"Implements",
"the",
"BigQuery",
"table",
"magic",
"subcommand",
"used",
"to",
"operate",
"on",
"tables"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L683-L754 |
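For `create`, the cell body is parsed with `parse_config` (so YAML or JSON both work per the docstring) and must carry a top-level `schema` entry, which is validated and then handed to `bigquery.Schema`. A hedged sketch of what such a cell body could look like; the per-field `name`/`type` spec is an assumption based on standard BigQuery schemas, not taken from `TABLE_SCHEMA_SCHEMA` itself.

```python
# Hypothetical `%%bq tables create` cell body, written here as a Python string.
cell_body = """
schema:
  - name: id
    type: INTEGER
  - name: title
    type: STRING
  - name: published
    type: TIMESTAMP
"""
```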
5,104 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _extract_cell | def _extract_cell(args, cell_body):
"""Implements the BigQuery extract magic used to extract query or table data to GCS.
The supported syntax is:
%bq extract <args>
Args:
args: the arguments following '%bigquery extract'.
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters')
if args['table']:
table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
source = _get_table(table)
if not source:
raise Exception('Could not find table %s' % table)
csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter,
csv_header=args['header'], compress=args['compress'])
elif args['query'] or args['view']:
source_name = args['view'] or args['query']
source = google.datalab.utils.commands.get_notebook_item(source_name)
if not source:
raise Exception('Could not find ' +
('view ' + args['view'] if args['view'] else 'query ' + args['query']))
query = source if args['query'] else bigquery.Query.from_view(source)
query_params = get_query_parameters(args, cell_body) if args['query'] else None
output_options = QueryOutput.file(path=args['path'], format=args['format'],
csv_delimiter=args['delimiter'],
csv_header=args['header'], compress=args['compress'],
use_cache=not args['nocache'])
context = google.datalab.utils._utils._construct_context_for_args(args)
job = query.execute(output_options, context=context, query_params=query_params)
else:
raise Exception('A query, table, or view is needed to extract')
if job.failed:
raise Exception('Extract failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Extract completed with errors: %s' % str(job.errors))
return job.result() | python | def _extract_cell(args, cell_body):
"""Implements the BigQuery extract magic used to extract query or table data to GCS.
The supported syntax is:
%bq extract <args>
Args:
args: the arguments following '%bigquery extract'.
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters')
if args['table']:
table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
source = _get_table(table)
if not source:
raise Exception('Could not find table %s' % table)
csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter,
csv_header=args['header'], compress=args['compress'])
elif args['query'] or args['view']:
source_name = args['view'] or args['query']
source = google.datalab.utils.commands.get_notebook_item(source_name)
if not source:
raise Exception('Could not find ' +
('view ' + args['view'] if args['view'] else 'query ' + args['query']))
query = source if args['query'] else bigquery.Query.from_view(source)
query_params = get_query_parameters(args, cell_body) if args['query'] else None
output_options = QueryOutput.file(path=args['path'], format=args['format'],
csv_delimiter=args['delimiter'],
csv_header=args['header'], compress=args['compress'],
use_cache=not args['nocache'])
context = google.datalab.utils._utils._construct_context_for_args(args)
job = query.execute(output_options, context=context, query_params=query_params)
else:
raise Exception('A query, table, or view is needed to extract')
if job.failed:
raise Exception('Extract failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Extract completed with errors: %s' % str(job.errors))
return job.result() | [
"def",
"_extract_cell",
"(",
"args",
",",
"cell_body",
")",
":",
"env",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"notebook_environment",
"(",
")",
"config",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"parse_config",
"(",
"cell_body",
",",
"env",
",",
"False",
")",
"or",
"{",
"}",
"parameters",
"=",
"config",
".",
"get",
"(",
"'parameters'",
")",
"if",
"args",
"[",
"'table'",
"]",
":",
"table",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Query",
".",
"resolve_parameters",
"(",
"args",
"[",
"'table'",
"]",
",",
"parameters",
")",
"source",
"=",
"_get_table",
"(",
"table",
")",
"if",
"not",
"source",
":",
"raise",
"Exception",
"(",
"'Could not find table %s'",
"%",
"table",
")",
"csv_delimiter",
"=",
"args",
"[",
"'delimiter'",
"]",
"if",
"args",
"[",
"'format'",
"]",
"==",
"'csv'",
"else",
"None",
"path",
"=",
"google",
".",
"datalab",
".",
"bigquery",
".",
"Query",
".",
"resolve_parameters",
"(",
"args",
"[",
"'path'",
"]",
",",
"parameters",
")",
"job",
"=",
"source",
".",
"extract",
"(",
"path",
",",
"format",
"=",
"args",
"[",
"'format'",
"]",
",",
"csv_delimiter",
"=",
"csv_delimiter",
",",
"csv_header",
"=",
"args",
"[",
"'header'",
"]",
",",
"compress",
"=",
"args",
"[",
"'compress'",
"]",
")",
"elif",
"args",
"[",
"'query'",
"]",
"or",
"args",
"[",
"'view'",
"]",
":",
"source_name",
"=",
"args",
"[",
"'view'",
"]",
"or",
"args",
"[",
"'query'",
"]",
"source",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_notebook_item",
"(",
"source_name",
")",
"if",
"not",
"source",
":",
"raise",
"Exception",
"(",
"'Could not find '",
"+",
"(",
"'view '",
"+",
"args",
"[",
"'view'",
"]",
"if",
"args",
"[",
"'view'",
"]",
"else",
"'query '",
"+",
"args",
"[",
"'query'",
"]",
")",
")",
"query",
"=",
"source",
"if",
"args",
"[",
"'query'",
"]",
"else",
"bigquery",
".",
"Query",
".",
"from_view",
"(",
"source",
")",
"query_params",
"=",
"get_query_parameters",
"(",
"args",
",",
"cell_body",
")",
"if",
"args",
"[",
"'query'",
"]",
"else",
"None",
"output_options",
"=",
"QueryOutput",
".",
"file",
"(",
"path",
"=",
"args",
"[",
"'path'",
"]",
",",
"format",
"=",
"args",
"[",
"'format'",
"]",
",",
"csv_delimiter",
"=",
"args",
"[",
"'delimiter'",
"]",
",",
"csv_header",
"=",
"args",
"[",
"'header'",
"]",
",",
"compress",
"=",
"args",
"[",
"'compress'",
"]",
",",
"use_cache",
"=",
"not",
"args",
"[",
"'nocache'",
"]",
")",
"context",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"_utils",
".",
"_construct_context_for_args",
"(",
"args",
")",
"job",
"=",
"query",
".",
"execute",
"(",
"output_options",
",",
"context",
"=",
"context",
",",
"query_params",
"=",
"query_params",
")",
"else",
":",
"raise",
"Exception",
"(",
"'A query, table, or view is needed to extract'",
")",
"if",
"job",
".",
"failed",
":",
"raise",
"Exception",
"(",
"'Extract failed: %s'",
"%",
"str",
"(",
"job",
".",
"fatal_error",
")",
")",
"elif",
"job",
".",
"errors",
":",
"raise",
"Exception",
"(",
"'Extract completed with errors: %s'",
"%",
"str",
"(",
"job",
".",
"errors",
")",
")",
"return",
"job",
".",
"result",
"(",
")"
] | Implements the BigQuery extract magic used to extract query or table data to GCS.
The supported syntax is:
%bq extract <args>
Args:
args: the arguments following '%bigquery extract'. | [
"Implements",
"the",
"BigQuery",
"extract",
"magic",
"used",
"to",
"extract",
"query",
"or",
"table",
"data",
"to",
"GCS",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L757-L802 |

5,105 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | bq | def bq(line, cell=None):
"""Implements the bq cell magic for ipython notebooks.
The supported syntax is:
%%bq <command> [<args>]
<cell>
or:
%bq <command> [<args>]
Use %bq --help for a list of commands, or %bq <command> --help for help
on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser) | python | def bq(line, cell=None):
"""Implements the bq cell magic for ipython notebooks.
The supported syntax is:
%%bq <command> [<args>]
<cell>
or:
%bq <command> [<args>]
Use %bq --help for a list of commands, or %bq <command> --help for help
on a specific command.
"""
return google.datalab.utils.commands.handle_magic_line(line, cell, _bigquery_parser) | [
"def",
"bq",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"return",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"handle_magic_line",
"(",
"line",
",",
"cell",
",",
"_bigquery_parser",
")"
] | Implements the bq cell magic for ipython notebooks.
The supported syntax is:
%%bq <command> [<args>]
<cell>
or:
%bq <command> [<args>]
Use %bq --help for a list of commands, or %bq <command> --help for help
on a specific command. | [
"Implements",
"the",
"bq",
"cell",
"magic",
"for",
"ipython",
"notebooks",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L1028-L1043 |
5,106 | googledatalab/pydatalab | google/datalab/bigquery/commands/_bigquery.py | _table_viewer | def _table_viewer(table, rows_per_page=25, fields=None):
""" Return a table viewer.
This includes a static rendering of the first page of the table, that gets replaced
by the charting code in environments where Javascript is executable and BQ is available.
Args:
table: the table to view.
rows_per_page: how many rows to display at one time.
fields: an array of field names to display; default is None which uses the full schema.
Returns:
A string containing the HTML for the table viewer.
"""
# TODO(gram): rework this to use google.datalab.utils.commands.chart_html
if not table.exists():
raise Exception('Table %s does not exist' % table.full_name)
if not table.is_listable():
return "Done"
_HTML_TEMPLATE = u"""
<div class="bqtv" id="{div_id}">{static_table}</div>
<br />{meta_data}<br />
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'],
function(charts, dom, events) {{
charts.render('gcharts', dom, events, '{chart_style}', [], {data},
{{
pageSize: {rows_per_page},
cssClassNames: {{
tableRow: 'gchart-table-row',
headerRow: 'gchart-table-headerrow',
oddTableRow: 'gchart-table-oddrow',
selectedTableRow: 'gchart-table-selectedrow',
hoverTableRow: 'gchart-table-hoverrow',
tableCell: 'gchart-table-cell',
headerCell: 'gchart-table-headercell',
rowNumberCell: 'gchart-table-rownumcell'
}}
}},
{{source_index: {source_index}, fields: '{fields}'}},
0,
{total_rows});
}}
);
</script>
"""
if fields is None:
fields = google.datalab.utils.commands.get_field_list(fields, table.schema)
div_id = google.datalab.utils.commands.Html.next_id()
meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''
meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id)
if table.job:
if table.job.cache_hit:
meta_cost = 'cached'
else:
bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)
meta_cost = '%s processed' % bytes
meta_time = 'time: %.1fs' % table.job.total_time
else:
meta_cost = ''
meta_time = ''
data, total_count = google.datalab.utils.commands.get_data(table, fields, first_row=0,
count=rows_per_page)
if total_count < 0:
# The table doesn't have a length metadata property but may still be small if we fetched less
# rows than we asked for.
fetched_count = len(data['rows'])
if fetched_count < rows_per_page:
total_count = fetched_count
chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'
meta_entries = [meta_count, meta_time, meta_cost, meta_name]
meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))
return _HTML_TEMPLATE.format(div_id=div_id,
static_table=google.datalab.utils.commands.HtmlBuilder
.render_chart_data(data),
meta_data=meta_data,
chart_style=chart,
source_index=google.datalab.utils.commands
.get_data_source_index(table.full_name),
fields=','.join(fields),
total_rows=total_count,
rows_per_page=rows_per_page,
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder)) | python | def _table_viewer(table, rows_per_page=25, fields=None):
""" Return a table viewer.
This includes a static rendering of the first page of the table, that gets replaced
by the charting code in environments where Javascript is executable and BQ is available.
Args:
table: the table to view.
rows_per_page: how many rows to display at one time.
fields: an array of field names to display; default is None which uses the full schema.
Returns:
A string containing the HTML for the table viewer.
"""
# TODO(gram): rework this to use google.datalab.utils.commands.chart_html
if not table.exists():
raise Exception('Table %s does not exist' % table.full_name)
if not table.is_listable():
return "Done"
_HTML_TEMPLATE = u"""
<div class="bqtv" id="{div_id}">{static_table}</div>
<br />{meta_data}<br />
<script src="/static/components/requirejs/require.js"></script>
<script>
require.config({{
paths: {{
base: '/static/base',
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'
}},
map: {{
'*': {{
datalab: 'nbextensions/gcpdatalab'
}}
}},
shim: {{
plotly: {{
deps: ['d3', 'jquery'],
exports: 'plotly'
}}
}}
}});
require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',
'datalab/style!/nbextensions/gcpdatalab/charting.css'],
function(charts, dom, events) {{
charts.render('gcharts', dom, events, '{chart_style}', [], {data},
{{
pageSize: {rows_per_page},
cssClassNames: {{
tableRow: 'gchart-table-row',
headerRow: 'gchart-table-headerrow',
oddTableRow: 'gchart-table-oddrow',
selectedTableRow: 'gchart-table-selectedrow',
hoverTableRow: 'gchart-table-hoverrow',
tableCell: 'gchart-table-cell',
headerCell: 'gchart-table-headercell',
rowNumberCell: 'gchart-table-rownumcell'
}}
}},
{{source_index: {source_index}, fields: '{fields}'}},
0,
{total_rows});
}}
);
</script>
"""
if fields is None:
fields = google.datalab.utils.commands.get_field_list(fields, table.schema)
div_id = google.datalab.utils.commands.Html.next_id()
meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''
meta_name = table.full_name if table.job is None else ('job: %s' % table.job.id)
if table.job:
if table.job.cache_hit:
meta_cost = 'cached'
else:
bytes = bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)
meta_cost = '%s processed' % bytes
meta_time = 'time: %.1fs' % table.job.total_time
else:
meta_cost = ''
meta_time = ''
data, total_count = google.datalab.utils.commands.get_data(table, fields, first_row=0,
count=rows_per_page)
if total_count < 0:
# The table doesn't have a length metadata property but may still be small if we fetched less
# rows than we asked for.
fetched_count = len(data['rows'])
if fetched_count < rows_per_page:
total_count = fetched_count
chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'
meta_entries = [meta_count, meta_time, meta_cost, meta_name]
meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))
return _HTML_TEMPLATE.format(div_id=div_id,
static_table=google.datalab.utils.commands.HtmlBuilder
.render_chart_data(data),
meta_data=meta_data,
chart_style=chart,
source_index=google.datalab.utils.commands
.get_data_source_index(table.full_name),
fields=','.join(fields),
total_rows=total_count,
rows_per_page=rows_per_page,
data=json.dumps(data, cls=google.datalab.utils.JSONEncoder)) | [
"def",
"_table_viewer",
"(",
"table",
",",
"rows_per_page",
"=",
"25",
",",
"fields",
"=",
"None",
")",
":",
"# TODO(gram): rework this to use google.datalab.utils.commands.chart_html",
"if",
"not",
"table",
".",
"exists",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Table %s does not exist'",
"%",
"table",
".",
"full_name",
")",
"if",
"not",
"table",
".",
"is_listable",
"(",
")",
":",
"return",
"\"Done\"",
"_HTML_TEMPLATE",
"=",
"u\"\"\"\n <div class=\"bqtv\" id=\"{div_id}\">{static_table}</div>\n <br />{meta_data}<br />\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'],\n function(charts, dom, events) {{\n charts.render('gcharts', dom, events, '{chart_style}', [], {data},\n {{\n pageSize: {rows_per_page},\n cssClassNames: {{\n tableRow: 'gchart-table-row',\n headerRow: 'gchart-table-headerrow',\n oddTableRow: 'gchart-table-oddrow',\n selectedTableRow: 'gchart-table-selectedrow',\n hoverTableRow: 'gchart-table-hoverrow',\n tableCell: 'gchart-table-cell',\n headerCell: 'gchart-table-headercell',\n rowNumberCell: 'gchart-table-rownumcell'\n }}\n }},\n {{source_index: {source_index}, fields: '{fields}'}},\n 0,\n {total_rows});\n }}\n );\n </script>\n \"\"\"",
"if",
"fields",
"is",
"None",
":",
"fields",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_field_list",
"(",
"fields",
",",
"table",
".",
"schema",
")",
"div_id",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"Html",
".",
"next_id",
"(",
")",
"meta_count",
"=",
"(",
"'rows: %d'",
"%",
"table",
".",
"length",
")",
"if",
"table",
".",
"length",
">=",
"0",
"else",
"''",
"meta_name",
"=",
"table",
".",
"full_name",
"if",
"table",
".",
"job",
"is",
"None",
"else",
"(",
"'job: %s'",
"%",
"table",
".",
"job",
".",
"id",
")",
"if",
"table",
".",
"job",
":",
"if",
"table",
".",
"job",
".",
"cache_hit",
":",
"meta_cost",
"=",
"'cached'",
"else",
":",
"bytes",
"=",
"bigquery",
".",
"_query_stats",
".",
"QueryStats",
".",
"_size_formatter",
"(",
"table",
".",
"job",
".",
"bytes_processed",
")",
"meta_cost",
"=",
"'%s processed'",
"%",
"bytes",
"meta_time",
"=",
"'time: %.1fs'",
"%",
"table",
".",
"job",
".",
"total_time",
"else",
":",
"meta_cost",
"=",
"''",
"meta_time",
"=",
"''",
"data",
",",
"total_count",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_data",
"(",
"table",
",",
"fields",
",",
"first_row",
"=",
"0",
",",
"count",
"=",
"rows_per_page",
")",
"if",
"total_count",
"<",
"0",
":",
"# The table doesn't have a length metadata property but may still be small if we fetched less",
"# rows than we asked for.",
"fetched_count",
"=",
"len",
"(",
"data",
"[",
"'rows'",
"]",
")",
"if",
"fetched_count",
"<",
"rows_per_page",
":",
"total_count",
"=",
"fetched_count",
"chart",
"=",
"'table'",
"if",
"0",
"<=",
"total_count",
"<=",
"rows_per_page",
"else",
"'paged_table'",
"meta_entries",
"=",
"[",
"meta_count",
",",
"meta_time",
",",
"meta_cost",
",",
"meta_name",
"]",
"meta_data",
"=",
"'(%s)'",
"%",
"(",
"', '",
".",
"join",
"(",
"[",
"entry",
"for",
"entry",
"in",
"meta_entries",
"if",
"len",
"(",
"entry",
")",
"]",
")",
")",
"return",
"_HTML_TEMPLATE",
".",
"format",
"(",
"div_id",
"=",
"div_id",
",",
"static_table",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"HtmlBuilder",
".",
"render_chart_data",
"(",
"data",
")",
",",
"meta_data",
"=",
"meta_data",
",",
"chart_style",
"=",
"chart",
",",
"source_index",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"commands",
".",
"get_data_source_index",
"(",
"table",
".",
"full_name",
")",
",",
"fields",
"=",
"','",
".",
"join",
"(",
"fields",
")",
",",
"total_rows",
"=",
"total_count",
",",
"rows_per_page",
"=",
"rows_per_page",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"cls",
"=",
"google",
".",
"datalab",
".",
"utils",
".",
"JSONEncoder",
")",
")"
] | Return a table viewer.
This includes a static rendering of the first page of the table, that gets replaced
by the charting code in environments where Javascript is executable and BQ is available.
Args:
table: the table to view.
rows_per_page: how many rows to display at one time.
fields: an array of field names to display; default is None which uses the full schema.
Returns:
A string containing the HTML for the table viewer. | [
"Return",
"a",
"table",
"viewer",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/commands/_bigquery.py#L1074-L1186 |
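When the table reports no length (`total_count < 0`), `_table_viewer` falls back to the number of rows actually fetched, and it only switches to the paged chart when the row count exceeds one page. The same decision logic in isolation, with made-up rows:

```python
rows_per_page = 25
total_count = -1                                # length metadata unavailable
fetched_rows = ['r%d' % i for i in range(7)]    # fewer rows than one page

# If the fetch came back short, we know the table is small.
if total_count < 0 and len(fetched_rows) < rows_per_page:
    total_count = len(fetched_rows)

chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'
print(total_count, chart)                       # 7 table
```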
5,107 | googledatalab/pydatalab | datalab/bigquery/_udf.py | UDF._build_js | def _build_js(inputs, outputs, name, implementation, support_code):
"""Creates a BigQuery SQL UDF javascript object.
Args:
inputs: a list of (name, type) tuples representing the schema of input.
outputs: a list of (name, type) tuples representing the schema of the output.
name: the name of the function
implementation: a javascript function defining the UDF logic.
support_code: additional javascript code that the function can use.
"""
# Construct a comma-separated list of input field names
# For example, field1,field2,...
input_fields = json.dumps([f[0] for f in inputs])
# Construct a json representation of the output schema
# For example, [{'name':'field1','type':'string'},...]
output_fields = [{'name': f[0], 'type': f[1]} for f in outputs]
output_fields = json.dumps(output_fields, sort_keys=True)
# Build the JS from the individual bits with proper escaping of the implementation
if support_code is None:
support_code = ''
return ('{code}\n{name}={implementation};\nbigquery.defineFunction(\'{name}\', {inputs}, '
'{outputs}, {name});').format(code=support_code, name=name,
implementation=implementation, inputs=str(input_fields),
outputs=str(output_fields)) | python | def _build_js(inputs, outputs, name, implementation, support_code):
"""Creates a BigQuery SQL UDF javascript object.
Args:
inputs: a list of (name, type) tuples representing the schema of input.
outputs: a list of (name, type) tuples representing the schema of the output.
name: the name of the function
implementation: a javascript function defining the UDF logic.
support_code: additional javascript code that the function can use.
"""
# Construct a comma-separated list of input field names
# For example, field1,field2,...
input_fields = json.dumps([f[0] for f in inputs])
# Construct a json representation of the output schema
# For example, [{'name':'field1','type':'string'},...]
output_fields = [{'name': f[0], 'type': f[1]} for f in outputs]
output_fields = json.dumps(output_fields, sort_keys=True)
# Build the JS from the individual bits with proper escaping of the implementation
if support_code is None:
support_code = ''
return ('{code}\n{name}={implementation};\nbigquery.defineFunction(\'{name}\', {inputs}, '
'{outputs}, {name});').format(code=support_code, name=name,
implementation=implementation, inputs=str(input_fields),
outputs=str(output_fields)) | [
"def",
"_build_js",
"(",
"inputs",
",",
"outputs",
",",
"name",
",",
"implementation",
",",
"support_code",
")",
":",
"# Construct a comma-separated list of input field names",
"# For example, field1,field2,...",
"input_fields",
"=",
"json",
".",
"dumps",
"(",
"[",
"f",
"[",
"0",
"]",
"for",
"f",
"in",
"inputs",
"]",
")",
"# Construct a json representation of the output schema",
"# For example, [{'name':'field1','type':'string'},...]",
"output_fields",
"=",
"[",
"{",
"'name'",
":",
"f",
"[",
"0",
"]",
",",
"'type'",
":",
"f",
"[",
"1",
"]",
"}",
"for",
"f",
"in",
"outputs",
"]",
"output_fields",
"=",
"json",
".",
"dumps",
"(",
"output_fields",
",",
"sort_keys",
"=",
"True",
")",
"# Build the JS from the individual bits with proper escaping of the implementation",
"if",
"support_code",
"is",
"None",
":",
"support_code",
"=",
"''",
"return",
"(",
"'{code}\\n{name}={implementation};\\nbigquery.defineFunction(\\'{name}\\', {inputs}, '",
"'{outputs}, {name});'",
")",
".",
"format",
"(",
"code",
"=",
"support_code",
",",
"name",
"=",
"name",
",",
"implementation",
"=",
"implementation",
",",
"inputs",
"=",
"str",
"(",
"input_fields",
")",
",",
"outputs",
"=",
"str",
"(",
"output_fields",
")",
")"
] | Creates a BigQuery SQL UDF javascript object.
Args:
inputs: a list of (name, type) tuples representing the schema of input.
outputs: a list of (name, type) tuples representing the schema of the output.
name: the name of the function
implementation: a javascript function defining the UDF logic.
support_code: additional javascript code that the function can use. | [
"Creates",
"a",
"BigQuery",
"SQL",
"UDF",
"javascript",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_udf.py#L59-L84 |
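Because `_build_js` is pure string assembly, the generated JavaScript can be previewed without any BigQuery dependency. A standalone sketch that mirrors the template above with a toy schema and a trivial pass-through implementation; all names here are made up for illustration.

```python
import json

inputs = [('word', 'string'), ('count', 'integer')]
outputs = [('word', 'string'), ('count', 'integer')]
name = 'passthrough'
implementation = 'function(row, emit) { emit(row); }'
support_code = ''

# Same field-name list and output-schema JSON that _build_js constructs.
input_fields = json.dumps([f[0] for f in inputs])
output_fields = json.dumps([{'name': f[0], 'type': f[1]} for f in outputs],
                           sort_keys=True)

js = ('{code}\n{name}={implementation};\n'
      'bigquery.defineFunction(\'{name}\', {inputs}, {outputs}, {name});').format(
          code=support_code, name=name, implementation=implementation,
          inputs=input_fields, outputs=output_fields)
print(js)
```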
5,108 | googledatalab/pydatalab | datalab/bigquery/_sampling.py | Sampling.sampling_query | def sampling_query(sql, fields=None, count=5, sampling=None):
"""Returns a sampling query for the SQL object.
Args:
sql: the SQL object to sample
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
Returns:
A SQL query string for sampling the input sql.
"""
if sampling is None:
sampling = Sampling.default(count=count, fields=fields)
return sampling(sql) | python | def sampling_query(sql, fields=None, count=5, sampling=None):
"""Returns a sampling query for the SQL object.
Args:
sql: the SQL object to sample
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
Returns:
A SQL query string for sampling the input sql.
"""
if sampling is None:
sampling = Sampling.default(count=count, fields=fields)
return sampling(sql) | [
"def",
"sampling_query",
"(",
"sql",
",",
"fields",
"=",
"None",
",",
"count",
"=",
"5",
",",
"sampling",
"=",
"None",
")",
":",
"if",
"sampling",
"is",
"None",
":",
"sampling",
"=",
"Sampling",
".",
"default",
"(",
"count",
"=",
"count",
",",
"fields",
"=",
"fields",
")",
"return",
"sampling",
"(",
"sql",
")"
] | Returns a sampling query for the SQL object.
Args:
sql: the SQL object to sample
fields: an optional list of field names to retrieve.
count: an optional count of rows to retrieve which is used if a specific
sampling is not specified.
sampling: an optional sampling strategy to apply to the table.
Returns:
A SQL query string for sampling the input sql. | [
"Returns",
"a",
"sampling",
"query",
"for",
"the",
"SQL",
"object",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_sampling.py#L74-L88 |
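`sampling_query` treats a sampling strategy as a callable that rewrites SQL, falling back to `Sampling.default(count, fields)` when none is supplied. A minimal sketch of that callable-strategy pattern; the LIMIT-based rewrite shown here is an assumption for illustration and is not necessarily the exact SQL that `Sampling.default` emits.

```python
def default_sampling(count=5, fields=None):
    """Hypothetical strategy: project the given fields and cap the row count."""
    projection = ', '.join(fields) if fields else '*'
    return lambda sql: 'SELECT %s FROM (%s) LIMIT %d' % (projection, sql, count)


sampling = default_sampling(count=10, fields=['name', 'age'])
print(sampling('SELECT * FROM mydataset.people'))
# SELECT name, age FROM (SELECT * FROM mydataset.people) LIMIT 10
```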
5,109 | googledatalab/pydatalab | google/datalab/ml/_fasets.py | FacetsOverview._remove_nonascii | def _remove_nonascii(self, df):
"""Make copy and remove non-ascii characters from it."""
df_copy = df.copy(deep=True)
for col in df_copy.columns:
if (df_copy[col].dtype == np.dtype('O')):
df_copy[col] = df[col].apply(
lambda x: re.sub(r'[^\x00-\x7f]', r'', x) if isinstance(x, six.string_types) else x)
return df_copy | python | def _remove_nonascii(self, df):
"""Make copy and remove non-ascii characters from it."""
df_copy = df.copy(deep=True)
for col in df_copy.columns:
if (df_copy[col].dtype == np.dtype('O')):
df_copy[col] = df[col].apply(
lambda x: re.sub(r'[^\x00-\x7f]', r'', x) if isinstance(x, six.string_types) else x)
return df_copy | [
"def",
"_remove_nonascii",
"(",
"self",
",",
"df",
")",
":",
"df_copy",
"=",
"df",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"for",
"col",
"in",
"df_copy",
".",
"columns",
":",
"if",
"(",
"df_copy",
"[",
"col",
"]",
".",
"dtype",
"==",
"np",
".",
"dtype",
"(",
"'O'",
")",
")",
":",
"df_copy",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"re",
".",
"sub",
"(",
"r'[^\\x00-\\x7f]'",
",",
"r''",
",",
"x",
")",
"if",
"isinstance",
"(",
"x",
",",
"six",
".",
"string_types",
")",
"else",
"x",
")",
"return",
"df_copy"
] | Make copy and remove non-ascii characters from it. | [
"Make",
"copy",
"and",
"remove",
"non",
"-",
"ascii",
"characters",
"from",
"it",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_fasets.py#L27-L36 |
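The same column-wise scrub as `_remove_nonascii` can be reproduced with nothing but pandas and `re`: object-typed columns are copied and stripped of non-ASCII characters, numeric columns pass through untouched. The sample frame below is made up.

```python
import re
import pandas as pd

df = pd.DataFrame({'city': ['S\u00e3o Paulo', 'M\u00fcnchen', 'Boston'],
                   'population': [12.3, 1.5, 0.7]})

cleaned = df.copy(deep=True)
for col in cleaned.columns:
    if cleaned[col].dtype == object:
        # Drop any character outside the 7-bit ASCII range.
        cleaned[col] = cleaned[col].apply(
            lambda x: re.sub(r'[^\x00-\x7f]', '', x) if isinstance(x, str) else x)

print(cleaned['city'].tolist())   # ['So Paulo', 'Mnchen', 'Boston']
```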
5,110 | googledatalab/pydatalab | google/datalab/ml/_fasets.py | FacetsOverview.plot | def plot(self, data):
""" Plots an overview in a list of dataframes
Args:
data: a dictionary with key the name, and value the dataframe.
"""
import IPython
if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
raise ValueError('Expect a dictionary where the values are all dataframes.')
gfsg = GenericFeatureStatisticsGenerator()
data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]
data_proto = gfsg.ProtoFromDataFrames(data)
protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
html_id = 'f' + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html" >
<facets-overview id="{html_id}"></facets-overview>
<script>
document.querySelector("#{html_id}").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
return IPython.core.display.HTML(html) | python | def plot(self, data):
""" Plots an overview in a list of dataframes
Args:
data: a dictionary with key the name, and value the dataframe.
"""
import IPython
if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
raise ValueError('Expect a dictionary where the values are all dataframes.')
gfsg = GenericFeatureStatisticsGenerator()
data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]
data_proto = gfsg.ProtoFromDataFrames(data)
protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
html_id = 'f' + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html" >
<facets-overview id="{html_id}"></facets-overview>
<script>
document.querySelector("#{html_id}").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
return IPython.core.display.HTML(html) | [
"def",
"plot",
"(",
"self",
",",
"data",
")",
":",
"import",
"IPython",
"if",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
"or",
"not",
"all",
"(",
"isinstance",
"(",
"v",
",",
"pd",
".",
"DataFrame",
")",
"for",
"v",
"in",
"data",
".",
"values",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Expect a dictionary where the values are all dataframes.'",
")",
"gfsg",
"=",
"GenericFeatureStatisticsGenerator",
"(",
")",
"data",
"=",
"[",
"{",
"'name'",
":",
"k",
",",
"'table'",
":",
"self",
".",
"_remove_nonascii",
"(",
"v",
")",
"}",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"data",
")",
"]",
"data_proto",
"=",
"gfsg",
".",
"ProtoFromDataFrames",
"(",
"data",
")",
"protostr",
"=",
"base64",
".",
"b64encode",
"(",
"data_proto",
".",
"SerializeToString",
"(",
")",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"html_id",
"=",
"'f'",
"+",
"datalab",
".",
"utils",
".",
"commands",
".",
"Html",
".",
"next_id",
"(",
")",
"HTML_TEMPLATE",
"=",
"\"\"\"<link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/facets-jupyter.html\" >\n <facets-overview id=\"{html_id}\"></facets-overview>\n <script>\n document.querySelector(\"#{html_id}\").protoInput = \"{protostr}\";\n </script>\"\"\"",
"html",
"=",
"HTML_TEMPLATE",
".",
"format",
"(",
"html_id",
"=",
"html_id",
",",
"protostr",
"=",
"protostr",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")"
] | Plots an overview in a list of dataframes
Args:
data: a dictionary with key the name, and value the dataframe. | [
"Plots",
"an",
"overview",
"in",
"a",
"list",
"of",
"dataframes"
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_fasets.py#L38-L62 |
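A hedged usage sketch for `FacetsOverview.plot` in a Datalab notebook: the `google.datalab.ml` import path and the availability of the facets nbextension are assumptions, and the two DataFrames are made up.

```python
import pandas as pd
from google.datalab.ml import FacetsOverview   # assumed public export of the class above

train_df = pd.DataFrame({'age': [22, 35, 58], 'city': ['NYC', 'SF', 'LA']})
eval_df = pd.DataFrame({'age': [41, 29], 'city': ['SF', 'NYC']})

# Dictionary keys become the dataset names shown in the overview widget.
FacetsOverview().plot({'train': train_df, 'eval': eval_df})
```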
5,111 | googledatalab/pydatalab | google/datalab/ml/_fasets.py | FacetsDiveview.plot | def plot(self, data, height=1000, render_large_data=False):
""" Plots a detail view of data.
Args:
data: a Pandas dataframe.
height: the height of the output.
"""
import IPython
if not isinstance(data, pd.DataFrame):
raise ValueError('Expect a DataFrame.')
if (len(data) > 10000 and not render_large_data):
raise ValueError('Facets dive may not work well with more than 10000 rows. ' +
'Reduce data or set "render_large_data" to True.')
jsonstr = data.to_json(orient='records')
html_id = 'f' + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE = """
<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html">
<facets-dive id="{html_id}" height="{height}"></facets-dive>
<script>
var data = {jsonstr};
document.querySelector("#{html_id}").data = data;
</script>"""
html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)
return IPython.core.display.HTML(html) | python | def plot(self, data, height=1000, render_large_data=False):
""" Plots a detail view of data.
Args:
data: a Pandas dataframe.
height: the height of the output.
"""
import IPython
if not isinstance(data, pd.DataFrame):
raise ValueError('Expect a DataFrame.')
if (len(data) > 10000 and not render_large_data):
raise ValueError('Facets dive may not work well with more than 10000 rows. ' +
'Reduce data or set "render_large_data" to True.')
jsonstr = data.to_json(orient='records')
html_id = 'f' + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE = """
<link rel="import" href="/nbextensions/gcpdatalab/extern/facets-jupyter.html">
<facets-dive id="{html_id}" height="{height}"></facets-dive>
<script>
var data = {jsonstr};
document.querySelector("#{html_id}").data = data;
</script>"""
html = HTML_TEMPLATE.format(html_id=html_id, jsonstr=jsonstr, height=height)
return IPython.core.display.HTML(html) | [
"def",
"plot",
"(",
"self",
",",
"data",
",",
"height",
"=",
"1000",
",",
"render_large_data",
"=",
"False",
")",
":",
"import",
"IPython",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"'Expect a DataFrame.'",
")",
"if",
"(",
"len",
"(",
"data",
")",
">",
"10000",
"and",
"not",
"render_large_data",
")",
":",
"raise",
"ValueError",
"(",
"'Facets dive may not work well with more than 10000 rows. '",
"+",
"'Reduce data or set \"render_large_data\" to True.'",
")",
"jsonstr",
"=",
"data",
".",
"to_json",
"(",
"orient",
"=",
"'records'",
")",
"html_id",
"=",
"'f'",
"+",
"datalab",
".",
"utils",
".",
"commands",
".",
"Html",
".",
"next_id",
"(",
")",
"HTML_TEMPLATE",
"=",
"\"\"\"\n <link rel=\"import\" href=\"/nbextensions/gcpdatalab/extern/facets-jupyter.html\">\n <facets-dive id=\"{html_id}\" height=\"{height}\"></facets-dive>\n <script>\n var data = {jsonstr};\n document.querySelector(\"#{html_id}\").data = data;\n </script>\"\"\"",
"html",
"=",
"HTML_TEMPLATE",
".",
"format",
"(",
"html_id",
"=",
"html_id",
",",
"jsonstr",
"=",
"jsonstr",
",",
"height",
"=",
"height",
")",
"return",
"IPython",
".",
"core",
".",
"display",
".",
"HTML",
"(",
"html",
")"
] | Plots a detail view of data.
Args:
data: a Pandas dataframe.
height: the height of the output. | [
"Plots",
"a",
"detail",
"view",
"of",
"data",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_fasets.py#L68-L95 |
5,112 | googledatalab/pydatalab | google/datalab/utils/facets/base_generic_feature_statistics_generator.py | BaseGenericFeatureStatisticsGenerator.DtypeToType | def DtypeToType(self, dtype):
"""Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""
if dtype.char in np.typecodes['AllFloat']:
return self.fs_proto.FLOAT
elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool or
np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64)):
return self.fs_proto.INT
else:
return self.fs_proto.STRING | python | def DtypeToType(self, dtype):
"""Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum."""
if dtype.char in np.typecodes['AllFloat']:
return self.fs_proto.FLOAT
elif (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool or
np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64)):
return self.fs_proto.INT
else:
return self.fs_proto.STRING | [
"def",
"DtypeToType",
"(",
"self",
",",
"dtype",
")",
":",
"if",
"dtype",
".",
"char",
"in",
"np",
".",
"typecodes",
"[",
"'AllFloat'",
"]",
":",
"return",
"self",
".",
"fs_proto",
".",
"FLOAT",
"elif",
"(",
"dtype",
".",
"char",
"in",
"np",
".",
"typecodes",
"[",
"'AllInteger'",
"]",
"or",
"dtype",
"==",
"np",
".",
"bool",
"or",
"np",
".",
"issubdtype",
"(",
"dtype",
",",
"np",
".",
"datetime64",
")",
"or",
"np",
".",
"issubdtype",
"(",
"dtype",
",",
"np",
".",
"timedelta64",
")",
")",
":",
"return",
"self",
".",
"fs_proto",
".",
"INT",
"else",
":",
"return",
"self",
".",
"fs_proto",
".",
"STRING"
] | Converts a Numpy dtype to the FeatureNameStatistics.Type proto enum. | [
"Converts",
"a",
"Numpy",
"dtype",
"to",
"the",
"FeatureNameStatistics",
".",
"Type",
"proto",
"enum",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/facets/base_generic_feature_statistics_generator.py#L58-L67 |
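The dtype bucketing in `DtypeToType` can be tried in isolation. This sketch maps dtypes to plain strings instead of the proto enum, and uses `np.bool_` because the bare `np.bool` alias used above is deprecated in recent NumPy releases.

```python
import numpy as np


def dtype_bucket(dtype):
    # Float typecodes -> FLOAT; integers, bools, datetimes, timedeltas -> INT;
    # everything else (e.g. object/str columns) -> STRING.
    if dtype.char in np.typecodes['AllFloat']:
        return 'FLOAT'
    if (dtype.char in np.typecodes['AllInteger'] or dtype == np.bool_ or
            np.issubdtype(dtype, np.datetime64) or
            np.issubdtype(dtype, np.timedelta64)):
        return 'INT'
    return 'STRING'


print(dtype_bucket(np.dtype('float32')))         # FLOAT
print(dtype_bucket(np.dtype('int64')))           # INT
print(dtype_bucket(np.dtype('datetime64[ns]')))  # INT
print(dtype_bucket(np.dtype('O')))               # STRING
```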
5,113 | googledatalab/pydatalab | google/datalab/utils/facets/base_generic_feature_statistics_generator.py | BaseGenericFeatureStatisticsGenerator.NdarrayToEntry | def NdarrayToEntry(self, x):
"""Converts an ndarray to the Entry format."""
row_counts = []
for row in x:
try:
rc = np.count_nonzero(~np.isnan(row))
if rc != 0:
row_counts.append(rc)
except TypeError:
try:
row_counts.append(row.size)
except AttributeError:
row_counts.append(1)
data_type = self.DtypeToType(x.dtype)
converter = self.DtypeToNumberConverter(x.dtype)
flattened = x.ravel()
orig_size = len(flattened)
# Remove all None and nan values and count how many were removed.
flattened = flattened[flattened != np.array(None)]
if converter:
flattened = converter(flattened)
if data_type == self.fs_proto.STRING:
flattened_temp = []
for x in flattened:
try:
if str(x) != 'nan':
flattened_temp.append(x)
except UnicodeEncodeError:
if x.encode('utf-8') != 'nan':
flattened_temp.append(x)
flattened = flattened_temp
else:
flattened = flattened[~np.isnan(flattened)].tolist()
missing = orig_size - len(flattened)
return {
'vals': flattened,
'counts': row_counts,
'missing': missing,
'type': data_type
} | python | def NdarrayToEntry(self, x):
"""Converts an ndarray to the Entry format."""
row_counts = []
for row in x:
try:
rc = np.count_nonzero(~np.isnan(row))
if rc != 0:
row_counts.append(rc)
except TypeError:
try:
row_counts.append(row.size)
except AttributeError:
row_counts.append(1)
data_type = self.DtypeToType(x.dtype)
converter = self.DtypeToNumberConverter(x.dtype)
flattened = x.ravel()
orig_size = len(flattened)
# Remove all None and nan values and count how many were removed.
flattened = flattened[flattened != np.array(None)]
if converter:
flattened = converter(flattened)
if data_type == self.fs_proto.STRING:
flattened_temp = []
for x in flattened:
try:
if str(x) != 'nan':
flattened_temp.append(x)
except UnicodeEncodeError:
if x.encode('utf-8') != 'nan':
flattened_temp.append(x)
flattened = flattened_temp
else:
flattened = flattened[~np.isnan(flattened)].tolist()
missing = orig_size - len(flattened)
return {
'vals': flattened,
'counts': row_counts,
'missing': missing,
'type': data_type
} | [
"def",
"NdarrayToEntry",
"(",
"self",
",",
"x",
")",
":",
"row_counts",
"=",
"[",
"]",
"for",
"row",
"in",
"x",
":",
"try",
":",
"rc",
"=",
"np",
".",
"count_nonzero",
"(",
"~",
"np",
".",
"isnan",
"(",
"row",
")",
")",
"if",
"rc",
"!=",
"0",
":",
"row_counts",
".",
"append",
"(",
"rc",
")",
"except",
"TypeError",
":",
"try",
":",
"row_counts",
".",
"append",
"(",
"row",
".",
"size",
")",
"except",
"AttributeError",
":",
"row_counts",
".",
"append",
"(",
"1",
")",
"data_type",
"=",
"self",
".",
"DtypeToType",
"(",
"x",
".",
"dtype",
")",
"converter",
"=",
"self",
".",
"DtypeToNumberConverter",
"(",
"x",
".",
"dtype",
")",
"flattened",
"=",
"x",
".",
"ravel",
"(",
")",
"orig_size",
"=",
"len",
"(",
"flattened",
")",
"# Remove all None and nan values and count how many were removed.",
"flattened",
"=",
"flattened",
"[",
"flattened",
"!=",
"np",
".",
"array",
"(",
"None",
")",
"]",
"if",
"converter",
":",
"flattened",
"=",
"converter",
"(",
"flattened",
")",
"if",
"data_type",
"==",
"self",
".",
"fs_proto",
".",
"STRING",
":",
"flattened_temp",
"=",
"[",
"]",
"for",
"x",
"in",
"flattened",
":",
"try",
":",
"if",
"str",
"(",
"x",
")",
"!=",
"'nan'",
":",
"flattened_temp",
".",
"append",
"(",
"x",
")",
"except",
"UnicodeEncodeError",
":",
"if",
"x",
".",
"encode",
"(",
"'utf-8'",
")",
"!=",
"'nan'",
":",
"flattened_temp",
".",
"append",
"(",
"x",
")",
"flattened",
"=",
"flattened_temp",
"else",
":",
"flattened",
"=",
"flattened",
"[",
"~",
"np",
".",
"isnan",
"(",
"flattened",
")",
"]",
".",
"tolist",
"(",
")",
"missing",
"=",
"orig_size",
"-",
"len",
"(",
"flattened",
")",
"return",
"{",
"'vals'",
":",
"flattened",
",",
"'counts'",
":",
"row_counts",
",",
"'missing'",
":",
"missing",
",",
"'type'",
":",
"data_type",
"}"
] | Converts an ndarray to the Entry format. | [
"Converts",
"an",
"ndarray",
"to",
"the",
"Entry",
"format",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/facets/base_generic_feature_statistics_generator.py#L96-L137 |
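The bookkeeping NdarrayToEntry performs for a numeric array can be seen with a small hand-worked case (plain NumPy on an invented input, not the library call itself): per-row non-NaN counts, the flattened values with NaNs dropped, and how many values went missing:

import numpy as np

x = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])

row_counts = [int(np.count_nonzero(~np.isnan(row))) for row in x]   # [2, 2]
flattened = x.ravel()
orig_size = flattened.size
vals = flattened[~np.isnan(flattened)].tolist()                     # [1.0, 3.0, 4.0, 5.0]
missing = orig_size - len(vals)                                     # 2

entry = {'vals': vals, 'counts': row_counts, 'missing': missing, 'type': 'FLOAT'}
print(entry)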
5,114 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | serving_from_csv_input | def serving_from_csv_input(train_config, args, keep_target):
"""Read the input features from a placeholder csv string tensor."""
examples = tf.placeholder(
dtype=tf.string,
shape=(None,),
name='csv_input_string')
features = parse_example_tensor(examples=examples,
train_config=train_config,
keep_target=keep_target)
if keep_target:
target = features.pop(train_config['target_column'])
else:
target = None
features, target = preprocess_input(
features=features,
target=target,
train_config=train_config,
preprocess_output_dir=args.preprocess_output_dir,
model_type=args.model_type)
return input_fn_utils.InputFnOps(features,
target,
{'csv_line': examples}
) | python | def serving_from_csv_input(train_config, args, keep_target):
"""Read the input features from a placeholder csv string tensor."""
examples = tf.placeholder(
dtype=tf.string,
shape=(None,),
name='csv_input_string')
features = parse_example_tensor(examples=examples,
train_config=train_config,
keep_target=keep_target)
if keep_target:
target = features.pop(train_config['target_column'])
else:
target = None
features, target = preprocess_input(
features=features,
target=target,
train_config=train_config,
preprocess_output_dir=args.preprocess_output_dir,
model_type=args.model_type)
return input_fn_utils.InputFnOps(features,
target,
{'csv_line': examples}
) | [
"def",
"serving_from_csv_input",
"(",
"train_config",
",",
"args",
",",
"keep_target",
")",
":",
"examples",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"string",
",",
"shape",
"=",
"(",
"None",
",",
")",
",",
"name",
"=",
"'csv_input_string'",
")",
"features",
"=",
"parse_example_tensor",
"(",
"examples",
"=",
"examples",
",",
"train_config",
"=",
"train_config",
",",
"keep_target",
"=",
"keep_target",
")",
"if",
"keep_target",
":",
"target",
"=",
"features",
".",
"pop",
"(",
"train_config",
"[",
"'target_column'",
"]",
")",
"else",
":",
"target",
"=",
"None",
"features",
",",
"target",
"=",
"preprocess_input",
"(",
"features",
"=",
"features",
",",
"target",
"=",
"target",
",",
"train_config",
"=",
"train_config",
",",
"preprocess_output_dir",
"=",
"args",
".",
"preprocess_output_dir",
",",
"model_type",
"=",
"args",
".",
"model_type",
")",
"return",
"input_fn_utils",
".",
"InputFnOps",
"(",
"features",
",",
"target",
",",
"{",
"'csv_line'",
":",
"examples",
"}",
")"
] | Read the input features from a placeholder csv string tensor. | [
"Read",
"the",
"input",
"features",
"from",
"a",
"placeholder",
"csv",
"string",
"tensor",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L90-L115 |
5,115 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | parse_example_tensor | def parse_example_tensor(examples, train_config, keep_target):
"""Read the csv files.
Args:
examples: string tensor
train_config: training config
keep_target: if true, the target column is expected to exist and it is
returned in the features dict.
Returns:
Dict of feature_name to tensor. Target feature is in the dict.
"""
csv_header = []
if keep_target:
csv_header = train_config['csv_header']
else:
csv_header = [name for name in train_config['csv_header']
if name != train_config['target_column']]
# record_defaults are used by tf.decode_csv to insert defaults, and to infer
# the datatype.
record_defaults = [[train_config['csv_defaults'][name]]
for name in csv_header]
tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')
# I'm not really sure why expand_dims needs to be called. If using regression
# models, it errors without it.
tensors = [tf.expand_dims(x, axis=1) for x in tensors]
tensor_dict = dict(zip(csv_header, tensors))
return tensor_dict | python | def parse_example_tensor(examples, train_config, keep_target):
"""Read the csv files.
Args:
examples: string tensor
train_config: training config
keep_target: if true, the target column is expected to exist and it is
returned in the features dict.
Returns:
Dict of feature_name to tensor. Target feature is in the dict.
"""
csv_header = []
if keep_target:
csv_header = train_config['csv_header']
else:
csv_header = [name for name in train_config['csv_header']
if name != train_config['target_column']]
# record_defaults are used by tf.decode_csv to insert defaults, and to infer
# the datatype.
record_defaults = [[train_config['csv_defaults'][name]]
for name in csv_header]
tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')
# I'm not really sure why expand_dims needs to be called. If using regression
# models, it errors without it.
tensors = [tf.expand_dims(x, axis=1) for x in tensors]
tensor_dict = dict(zip(csv_header, tensors))
return tensor_dict | [
"def",
"parse_example_tensor",
"(",
"examples",
",",
"train_config",
",",
"keep_target",
")",
":",
"csv_header",
"=",
"[",
"]",
"if",
"keep_target",
":",
"csv_header",
"=",
"train_config",
"[",
"'csv_header'",
"]",
"else",
":",
"csv_header",
"=",
"[",
"name",
"for",
"name",
"in",
"train_config",
"[",
"'csv_header'",
"]",
"if",
"name",
"!=",
"train_config",
"[",
"'target_column'",
"]",
"]",
"# record_defaults are used by tf.decode_csv to insert defaults, and to infer",
"# the datatype.",
"record_defaults",
"=",
"[",
"[",
"train_config",
"[",
"'csv_defaults'",
"]",
"[",
"name",
"]",
"]",
"for",
"name",
"in",
"csv_header",
"]",
"tensors",
"=",
"tf",
".",
"decode_csv",
"(",
"examples",
",",
"record_defaults",
",",
"name",
"=",
"'csv_to_tensors'",
")",
"# I'm not really sure why expand_dims needs to be called. If using regression",
"# models, it errors without it.",
"tensors",
"=",
"[",
"tf",
".",
"expand_dims",
"(",
"x",
",",
"axis",
"=",
"1",
")",
"for",
"x",
"in",
"tensors",
"]",
"tensor_dict",
"=",
"dict",
"(",
"zip",
"(",
"csv_header",
",",
"tensors",
")",
")",
"return",
"tensor_dict"
] | Read the csv files.
Args:
examples: string tensor
train_config: training config
keep_target: if true, the target column is expected to exist and it is
returned in the features dict.
Returns:
Dict of feature_name to tensor. Target feature is in the dict. | [
"Read",
"the",
"csv",
"files",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L281-L312 |
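The header filtering and record_defaults construction in parse_example_tensor can be previewed without TensorFlow. The train_config below is made up for illustration, but its keys match the ones the function reads:

train_config = {
    'csv_header': ['key', 'age', 'income', 'label'],
    'target_column': 'label',
    'csv_defaults': {'key': '', 'age': 0, 'income': 0.0, 'label': ''},
}

keep_target = False
if keep_target:
    csv_header = train_config['csv_header']
else:
    csv_header = [name for name in train_config['csv_header']
                  if name != train_config['target_column']]

# One single-element list per column; tf.decode_csv uses these both as fill-in
# defaults and to infer each column's dtype.
record_defaults = [[train_config['csv_defaults'][name]] for name in csv_header]

print(csv_header)       # ['key', 'age', 'income']
print(record_defaults)  # [[''], [0], [0.0]]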
5,116 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | get_estimator | def get_estimator(output_dir, train_config, args):
"""Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Models are saved into output_dir/train
train_config: our training config
args: command line parameters
Returns:
TF learn estimator
Raises:
ValueError: if config is wrong.
"""
# Check the requested mode fits the preprocessed data.
target_name = train_config['target_column']
if is_classification_model(args.model_type) and target_name not in \
train_config['categorical_columns']:
raise ValueError('When using a classification model, the target must be a '
'categorical variable.')
if is_regression_model(args.model_type) and target_name not in \
train_config['numerical_columns']:
raise ValueError('When using a regression model, the target must be a '
'numerical variable.')
# Check layers used for dnn models.
if is_dnn_model(args.model_type) and not args.layer_sizes:
raise ValueError('--layer-size* must be used with DNN models')
if is_linear_model(args.model_type) and args.layer_sizes:
raise ValueError('--layer-size* cannot be used with linear models')
# Build tf.learn features
feature_columns = _tflearn_features(train_config, args)
# Set how often to run checkpointing in terms of time.
config = tf.contrib.learn.RunConfig(
save_checkpoints_secs=args.save_checkpoints_secs)
train_dir = os.path.join(output_dir, 'train')
if args.model_type == 'dnn_regression':
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_regression':
estimator = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'dnn_classification':
estimator = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_classification':
estimator = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
else:
raise ValueError('bad --model-type value')
return estimator | python | def get_estimator(output_dir, train_config, args):
"""Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Models are saved into output_dir/train
train_config: our training config
args: command line parameters
Returns:
TF learn estimator
Raises:
ValueError: if config is wrong.
"""
# Check the requested mode fits the preprocessed data.
target_name = train_config['target_column']
if is_classification_model(args.model_type) and target_name not in \
train_config['categorical_columns']:
raise ValueError('When using a classification model, the target must be a '
'categorical variable.')
if is_regression_model(args.model_type) and target_name not in \
train_config['numerical_columns']:
raise ValueError('When using a regression model, the target must be a '
'numerical variable.')
# Check layers used for dnn models.
if is_dnn_model(args.model_type) and not args.layer_sizes:
raise ValueError('--layer-size* must be used with DNN models')
if is_linear_model(args.model_type) and args.layer_sizes:
raise ValueError('--layer-size* cannot be used with linear models')
# Build tf.learn features
feature_columns = _tflearn_features(train_config, args)
# Set how often to run checkpointing in terms of time.
config = tf.contrib.learn.RunConfig(
save_checkpoints_secs=args.save_checkpoints_secs)
train_dir = os.path.join(output_dir, 'train')
if args.model_type == 'dnn_regression':
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_regression':
estimator = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'dnn_classification':
estimator = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_classification':
estimator = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
else:
raise ValueError('bad --model-type value')
return estimator | [
"def",
"get_estimator",
"(",
"output_dir",
",",
"train_config",
",",
"args",
")",
":",
"# Check the requested mode fits the preprocessed data.",
"target_name",
"=",
"train_config",
"[",
"'target_column'",
"]",
"if",
"is_classification_model",
"(",
"args",
".",
"model_type",
")",
"and",
"target_name",
"not",
"in",
"train_config",
"[",
"'categorical_columns'",
"]",
":",
"raise",
"ValueError",
"(",
"'When using a classification model, the target must be a '",
"'categorical variable.'",
")",
"if",
"is_regression_model",
"(",
"args",
".",
"model_type",
")",
"and",
"target_name",
"not",
"in",
"train_config",
"[",
"'numerical_columns'",
"]",
":",
"raise",
"ValueError",
"(",
"'When using a regression model, the target must be a '",
"'numerical variable.'",
")",
"# Check layers used for dnn models.",
"if",
"is_dnn_model",
"(",
"args",
".",
"model_type",
")",
"and",
"not",
"args",
".",
"layer_sizes",
":",
"raise",
"ValueError",
"(",
"'--layer-size* must be used with DNN models'",
")",
"if",
"is_linear_model",
"(",
"args",
".",
"model_type",
")",
"and",
"args",
".",
"layer_sizes",
":",
"raise",
"ValueError",
"(",
"'--layer-size* cannot be used with linear models'",
")",
"# Build tf.learn features",
"feature_columns",
"=",
"_tflearn_features",
"(",
"train_config",
",",
"args",
")",
"# Set how often to run checkpointing in terms of time.",
"config",
"=",
"tf",
".",
"contrib",
".",
"learn",
".",
"RunConfig",
"(",
"save_checkpoints_secs",
"=",
"args",
".",
"save_checkpoints_secs",
")",
"train_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'train'",
")",
"if",
"args",
".",
"model_type",
"==",
"'dnn_regression'",
":",
"estimator",
"=",
"tf",
".",
"contrib",
".",
"learn",
".",
"DNNRegressor",
"(",
"feature_columns",
"=",
"feature_columns",
",",
"hidden_units",
"=",
"args",
".",
"layer_sizes",
",",
"config",
"=",
"config",
",",
"model_dir",
"=",
"train_dir",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"args",
".",
"learning_rate",
",",
"epsilon",
"=",
"args",
".",
"epsilon",
")",
")",
"elif",
"args",
".",
"model_type",
"==",
"'linear_regression'",
":",
"estimator",
"=",
"tf",
".",
"contrib",
".",
"learn",
".",
"LinearRegressor",
"(",
"feature_columns",
"=",
"feature_columns",
",",
"config",
"=",
"config",
",",
"model_dir",
"=",
"train_dir",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"args",
".",
"learning_rate",
",",
"epsilon",
"=",
"args",
".",
"epsilon",
")",
")",
"elif",
"args",
".",
"model_type",
"==",
"'dnn_classification'",
":",
"estimator",
"=",
"tf",
".",
"contrib",
".",
"learn",
".",
"DNNClassifier",
"(",
"feature_columns",
"=",
"feature_columns",
",",
"hidden_units",
"=",
"args",
".",
"layer_sizes",
",",
"n_classes",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"target_name",
"]",
"[",
"'n_classes'",
"]",
",",
"config",
"=",
"config",
",",
"model_dir",
"=",
"train_dir",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"args",
".",
"learning_rate",
",",
"epsilon",
"=",
"args",
".",
"epsilon",
")",
")",
"elif",
"args",
".",
"model_type",
"==",
"'linear_classification'",
":",
"estimator",
"=",
"tf",
".",
"contrib",
".",
"learn",
".",
"LinearClassifier",
"(",
"feature_columns",
"=",
"feature_columns",
",",
"n_classes",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"target_name",
"]",
"[",
"'n_classes'",
"]",
",",
"config",
"=",
"config",
",",
"model_dir",
"=",
"train_dir",
",",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"args",
".",
"learning_rate",
",",
"epsilon",
"=",
"args",
".",
"epsilon",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'bad --model-type value'",
")",
"return",
"estimator"
] | Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Models are saved into output_dir/train
train_config: our training config
args: command line parameters
Returns:
TF learn estimator
Raises:
ValueError: if config is wrong. | [
"Returns",
"a",
"tf",
"learn",
"estimator",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L367-L445 |
5,117 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | preprocess_input | def preprocess_input(features, target, train_config, preprocess_output_dir,
model_type):
"""Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor.
"""
target_name = train_config['target_column']
key_name = train_config['key_column']
# Do the numerical transforms.
# Numerical transforms supported for regression/classification
# 1) num -> do nothing (identity, default)
# 2) num -> scale to -1, 1 (scale)
# 3) num -> scale to -a, a (scale with value parameter)
with tf.name_scope('numerical_feature_preprocess'):
if train_config['numerical_columns']:
numerical_analysis_file = os.path.join(preprocess_output_dir,
NUMERICAL_ANALYSIS)
if not file_io.file_exists(numerical_analysis_file):
raise ValueError('File %s not found in %s' %
(NUMERICAL_ANALYSIS, preprocess_output_dir))
numerical_anlysis = json.loads(
python_portable_string(
file_io.read_file_to_string(numerical_analysis_file)))
for name in train_config['numerical_columns']:
if name == target_name or name == key_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if transform_name == 'scale':
value = float(transform_config.get('value', 1.0))
features[name] = _scale_tensor(
features[name],
range_min=numerical_anlysis[name]['min'],
range_max=numerical_anlysis[name]['max'],
scale_min=-value,
scale_max=value)
elif transform_name == 'identity' or transform_name is None:
pass
else:
raise ValueError(('For numerical variables, only scale '
'and identity are supported: '
'Error for %s') % name)
# Do target transform if it exists.
if target is not None:
with tf.name_scope('target_feature_preprocess'):
if target_name in train_config['categorical_columns']:
labels = train_config['vocab_stats'][target_name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
target = table.lookup(target)
# target = tf.contrib.lookup.string_to_index(target, labels)
# Do categorical transforms. Only apply vocab mapping. The real
# transforms are done with tf learn column features.
with tf.name_scope('categorical_feature_preprocess'):
for name in train_config['categorical_columns']:
if name == key_name or name == target_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(model_type):
if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None:
map_vocab = True
else:
raise ValueError('Unknown transform %s' % transform_name)
elif is_linear_model(model_type):
if (transform_name == 'one_hot' or transform_name is None):
map_vocab = True
elif transform_name == 'embedding':
map_vocab = False
else:
raise ValueError('Unknown transform %s' % transform_name)
if map_vocab:
labels = train_config['vocab_stats'][name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
features[name] = table.lookup(features[name])
return features, target | python | def preprocess_input(features, target, train_config, preprocess_output_dir,
model_type):
"""Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor.
"""
target_name = train_config['target_column']
key_name = train_config['key_column']
# Do the numerical transforms.
# Numerical transforms supported for regression/classification
# 1) num -> do nothing (identity, default)
# 2) num -> scale to -1, 1 (scale)
# 3) num -> scale to -a, a (scale with value parameter)
with tf.name_scope('numerical_feature_preprocess'):
if train_config['numerical_columns']:
numerical_analysis_file = os.path.join(preprocess_output_dir,
NUMERICAL_ANALYSIS)
if not file_io.file_exists(numerical_analysis_file):
raise ValueError('File %s not found in %s' %
(NUMERICAL_ANALYSIS, preprocess_output_dir))
numerical_anlysis = json.loads(
python_portable_string(
file_io.read_file_to_string(numerical_analysis_file)))
for name in train_config['numerical_columns']:
if name == target_name or name == key_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if transform_name == 'scale':
value = float(transform_config.get('value', 1.0))
features[name] = _scale_tensor(
features[name],
range_min=numerical_anlysis[name]['min'],
range_max=numerical_anlysis[name]['max'],
scale_min=-value,
scale_max=value)
elif transform_name == 'identity' or transform_name is None:
pass
else:
raise ValueError(('For numerical variables, only scale '
'and identity are supported: '
'Error for %s') % name)
# Do target transform if it exists.
if target is not None:
with tf.name_scope('target_feature_preprocess'):
if target_name in train_config['categorical_columns']:
labels = train_config['vocab_stats'][target_name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
target = table.lookup(target)
# target = tf.contrib.lookup.string_to_index(target, labels)
# Do categorical transforms. Only apply vocab mapping. The real
# transforms are done with tf learn column features.
with tf.name_scope('categorical_feature_preprocess'):
for name in train_config['categorical_columns']:
if name == key_name or name == target_name:
continue
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(model_type):
if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None:
map_vocab = True
else:
raise ValueError('Unknown transform %s' % transform_name)
elif is_linear_model(model_type):
if (transform_name == 'one_hot' or transform_name is None):
map_vocab = True
elif transform_name == 'embedding':
map_vocab = False
else:
raise ValueError('Unknown transform %s' % transform_name)
if map_vocab:
labels = train_config['vocab_stats'][name]['labels']
table = tf.contrib.lookup.string_to_index_table_from_tensor(labels)
features[name] = table.lookup(features[name])
return features, target | [
"def",
"preprocess_input",
"(",
"features",
",",
"target",
",",
"train_config",
",",
"preprocess_output_dir",
",",
"model_type",
")",
":",
"target_name",
"=",
"train_config",
"[",
"'target_column'",
"]",
"key_name",
"=",
"train_config",
"[",
"'key_column'",
"]",
"# Do the numerical transforms.",
"# Numerical transforms supported for regression/classification",
"# 1) num -> do nothing (identity, default)",
"# 2) num -> scale to -1, 1 (scale)",
"# 3) num -> scale to -a, a (scale with value parameter)",
"with",
"tf",
".",
"name_scope",
"(",
"'numerical_feature_preprocess'",
")",
":",
"if",
"train_config",
"[",
"'numerical_columns'",
"]",
":",
"numerical_analysis_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"preprocess_output_dir",
",",
"NUMERICAL_ANALYSIS",
")",
"if",
"not",
"file_io",
".",
"file_exists",
"(",
"numerical_analysis_file",
")",
":",
"raise",
"ValueError",
"(",
"'File %s not found in %s'",
"%",
"(",
"NUMERICAL_ANALYSIS",
",",
"preprocess_output_dir",
")",
")",
"numerical_anlysis",
"=",
"json",
".",
"loads",
"(",
"python_portable_string",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"numerical_analysis_file",
")",
")",
")",
"for",
"name",
"in",
"train_config",
"[",
"'numerical_columns'",
"]",
":",
"if",
"name",
"==",
"target_name",
"or",
"name",
"==",
"key_name",
":",
"continue",
"transform_config",
"=",
"train_config",
"[",
"'transforms'",
"]",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"transform_name",
"=",
"transform_config",
".",
"get",
"(",
"'transform'",
",",
"None",
")",
"if",
"transform_name",
"==",
"'scale'",
":",
"value",
"=",
"float",
"(",
"transform_config",
".",
"get",
"(",
"'value'",
",",
"1.0",
")",
")",
"features",
"[",
"name",
"]",
"=",
"_scale_tensor",
"(",
"features",
"[",
"name",
"]",
",",
"range_min",
"=",
"numerical_anlysis",
"[",
"name",
"]",
"[",
"'min'",
"]",
",",
"range_max",
"=",
"numerical_anlysis",
"[",
"name",
"]",
"[",
"'max'",
"]",
",",
"scale_min",
"=",
"-",
"value",
",",
"scale_max",
"=",
"value",
")",
"elif",
"transform_name",
"==",
"'identity'",
"or",
"transform_name",
"is",
"None",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'For numerical variables, only scale '",
"'and identity are supported: '",
"'Error for %s'",
")",
"%",
"name",
")",
"# Do target transform if it exists.",
"if",
"target",
"is",
"not",
"None",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'target_feature_preprocess'",
")",
":",
"if",
"target_name",
"in",
"train_config",
"[",
"'categorical_columns'",
"]",
":",
"labels",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"target_name",
"]",
"[",
"'labels'",
"]",
"table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"string_to_index_table_from_tensor",
"(",
"labels",
")",
"target",
"=",
"table",
".",
"lookup",
"(",
"target",
")",
"# target = tf.contrib.lookup.string_to_index(target, labels)",
"# Do categorical transforms. Only apply vocab mapping. The real",
"# transforms are done with tf learn column features.",
"with",
"tf",
".",
"name_scope",
"(",
"'categorical_feature_preprocess'",
")",
":",
"for",
"name",
"in",
"train_config",
"[",
"'categorical_columns'",
"]",
":",
"if",
"name",
"==",
"key_name",
"or",
"name",
"==",
"target_name",
":",
"continue",
"transform_config",
"=",
"train_config",
"[",
"'transforms'",
"]",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"transform_name",
"=",
"transform_config",
".",
"get",
"(",
"'transform'",
",",
"None",
")",
"if",
"is_dnn_model",
"(",
"model_type",
")",
":",
"if",
"transform_name",
"==",
"'embedding'",
"or",
"transform_name",
"==",
"'one_hot'",
"or",
"transform_name",
"is",
"None",
":",
"map_vocab",
"=",
"True",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown transform %s'",
"%",
"transform_name",
")",
"elif",
"is_linear_model",
"(",
"model_type",
")",
":",
"if",
"(",
"transform_name",
"==",
"'one_hot'",
"or",
"transform_name",
"is",
"None",
")",
":",
"map_vocab",
"=",
"True",
"elif",
"transform_name",
"==",
"'embedding'",
":",
"map_vocab",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown transform %s'",
"%",
"transform_name",
")",
"if",
"map_vocab",
":",
"labels",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"name",
"]",
"[",
"'labels'",
"]",
"table",
"=",
"tf",
".",
"contrib",
".",
"lookup",
".",
"string_to_index_table_from_tensor",
"(",
"labels",
")",
"features",
"[",
"name",
"]",
"=",
"table",
".",
"lookup",
"(",
"features",
"[",
"name",
"]",
")",
"return",
"features",
",",
"target"
] | Perform some transformations after reading in the input tensors.
Args:
features: dict of feature_name to tensor
target: tensor
train_config: our training config object
preprocess_output_dir: folder should contain the vocab files.
model_type: the tf model type.
Raises:
ValueError: if wrong transforms are used
Returns:
New features dict and new target tensor. | [
"Perform",
"some",
"transformations",
"after",
"reading",
"in",
"the",
"input",
"tensors",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L448-L542 |
5,118 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | _scale_tensor | def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
"""Scale a tensor to scale_min to scale_max.
Args:
tensor: input tensor. Should be a numerical tensor.
range_min: min expected value for this feature/tensor.
range_max: max expected value.
scale_min: new expected min value.
scale_max: new expected max value.
Returns:
scaled tensor.
"""
if range_min == range_max:
return tensor
float_tensor = tf.to_float(tensor)
scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) *
tf.constant(float(scale_max - scale_min))),
tf.constant(float(range_max - range_min)))
shifted_tensor = scaled_tensor + tf.constant(float(scale_min))
return shifted_tensor | python | def _scale_tensor(tensor, range_min, range_max, scale_min, scale_max):
"""Scale a tensor to scale_min to scale_max.
Args:
tensor: input tensor. Should be a numerical tensor.
range_min: min expected value for this feature/tensor.
range_max: max expected value.
scale_min: new expected min value.
scale_max: new expected max value.
Returns:
scaled tensor.
"""
if range_min == range_max:
return tensor
float_tensor = tf.to_float(tensor)
scaled_tensor = tf.divide((tf.subtract(float_tensor, range_min) *
tf.constant(float(scale_max - scale_min))),
tf.constant(float(range_max - range_min)))
shifted_tensor = scaled_tensor + tf.constant(float(scale_min))
return shifted_tensor | [
"def",
"_scale_tensor",
"(",
"tensor",
",",
"range_min",
",",
"range_max",
",",
"scale_min",
",",
"scale_max",
")",
":",
"if",
"range_min",
"==",
"range_max",
":",
"return",
"tensor",
"float_tensor",
"=",
"tf",
".",
"to_float",
"(",
"tensor",
")",
"scaled_tensor",
"=",
"tf",
".",
"divide",
"(",
"(",
"tf",
".",
"subtract",
"(",
"float_tensor",
",",
"range_min",
")",
"*",
"tf",
".",
"constant",
"(",
"float",
"(",
"scale_max",
"-",
"scale_min",
")",
")",
")",
",",
"tf",
".",
"constant",
"(",
"float",
"(",
"range_max",
"-",
"range_min",
")",
")",
")",
"shifted_tensor",
"=",
"scaled_tensor",
"+",
"tf",
".",
"constant",
"(",
"float",
"(",
"scale_min",
")",
")",
"return",
"shifted_tensor"
] | Scale a tensor to scale_min to scale_max.
Args:
tensor: input tensor. Should be a numerical tensor.
range_min: min expected value for this feature/tensor.
range_max: max expected value.
scale_min: new expected min value.
scale_max: new expected max value.
Returns:
scaled tensor. | [
"Scale",
"a",
"tensor",
"to",
"scale_min",
"to",
"scale_max",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L545-L567 |
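The arithmetic inside _scale_tensor is an ordinary linear rescale. A NumPy stand-in (not the original graph code) makes the formula easy to check by hand, here mapping the range [0, 10] onto [-1, 1] as the 'scale' transform with value=1 would:

import numpy as np

def scale(values, range_min, range_max, scale_min, scale_max):
    values = np.asarray(values, dtype=float)
    if range_min == range_max:
        return values
    scaled = (values - range_min) * (scale_max - scale_min) / (range_max - range_min)
    return scaled + scale_min

print(scale([0, 2.5, 5, 10], range_min=0, range_max=10, scale_min=-1, scale_max=1))
# [-1.  -0.5  0.   1. ]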
5,119 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | _tflearn_features | def _tflearn_features(train_config, args):
"""Builds the tf.learn feature list.
All numerical features are just given real_valued_column because all the
preprocessing transformations are done in preprocess_input. Categorical
features are processed here depending if the vocab map (from string to int)
was applied in preprocess_input.
Args:
train_config: our train config object
args: command line args.
Returns:
List of TF learn feature columns.
Raises:
ValueError: if wrong transforms are used for the model type.
"""
feature_columns = []
target_name = train_config['target_column']
key_name = train_config['key_column']
for name in train_config['numerical_columns']:
if name != target_name and name != key_name:
feature_columns.append(tf.contrib.layers.real_valued_column(
name,
dimension=1))
# Supported transforms:
# for DNN
# 1) string -> make int -> embedding (embedding)
# 2) string -> make int -> one_hot (one_hot, default)
# for linear
# 1) string -> sparse_column_with_hash_bucket (embedding)
# 2) string -> make int -> sparse_column_with_integerized_feature (one_hot, default)
# It is unfortunate that tf.layers has different feature transforms if the
# model is linear or DNN. This package should not expose to the user that
# we are using tf.layers. It is crazy that DNN models support more feature
# types (like string -> hash sparse column -> embedding)
for name in train_config['categorical_columns']:
if name != target_name and name != key_name:
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(args.model_type):
if transform_name == 'embedding':
sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
learn_feature = tf.contrib.layers.embedding_column(
sparse,
dimension=transform_config['embedding_dim'])
elif transform_name == 'one_hot' or transform_name is None:
sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
learn_feature = tf.contrib.layers.one_hot_column(sparse)
else:
raise ValueError(('Unknown transform name. Only \'embedding\' '
'and \'one_hot\' transforms are supported. Got %s')
% transform_name)
elif is_linear_model(args.model_type):
if transform_name == 'one_hot' or transform_name is None:
learn_feature = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
elif transform_name == 'embedding':
learn_feature = tf.contrib.layers.sparse_column_with_hash_bucket(
name,
hash_bucket_size=transform_config['embedding_dim'])
else:
raise ValueError(('Unknown transform name. Only \'embedding\' '
'and \'one_hot\' transforms are supported. Got %s')
% transform_name)
# Save the feature
feature_columns.append(learn_feature)
return feature_columns | python | def _tflearn_features(train_config, args):
"""Builds the tf.learn feature list.
All numerical features are just given real_valued_column because all the
preprocessing transformations are done in preprocess_input. Categorical
features are processed here depending if the vocab map (from string to int)
was applied in preprocess_input.
Args:
train_config: our train config object
args: command line args.
Returns:
List of TF learn feature columns.
Raises:
ValueError: if wrong transforms are used for the model type.
"""
feature_columns = []
target_name = train_config['target_column']
key_name = train_config['key_column']
for name in train_config['numerical_columns']:
if name != target_name and name != key_name:
feature_columns.append(tf.contrib.layers.real_valued_column(
name,
dimension=1))
# Supported transforms:
# for DNN
# 1) string -> make int -> embedding (embedding)
# 2) string -> make int -> one_hot (one_hot, default)
# for linear
# 1) string -> sparse_column_with_hash_bucket (embedding)
# 2) string -> make int -> sparse_column_with_integerized_feature (one_hot, default)
# It is unfortunate that tf.layers has different feature transforms if the
# model is linear or DNN. This package should not expose to the user that
# we are using tf.layers. It is crazy that DNN models support more feature
# types (like string -> hash sparse column -> embedding)
for name in train_config['categorical_columns']:
if name != target_name and name != key_name:
transform_config = train_config['transforms'].get(name, {})
transform_name = transform_config.get('transform', None)
if is_dnn_model(args.model_type):
if transform_name == 'embedding':
sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
learn_feature = tf.contrib.layers.embedding_column(
sparse,
dimension=transform_config['embedding_dim'])
elif transform_name == 'one_hot' or transform_name is None:
sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
learn_feature = tf.contrib.layers.one_hot_column(sparse)
else:
raise ValueError(('Unknown transform name. Only \'embedding\' '
'and \'one_hot\' transforms are supported. Got %s')
% transform_name)
elif is_linear_model(args.model_type):
if transform_name == 'one_hot' or transform_name is None:
learn_feature = tf.contrib.layers.sparse_column_with_integerized_feature(
name,
bucket_size=train_config['vocab_stats'][name]['n_classes'])
elif transform_name == 'embedding':
learn_feature = tf.contrib.layers.sparse_column_with_hash_bucket(
name,
hash_bucket_size=transform_config['embedding_dim'])
else:
raise ValueError(('Unknown transform name. Only \'embedding\' '
'and \'one_hot\' transforms are supported. Got %s')
% transform_name)
# Save the feature
feature_columns.append(learn_feature)
return feature_columns | [
"def",
"_tflearn_features",
"(",
"train_config",
",",
"args",
")",
":",
"feature_columns",
"=",
"[",
"]",
"target_name",
"=",
"train_config",
"[",
"'target_column'",
"]",
"key_name",
"=",
"train_config",
"[",
"'key_column'",
"]",
"for",
"name",
"in",
"train_config",
"[",
"'numerical_columns'",
"]",
":",
"if",
"name",
"!=",
"target_name",
"and",
"name",
"!=",
"key_name",
":",
"feature_columns",
".",
"append",
"(",
"tf",
".",
"contrib",
".",
"layers",
".",
"real_valued_column",
"(",
"name",
",",
"dimension",
"=",
"1",
")",
")",
"# Supported transforms:",
"# for DNN",
"# 1) string -> make int -> embedding (embedding)",
"# 2) string -> make int -> one_hot (one_hot, default)",
"# for linear",
"# 1) string -> sparse_column_with_hash_bucket (embedding)",
"# 2) string -> make int -> sparse_column_with_integerized_feature (one_hot, default)",
"# It is unfortunate that tf.layers has different feature transforms if the",
"# model is linear or DNN. This pacakge should not expose to the user that",
"# we are using tf.layers. It is crazy that DNN models support more feature",
"# types (like string -> hash sparse column -> embedding)",
"for",
"name",
"in",
"train_config",
"[",
"'categorical_columns'",
"]",
":",
"if",
"name",
"!=",
"target_name",
"and",
"name",
"!=",
"key_name",
":",
"transform_config",
"=",
"train_config",
"[",
"'transforms'",
"]",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"transform_name",
"=",
"transform_config",
".",
"get",
"(",
"'transform'",
",",
"None",
")",
"if",
"is_dnn_model",
"(",
"args",
".",
"model_type",
")",
":",
"if",
"transform_name",
"==",
"'embedding'",
":",
"sparse",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"sparse_column_with_integerized_feature",
"(",
"name",
",",
"bucket_size",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"name",
"]",
"[",
"'n_classes'",
"]",
")",
"learn_feature",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"embedding_column",
"(",
"sparse",
",",
"dimension",
"=",
"transform_config",
"[",
"'embedding_dim'",
"]",
")",
"elif",
"transform_name",
"==",
"'one_hot'",
"or",
"transform_name",
"is",
"None",
":",
"sparse",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"sparse_column_with_integerized_feature",
"(",
"name",
",",
"bucket_size",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"name",
"]",
"[",
"'n_classes'",
"]",
")",
"learn_feature",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"one_hot_column",
"(",
"sparse",
")",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'Unknown transform name. Only \\'embedding\\' '",
"'and \\'one_hot\\' transforms are supported. Got %s'",
")",
"%",
"transform_name",
")",
"elif",
"is_linear_model",
"(",
"args",
".",
"model_type",
")",
":",
"if",
"transform_name",
"==",
"'one_hot'",
"or",
"transform_name",
"is",
"None",
":",
"learn_feature",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"sparse_column_with_integerized_feature",
"(",
"name",
",",
"bucket_size",
"=",
"train_config",
"[",
"'vocab_stats'",
"]",
"[",
"name",
"]",
"[",
"'n_classes'",
"]",
")",
"elif",
"transform_name",
"==",
"'embedding'",
":",
"learn_feature",
"=",
"tf",
".",
"contrib",
".",
"layers",
".",
"sparse_column_with_hash_bucket",
"(",
"name",
",",
"hash_bucket_size",
"=",
"transform_config",
"[",
"'embedding_dim'",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"(",
"'Unknown transform name. Only \\'embedding\\' '",
"'and \\'one_hot\\' transforms are supported. Got %s'",
")",
"%",
"transform_name",
")",
"# Save the feature",
"feature_columns",
".",
"append",
"(",
"learn_feature",
")",
"return",
"feature_columns"
] | Builds the tf.learn feature list.
All numerical features are just given real_valued_column because all the
preprocessing transformations are done in preprocess_input. Categorical
features are processed here depending if the vocab map (from string to int)
was applied in preprocess_input.
Args:
train_config: our train config object
args: command line args.
Returns:
List of TF learn feature columns.
Raises:
ValueError: if wrong transforms are used for the model type. | [
"Builds",
"the",
"tf",
".",
"learn",
"feature",
"list",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L570-L647 |
5,120 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | get_vocabulary | def get_vocabulary(preprocess_output_dir, name):
"""Loads the vocabulary file as a list of strings.
Args:
preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.
name: name of the csv column.
Returns:
List of strings.
Raises:
ValueError: if file is missing.
"""
vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
if not file_io.file_exists(vocab_file):
raise ValueError('File %s not found in %s' %
(CATEGORICAL_ANALYSIS % name, preprocess_output_dir))
labels = python_portable_string(
file_io.read_file_to_string(vocab_file)).split('\n')
label_values = [x for x in labels if x] # remove empty lines
return label_values | python | def get_vocabulary(preprocess_output_dir, name):
"""Loads the vocabulary file as a list of strings.
Args:
preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.
name: name of the csv column.
Returns:
List of strings.
Raises:
ValueError: if file is missing.
"""
vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
if not file_io.file_exists(vocab_file):
raise ValueError('File %s not found in %s' %
(CATEGORICAL_ANALYSIS % name, preprocess_output_dir))
labels = python_portable_string(
file_io.read_file_to_string(vocab_file)).split('\n')
label_values = [x for x in labels if x] # remove empty lines
return label_values | [
"def",
"get_vocabulary",
"(",
"preprocess_output_dir",
",",
"name",
")",
":",
"vocab_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"preprocess_output_dir",
",",
"CATEGORICAL_ANALYSIS",
"%",
"name",
")",
"if",
"not",
"file_io",
".",
"file_exists",
"(",
"vocab_file",
")",
":",
"raise",
"ValueError",
"(",
"'File %s not found in %s'",
"%",
"(",
"CATEGORICAL_ANALYSIS",
"%",
"name",
",",
"preprocess_output_dir",
")",
")",
"labels",
"=",
"python_portable_string",
"(",
"file_io",
".",
"read_file_to_string",
"(",
"vocab_file",
")",
")",
".",
"split",
"(",
"'\\n'",
")",
"label_values",
"=",
"[",
"x",
"for",
"x",
"in",
"labels",
"if",
"x",
"]",
"# remove empty lines",
"return",
"label_values"
] | Loads the vocabulary file as a list of strings.
Args:
preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.
name: name of the csv column.
Returns:
List of strings.
Raises:
ValueError: if file is missing. | [
"Loads",
"the",
"vocabulary",
"file",
"as",
"a",
"list",
"of",
"strings",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L655-L677 |
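The vocabulary file read by get_vocabulary is just a newline-separated label list, so the parsing can be sketched with a plain string in place of file_io (contents below are invented for the example):

vocab_text = "red\ngreen\nblue\n\n"                 # trailing blank line, as written files often have
labels = [x for x in vocab_text.split('\n') if x]   # drop empty lines
print(labels)        # ['red', 'green', 'blue']
print(len(labels))   # 3 -> this count is what ends up as n_classes for the column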
5,121 | googledatalab/pydatalab | solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py | validate_metadata | def validate_metadata(train_config):
"""Perform some checks that the trainig config is correct.
Args:
train_config: train config as produced by merge_metadata()
Raises:
ValueError: if columns look wrong.
"""
# Make sure we have a default for every column
if len(train_config['csv_header']) != len(train_config['csv_defaults']):
raise ValueError('Unequal number of columns in input features file and '
'schema file.')
# Check there are no missing columns. sorted_columns has two copies of the
# target column because the target column is also listed in
# categorical_columns or numerical_columns.
sorted_columns = sorted(train_config['csv_header'] +
[train_config['target_column']])
sorted_columns2 = sorted(train_config['categorical_columns'] +
train_config['numerical_columns'] +
[train_config['key_column']] +
[train_config['target_column']])
if sorted_columns2 != sorted_columns:
raise ValueError('Each csv header must be a numerical/categorical type, a '
' key, or a target.') | python | def validate_metadata(train_config):
"""Perform some checks that the trainig config is correct.
Args:
train_config: train config as produced by merge_metadata()
Raises:
ValueError: if columns look wrong.
"""
# Make sure we have a default for every column
if len(train_config['csv_header']) != len(train_config['csv_defaults']):
raise ValueError('Unequal number of columns in input features file and '
'schema file.')
# Check there are no missing columns. sorted_columns has two copies of the
# target column because the target column is also listed in
# categorical_columns or numerical_columns.
sorted_columns = sorted(train_config['csv_header'] +
[train_config['target_column']])
sorted_columns2 = sorted(train_config['categorical_columns'] +
train_config['numerical_columns'] +
[train_config['key_column']] +
[train_config['target_column']])
if sorted_columns2 != sorted_columns:
raise ValueError('Each csv header must be a numerical/categorical type, a '
' key, or a target.') | [
"def",
"validate_metadata",
"(",
"train_config",
")",
":",
"# Make sure we have a default for every column",
"if",
"len",
"(",
"train_config",
"[",
"'csv_header'",
"]",
")",
"!=",
"len",
"(",
"train_config",
"[",
"'csv_defaults'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Unequal number of columns in input features file and '",
"'schema file.'",
")",
"# Check there are no missing columns. sorted_colums has two copies of the",
"# target column because the target column is also listed in",
"# categorical_columns or numerical_columns.",
"sorted_columns",
"=",
"sorted",
"(",
"train_config",
"[",
"'csv_header'",
"]",
"+",
"[",
"train_config",
"[",
"'target_column'",
"]",
"]",
")",
"sorted_columns2",
"=",
"sorted",
"(",
"train_config",
"[",
"'categorical_columns'",
"]",
"+",
"train_config",
"[",
"'numerical_columns'",
"]",
"+",
"[",
"train_config",
"[",
"'key_column'",
"]",
"]",
"+",
"[",
"train_config",
"[",
"'target_column'",
"]",
"]",
")",
"if",
"sorted_columns2",
"!=",
"sorted_columns",
":",
"raise",
"ValueError",
"(",
"'Each csv header must be a numerical/categorical type, a '",
"' key, or a target.'",
")"
] | Perform some checks that the training config is correct.
Args:
train_config: train config as produced by merge_metadata()
Raises:
ValueError: if columns look wrong. | [
"Perform",
"some",
"checks",
"that",
"the",
"trainig",
"config",
"is",
"correct",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L811-L838 |
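The column check in validate_metadata reduces to comparing two sorted name lists; a made-up config shows why the target name is appended on the csv_header side — it already appears once among the categorical/numerical columns on the other side:

train_config = {
    'csv_header': ['key', 'age', 'city', 'label'],
    'csv_defaults': {'key': '', 'age': 0, 'city': '', 'label': ''},
    'target_column': 'label',
    'key_column': 'key',
    'numerical_columns': ['age'],
    'categorical_columns': ['city', 'label'],
}

a = sorted(train_config['csv_header'] + [train_config['target_column']])
b = sorted(train_config['categorical_columns'] + train_config['numerical_columns'] +
           [train_config['key_column']] + [train_config['target_column']])
print(a == b)   # True -> every header is accounted for as numerical/categorical, key, or target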
5,122 | googledatalab/pydatalab | datalab/context/_project.py | Projects.get_default_id | def get_default_id(credentials=None):
""" Get default project id.
Returns: the default project id if there is one, or None.
"""
project_id = _utils.get_project_id()
if project_id is None:
projects, _ = Projects(credentials)._retrieve_projects(None, 2)
if len(projects) == 1:
project_id = projects[0].id
return project_id | python | def get_default_id(credentials=None):
""" Get default project id.
Returns: the default project id if there is one, or None.
"""
project_id = _utils.get_project_id()
if project_id is None:
projects, _ = Projects(credentials)._retrieve_projects(None, 2)
if len(projects) == 1:
project_id = projects[0].id
return project_id | [
"def",
"get_default_id",
"(",
"credentials",
"=",
"None",
")",
":",
"project_id",
"=",
"_utils",
".",
"get_project_id",
"(",
")",
"if",
"project_id",
"is",
"None",
":",
"projects",
",",
"_",
"=",
"Projects",
"(",
"credentials",
")",
".",
"_retrieve_projects",
"(",
"None",
",",
"2",
")",
"if",
"len",
"(",
"projects",
")",
"==",
"1",
":",
"project_id",
"=",
"projects",
"[",
"0",
"]",
".",
"id",
"return",
"project_id"
] | Get default project id.
Returns: the default project id if there is one, or None. | [
"Get",
"default",
"project",
"id",
"."
] | d9031901d5bca22fe0d5925d204e6698df9852e1 | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/context/_project.py#L97-L107 |
5,123 | jpvanhal/flask-split | flask_split/core.py | init_app | def init_app(state):
"""
Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance
"""
app = state.app
app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)
app.config.setdefault('SPLIT_DB_FAILOVER', False)
app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])
app.config.setdefault('SPLIT_ROBOT_REGEX', r"""
(?i)\b(
Baidu|
Gigabot|
Googlebot|
libwww-perl|
lwp-trivial|
msnbot|
SiteUptime|
Slurp|
WordPress|
ZIBB|
ZyBorg
)\b
""")
app.jinja_env.globals.update({
'ab_test': ab_test,
'finished': finished
})
@app.template_filter()
def percentage(number):
number *= 100
if abs(number) < 10:
return "%.1f%%" % round(number, 1)
else:
return "%d%%" % round(number) | python | def init_app(state):
"""
Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance
"""
app = state.app
app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)
app.config.setdefault('SPLIT_DB_FAILOVER', False)
app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])
app.config.setdefault('SPLIT_ROBOT_REGEX', r"""
(?i)\b(
Baidu|
Gigabot|
Googlebot|
libwww-perl|
lwp-trivial|
msnbot|
SiteUptime|
Slurp|
WordPress|
ZIBB|
ZyBorg
)\b
""")
app.jinja_env.globals.update({
'ab_test': ab_test,
'finished': finished
})
@app.template_filter()
def percentage(number):
number *= 100
if abs(number) < 10:
return "%.1f%%" % round(number, 1)
else:
return "%d%%" % round(number) | [
"def",
"init_app",
"(",
"state",
")",
":",
"app",
"=",
"state",
".",
"app",
"app",
".",
"config",
".",
"setdefault",
"(",
"'SPLIT_ALLOW_MULTIPLE_EXPERIMENTS'",
",",
"False",
")",
"app",
".",
"config",
".",
"setdefault",
"(",
"'SPLIT_DB_FAILOVER'",
",",
"False",
")",
"app",
".",
"config",
".",
"setdefault",
"(",
"'SPLIT_IGNORE_IP_ADDRESSES'",
",",
"[",
"]",
")",
"app",
".",
"config",
".",
"setdefault",
"(",
"'SPLIT_ROBOT_REGEX'",
",",
"r\"\"\"\n (?i)\\b(\n Baidu|\n Gigabot|\n Googlebot|\n libwww-perl|\n lwp-trivial|\n msnbot|\n SiteUptime|\n Slurp|\n WordPress|\n ZIBB|\n ZyBorg\n )\\b\n \"\"\"",
")",
"app",
".",
"jinja_env",
".",
"globals",
".",
"update",
"(",
"{",
"'ab_test'",
":",
"ab_test",
",",
"'finished'",
":",
"finished",
"}",
")",
"@",
"app",
".",
"template_filter",
"(",
")",
"def",
"percentage",
"(",
"number",
")",
":",
"number",
"*=",
"100",
"if",
"abs",
"(",
"number",
")",
"<",
"10",
":",
"return",
"\"%.1f%%\"",
"%",
"round",
"(",
"number",
",",
"1",
")",
"else",
":",
"return",
"\"%d%%\"",
"%",
"round",
"(",
"number",
")"
] | Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance | [
"Prepare",
"the",
"Flask",
"application",
"for",
"Flask",
"-",
"Split",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/core.py#L23-L61 |
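The percentage template filter registered in init_app keeps one decimal place only for magnitudes under 10%; pulled out of Flask, the same logic behaves like this:

def percentage(number):
    number *= 100
    if abs(number) < 10:
        return "%.1f%%" % round(number, 1)
    else:
        return "%d%%" % round(number)

print(percentage(0.04567))   # 4.6%
print(percentage(0.5))       # 50%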
5,124 | jpvanhal/flask-split | flask_split/core.py | finished | def finished(experiment_name, reset=True):
"""
Track a conversion.
:param experiment_name: Name of the experiment.
:param reset: If set to `True` current user's session is reset so that they
may start the test again in the future. If set to `False` the user
will always see the alternative they started with. Defaults to `True`.
"""
if _exclude_visitor():
return
redis = _get_redis_connection()
try:
experiment = Experiment.find(redis, experiment_name)
if not experiment:
return
alternative_name = _get_session().get(experiment.key)
if alternative_name:
split_finished = set(session.get('split_finished', []))
if experiment.key not in split_finished:
alternative = Alternative(
redis, alternative_name, experiment_name)
alternative.increment_completion()
if reset:
_get_session().pop(experiment.key, None)
try:
split_finished.remove(experiment.key)
except KeyError:
pass
else:
split_finished.add(experiment.key)
session['split_finished'] = list(split_finished)
except ConnectionError:
if not current_app.config['SPLIT_DB_FAILOVER']:
raise | python | def finished(experiment_name, reset=True):
"""
Track a conversion.
:param experiment_name: Name of the experiment.
:param reset: If set to `True` current user's session is reset so that they
may start the test again in the future. If set to `False` the user
will always see the alternative they started with. Defaults to `True`.
"""
if _exclude_visitor():
return
redis = _get_redis_connection()
try:
experiment = Experiment.find(redis, experiment_name)
if not experiment:
return
alternative_name = _get_session().get(experiment.key)
if alternative_name:
split_finished = set(session.get('split_finished', []))
if experiment.key not in split_finished:
alternative = Alternative(
redis, alternative_name, experiment_name)
alternative.increment_completion()
if reset:
_get_session().pop(experiment.key, None)
try:
split_finished.remove(experiment.key)
except KeyError:
pass
else:
split_finished.add(experiment.key)
session['split_finished'] = list(split_finished)
except ConnectionError:
if not current_app.config['SPLIT_DB_FAILOVER']:
raise | [
"def",
"finished",
"(",
"experiment_name",
",",
"reset",
"=",
"True",
")",
":",
"if",
"_exclude_visitor",
"(",
")",
":",
"return",
"redis",
"=",
"_get_redis_connection",
"(",
")",
"try",
":",
"experiment",
"=",
"Experiment",
".",
"find",
"(",
"redis",
",",
"experiment_name",
")",
"if",
"not",
"experiment",
":",
"return",
"alternative_name",
"=",
"_get_session",
"(",
")",
".",
"get",
"(",
"experiment",
".",
"key",
")",
"if",
"alternative_name",
":",
"split_finished",
"=",
"set",
"(",
"session",
".",
"get",
"(",
"'split_finished'",
",",
"[",
"]",
")",
")",
"if",
"experiment",
".",
"key",
"not",
"in",
"split_finished",
":",
"alternative",
"=",
"Alternative",
"(",
"redis",
",",
"alternative_name",
",",
"experiment_name",
")",
"alternative",
".",
"increment_completion",
"(",
")",
"if",
"reset",
":",
"_get_session",
"(",
")",
".",
"pop",
"(",
"experiment",
".",
"key",
",",
"None",
")",
"try",
":",
"split_finished",
".",
"remove",
"(",
"experiment",
".",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"split_finished",
".",
"add",
"(",
"experiment",
".",
"key",
")",
"session",
"[",
"'split_finished'",
"]",
"=",
"list",
"(",
"split_finished",
")",
"except",
"ConnectionError",
":",
"if",
"not",
"current_app",
".",
"config",
"[",
"'SPLIT_DB_FAILOVER'",
"]",
":",
"raise"
] | Track a conversion.
:param experiment_name: Name of the experiment.
:param reset: If set to `True` current user's session is reset so that they
may start the test again in the future. If set to `False` the user
will always see the alternative they started with. Defaults to `True`. | [
"Track",
"a",
"conversion",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/core.py#L108-L142 |
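A hedged usage sketch for `finished` above — the experiment name, alternatives, and routes are invented for illustration, and the exact extension setup should follow the flask-split documentation:

from flask import Flask
from flask_split import ab_test, finished

app = Flask(__name__)
app.config['REDIS_URL'] = 'redis://localhost:6379'

@app.route('/signup')
def signup():
    # Assign (or recall) an alternative for this visitor.
    color = ab_test('signup_button_color', 'red', 'green')
    return 'button color: ' + color

@app.route('/signup/done')
def signup_done():
    # Record a conversion for the experiment started above.
    finished('signup_button_color')
    return 'thanks!'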
5,125 | jpvanhal/flask-split | flask_split/core.py | _is_robot | def _is_robot():
"""
Return `True` if the current visitor is a robot or spider, or
`False` otherwise.
This function works by comparing the request's user agent with a regular
expression. The regular expression can be configured with the
``SPLIT_ROBOT_REGEX`` setting.
"""
robot_regex = current_app.config['SPLIT_ROBOT_REGEX']
user_agent = request.headers.get('User-Agent', '')
return re.search(robot_regex, user_agent, flags=re.VERBOSE) | python | def _is_robot():
"""
Return `True` if the current visitor is a robot or spider, or
`False` otherwise.
This function works by comparing the request's user agent with a regular
expression. The regular expression can be configured with the
``SPLIT_ROBOT_REGEX`` setting.
"""
robot_regex = current_app.config['SPLIT_ROBOT_REGEX']
user_agent = request.headers.get('User-Agent', '')
return re.search(robot_regex, user_agent, flags=re.VERBOSE) | [
"def",
"_is_robot",
"(",
")",
":",
"robot_regex",
"=",
"current_app",
".",
"config",
"[",
"'SPLIT_ROBOT_REGEX'",
"]",
"user_agent",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"'User-Agent'",
",",
"''",
")",
"return",
"re",
".",
"search",
"(",
"robot_regex",
",",
"user_agent",
",",
"flags",
"=",
"re",
".",
"VERBOSE",
")"
] | Return `True` if the current visitor is a robot or spider, or
`False` otherwise.
This function works by comparing the request's user agent with a regular
expression. The regular expression can be configured with the
``SPLIT_ROBOT_REGEX`` setting. | [
"Return",
"True",
"if",
"the",
"current",
"visitor",
"is",
"a",
"robot",
"or",
"spider",
"or",
"False",
"otherwise",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/core.py#L206-L217 |
5,126 | jpvanhal/flask-split | flask_split/models.py | Experiment.start_time | def start_time(self):
"""The start time of this experiment."""
t = self.redis.hget('experiment_start_times', self.name)
if t:
return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S') | python | def start_time(self):
"""The start time of this experiment."""
t = self.redis.hget('experiment_start_times', self.name)
if t:
return datetime.strptime(t, '%Y-%m-%dT%H:%M:%S') | [
"def",
"start_time",
"(",
"self",
")",
":",
"t",
"=",
"self",
".",
"redis",
".",
"hget",
"(",
"'experiment_start_times'",
",",
"self",
".",
"name",
")",
"if",
"t",
":",
"return",
"datetime",
".",
"strptime",
"(",
"t",
",",
"'%Y-%m-%dT%H:%M:%S'",
")"
] | The start time of this experiment. | [
"The",
"start",
"time",
"of",
"this",
"experiment",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/models.py#L163-L167 |
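The format string above is ISO-8601 without a timezone; a quick illustration of the round trip with an arbitrary timestamp:

from datetime import datetime

t = '2024-05-01T12:30:00'
parsed = datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
assert parsed.isoformat() == t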
5,127 | jpvanhal/flask-split | flask_split/models.py | Experiment.reset | def reset(self):
"""Delete all data for this experiment."""
for alternative in self.alternatives:
alternative.reset()
self.reset_winner()
self.increment_version() | python | def reset(self):
"""Delete all data for this experiment."""
for alternative in self.alternatives:
alternative.reset()
self.reset_winner()
self.increment_version() | [
"def",
"reset",
"(",
"self",
")",
":",
"for",
"alternative",
"in",
"self",
".",
"alternatives",
":",
"alternative",
".",
"reset",
"(",
")",
"self",
".",
"reset_winner",
"(",
")",
"self",
".",
"increment_version",
"(",
")"
] | Delete all data for this experiment. | [
"Delete",
"all",
"data",
"for",
"this",
"experiment",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/models.py#L211-L216 |
5,128 | jpvanhal/flask-split | flask_split/models.py | Experiment.delete | def delete(self):
"""Delete this experiment and all its data."""
for alternative in self.alternatives:
alternative.delete()
self.reset_winner()
self.redis.srem('experiments', self.name)
self.redis.delete(self.name)
self.increment_version() | python | def delete(self):
"""Delete this experiment and all its data."""
for alternative in self.alternatives:
alternative.delete()
self.reset_winner()
self.redis.srem('experiments', self.name)
self.redis.delete(self.name)
self.increment_version() | [
"def",
"delete",
"(",
"self",
")",
":",
"for",
"alternative",
"in",
"self",
".",
"alternatives",
":",
"alternative",
".",
"delete",
"(",
")",
"self",
".",
"reset_winner",
"(",
")",
"self",
".",
"redis",
".",
"srem",
"(",
"'experiments'",
",",
"self",
".",
"name",
")",
"self",
".",
"redis",
".",
"delete",
"(",
"self",
".",
"name",
")",
"self",
".",
"increment_version",
"(",
")"
] | Delete this experiment and all its data. | [
"Delete",
"this",
"experiment",
"and",
"all",
"its",
"data",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/models.py#L218-L225 |
5,129 | jpvanhal/flask-split | flask_split/utils.py | _get_redis_connection | def _get_redis_connection():
"""
Return a Redis connection based on the Flask application's configuration.
The connection parameters are retrieved from `REDIS_URL` configuration
variable.
:return: an instance of :class:`redis.Connection`
"""
url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')
return redis.from_url(url, decode_responses=True) | python | def _get_redis_connection():
"""
Return a Redis connection based on the Flask application's configuration.
The connection parameters are retrieved from `REDIS_URL` configuration
variable.
:return: an instance of :class:`redis.Connection`
"""
url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')
return redis.from_url(url, decode_responses=True) | [
"def",
"_get_redis_connection",
"(",
")",
":",
"url",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'REDIS_URL'",
",",
"'redis://localhost:6379'",
")",
"return",
"redis",
".",
"from_url",
"(",
"url",
",",
"decode_responses",
"=",
"True",
")"
] | Return a Redis connection based on the Flask application's configuration.
The connection parameters are retrieved from `REDIS_URL` configuration
variable.
:return: an instance of :class:`redis.Connection` | [
"Return",
"a",
"Redis",
"connection",
"based",
"on",
"the",
"Flask",
"application",
"s",
"configuration",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/utils.py#L24-L34 |
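Outside Flask, the same connection pattern is just `redis.from_url` with `decode_responses=True` so values come back as strings; the URL below is the assumed default:

import redis

conn = redis.from_url('redis://localhost:6379', decode_responses=True)
conn.set('greeting', 'hello')
print(conn.get('greeting'))  # 'hello' as str, not bytes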
5,130 | jpvanhal/flask-split | flask_split/views.py | set_experiment_winner | def set_experiment_winner(experiment):
"""Mark an alternative as the winner of the experiment."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
alternative_name = request.form.get('alternative')
alternative = Alternative(redis, alternative_name, experiment.name)
if alternative.name in experiment.alternative_names:
experiment.winner = alternative.name
return redirect(url_for('.index')) | python | def set_experiment_winner(experiment):
"""Mark an alternative as the winner of the experiment."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
alternative_name = request.form.get('alternative')
alternative = Alternative(redis, alternative_name, experiment.name)
if alternative.name in experiment.alternative_names:
experiment.winner = alternative.name
return redirect(url_for('.index')) | [
"def",
"set_experiment_winner",
"(",
"experiment",
")",
":",
"redis",
"=",
"_get_redis_connection",
"(",
")",
"experiment",
"=",
"Experiment",
".",
"find",
"(",
"redis",
",",
"experiment",
")",
"if",
"experiment",
":",
"alternative_name",
"=",
"request",
".",
"form",
".",
"get",
"(",
"'alternative'",
")",
"alternative",
"=",
"Alternative",
"(",
"redis",
",",
"alternative_name",
",",
"experiment",
".",
"name",
")",
"if",
"alternative",
".",
"name",
"in",
"experiment",
".",
"alternative_names",
":",
"experiment",
".",
"winner",
"=",
"alternative",
".",
"name",
"return",
"redirect",
"(",
"url_for",
"(",
"'.index'",
")",
")"
] | Mark an alternative as the winner of the experiment. | [
"Mark",
"an",
"alternative",
"as",
"the",
"winner",
"of",
"the",
"experiment",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/views.py#L44-L53 |
5,131 | jpvanhal/flask-split | flask_split/views.py | reset_experiment | def reset_experiment(experiment):
"""Delete all data for an experiment."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
experiment.reset()
return redirect(url_for('.index')) | python | def reset_experiment(experiment):
"""Delete all data for an experiment."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
experiment.reset()
return redirect(url_for('.index')) | [
"def",
"reset_experiment",
"(",
"experiment",
")",
":",
"redis",
"=",
"_get_redis_connection",
"(",
")",
"experiment",
"=",
"Experiment",
".",
"find",
"(",
"redis",
",",
"experiment",
")",
"if",
"experiment",
":",
"experiment",
".",
"reset",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'.index'",
")",
")"
] | Delete all data for an experiment. | [
"Delete",
"all",
"data",
"for",
"an",
"experiment",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/views.py#L57-L63 |
5,132 | jpvanhal/flask-split | flask_split/views.py | delete_experiment | def delete_experiment(experiment):
"""Delete an experiment and all its data."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
experiment.delete()
return redirect(url_for('.index')) | python | def delete_experiment(experiment):
"""Delete an experiment and all its data."""
redis = _get_redis_connection()
experiment = Experiment.find(redis, experiment)
if experiment:
experiment.delete()
return redirect(url_for('.index')) | [
"def",
"delete_experiment",
"(",
"experiment",
")",
":",
"redis",
"=",
"_get_redis_connection",
"(",
")",
"experiment",
"=",
"Experiment",
".",
"find",
"(",
"redis",
",",
"experiment",
")",
"if",
"experiment",
":",
"experiment",
".",
"delete",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"'.index'",
")",
")"
] | Delete an experiment and all its data. | [
"Delete",
"an",
"experiment",
"and",
"all",
"its",
"data",
"."
] | 52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba | https://github.com/jpvanhal/flask-split/blob/52bc9df49b5ce8b0ec436ba09b361a4b0b1793ba/flask_split/views.py#L67-L73 |
5,133 | tobami/littlechef | littlechef/chef.py | _get_ipaddress | def _get_ipaddress(node):
"""Adds the ipaddress attribute to the given node object if not already
present and it is correctly given by ohai
Returns True if ipaddress is added, False otherwise
"""
if "ipaddress" not in node:
with settings(hide('stdout'), warn_only=True):
output = sudo('ohai -l warn ipaddress')
if output.succeeded:
try:
node['ipaddress'] = json.loads(output)[0]
except ValueError:
abort("Could not parse ohai's output for ipaddress"
":\n {0}".format(output))
return True
return False | python | def _get_ipaddress(node):
"""Adds the ipaddress attribute to the given node object if not already
present and it is correctly given by ohai
Returns True if ipaddress is added, False otherwise
"""
if "ipaddress" not in node:
with settings(hide('stdout'), warn_only=True):
output = sudo('ohai -l warn ipaddress')
if output.succeeded:
try:
node['ipaddress'] = json.loads(output)[0]
except ValueError:
abort("Could not parse ohai's output for ipaddress"
":\n {0}".format(output))
return True
return False | [
"def",
"_get_ipaddress",
"(",
"node",
")",
":",
"if",
"\"ipaddress\"",
"not",
"in",
"node",
":",
"with",
"settings",
"(",
"hide",
"(",
"'stdout'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"output",
"=",
"sudo",
"(",
"'ohai -l warn ipaddress'",
")",
"if",
"output",
".",
"succeeded",
":",
"try",
":",
"node",
"[",
"'ipaddress'",
"]",
"=",
"json",
".",
"loads",
"(",
"output",
")",
"[",
"0",
"]",
"except",
"ValueError",
":",
"abort",
"(",
"\"Could not parse ohai's output for ipaddress\"",
"\":\\n {0}\"",
".",
"format",
"(",
"output",
")",
")",
"return",
"True",
"return",
"False"
] | Adds the ipaddress attribute to the given node object if not already
present and it is correctly given by ohai
Returns True if ipaddress is added, False otherwise | [
"Adds",
"the",
"ipaddress",
"attribute",
"to",
"the",
"given",
"node",
"object",
"if",
"not",
"already",
"present",
"and",
"it",
"is",
"correctly",
"given",
"by",
"ohai",
"Returns",
"True",
"if",
"ipaddress",
"is",
"added",
"False",
"otherwise"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L55-L71 |
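`ohai -l warn ipaddress` prints a one-element JSON array, which is why the parse above takes index 0; an illustrative standalone parse with a made-up address:

import json

ohai_output = '[\n  "10.0.0.5"\n]'      # example shape of ohai's output
ipaddress = json.loads(ohai_output)[0]  # '10.0.0.5'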
5,134 | tobami/littlechef | littlechef/chef.py | sync_node | def sync_node(node):
"""Builds, synchronizes and configures a node.
It also injects the ipaddress to the node's config file if not already
existent.
"""
if node.get('dummy') or 'dummy' in node.get('tags', []):
lib.print_header("Skipping dummy: {0}".format(env.host))
return False
current_node = lib.get_node(node['name'])
# Always configure Chef Solo
solo.configure(current_node)
ipaddress = _get_ipaddress(node)
# Everything was configured alright, so save the node configuration
# This is done without credentials, so that we keep the node name used
# by the user and not the hostname or IP translated by .ssh/config
filepath = save_config(node, ipaddress)
try:
# Synchronize the kitchen directory
_synchronize_node(filepath, node)
# Execute Chef Solo
_configure_node()
finally:
_node_cleanup()
return True | python | def sync_node(node):
"""Builds, synchronizes and configures a node.
It also injects the ipaddress to the node's config file if not already
existent.
"""
if node.get('dummy') or 'dummy' in node.get('tags', []):
lib.print_header("Skipping dummy: {0}".format(env.host))
return False
current_node = lib.get_node(node['name'])
# Always configure Chef Solo
solo.configure(current_node)
ipaddress = _get_ipaddress(node)
# Everything was configured alright, so save the node configuration
# This is done without credentials, so that we keep the node name used
# by the user and not the hostname or IP translated by .ssh/config
filepath = save_config(node, ipaddress)
try:
# Synchronize the kitchen directory
_synchronize_node(filepath, node)
# Execute Chef Solo
_configure_node()
finally:
_node_cleanup()
return True | [
"def",
"sync_node",
"(",
"node",
")",
":",
"if",
"node",
".",
"get",
"(",
"'dummy'",
")",
"or",
"'dummy'",
"in",
"node",
".",
"get",
"(",
"'tags'",
",",
"[",
"]",
")",
":",
"lib",
".",
"print_header",
"(",
"\"Skipping dummy: {0}\"",
".",
"format",
"(",
"env",
".",
"host",
")",
")",
"return",
"False",
"current_node",
"=",
"lib",
".",
"get_node",
"(",
"node",
"[",
"'name'",
"]",
")",
"# Always configure Chef Solo",
"solo",
".",
"configure",
"(",
"current_node",
")",
"ipaddress",
"=",
"_get_ipaddress",
"(",
"node",
")",
"# Everything was configured alright, so save the node configuration",
"# This is done without credentials, so that we keep the node name used",
"# by the user and not the hostname or IP translated by .ssh/config",
"filepath",
"=",
"save_config",
"(",
"node",
",",
"ipaddress",
")",
"try",
":",
"# Synchronize the kitchen directory",
"_synchronize_node",
"(",
"filepath",
",",
"node",
")",
"# Execute Chef Solo",
"_configure_node",
"(",
")",
"finally",
":",
"_node_cleanup",
"(",
")",
"return",
"True"
] | Builds, synchronizes and configures a node.
It also injects the ipaddress to the node's config file if not already
existent. | [
"Builds",
"synchronizes",
"and",
"configures",
"a",
"node",
".",
"It",
"also",
"injects",
"the",
"ipaddress",
"to",
"the",
"node",
"s",
"config",
"file",
"if",
"not",
"already",
"existent",
"."
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L86-L110 |
5,135 | tobami/littlechef | littlechef/chef.py | build_dct | def build_dct(dic, keys, value):
"""Builds a dictionary with arbitrary depth out of a key list"""
key = keys.pop(0)
if len(keys):
dic.setdefault(key, {})
build_dct(dic[key], keys, value)
else:
# Transform cookbook default attribute strings into proper booleans
if value == "false":
value = False
elif value == "true":
value = True
# It's a leaf, assign value
dic[key] = deepcopy(value) | python | def build_dct(dic, keys, value):
"""Builds a dictionary with arbitrary depth out of a key list"""
key = keys.pop(0)
if len(keys):
dic.setdefault(key, {})
build_dct(dic[key], keys, value)
else:
# Transform cookbook default attribute strings into proper booleans
if value == "false":
value = False
elif value == "true":
value = True
# It's a leaf, assign value
dic[key] = deepcopy(value) | [
"def",
"build_dct",
"(",
"dic",
",",
"keys",
",",
"value",
")",
":",
"key",
"=",
"keys",
".",
"pop",
"(",
"0",
")",
"if",
"len",
"(",
"keys",
")",
":",
"dic",
".",
"setdefault",
"(",
"key",
",",
"{",
"}",
")",
"build_dct",
"(",
"dic",
"[",
"key",
"]",
",",
"keys",
",",
"value",
")",
"else",
":",
"# Transform cookbook default attribute strings into proper booleans",
"if",
"value",
"==",
"\"false\"",
":",
"value",
"=",
"False",
"elif",
"value",
"==",
"\"true\"",
":",
"value",
"=",
"True",
"# It's a leaf, assign value",
"dic",
"[",
"key",
"]",
"=",
"deepcopy",
"(",
"value",
")"
] | Builds a dictionary with arbitrary depth out of a key list | [
"Builds",
"a",
"dictionary",
"with",
"arbitrary",
"depth",
"out",
"of",
"a",
"key",
"list"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L192-L205 |
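To make the compound-key handling concrete, a hedged usage example of `build_dct` as defined above, with invented cookbook attribute names:

attributes = {}
build_dct(attributes, "apache/listen_ports".split("/"), ["80", "443"])
build_dct(attributes, "apache/ssl/enabled".split("/"), "true")
# The string "true" is coerced to a boolean, so attributes is now:
# {'apache': {'listen_ports': ['80', '443'], 'ssl': {'enabled': True}}}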
5,136 | tobami/littlechef | littlechef/chef.py | update_dct | def update_dct(dic1, dic2):
"""Merges two dictionaries recursively
dic2 will have preference over dic1
"""
for key, val in dic2.items():
if isinstance(val, dict):
dic1.setdefault(key, {})
update_dct(dic1[key], val)
else:
dic1[key] = val | python | def update_dct(dic1, dic2):
"""Merges two dictionaries recursively
dic2 will have preference over dic1
"""
for key, val in dic2.items():
if isinstance(val, dict):
dic1.setdefault(key, {})
update_dct(dic1[key], val)
else:
dic1[key] = val | [
"def",
"update_dct",
"(",
"dic1",
",",
"dic2",
")",
":",
"for",
"key",
",",
"val",
"in",
"dic2",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"dic1",
".",
"setdefault",
"(",
"key",
",",
"{",
"}",
")",
"update_dct",
"(",
"dic1",
"[",
"key",
"]",
",",
"val",
")",
"else",
":",
"dic1",
"[",
"key",
"]",
"=",
"val"
] | Merges two dictionaries recursively
dic2 will have preference over dic1 | [
"Merges",
"two",
"dictionaries",
"recursively",
"dic2",
"will",
"have",
"preference",
"over",
"dic1"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L208-L218 |
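And a small sketch of `update_dct`'s merge semantics — nested dicts are merged key by key while conflicting leaves are won by `dic2`; the attribute names are made up:

defaults  = {'apache': {'port': 80, 'modules': {'ssl': False}}}
overrides = {'apache': {'modules': {'ssl': True}, 'docroot': '/srv/www'}}
update_dct(defaults, overrides)
# defaults is now:
# {'apache': {'port': 80, 'modules': {'ssl': True}, 'docroot': '/srv/www'}}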
5,137 | tobami/littlechef | littlechef/chef.py | _add_merged_attributes | def _add_merged_attributes(node, all_recipes, all_roles):
"""Merges attributes from cookbooks, node and roles
Chef Attribute precedence:
http://docs.opscode.com/essentials_cookbook_attribute_files.html#attribute-precedence
LittleChef implements, in precedence order:
- Cookbook default
- Environment default
- Role default
- Node normal
- Role override
- Environment override
NOTE: In order for cookbook attributes to be read, they need to be
correctly defined in its metadata.json
"""
# Get cookbooks from extended recipes
attributes = {}
for recipe in node['recipes']:
# Find this recipe
found = False
for r in all_recipes:
if recipe == r['name']:
found = True
for attr in r['attributes']:
if r['attributes'][attr].get('type') == "hash":
value = {}
else:
value = r['attributes'][attr].get('default')
# Attribute dictionaries are defined as a single
# compound key. Split and build proper dict
build_dct(attributes, attr.split("/"), value)
if not found:
error = "Could not find recipe '{0}' while ".format(recipe)
error += "building node data bag for '{0}'".format(node['name'])
abort(error)
# Get default role attributes
for role in node['roles']:
for r in all_roles:
if role == r['name']:
update_dct(attributes, r.get('default_attributes', {}))
# Get default environment attributes
environment = lib.get_environment(node['chef_environment'])
update_dct(attributes, environment.get('default_attributes', {}))
# Get normal node attributes
non_attribute_fields = [
'id', 'name', 'role', 'roles', 'recipes', 'run_list', 'ipaddress']
node_attributes = {}
for key in node:
if key in non_attribute_fields:
continue
node_attributes[key] = node[key]
update_dct(attributes, node_attributes)
# Get override role attributes
for role in node['roles']:
for r in all_roles:
if role == r['name']:
update_dct(attributes, r.get('override_attributes', {}))
# Get override environment attributes
update_dct(attributes, environment.get('override_attributes', {}))
# Merge back to the original node object
node.update(attributes) | python | def _add_merged_attributes(node, all_recipes, all_roles):
"""Merges attributes from cookbooks, node and roles
Chef Attribute precedence:
http://docs.opscode.com/essentials_cookbook_attribute_files.html#attribute-precedence
LittleChef implements, in precedence order:
- Cookbook default
- Environment default
- Role default
- Node normal
- Role override
- Environment override
NOTE: In order for cookbook attributes to be read, they need to be
correctly defined in its metadata.json
"""
# Get cookbooks from extended recipes
attributes = {}
for recipe in node['recipes']:
# Find this recipe
found = False
for r in all_recipes:
if recipe == r['name']:
found = True
for attr in r['attributes']:
if r['attributes'][attr].get('type') == "hash":
value = {}
else:
value = r['attributes'][attr].get('default')
# Attribute dictionaries are defined as a single
# compound key. Split and build proper dict
build_dct(attributes, attr.split("/"), value)
if not found:
error = "Could not find recipe '{0}' while ".format(recipe)
error += "building node data bag for '{0}'".format(node['name'])
abort(error)
# Get default role attributes
for role in node['roles']:
for r in all_roles:
if role == r['name']:
update_dct(attributes, r.get('default_attributes', {}))
# Get default environment attributes
environment = lib.get_environment(node['chef_environment'])
update_dct(attributes, environment.get('default_attributes', {}))
# Get normal node attributes
non_attribute_fields = [
'id', 'name', 'role', 'roles', 'recipes', 'run_list', 'ipaddress']
node_attributes = {}
for key in node:
if key in non_attribute_fields:
continue
node_attributes[key] = node[key]
update_dct(attributes, node_attributes)
# Get override role attributes
for role in node['roles']:
for r in all_roles:
if role == r['name']:
update_dct(attributes, r.get('override_attributes', {}))
# Get override environment attributes
update_dct(attributes, environment.get('override_attributes', {}))
# Merge back to the original node object
node.update(attributes) | [
"def",
"_add_merged_attributes",
"(",
"node",
",",
"all_recipes",
",",
"all_roles",
")",
":",
"# Get cookbooks from extended recipes",
"attributes",
"=",
"{",
"}",
"for",
"recipe",
"in",
"node",
"[",
"'recipes'",
"]",
":",
"# Find this recipe",
"found",
"=",
"False",
"for",
"r",
"in",
"all_recipes",
":",
"if",
"recipe",
"==",
"r",
"[",
"'name'",
"]",
":",
"found",
"=",
"True",
"for",
"attr",
"in",
"r",
"[",
"'attributes'",
"]",
":",
"if",
"r",
"[",
"'attributes'",
"]",
"[",
"attr",
"]",
".",
"get",
"(",
"'type'",
")",
"==",
"\"hash\"",
":",
"value",
"=",
"{",
"}",
"else",
":",
"value",
"=",
"r",
"[",
"'attributes'",
"]",
"[",
"attr",
"]",
".",
"get",
"(",
"'default'",
")",
"# Attribute dictionaries are defined as a single",
"# compound key. Split and build proper dict",
"build_dct",
"(",
"attributes",
",",
"attr",
".",
"split",
"(",
"\"/\"",
")",
",",
"value",
")",
"if",
"not",
"found",
":",
"error",
"=",
"\"Could not find recipe '{0}' while \"",
".",
"format",
"(",
"recipe",
")",
"error",
"+=",
"\"building node data bag for '{0}'\"",
".",
"format",
"(",
"node",
"[",
"'name'",
"]",
")",
"abort",
"(",
"error",
")",
"# Get default role attributes",
"for",
"role",
"in",
"node",
"[",
"'roles'",
"]",
":",
"for",
"r",
"in",
"all_roles",
":",
"if",
"role",
"==",
"r",
"[",
"'name'",
"]",
":",
"update_dct",
"(",
"attributes",
",",
"r",
".",
"get",
"(",
"'default_attributes'",
",",
"{",
"}",
")",
")",
"# Get default environment attributes",
"environment",
"=",
"lib",
".",
"get_environment",
"(",
"node",
"[",
"'chef_environment'",
"]",
")",
"update_dct",
"(",
"attributes",
",",
"environment",
".",
"get",
"(",
"'default_attributes'",
",",
"{",
"}",
")",
")",
"# Get normal node attributes",
"non_attribute_fields",
"=",
"[",
"'id'",
",",
"'name'",
",",
"'role'",
",",
"'roles'",
",",
"'recipes'",
",",
"'run_list'",
",",
"'ipaddress'",
"]",
"node_attributes",
"=",
"{",
"}",
"for",
"key",
"in",
"node",
":",
"if",
"key",
"in",
"non_attribute_fields",
":",
"continue",
"node_attributes",
"[",
"key",
"]",
"=",
"node",
"[",
"key",
"]",
"update_dct",
"(",
"attributes",
",",
"node_attributes",
")",
"# Get override role attributes",
"for",
"role",
"in",
"node",
"[",
"'roles'",
"]",
":",
"for",
"r",
"in",
"all_roles",
":",
"if",
"role",
"==",
"r",
"[",
"'name'",
"]",
":",
"update_dct",
"(",
"attributes",
",",
"r",
".",
"get",
"(",
"'override_attributes'",
",",
"{",
"}",
")",
")",
"# Get override environment attributes",
"update_dct",
"(",
"attributes",
",",
"environment",
".",
"get",
"(",
"'override_attributes'",
",",
"{",
"}",
")",
")",
"# Merge back to the original node object",
"node",
".",
"update",
"(",
"attributes",
")"
] | Merges attributes from cookbooks, node and roles
Chef Attribute precedence:
http://docs.opscode.com/essentials_cookbook_attribute_files.html#attribute-precedence
LittleChef implements, in precedence order:
- Cookbook default
- Environment default
- Role default
- Node normal
- Role override
- Environment override
NOTE: In order for cookbook attributes to be read, they need to be
correctly defined in its metadata.json | [
"Merges",
"attributes",
"from",
"cookbooks",
"node",
"and",
"roles"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L232-L300 |
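The precedence list in that docstring amounts to applying `update_dct` layer by layer, later layers overriding earlier ones. A toy illustration with invented layers (not real cookbook data):

merged = {}
layers = [
    {'mysql': {'port': 3306, 'bind': '127.0.0.1'}},      # cookbook default
    {'mysql': {'bind': '0.0.0.0'}},                      # environment default
    {'mysql': {'tunable': {'max_connections': 100}}},    # role default
    {'mysql': {'tunable': {'max_connections': 500}}},    # node normal
    {'mysql': {'port': 3307}},                           # role override
]
for layer in layers:
    update_dct(merged, layer)
# merged == {'mysql': {'port': 3307, 'bind': '0.0.0.0',
#                      'tunable': {'max_connections': 500}}}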
5,138 | tobami/littlechef | littlechef/chef.py | build_node_data_bag | def build_node_data_bag():
"""Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles
"""
nodes = lib.get_nodes()
node_data_bag_path = os.path.join('data_bags', 'node')
# In case there are leftovers
remove_local_node_data_bag()
os.makedirs(node_data_bag_path)
all_recipes = lib.get_recipes()
all_roles = lib.get_roles()
for node in nodes:
# Dots are not allowed (only alphanumeric), substitute by underscores
node['id'] = node['name'].replace('.', '_')
# Build extended role list
node['role'] = lib.get_roles_in_node(node)
node['roles'] = node['role'][:]
for role in node['role']:
node['roles'].extend(lib.get_roles_in_role(role))
node['roles'] = list(set(node['roles']))
# Build extended recipe list
node['recipes'] = lib.get_recipes_in_node(node)
# Add recipes found inside each roles in the extended role list
for role in node['roles']:
node['recipes'].extend(lib.get_recipes_in_role(role))
node['recipes'] = list(set(node['recipes']))
# Add node attributes
_add_merged_attributes(node, all_recipes, all_roles)
_add_automatic_attributes(node)
# Save node data bag item
with open(os.path.join(
'data_bags', 'node', node['id'] + '.json'), 'w') as f:
f.write(json.dumps(node)) | python | def build_node_data_bag():
"""Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles
"""
nodes = lib.get_nodes()
node_data_bag_path = os.path.join('data_bags', 'node')
# In case there are leftovers
remove_local_node_data_bag()
os.makedirs(node_data_bag_path)
all_recipes = lib.get_recipes()
all_roles = lib.get_roles()
for node in nodes:
# Dots are not allowed (only alphanumeric), substitute by underscores
node['id'] = node['name'].replace('.', '_')
# Build extended role list
node['role'] = lib.get_roles_in_node(node)
node['roles'] = node['role'][:]
for role in node['role']:
node['roles'].extend(lib.get_roles_in_role(role))
node['roles'] = list(set(node['roles']))
# Build extended recipe list
node['recipes'] = lib.get_recipes_in_node(node)
# Add recipes found inside each roles in the extended role list
for role in node['roles']:
node['recipes'].extend(lib.get_recipes_in_role(role))
node['recipes'] = list(set(node['recipes']))
# Add node attributes
_add_merged_attributes(node, all_recipes, all_roles)
_add_automatic_attributes(node)
# Save node data bag item
with open(os.path.join(
'data_bags', 'node', node['id'] + '.json'), 'w') as f:
f.write(json.dumps(node)) | [
"def",
"build_node_data_bag",
"(",
")",
":",
"nodes",
"=",
"lib",
".",
"get_nodes",
"(",
")",
"node_data_bag_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'data_bags'",
",",
"'node'",
")",
"# In case there are leftovers",
"remove_local_node_data_bag",
"(",
")",
"os",
".",
"makedirs",
"(",
"node_data_bag_path",
")",
"all_recipes",
"=",
"lib",
".",
"get_recipes",
"(",
")",
"all_roles",
"=",
"lib",
".",
"get_roles",
"(",
")",
"for",
"node",
"in",
"nodes",
":",
"# Dots are not allowed (only alphanumeric), substitute by underscores",
"node",
"[",
"'id'",
"]",
"=",
"node",
"[",
"'name'",
"]",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"# Build extended role list",
"node",
"[",
"'role'",
"]",
"=",
"lib",
".",
"get_roles_in_node",
"(",
"node",
")",
"node",
"[",
"'roles'",
"]",
"=",
"node",
"[",
"'role'",
"]",
"[",
":",
"]",
"for",
"role",
"in",
"node",
"[",
"'role'",
"]",
":",
"node",
"[",
"'roles'",
"]",
".",
"extend",
"(",
"lib",
".",
"get_roles_in_role",
"(",
"role",
")",
")",
"node",
"[",
"'roles'",
"]",
"=",
"list",
"(",
"set",
"(",
"node",
"[",
"'roles'",
"]",
")",
")",
"# Build extended recipe list",
"node",
"[",
"'recipes'",
"]",
"=",
"lib",
".",
"get_recipes_in_node",
"(",
"node",
")",
"# Add recipes found inside each roles in the extended role list",
"for",
"role",
"in",
"node",
"[",
"'roles'",
"]",
":",
"node",
"[",
"'recipes'",
"]",
".",
"extend",
"(",
"lib",
".",
"get_recipes_in_role",
"(",
"role",
")",
")",
"node",
"[",
"'recipes'",
"]",
"=",
"list",
"(",
"set",
"(",
"node",
"[",
"'recipes'",
"]",
")",
")",
"# Add node attributes",
"_add_merged_attributes",
"(",
"node",
",",
"all_recipes",
",",
"all_roles",
")",
"_add_automatic_attributes",
"(",
"node",
")",
"# Save node data bag item",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"'data_bags'",
",",
"'node'",
",",
"node",
"[",
"'id'",
"]",
"+",
"'.json'",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"node",
")",
")"
] | Builds one 'node' data bag item per file found in the 'nodes' directory
Automatic attributes for a node item:
'id': It adds data bag 'id', same as filename but with underscores
'name': same as the filename
'fqdn': same as the filename (LittleChef filenames should be fqdns)
'hostname': Uses the first part of the filename as the hostname
(until it finds a period) minus the .json extension
'domain': filename minus the first part of the filename (hostname)
minus the .json extension
In addition, it will contain the merged attributes from:
All default cookbook attributes corresponding to the node
All attributes found in nodes/<item>.json file
Default and override attributes from all roles | [
"Builds",
"one",
"node",
"data",
"bag",
"item",
"per",
"file",
"found",
"in",
"the",
"nodes",
"directory"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L303-L352 |
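One detail worth noting in `build_node_data_bag`: per the comment above, data bag item ids cannot contain dots, hence the substitution on the node name. Illustrative:

name = 'web1.example.com'
item_id = name.replace('.', '_')  # 'web1_example_com'
# the item is then written to data_bags/node/web1_example_com.json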
5,139 | tobami/littlechef | littlechef/chef.py | remove_local_node_data_bag | def remove_local_node_data_bag():
"""Removes generated 'node' data_bag locally"""
node_data_bag_path = os.path.join('data_bags', 'node')
if os.path.exists(node_data_bag_path):
shutil.rmtree(node_data_bag_path) | python | def remove_local_node_data_bag():
"""Removes generated 'node' data_bag locally"""
node_data_bag_path = os.path.join('data_bags', 'node')
if os.path.exists(node_data_bag_path):
shutil.rmtree(node_data_bag_path) | [
"def",
"remove_local_node_data_bag",
"(",
")",
":",
"node_data_bag_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'data_bags'",
",",
"'node'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"node_data_bag_path",
")",
":",
"shutil",
".",
"rmtree",
"(",
"node_data_bag_path",
")"
] | Removes generated 'node' data_bag locally | [
"Removes",
"generated",
"node",
"data_bag",
"locally"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L355-L359 |
5,140 | tobami/littlechef | littlechef/chef.py | ensure_berksfile_cookbooks_are_installed | def ensure_berksfile_cookbooks_are_installed():
"""Run 'berks vendor' to berksfile cookbooks directory"""
msg = "Vendoring cookbooks from Berksfile {0} to directory {1}..."
print(msg.format(env.berksfile, env.berksfile_cookbooks_directory))
run_vendor = True
cookbooks_dir = env.berksfile_cookbooks_directory
berksfile_lock_path = cookbooks_dir+'/Berksfile.lock'
berksfile_lock_exists = os.path.isfile(berksfile_lock_path)
cookbooks_dir_exists = os.path.isdir(cookbooks_dir)
if cookbooks_dir_exists and berksfile_lock_exists:
berksfile_mtime = os.stat('Berksfile').st_mtime
cookbooks_mtime = os.stat(berksfile_lock_path).st_mtime
run_vendor = berksfile_mtime > cookbooks_mtime
if run_vendor:
if cookbooks_dir_exists:
shutil.rmtree(env.berksfile_cookbooks_directory)
p = subprocess.Popen(['berks', 'vendor', env.berksfile_cookbooks_directory],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if env.verbose or p.returncode:
print stdout, stderr | python | def ensure_berksfile_cookbooks_are_installed():
"""Run 'berks vendor' to berksfile cookbooks directory"""
msg = "Vendoring cookbooks from Berksfile {0} to directory {1}..."
print(msg.format(env.berksfile, env.berksfile_cookbooks_directory))
run_vendor = True
cookbooks_dir = env.berksfile_cookbooks_directory
berksfile_lock_path = cookbooks_dir+'/Berksfile.lock'
berksfile_lock_exists = os.path.isfile(berksfile_lock_path)
cookbooks_dir_exists = os.path.isdir(cookbooks_dir)
if cookbooks_dir_exists and berksfile_lock_exists:
berksfile_mtime = os.stat('Berksfile').st_mtime
cookbooks_mtime = os.stat(berksfile_lock_path).st_mtime
run_vendor = berksfile_mtime > cookbooks_mtime
if run_vendor:
if cookbooks_dir_exists:
shutil.rmtree(env.berksfile_cookbooks_directory)
p = subprocess.Popen(['berks', 'vendor', env.berksfile_cookbooks_directory],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if env.verbose or p.returncode:
print stdout, stderr | [
"def",
"ensure_berksfile_cookbooks_are_installed",
"(",
")",
":",
"msg",
"=",
"\"Vendoring cookbooks from Berksfile {0} to directory {1}...\"",
"print",
"(",
"msg",
".",
"format",
"(",
"env",
".",
"berksfile",
",",
"env",
".",
"berksfile_cookbooks_directory",
")",
")",
"run_vendor",
"=",
"True",
"cookbooks_dir",
"=",
"env",
".",
"berksfile_cookbooks_directory",
"berksfile_lock_path",
"=",
"cookbooks_dir",
"+",
"'/Berksfile.lock'",
"berksfile_lock_exists",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"berksfile_lock_path",
")",
"cookbooks_dir_exists",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"cookbooks_dir",
")",
"if",
"cookbooks_dir_exists",
"and",
"berksfile_lock_exists",
":",
"berksfile_mtime",
"=",
"os",
".",
"stat",
"(",
"'Berksfile'",
")",
".",
"st_mtime",
"cookbooks_mtime",
"=",
"os",
".",
"stat",
"(",
"berksfile_lock_path",
")",
".",
"st_mtime",
"run_vendor",
"=",
"berksfile_mtime",
">",
"cookbooks_mtime",
"if",
"run_vendor",
":",
"if",
"cookbooks_dir_exists",
":",
"shutil",
".",
"rmtree",
"(",
"env",
".",
"berksfile_cookbooks_directory",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'berks'",
",",
"'vendor'",
",",
"env",
".",
"berksfile_cookbooks_directory",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"env",
".",
"verbose",
"or",
"p",
".",
"returncode",
":",
"print",
"stdout",
",",
"stderr"
] | Run 'berks vendor' to berksfile cookbooks directory | [
"Run",
"berks",
"vendor",
"to",
"berksfile",
"cookbooks",
"directory"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L362-L388 |
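The re-vendor decision above is an mtime comparison between the Berksfile and the vendored lockfile; a standalone sketch of that check (paths are assumptions):

import os

def needs_vendor(berksfile='Berksfile',
                 lock_path='vendored_cookbooks/Berksfile.lock'):
    # Vendor again when nothing has been vendored yet,
    # or when the Berksfile changed after the last vendor run.
    if not os.path.isfile(lock_path):
        return True
    return os.stat(berksfile).st_mtime > os.stat(lock_path).st_mtime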
5,141 | tobami/littlechef | littlechef/chef.py | _remove_remote_node_data_bag | def _remove_remote_node_data_bag():
"""Removes generated 'node' data_bag from the remote node"""
node_data_bag_path = os.path.join(env.node_work_path, 'data_bags', 'node')
if exists(node_data_bag_path):
sudo("rm -rf {0}".format(node_data_bag_path)) | python | def _remove_remote_node_data_bag():
"""Removes generated 'node' data_bag from the remote node"""
node_data_bag_path = os.path.join(env.node_work_path, 'data_bags', 'node')
if exists(node_data_bag_path):
sudo("rm -rf {0}".format(node_data_bag_path)) | [
"def",
"_remove_remote_node_data_bag",
"(",
")",
":",
"node_data_bag_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
".",
"node_work_path",
",",
"'data_bags'",
",",
"'node'",
")",
"if",
"exists",
"(",
"node_data_bag_path",
")",
":",
"sudo",
"(",
"\"rm -rf {0}\"",
".",
"format",
"(",
"node_data_bag_path",
")",
")"
] | Removes generated 'node' data_bag from the remote node | [
"Removes",
"generated",
"node",
"data_bag",
"from",
"the",
"remote",
"node"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L391-L395 |
5,142 | tobami/littlechef | littlechef/chef.py | _remove_remote_data_bags | def _remove_remote_data_bags():
"""Remove remote data bags, so it won't leak any sensitive information"""
data_bags_path = os.path.join(env.node_work_path, 'data_bags')
if exists(data_bags_path):
sudo("rm -rf {0}".format(data_bags_path)) | python | def _remove_remote_data_bags():
"""Remove remote data bags, so it won't leak any sensitive information"""
data_bags_path = os.path.join(env.node_work_path, 'data_bags')
if exists(data_bags_path):
sudo("rm -rf {0}".format(data_bags_path)) | [
"def",
"_remove_remote_data_bags",
"(",
")",
":",
"data_bags_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
".",
"node_work_path",
",",
"'data_bags'",
")",
"if",
"exists",
"(",
"data_bags_path",
")",
":",
"sudo",
"(",
"\"rm -rf {0}\"",
".",
"format",
"(",
"data_bags_path",
")",
")"
] | Remove remote data bags, so it won't leak any sensitive information | [
"Remove",
"remote",
"data",
"bags",
"so",
"it",
"won",
"t",
"leak",
"any",
"sensitive",
"information"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L397-L401 |
5,143 | tobami/littlechef | littlechef/chef.py | _configure_node | def _configure_node():
"""Exectutes chef-solo to apply roles and recipes to a node"""
print("")
msg = "Cooking..."
if env.parallel:
msg = "[{0}]: {1}".format(env.host_string, msg)
print(msg)
# Backup last report
with settings(hide('stdout', 'warnings', 'running'), warn_only=True):
sudo("mv {0} {0}.1".format(LOGFILE))
# Build chef-solo command
cmd = "RUBYOPT=-Ku chef-solo"
if whyrun:
cmd += " --why-run"
cmd += ' -l {0} -j /etc/chef/node.json'.format(env.loglevel)
if ENABLE_LOGS:
cmd += ' | tee {0}'.format(LOGFILE)
if env.loglevel == "debug":
print("Executing Chef Solo with the following command:\n"
"{0}".format(cmd))
with settings(hide('warnings', 'running'), warn_only=True):
output = sudo(cmd)
if (output.failed or "FATAL: Stacktrace dumped" in output or
("Chef Run complete" not in output and
"Report handlers complete" not in output)):
if 'chef-solo: command not found' in output:
print(
colors.red(
"\nFAILED: Chef Solo is not installed on this node"))
print(
"Type 'fix node:{0} deploy_chef' to install it".format(
env.host))
abort("")
else:
print(colors.red(
"\nFAILED: chef-solo could not finish configuring the node\n"))
import sys
sys.exit(1)
else:
msg = "\n"
if env.parallel:
msg += "[{0}]: ".format(env.host_string)
msg += "SUCCESS: Node correctly configured"
print(colors.green(msg)) | python | def _configure_node():
"""Exectutes chef-solo to apply roles and recipes to a node"""
print("")
msg = "Cooking..."
if env.parallel:
msg = "[{0}]: {1}".format(env.host_string, msg)
print(msg)
# Backup last report
with settings(hide('stdout', 'warnings', 'running'), warn_only=True):
sudo("mv {0} {0}.1".format(LOGFILE))
# Build chef-solo command
cmd = "RUBYOPT=-Ku chef-solo"
if whyrun:
cmd += " --why-run"
cmd += ' -l {0} -j /etc/chef/node.json'.format(env.loglevel)
if ENABLE_LOGS:
cmd += ' | tee {0}'.format(LOGFILE)
if env.loglevel == "debug":
print("Executing Chef Solo with the following command:\n"
"{0}".format(cmd))
with settings(hide('warnings', 'running'), warn_only=True):
output = sudo(cmd)
if (output.failed or "FATAL: Stacktrace dumped" in output or
("Chef Run complete" not in output and
"Report handlers complete" not in output)):
if 'chef-solo: command not found' in output:
print(
colors.red(
"\nFAILED: Chef Solo is not installed on this node"))
print(
"Type 'fix node:{0} deploy_chef' to install it".format(
env.host))
abort("")
else:
print(colors.red(
"\nFAILED: chef-solo could not finish configuring the node\n"))
import sys
sys.exit(1)
else:
msg = "\n"
if env.parallel:
msg += "[{0}]: ".format(env.host_string)
msg += "SUCCESS: Node correctly configured"
print(colors.green(msg)) | [
"def",
"_configure_node",
"(",
")",
":",
"print",
"(",
"\"\"",
")",
"msg",
"=",
"\"Cooking...\"",
"if",
"env",
".",
"parallel",
":",
"msg",
"=",
"\"[{0}]: {1}\"",
".",
"format",
"(",
"env",
".",
"host_string",
",",
"msg",
")",
"print",
"(",
"msg",
")",
"# Backup last report",
"with",
"settings",
"(",
"hide",
"(",
"'stdout'",
",",
"'warnings'",
",",
"'running'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"sudo",
"(",
"\"mv {0} {0}.1\"",
".",
"format",
"(",
"LOGFILE",
")",
")",
"# Build chef-solo command",
"cmd",
"=",
"\"RUBYOPT=-Ku chef-solo\"",
"if",
"whyrun",
":",
"cmd",
"+=",
"\" --why-run\"",
"cmd",
"+=",
"' -l {0} -j /etc/chef/node.json'",
".",
"format",
"(",
"env",
".",
"loglevel",
")",
"if",
"ENABLE_LOGS",
":",
"cmd",
"+=",
"' | tee {0}'",
".",
"format",
"(",
"LOGFILE",
")",
"if",
"env",
".",
"loglevel",
"==",
"\"debug\"",
":",
"print",
"(",
"\"Executing Chef Solo with the following command:\\n\"",
"\"{0}\"",
".",
"format",
"(",
"cmd",
")",
")",
"with",
"settings",
"(",
"hide",
"(",
"'warnings'",
",",
"'running'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"output",
"=",
"sudo",
"(",
"cmd",
")",
"if",
"(",
"output",
".",
"failed",
"or",
"\"FATAL: Stacktrace dumped\"",
"in",
"output",
"or",
"(",
"\"Chef Run complete\"",
"not",
"in",
"output",
"and",
"\"Report handlers complete\"",
"not",
"in",
"output",
")",
")",
":",
"if",
"'chef-solo: command not found'",
"in",
"output",
":",
"print",
"(",
"colors",
".",
"red",
"(",
"\"\\nFAILED: Chef Solo is not installed on this node\"",
")",
")",
"print",
"(",
"\"Type 'fix node:{0} deploy_chef' to install it\"",
".",
"format",
"(",
"env",
".",
"host",
")",
")",
"abort",
"(",
"\"\"",
")",
"else",
":",
"print",
"(",
"colors",
".",
"red",
"(",
"\"\\nFAILED: chef-solo could not finish configuring the node\\n\"",
")",
")",
"import",
"sys",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"msg",
"=",
"\"\\n\"",
"if",
"env",
".",
"parallel",
":",
"msg",
"+=",
"\"[{0}]: \"",
".",
"format",
"(",
"env",
".",
"host_string",
")",
"msg",
"+=",
"\"SUCCESS: Node correctly configured\"",
"print",
"(",
"colors",
".",
"green",
"(",
"msg",
")",
")"
] | Exectutes chef-solo to apply roles and recipes to a node | [
"Exectutes",
"chef",
"-",
"solo",
"to",
"apply",
"roles",
"and",
"recipes",
"to",
"a",
"node"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/chef.py#L431-L474 |
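For reference, the command string assembled in `_configure_node` comes out roughly as below; the log level and log path are illustrative values, not the project's actual defaults:

whyrun, loglevel, logfile = False, 'info', '/var/log/chef/solo.log'
cmd = "RUBYOPT=-Ku chef-solo"
if whyrun:
    cmd += " --why-run"
cmd += ' -l {0} -j /etc/chef/node.json'.format(loglevel)
cmd += ' | tee {0}'.format(logfile)
# RUBYOPT=-Ku chef-solo -l info -j /etc/chef/node.json | tee /var/log/chef/solo.log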
5,144 | tobami/littlechef | littlechef/lib.py | _resolve_hostname | def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name | python | def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name | [
"def",
"_resolve_hostname",
"(",
"name",
")",
":",
"if",
"env",
".",
"ssh_config",
"is",
"None",
":",
"return",
"name",
"elif",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"nodes\"",
",",
"name",
"+",
"\".json\"",
")",
")",
":",
"resolved_name",
"=",
"env",
".",
"ssh_config",
".",
"lookup",
"(",
"name",
")",
"[",
"'hostname'",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"nodes\"",
",",
"resolved_name",
"+",
"\".json\"",
")",
")",
":",
"name",
"=",
"resolved_name",
"return",
"name"
] | Returns resolved hostname using the ssh config | [
"Returns",
"resolved",
"hostname",
"using",
"the",
"ssh",
"config"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L31-L39 |
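The `env.ssh_config` used above is a parsed ssh_config; the same hostname lookup can be reproduced with paramiko directly — the host alias below is made up:

import os
import paramiko

ssh_config = paramiko.SSHConfig()
with open(os.path.expanduser('~/.ssh/config')) as f:
    ssh_config.parse(f)

# An alias such as "web1" resolves to its HostName directive (or to itself).
print(ssh_config.lookup('web1').get('hostname'))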
5,145 | tobami/littlechef | littlechef/lib.py | get_environment | def get_environment(name):
"""Returns a JSON environment file as a dictionary"""
if name == "_default":
return env_from_template(name)
filename = os.path.join("environments", name + ".json")
try:
with open(filename) as f:
try:
return json.loads(f.read())
except ValueError as e:
msg = 'LittleChef found the following error in'
msg += ' "{0}":\n {1}'.format(filename, str(e))
abort(msg)
except IOError:
raise FileNotFoundError('File {0} not found'.format(filename)) | python | def get_environment(name):
"""Returns a JSON environment file as a dictionary"""
if name == "_default":
return env_from_template(name)
filename = os.path.join("environments", name + ".json")
try:
with open(filename) as f:
try:
return json.loads(f.read())
except ValueError as e:
msg = 'LittleChef found the following error in'
msg += ' "{0}":\n {1}'.format(filename, str(e))
abort(msg)
except IOError:
raise FileNotFoundError('File {0} not found'.format(filename)) | [
"def",
"get_environment",
"(",
"name",
")",
":",
"if",
"name",
"==",
"\"_default\"",
":",
"return",
"env_from_template",
"(",
"name",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"environments\"",
",",
"name",
"+",
"\".json\"",
")",
"try",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"try",
":",
"return",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"msg",
"=",
"'LittleChef found the following error in'",
"msg",
"+=",
"' \"{0}\":\\n {1}'",
".",
"format",
"(",
"filename",
",",
"str",
"(",
"e",
")",
")",
"abort",
"(",
"msg",
")",
"except",
"IOError",
":",
"raise",
"FileNotFoundError",
"(",
"'File {0} not found'",
".",
"format",
"(",
"filename",
")",
")"
] | Returns a JSON environment file as a dictionary | [
"Returns",
"a",
"JSON",
"environment",
"file",
"as",
"a",
"dictionary"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L62-L76 |
5,146 | tobami/littlechef | littlechef/lib.py | get_environments | def get_environments():
"""Gets all environments found in the 'environments' directory"""
envs = []
for root, subfolders, files in os.walk('environments'):
for filename in files:
if filename.endswith(".json"):
path = os.path.join(
root[len('environments'):], filename[:-len('.json')])
envs.append(get_environment(path))
return sorted(envs, key=lambda x: x['name']) | python | def get_environments():
"""Gets all environments found in the 'environments' directory"""
envs = []
for root, subfolders, files in os.walk('environments'):
for filename in files:
if filename.endswith(".json"):
path = os.path.join(
root[len('environments'):], filename[:-len('.json')])
envs.append(get_environment(path))
return sorted(envs, key=lambda x: x['name']) | [
"def",
"get_environments",
"(",
")",
":",
"envs",
"=",
"[",
"]",
"for",
"root",
",",
"subfolders",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"'environments'",
")",
":",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
"[",
"len",
"(",
"'environments'",
")",
":",
"]",
",",
"filename",
"[",
":",
"-",
"len",
"(",
"'.json'",
")",
"]",
")",
"envs",
".",
"append",
"(",
"get_environment",
"(",
"path",
")",
")",
"return",
"sorted",
"(",
"envs",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'name'",
"]",
")"
] | Gets all environments found in the 'environments' directory | [
"Gets",
"all",
"environments",
"found",
"in",
"the",
"environments",
"directory"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L79-L88 |
5,147 | tobami/littlechef | littlechef/lib.py | get_node | def get_node(name, merged=False):
"""Returns a JSON node file as a dictionary"""
if merged:
node_path = os.path.join("data_bags", "node", name.replace('.', '_') + ".json")
else:
node_path = os.path.join("nodes", name + ".json")
if os.path.exists(node_path):
# Read node.json
with open(node_path, 'r') as f:
try:
node = json.loads(f.read())
except ValueError as e:
msg = 'LittleChef found the following error in'
msg += ' "{0}":\n {1}'.format(node_path, str(e))
abort(msg)
else:
print "Creating new node file '{0}.json'".format(name)
node = {'run_list': []}
# Add node name so that we can tell to which node it is
node['name'] = name
if not node.get('chef_environment'):
node['chef_environment'] = '_default'
return node | python | def get_node(name, merged=False):
"""Returns a JSON node file as a dictionary"""
if merged:
node_path = os.path.join("data_bags", "node", name.replace('.', '_') + ".json")
else:
node_path = os.path.join("nodes", name + ".json")
if os.path.exists(node_path):
# Read node.json
with open(node_path, 'r') as f:
try:
node = json.loads(f.read())
except ValueError as e:
msg = 'LittleChef found the following error in'
msg += ' "{0}":\n {1}'.format(node_path, str(e))
abort(msg)
else:
print "Creating new node file '{0}.json'".format(name)
node = {'run_list': []}
# Add node name so that we can tell to which node it is
node['name'] = name
if not node.get('chef_environment'):
node['chef_environment'] = '_default'
return node | [
"def",
"get_node",
"(",
"name",
",",
"merged",
"=",
"False",
")",
":",
"if",
"merged",
":",
"node_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"data_bags\"",
",",
"\"node\"",
",",
"name",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"+",
"\".json\"",
")",
"else",
":",
"node_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"nodes\"",
",",
"name",
"+",
"\".json\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"node_path",
")",
":",
"# Read node.json",
"with",
"open",
"(",
"node_path",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"node",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"msg",
"=",
"'LittleChef found the following error in'",
"msg",
"+=",
"' \"{0}\":\\n {1}'",
".",
"format",
"(",
"node_path",
",",
"str",
"(",
"e",
")",
")",
"abort",
"(",
"msg",
")",
"else",
":",
"print",
"\"Creating new node file '{0}.json'\"",
".",
"format",
"(",
"name",
")",
"node",
"=",
"{",
"'run_list'",
":",
"[",
"]",
"}",
"# Add node name so that we can tell to which node it is",
"node",
"[",
"'name'",
"]",
"=",
"name",
"if",
"not",
"node",
".",
"get",
"(",
"'chef_environment'",
")",
":",
"node",
"[",
"'chef_environment'",
"]",
"=",
"'_default'",
"return",
"node"
] | Returns a JSON node file as a dictionary | [
"Returns",
"a",
"JSON",
"node",
"file",
"as",
"a",
"dictionary"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L91-L113 |
5,148 | tobami/littlechef | littlechef/lib.py | get_nodes_with_role | def get_nodes_with_role(role_name, environment=None):
"""Get all nodes which include a given role,
prefix-searches are also supported
"""
prefix_search = role_name.endswith("*")
if prefix_search:
role_name = role_name.rstrip("*")
for n in get_nodes(environment):
roles = get_roles_in_node(n, recursive=True)
if prefix_search:
if any(role.startswith(role_name) for role in roles):
yield n
else:
if role_name in roles:
yield n | python | def get_nodes_with_role(role_name, environment=None):
"""Get all nodes which include a given role,
prefix-searches are also supported
"""
prefix_search = role_name.endswith("*")
if prefix_search:
role_name = role_name.rstrip("*")
for n in get_nodes(environment):
roles = get_roles_in_node(n, recursive=True)
if prefix_search:
if any(role.startswith(role_name) for role in roles):
yield n
else:
if role_name in roles:
yield n | [
"def",
"get_nodes_with_role",
"(",
"role_name",
",",
"environment",
"=",
"None",
")",
":",
"prefix_search",
"=",
"role_name",
".",
"endswith",
"(",
"\"*\"",
")",
"if",
"prefix_search",
":",
"role_name",
"=",
"role_name",
".",
"rstrip",
"(",
"\"*\"",
")",
"for",
"n",
"in",
"get_nodes",
"(",
"environment",
")",
":",
"roles",
"=",
"get_roles_in_node",
"(",
"n",
",",
"recursive",
"=",
"True",
")",
"if",
"prefix_search",
":",
"if",
"any",
"(",
"role",
".",
"startswith",
"(",
"role_name",
")",
"for",
"role",
"in",
"roles",
")",
":",
"yield",
"n",
"else",
":",
"if",
"role_name",
"in",
"roles",
":",
"yield",
"n"
] | Get all nodes which include a given role,
prefix-searches are also supported | [
"Get",
"all",
"nodes",
"which",
"include",
"a",
"given",
"role",
"prefix",
"-",
"searches",
"are",
"also",
"supported"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L132-L147 |
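The trailing asterisk gives a cheap prefix search over role names; a toy sketch of just that matching rule, with invented roles:

def role_matches(pattern, roles):
    if pattern.endswith('*'):
        prefix = pattern.rstrip('*')
        return any(role.startswith(prefix) for role in roles)
    return pattern in roles

print(role_matches('webserver*', ['webserver_eu', 'db']))  # True
print(role_matches('webserver',  ['webserver_eu', 'db']))  # False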
5,149 | tobami/littlechef | littlechef/lib.py | get_nodes_with_tag | def get_nodes_with_tag(tag, environment=None, include_guests=False):
"""Get all nodes which include a given tag"""
nodes = get_nodes(environment)
nodes_mapping = dict((n['name'], n) for n in nodes)
for n in nodes:
if tag in n.get('tags', []):
# Remove from node mapping so it doesn't get added twice by
# guest walking below
try:
del nodes_mapping[n['fqdn']]
except KeyError:
pass
yield n
# Walk guest if it is a host
if include_guests and n.get('virtualization', {}).get('role') == 'host':
for guest in n['virtualization'].get('guests', []):
try:
yield nodes_mapping[guest['fqdn']]
except KeyError:
# we ignore guests which are not in the same
# chef environments than their hosts for now
pass | python | def get_nodes_with_tag(tag, environment=None, include_guests=False):
"""Get all nodes which include a given tag"""
nodes = get_nodes(environment)
nodes_mapping = dict((n['name'], n) for n in nodes)
for n in nodes:
if tag in n.get('tags', []):
# Remove from node mapping so it doesn't get added twice by
# guest walking below
try:
del nodes_mapping[n['fqdn']]
except KeyError:
pass
yield n
# Walk guest if it is a host
if include_guests and n.get('virtualization', {}).get('role') == 'host':
for guest in n['virtualization'].get('guests', []):
try:
yield nodes_mapping[guest['fqdn']]
except KeyError:
# we ignore guests which are not in the same
# chef environments than their hosts for now
pass | [
"def",
"get_nodes_with_tag",
"(",
"tag",
",",
"environment",
"=",
"None",
",",
"include_guests",
"=",
"False",
")",
":",
"nodes",
"=",
"get_nodes",
"(",
"environment",
")",
"nodes_mapping",
"=",
"dict",
"(",
"(",
"n",
"[",
"'name'",
"]",
",",
"n",
")",
"for",
"n",
"in",
"nodes",
")",
"for",
"n",
"in",
"nodes",
":",
"if",
"tag",
"in",
"n",
".",
"get",
"(",
"'tags'",
",",
"[",
"]",
")",
":",
"# Remove from node mapping so it doesn't get added twice by",
"# guest walking below",
"try",
":",
"del",
"nodes_mapping",
"[",
"n",
"[",
"'fqdn'",
"]",
"]",
"except",
"KeyError",
":",
"pass",
"yield",
"n",
"# Walk guest if it is a host",
"if",
"include_guests",
"and",
"n",
".",
"get",
"(",
"'virtualization'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'role'",
")",
"==",
"'host'",
":",
"for",
"guest",
"in",
"n",
"[",
"'virtualization'",
"]",
".",
"get",
"(",
"'guests'",
",",
"[",
"]",
")",
":",
"try",
":",
"yield",
"nodes_mapping",
"[",
"guest",
"[",
"'fqdn'",
"]",
"]",
"except",
"KeyError",
":",
"# we ignore guests which are not in the same",
"# chef environments than their hosts for now",
"pass"
] | Get all nodes which include a given tag | [
"Get",
"all",
"nodes",
"which",
"include",
"a",
"given",
"tag"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L150-L171 |
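
A sketch of the tag lookup above, including the include_guests behaviour that also yields VMs listed under a matching virtualization host; the tag and environment values are placeholders.

    from littlechef import lib

    for n in lib.get_nodes_with_tag("xen", environment="staging",
                                    include_guests=True):
        print(n["name"])
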
5,150 | tobami/littlechef | littlechef/lib.py | get_nodes_with_recipe | def get_nodes_with_recipe(recipe_name, environment=None):
"""Get all nodes which include a given recipe,
prefix-searches are also supported
"""
prefix_search = recipe_name.endswith("*")
if prefix_search:
recipe_name = recipe_name.rstrip("*")
for n in get_nodes(environment):
recipes = get_recipes_in_node(n)
for role in get_roles_in_node(n, recursive=True):
recipes.extend(get_recipes_in_role(role))
if prefix_search:
if any(recipe.startswith(recipe_name) for recipe in recipes):
yield n
else:
if recipe_name in recipes:
yield n | python | def get_nodes_with_recipe(recipe_name, environment=None):
"""Get all nodes which include a given recipe,
prefix-searches are also supported
"""
prefix_search = recipe_name.endswith("*")
if prefix_search:
recipe_name = recipe_name.rstrip("*")
for n in get_nodes(environment):
recipes = get_recipes_in_node(n)
for role in get_roles_in_node(n, recursive=True):
recipes.extend(get_recipes_in_role(role))
if prefix_search:
if any(recipe.startswith(recipe_name) for recipe in recipes):
yield n
else:
if recipe_name in recipes:
yield n | [
"def",
"get_nodes_with_recipe",
"(",
"recipe_name",
",",
"environment",
"=",
"None",
")",
":",
"prefix_search",
"=",
"recipe_name",
".",
"endswith",
"(",
"\"*\"",
")",
"if",
"prefix_search",
":",
"recipe_name",
"=",
"recipe_name",
".",
"rstrip",
"(",
"\"*\"",
")",
"for",
"n",
"in",
"get_nodes",
"(",
"environment",
")",
":",
"recipes",
"=",
"get_recipes_in_node",
"(",
"n",
")",
"for",
"role",
"in",
"get_roles_in_node",
"(",
"n",
",",
"recursive",
"=",
"True",
")",
":",
"recipes",
".",
"extend",
"(",
"get_recipes_in_role",
"(",
"role",
")",
")",
"if",
"prefix_search",
":",
"if",
"any",
"(",
"recipe",
".",
"startswith",
"(",
"recipe_name",
")",
"for",
"recipe",
"in",
"recipes",
")",
":",
"yield",
"n",
"else",
":",
"if",
"recipe_name",
"in",
"recipes",
":",
"yield",
"n"
] | Get all nodes which include a given recipe,
prefix-searches are also supported | [
"Get",
"all",
"nodes",
"which",
"include",
"a",
"given",
"recipe",
"prefix",
"-",
"searches",
"are",
"also",
"supported"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L174-L191 |
5,151 | tobami/littlechef | littlechef/lib.py | print_node | def print_node(node, detailed=False):
"""Pretty prints the given node"""
nodename = node['name']
print(colors.yellow("\n" + nodename))
# Roles
if detailed:
for role in get_roles_in_node(node):
print_role(_get_role(role), detailed=False)
else:
print(' Roles: {0}'.format(", ".join(get_roles_in_node(node))))
# Recipes
if detailed:
for recipe in get_recipes_in_node(node):
print " Recipe:", recipe
print " attributes: {0}".format(node.get(recipe, ""))
else:
print(' Recipes: {0}'.format(", ".join(get_recipes_in_node(node))))
# Node attributes
print " Node attributes:"
for attribute in node.keys():
if attribute == "run_list" or attribute == "name":
continue
print " {0}: {1}".format(attribute, node[attribute]) | python | def print_node(node, detailed=False):
"""Pretty prints the given node"""
nodename = node['name']
print(colors.yellow("\n" + nodename))
# Roles
if detailed:
for role in get_roles_in_node(node):
print_role(_get_role(role), detailed=False)
else:
print(' Roles: {0}'.format(", ".join(get_roles_in_node(node))))
# Recipes
if detailed:
for recipe in get_recipes_in_node(node):
print " Recipe:", recipe
print " attributes: {0}".format(node.get(recipe, ""))
else:
print(' Recipes: {0}'.format(", ".join(get_recipes_in_node(node))))
# Node attributes
print " Node attributes:"
for attribute in node.keys():
if attribute == "run_list" or attribute == "name":
continue
print " {0}: {1}".format(attribute, node[attribute]) | [
"def",
"print_node",
"(",
"node",
",",
"detailed",
"=",
"False",
")",
":",
"nodename",
"=",
"node",
"[",
"'name'",
"]",
"print",
"(",
"colors",
".",
"yellow",
"(",
"\"\\n\"",
"+",
"nodename",
")",
")",
"# Roles",
"if",
"detailed",
":",
"for",
"role",
"in",
"get_roles_in_node",
"(",
"node",
")",
":",
"print_role",
"(",
"_get_role",
"(",
"role",
")",
",",
"detailed",
"=",
"False",
")",
"else",
":",
"print",
"(",
"' Roles: {0}'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"get_roles_in_node",
"(",
"node",
")",
")",
")",
")",
"# Recipes",
"if",
"detailed",
":",
"for",
"recipe",
"in",
"get_recipes_in_node",
"(",
"node",
")",
":",
"print",
"\" Recipe:\"",
",",
"recipe",
"print",
"\" attributes: {0}\"",
".",
"format",
"(",
"node",
".",
"get",
"(",
"recipe",
",",
"\"\"",
")",
")",
"else",
":",
"print",
"(",
"' Recipes: {0}'",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"get_recipes_in_node",
"(",
"node",
")",
")",
")",
")",
"# Node attributes",
"print",
"\" Node attributes:\"",
"for",
"attribute",
"in",
"node",
".",
"keys",
"(",
")",
":",
"if",
"attribute",
"==",
"\"run_list\"",
"or",
"attribute",
"==",
"\"name\"",
":",
"continue",
"print",
"\" {0}: {1}\"",
".",
"format",
"(",
"attribute",
",",
"node",
"[",
"attribute",
"]",
")"
] | Pretty prints the given node | [
"Pretty",
"prints",
"the",
"given",
"node"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L194-L216 |
5,152 | tobami/littlechef | littlechef/lib.py | print_nodes | def print_nodes(nodes, detailed=False):
"""Prints all the given nodes"""
found = 0
for node in nodes:
found += 1
print_node(node, detailed=detailed)
print("\nFound {0} node{1}".format(found, "s" if found != 1 else "")) | python | def print_nodes(nodes, detailed=False):
"""Prints all the given nodes"""
found = 0
for node in nodes:
found += 1
print_node(node, detailed=detailed)
print("\nFound {0} node{1}".format(found, "s" if found != 1 else "")) | [
"def",
"print_nodes",
"(",
"nodes",
",",
"detailed",
"=",
"False",
")",
":",
"found",
"=",
"0",
"for",
"node",
"in",
"nodes",
":",
"found",
"+=",
"1",
"print_node",
"(",
"node",
",",
"detailed",
"=",
"detailed",
")",
"print",
"(",
"\"\\nFound {0} node{1}\"",
".",
"format",
"(",
"found",
",",
"\"s\"",
"if",
"found",
"!=",
"1",
"else",
"\"\"",
")",
")"
] | Prints all the given nodes | [
"Prints",
"all",
"the",
"given",
"nodes"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L219-L225 |
5,153 | tobami/littlechef | littlechef/lib.py | _generate_metadata | def _generate_metadata(path, cookbook_path, name):
"""Checks whether metadata.rb has changed and regenerate metadata.json"""
global knife_installed
if not knife_installed:
return
metadata_path_rb = os.path.join(path, 'metadata.rb')
metadata_path_json = os.path.join(path, 'metadata.json')
if (os.path.exists(metadata_path_rb) and
(not os.path.exists(metadata_path_json) or
os.stat(metadata_path_rb).st_mtime >
os.stat(metadata_path_json).st_mtime)):
error_msg = "Warning: metadata.json for {0}".format(name)
error_msg += " in {0} is older that metadata.rb".format(cookbook_path)
error_msg += ", cookbook attributes could be out of date\n\n"
try:
proc = subprocess.Popen(
['knife', 'cookbook', 'metadata', '-o', cookbook_path, name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
resp, error = proc.communicate()
if ('ERROR:' in resp or 'FATAL:' in resp
or 'Generating metadata for' not in resp):
if("No user specified, pass via -u or specifiy 'node_name'"
in error):
error_msg += "You need to have an up-to-date (>=0.10.x)"
error_msg += " version of knife installed locally in order"
error_msg += " to generate metadata.json.\nError "
else:
error_msg += "Unkown error "
error_msg += "while executing knife to generate "
error_msg += "metadata.json for {0}".format(path)
print(error_msg)
print resp
if env.loglevel == 'debug':
print "\n".join(resp.split("\n")[:2])
except OSError:
knife_installed = False
error_msg += "If you locally install Chef's knife tool, LittleChef"
error_msg += " will regenerate metadata.json files automatically\n"
print(error_msg)
else:
print("Generated metadata.json for {0}\n".format(path)) | python | def _generate_metadata(path, cookbook_path, name):
"""Checks whether metadata.rb has changed and regenerate metadata.json"""
global knife_installed
if not knife_installed:
return
metadata_path_rb = os.path.join(path, 'metadata.rb')
metadata_path_json = os.path.join(path, 'metadata.json')
if (os.path.exists(metadata_path_rb) and
(not os.path.exists(metadata_path_json) or
os.stat(metadata_path_rb).st_mtime >
os.stat(metadata_path_json).st_mtime)):
error_msg = "Warning: metadata.json for {0}".format(name)
error_msg += " in {0} is older that metadata.rb".format(cookbook_path)
error_msg += ", cookbook attributes could be out of date\n\n"
try:
proc = subprocess.Popen(
['knife', 'cookbook', 'metadata', '-o', cookbook_path, name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
resp, error = proc.communicate()
if ('ERROR:' in resp or 'FATAL:' in resp
or 'Generating metadata for' not in resp):
if("No user specified, pass via -u or specifiy 'node_name'"
in error):
error_msg += "You need to have an up-to-date (>=0.10.x)"
error_msg += " version of knife installed locally in order"
error_msg += " to generate metadata.json.\nError "
else:
error_msg += "Unkown error "
error_msg += "while executing knife to generate "
error_msg += "metadata.json for {0}".format(path)
print(error_msg)
print resp
if env.loglevel == 'debug':
print "\n".join(resp.split("\n")[:2])
except OSError:
knife_installed = False
error_msg += "If you locally install Chef's knife tool, LittleChef"
error_msg += " will regenerate metadata.json files automatically\n"
print(error_msg)
else:
print("Generated metadata.json for {0}\n".format(path)) | [
"def",
"_generate_metadata",
"(",
"path",
",",
"cookbook_path",
",",
"name",
")",
":",
"global",
"knife_installed",
"if",
"not",
"knife_installed",
":",
"return",
"metadata_path_rb",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'metadata.rb'",
")",
"metadata_path_json",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'metadata.json'",
")",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"metadata_path_rb",
")",
"and",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"metadata_path_json",
")",
"or",
"os",
".",
"stat",
"(",
"metadata_path_rb",
")",
".",
"st_mtime",
">",
"os",
".",
"stat",
"(",
"metadata_path_json",
")",
".",
"st_mtime",
")",
")",
":",
"error_msg",
"=",
"\"Warning: metadata.json for {0}\"",
".",
"format",
"(",
"name",
")",
"error_msg",
"+=",
"\" in {0} is older that metadata.rb\"",
".",
"format",
"(",
"cookbook_path",
")",
"error_msg",
"+=",
"\", cookbook attributes could be out of date\\n\\n\"",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"'knife'",
",",
"'cookbook'",
",",
"'metadata'",
",",
"'-o'",
",",
"cookbook_path",
",",
"name",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"resp",
",",
"error",
"=",
"proc",
".",
"communicate",
"(",
")",
"if",
"(",
"'ERROR:'",
"in",
"resp",
"or",
"'FATAL:'",
"in",
"resp",
"or",
"'Generating metadata for'",
"not",
"in",
"resp",
")",
":",
"if",
"(",
"\"No user specified, pass via -u or specifiy 'node_name'\"",
"in",
"error",
")",
":",
"error_msg",
"+=",
"\"You need to have an up-to-date (>=0.10.x)\"",
"error_msg",
"+=",
"\" version of knife installed locally in order\"",
"error_msg",
"+=",
"\" to generate metadata.json.\\nError \"",
"else",
":",
"error_msg",
"+=",
"\"Unkown error \"",
"error_msg",
"+=",
"\"while executing knife to generate \"",
"error_msg",
"+=",
"\"metadata.json for {0}\"",
".",
"format",
"(",
"path",
")",
"print",
"(",
"error_msg",
")",
"print",
"resp",
"if",
"env",
".",
"loglevel",
"==",
"'debug'",
":",
"print",
"\"\\n\"",
".",
"join",
"(",
"resp",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
":",
"2",
"]",
")",
"except",
"OSError",
":",
"knife_installed",
"=",
"False",
"error_msg",
"+=",
"\"If you locally install Chef's knife tool, LittleChef\"",
"error_msg",
"+=",
"\" will regenerate metadata.json files automatically\\n\"",
"print",
"(",
"error_msg",
")",
"else",
":",
"print",
"(",
"\"Generated metadata.json for {0}\\n\"",
".",
"format",
"(",
"path",
")",
")"
] | Checks whether metadata.rb has changed and regenerate metadata.json | [
"Checks",
"whether",
"metadata",
".",
"rb",
"has",
"changed",
"and",
"regenerate",
"metadata",
".",
"json"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L228-L268 |
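
The subprocess call in _generate_metadata is equivalent to invoking knife by hand; below is a standalone sketch of that call, with an illustrative cookbook name and path.

    import subprocess

    # Roughly what _generate_metadata runs for a cookbook named "apache2"
    # located under the "cookbooks" directory:
    proc = subprocess.Popen(
        ["knife", "cookbook", "metadata", "-o", "cookbooks", "apache2"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    resp, error = proc.communicate()
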
5,154 | tobami/littlechef | littlechef/lib.py | get_recipes_in_cookbook | def get_recipes_in_cookbook(name):
"""Gets the name of all recipes present in a cookbook
Returns a list of dictionaries
"""
recipes = {}
path = None
cookbook_exists = False
metadata_exists = False
for cookbook_path in cookbook_paths:
path = os.path.join(cookbook_path, name)
path_exists = os.path.exists(path)
# cookbook exists if present in any of the cookbook paths
cookbook_exists = cookbook_exists or path_exists
if not path_exists:
continue
_generate_metadata(path, cookbook_path, name)
# Now try to open metadata.json
try:
with open(os.path.join(path, 'metadata.json'), 'r') as f:
try:
cookbook = json.loads(f.read())
except ValueError as e:
msg = "Little Chef found the following error in your"
msg += " {0} file:\n {1}".format(
os.path.join(path, 'metadata.json'), e)
abort(msg)
# Add each recipe defined in the cookbook
metadata_exists = True
recipe_defaults = {
'description': '',
'version': cookbook.get('version'),
'dependencies': cookbook.get('dependencies', {}).keys(),
'attributes': cookbook.get('attributes', {})
}
for recipe in cookbook.get('recipes', []):
recipes[recipe] = dict(
recipe_defaults,
name=recipe,
description=cookbook['recipes'][recipe]
)
# Cookbook metadata.json was found, don't try next cookbook path
# because metadata.json in site-cookbooks has preference
break
except IOError:
# metadata.json was not found, try next cookbook_path
pass
if not cookbook_exists:
abort('Unable to find cookbook "{0}"'.format(name))
elif not metadata_exists:
abort('Cookbook "{0}" has no metadata.json'.format(name))
# Add recipes found in the 'recipes' directory but not listed
# in the metadata
for cookbook_path in cookbook_paths:
recipes_dir = os.path.join(cookbook_path, name, 'recipes')
if not os.path.isdir(recipes_dir):
continue
for basename in os.listdir(recipes_dir):
fname, ext = os.path.splitext(basename)
if ext != '.rb':
continue
if fname != 'default':
recipe = '%s::%s' % (name, fname)
else:
recipe = name
if recipe not in recipes:
recipes[recipe] = dict(recipe_defaults, name=recipe)
# When a recipe has no default recipe (libraries?),
# add one so that it is listed
if not recipes:
recipes[name] = dict(
recipe_defaults,
name=name,
description='This cookbook has no default recipe'
)
return recipes.values() | python | def get_recipes_in_cookbook(name):
"""Gets the name of all recipes present in a cookbook
Returns a list of dictionaries
"""
recipes = {}
path = None
cookbook_exists = False
metadata_exists = False
for cookbook_path in cookbook_paths:
path = os.path.join(cookbook_path, name)
path_exists = os.path.exists(path)
# cookbook exists if present in any of the cookbook paths
cookbook_exists = cookbook_exists or path_exists
if not path_exists:
continue
_generate_metadata(path, cookbook_path, name)
# Now try to open metadata.json
try:
with open(os.path.join(path, 'metadata.json'), 'r') as f:
try:
cookbook = json.loads(f.read())
except ValueError as e:
msg = "Little Chef found the following error in your"
msg += " {0} file:\n {1}".format(
os.path.join(path, 'metadata.json'), e)
abort(msg)
# Add each recipe defined in the cookbook
metadata_exists = True
recipe_defaults = {
'description': '',
'version': cookbook.get('version'),
'dependencies': cookbook.get('dependencies', {}).keys(),
'attributes': cookbook.get('attributes', {})
}
for recipe in cookbook.get('recipes', []):
recipes[recipe] = dict(
recipe_defaults,
name=recipe,
description=cookbook['recipes'][recipe]
)
# Cookbook metadata.json was found, don't try next cookbook path
# because metadata.json in site-cookbooks has preference
break
except IOError:
# metadata.json was not found, try next cookbook_path
pass
if not cookbook_exists:
abort('Unable to find cookbook "{0}"'.format(name))
elif not metadata_exists:
abort('Cookbook "{0}" has no metadata.json'.format(name))
# Add recipes found in the 'recipes' directory but not listed
# in the metadata
for cookbook_path in cookbook_paths:
recipes_dir = os.path.join(cookbook_path, name, 'recipes')
if not os.path.isdir(recipes_dir):
continue
for basename in os.listdir(recipes_dir):
fname, ext = os.path.splitext(basename)
if ext != '.rb':
continue
if fname != 'default':
recipe = '%s::%s' % (name, fname)
else:
recipe = name
if recipe not in recipes:
recipes[recipe] = dict(recipe_defaults, name=recipe)
# When a recipe has no default recipe (libraries?),
# add one so that it is listed
if not recipes:
recipes[name] = dict(
recipe_defaults,
name=name,
description='This cookbook has no default recipe'
)
return recipes.values() | [
"def",
"get_recipes_in_cookbook",
"(",
"name",
")",
":",
"recipes",
"=",
"{",
"}",
"path",
"=",
"None",
"cookbook_exists",
"=",
"False",
"metadata_exists",
"=",
"False",
"for",
"cookbook_path",
"in",
"cookbook_paths",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cookbook_path",
",",
"name",
")",
"path_exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"# cookbook exists if present in any of the cookbook paths",
"cookbook_exists",
"=",
"cookbook_exists",
"or",
"path_exists",
"if",
"not",
"path_exists",
":",
"continue",
"_generate_metadata",
"(",
"path",
",",
"cookbook_path",
",",
"name",
")",
"# Now try to open metadata.json",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'metadata.json'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"cookbook",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"msg",
"=",
"\"Little Chef found the following error in your\"",
"msg",
"+=",
"\" {0} file:\\n {1}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'metadata.json'",
")",
",",
"e",
")",
"abort",
"(",
"msg",
")",
"# Add each recipe defined in the cookbook",
"metadata_exists",
"=",
"True",
"recipe_defaults",
"=",
"{",
"'description'",
":",
"''",
",",
"'version'",
":",
"cookbook",
".",
"get",
"(",
"'version'",
")",
",",
"'dependencies'",
":",
"cookbook",
".",
"get",
"(",
"'dependencies'",
",",
"{",
"}",
")",
".",
"keys",
"(",
")",
",",
"'attributes'",
":",
"cookbook",
".",
"get",
"(",
"'attributes'",
",",
"{",
"}",
")",
"}",
"for",
"recipe",
"in",
"cookbook",
".",
"get",
"(",
"'recipes'",
",",
"[",
"]",
")",
":",
"recipes",
"[",
"recipe",
"]",
"=",
"dict",
"(",
"recipe_defaults",
",",
"name",
"=",
"recipe",
",",
"description",
"=",
"cookbook",
"[",
"'recipes'",
"]",
"[",
"recipe",
"]",
")",
"# Cookbook metadata.json was found, don't try next cookbook path",
"# because metadata.json in site-cookbooks has preference",
"break",
"except",
"IOError",
":",
"# metadata.json was not found, try next cookbook_path",
"pass",
"if",
"not",
"cookbook_exists",
":",
"abort",
"(",
"'Unable to find cookbook \"{0}\"'",
".",
"format",
"(",
"name",
")",
")",
"elif",
"not",
"metadata_exists",
":",
"abort",
"(",
"'Cookbook \"{0}\" has no metadata.json'",
".",
"format",
"(",
"name",
")",
")",
"# Add recipes found in the 'recipes' directory but not listed",
"# in the metadata",
"for",
"cookbook_path",
"in",
"cookbook_paths",
":",
"recipes_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cookbook_path",
",",
"name",
",",
"'recipes'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"recipes_dir",
")",
":",
"continue",
"for",
"basename",
"in",
"os",
".",
"listdir",
"(",
"recipes_dir",
")",
":",
"fname",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"basename",
")",
"if",
"ext",
"!=",
"'.rb'",
":",
"continue",
"if",
"fname",
"!=",
"'default'",
":",
"recipe",
"=",
"'%s::%s'",
"%",
"(",
"name",
",",
"fname",
")",
"else",
":",
"recipe",
"=",
"name",
"if",
"recipe",
"not",
"in",
"recipes",
":",
"recipes",
"[",
"recipe",
"]",
"=",
"dict",
"(",
"recipe_defaults",
",",
"name",
"=",
"recipe",
")",
"# When a recipe has no default recipe (libraries?),",
"# add one so that it is listed",
"if",
"not",
"recipes",
":",
"recipes",
"[",
"name",
"]",
"=",
"dict",
"(",
"recipe_defaults",
",",
"name",
"=",
"name",
",",
"description",
"=",
"'This cookbook has no default recipe'",
")",
"return",
"recipes",
".",
"values",
"(",
")"
] | Gets the name of all recipes present in a cookbook
Returns a list of dictionaries | [
"Gets",
"the",
"name",
"of",
"all",
"recipes",
"present",
"in",
"a",
"cookbook",
"Returns",
"a",
"list",
"of",
"dictionaries"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L271-L348 |
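
A usage sketch of the cookbook metadata parsing above; each returned entry is a dict built from metadata.json. The cookbook name is a placeholder.

    from littlechef import lib

    for recipe in lib.get_recipes_in_cookbook("apache2"):
        # keys: name, description, version, dependencies, attributes
        print("{0}: {1}".format(recipe["name"], recipe["description"]))
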
5,155 | tobami/littlechef | littlechef/lib.py | get_recipes_in_node | def get_recipes_in_node(node):
"""Gets the name of all recipes present in the run_list of a node"""
recipes = []
for elem in node.get('run_list', []):
if elem.startswith("recipe"):
recipe = elem.split('[')[1].split(']')[0]
recipes.append(recipe)
return recipes | python | def get_recipes_in_node(node):
"""Gets the name of all recipes present in the run_list of a node"""
recipes = []
for elem in node.get('run_list', []):
if elem.startswith("recipe"):
recipe = elem.split('[')[1].split(']')[0]
recipes.append(recipe)
return recipes | [
"def",
"get_recipes_in_node",
"(",
"node",
")",
":",
"recipes",
"=",
"[",
"]",
"for",
"elem",
"in",
"node",
".",
"get",
"(",
"'run_list'",
",",
"[",
"]",
")",
":",
"if",
"elem",
".",
"startswith",
"(",
"\"recipe\"",
")",
":",
"recipe",
"=",
"elem",
".",
"split",
"(",
"'['",
")",
"[",
"1",
"]",
".",
"split",
"(",
"']'",
")",
"[",
"0",
"]",
"recipes",
".",
"append",
"(",
"recipe",
")",
"return",
"recipes"
] | Gets the name of all recipes present in the run_list of a node | [
"Gets",
"the",
"name",
"of",
"all",
"recipes",
"present",
"in",
"the",
"run_list",
"of",
"a",
"node"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L357-L364 |
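
A standalone illustration of the run_list parsing performed above; the node dict is made up.

    node = {'run_list': ['recipe[apache2]', 'recipe[mysql::server]', 'role[web]']}
    recipes = []
    for elem in node.get('run_list', []):
        if elem.startswith("recipe"):
            recipes.append(elem.split('[')[1].split(']')[0])
    print(recipes)  # ['apache2', 'mysql::server']
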
5,156 | tobami/littlechef | littlechef/lib.py | get_recipes | def get_recipes():
"""Gets all recipes found in the cookbook directories"""
dirnames = set()
for path in cookbook_paths:
dirnames.update([d for d in os.listdir(path) if os.path.isdir(
os.path.join(path, d)) and not d.startswith('.')])
recipes = []
for dirname in dirnames:
recipes.extend(get_recipes_in_cookbook(dirname))
return sorted(recipes, key=lambda x: x['name']) | python | def get_recipes():
"""Gets all recipes found in the cookbook directories"""
dirnames = set()
for path in cookbook_paths:
dirnames.update([d for d in os.listdir(path) if os.path.isdir(
os.path.join(path, d)) and not d.startswith('.')])
recipes = []
for dirname in dirnames:
recipes.extend(get_recipes_in_cookbook(dirname))
return sorted(recipes, key=lambda x: x['name']) | [
"def",
"get_recipes",
"(",
")",
":",
"dirnames",
"=",
"set",
"(",
")",
"for",
"path",
"in",
"cookbook_paths",
":",
"dirnames",
".",
"update",
"(",
"[",
"d",
"for",
"d",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"d",
")",
")",
"and",
"not",
"d",
".",
"startswith",
"(",
"'.'",
")",
"]",
")",
"recipes",
"=",
"[",
"]",
"for",
"dirname",
"in",
"dirnames",
":",
"recipes",
".",
"extend",
"(",
"get_recipes_in_cookbook",
"(",
"dirname",
")",
")",
"return",
"sorted",
"(",
"recipes",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'name'",
"]",
")"
] | Gets all recipes found in the cookbook directories | [
"Gets",
"all",
"recipes",
"found",
"in",
"the",
"cookbook",
"directories"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L367-L376 |
5,157 | tobami/littlechef | littlechef/lib.py | print_recipe | def print_recipe(recipe):
"""Pretty prints the given recipe"""
print(colors.yellow("\n{0}".format(recipe['name'])))
print " description: {0}".format(recipe['description'])
print " version: {0}".format(recipe['version'])
print " dependencies: {0}".format(", ".join(recipe['dependencies']))
print " attributes: {0}".format(", ".join(recipe['attributes'])) | python | def print_recipe(recipe):
"""Pretty prints the given recipe"""
print(colors.yellow("\n{0}".format(recipe['name'])))
print " description: {0}".format(recipe['description'])
print " version: {0}".format(recipe['version'])
print " dependencies: {0}".format(", ".join(recipe['dependencies']))
print " attributes: {0}".format(", ".join(recipe['attributes'])) | [
"def",
"print_recipe",
"(",
"recipe",
")",
":",
"print",
"(",
"colors",
".",
"yellow",
"(",
"\"\\n{0}\"",
".",
"format",
"(",
"recipe",
"[",
"'name'",
"]",
")",
")",
")",
"print",
"\" description: {0}\"",
".",
"format",
"(",
"recipe",
"[",
"'description'",
"]",
")",
"print",
"\" version: {0}\"",
".",
"format",
"(",
"recipe",
"[",
"'version'",
"]",
")",
"print",
"\" dependencies: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"recipe",
"[",
"'dependencies'",
"]",
")",
")",
"print",
"\" attributes: {0}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"recipe",
"[",
"'attributes'",
"]",
")",
")"
] | Pretty prints the given recipe | [
"Pretty",
"prints",
"the",
"given",
"recipe"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L379-L385 |
5,158 | tobami/littlechef | littlechef/lib.py | _get_role | def _get_role(rolename):
"""Reads and parses a file containing a role"""
path = os.path.join('roles', rolename + '.json')
if not os.path.exists(path):
abort("Couldn't read role file {0}".format(path))
with open(path, 'r') as f:
try:
role = json.loads(f.read())
except ValueError as e:
msg = "Little Chef found the following error in your"
msg += " {0}.json file:\n {1}".format(rolename, str(e))
abort(msg)
role['fullname'] = rolename
return role | python | def _get_role(rolename):
"""Reads and parses a file containing a role"""
path = os.path.join('roles', rolename + '.json')
if not os.path.exists(path):
abort("Couldn't read role file {0}".format(path))
with open(path, 'r') as f:
try:
role = json.loads(f.read())
except ValueError as e:
msg = "Little Chef found the following error in your"
msg += " {0}.json file:\n {1}".format(rolename, str(e))
abort(msg)
role['fullname'] = rolename
return role | [
"def",
"_get_role",
"(",
"rolename",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'roles'",
",",
"rolename",
"+",
"'.json'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"abort",
"(",
"\"Couldn't read role file {0}\"",
".",
"format",
"(",
"path",
")",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"role",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"msg",
"=",
"\"Little Chef found the following error in your\"",
"msg",
"+=",
"\" {0}.json file:\\n {1}\"",
".",
"format",
"(",
"rolename",
",",
"str",
"(",
"e",
")",
")",
"abort",
"(",
"msg",
")",
"role",
"[",
"'fullname'",
"]",
"=",
"rolename",
"return",
"role"
] | Reads and parses a file containing a role | [
"Reads",
"and",
"parses",
"a",
"file",
"containing",
"a",
"role"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L413-L426 |
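
A usage sketch of _get_role, which reads roles/<name>.json relative to the kitchen and adds a 'fullname' key; the role name and its run_list are placeholders.

    from littlechef import lib

    role = lib._get_role("webserver")
    print(role["fullname"])            # "webserver"
    print(role.get("run_list", []))    # e.g. ["recipe[apache2]"]
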
5,159 | tobami/littlechef | littlechef/lib.py | get_roles | def get_roles():
"""Gets all roles found in the 'roles' directory"""
roles = []
for root, subfolders, files in os.walk('roles'):
for filename in files:
if filename.endswith(".json"):
path = os.path.join(
root[len('roles'):], filename[:-len('.json')])
roles.append(_get_role(path))
return sorted(roles, key=lambda x: x['fullname']) | python | def get_roles():
"""Gets all roles found in the 'roles' directory"""
roles = []
for root, subfolders, files in os.walk('roles'):
for filename in files:
if filename.endswith(".json"):
path = os.path.join(
root[len('roles'):], filename[:-len('.json')])
roles.append(_get_role(path))
return sorted(roles, key=lambda x: x['fullname']) | [
"def",
"get_roles",
"(",
")",
":",
"roles",
"=",
"[",
"]",
"for",
"root",
",",
"subfolders",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"'roles'",
")",
":",
"for",
"filename",
"in",
"files",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
"[",
"len",
"(",
"'roles'",
")",
":",
"]",
",",
"filename",
"[",
":",
"-",
"len",
"(",
"'.json'",
")",
"]",
")",
"roles",
".",
"append",
"(",
"_get_role",
"(",
"path",
")",
")",
"return",
"sorted",
"(",
"roles",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'fullname'",
"]",
")"
] | Gets all roles found in the 'roles' directory | [
"Gets",
"all",
"roles",
"found",
"in",
"the",
"roles",
"directory"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L429-L438 |
5,160 | tobami/littlechef | littlechef/lib.py | print_role | def print_role(role, detailed=True):
"""Pretty prints the given role"""
if detailed:
print(colors.yellow(role.get('fullname')))
else:
print(" Role: {0}".format(role.get('fullname')))
if detailed:
print(" description: {0}".format(role.get('description')))
if 'default_attributes' in role:
print(" default_attributes:")
_pprint(role['default_attributes'])
if 'override_attributes' in role:
print(" override_attributes:")
_pprint(role['override_attributes'])
if detailed:
print(" run_list: {0}".format(role.get('run_list')))
print("") | python | def print_role(role, detailed=True):
"""Pretty prints the given role"""
if detailed:
print(colors.yellow(role.get('fullname')))
else:
print(" Role: {0}".format(role.get('fullname')))
if detailed:
print(" description: {0}".format(role.get('description')))
if 'default_attributes' in role:
print(" default_attributes:")
_pprint(role['default_attributes'])
if 'override_attributes' in role:
print(" override_attributes:")
_pprint(role['override_attributes'])
if detailed:
print(" run_list: {0}".format(role.get('run_list')))
print("") | [
"def",
"print_role",
"(",
"role",
",",
"detailed",
"=",
"True",
")",
":",
"if",
"detailed",
":",
"print",
"(",
"colors",
".",
"yellow",
"(",
"role",
".",
"get",
"(",
"'fullname'",
")",
")",
")",
"else",
":",
"print",
"(",
"\" Role: {0}\"",
".",
"format",
"(",
"role",
".",
"get",
"(",
"'fullname'",
")",
")",
")",
"if",
"detailed",
":",
"print",
"(",
"\" description: {0}\"",
".",
"format",
"(",
"role",
".",
"get",
"(",
"'description'",
")",
")",
")",
"if",
"'default_attributes'",
"in",
"role",
":",
"print",
"(",
"\" default_attributes:\"",
")",
"_pprint",
"(",
"role",
"[",
"'default_attributes'",
"]",
")",
"if",
"'override_attributes'",
"in",
"role",
":",
"print",
"(",
"\" override_attributes:\"",
")",
"_pprint",
"(",
"role",
"[",
"'override_attributes'",
"]",
")",
"if",
"detailed",
":",
"print",
"(",
"\" run_list: {0}\"",
".",
"format",
"(",
"role",
".",
"get",
"(",
"'run_list'",
")",
")",
")",
"print",
"(",
"\"\"",
")"
] | Pretty prints the given role | [
"Pretty",
"prints",
"the",
"given",
"role"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L441-L457 |
5,161 | tobami/littlechef | littlechef/lib.py | import_plugin | def import_plugin(name):
"""Imports plugin python module"""
path = os.path.join("plugins", name + ".py")
try:
with open(path, 'rb') as f:
try:
plugin = imp.load_module(
"p_" + name, f, name + '.py',
('.py', 'rb', imp.PY_SOURCE)
)
except SyntaxError as e:
error = "Found plugin '{0}', but it seems".format(name)
error += " to have a syntax error: {0}".format(str(e))
abort(error)
except IOError:
abort("Sorry, could not find '{0}.py' in the plugin directory".format(
name))
return plugin | python | def import_plugin(name):
"""Imports plugin python module"""
path = os.path.join("plugins", name + ".py")
try:
with open(path, 'rb') as f:
try:
plugin = imp.load_module(
"p_" + name, f, name + '.py',
('.py', 'rb', imp.PY_SOURCE)
)
except SyntaxError as e:
error = "Found plugin '{0}', but it seems".format(name)
error += " to have a syntax error: {0}".format(str(e))
abort(error)
except IOError:
abort("Sorry, could not find '{0}.py' in the plugin directory".format(
name))
return plugin | [
"def",
"import_plugin",
"(",
"name",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"plugins\"",
",",
"name",
"+",
"\".py\"",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"try",
":",
"plugin",
"=",
"imp",
".",
"load_module",
"(",
"\"p_\"",
"+",
"name",
",",
"f",
",",
"name",
"+",
"'.py'",
",",
"(",
"'.py'",
",",
"'rb'",
",",
"imp",
".",
"PY_SOURCE",
")",
")",
"except",
"SyntaxError",
"as",
"e",
":",
"error",
"=",
"\"Found plugin '{0}', but it seems\"",
".",
"format",
"(",
"name",
")",
"error",
"+=",
"\" to have a syntax error: {0}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
"abort",
"(",
"error",
")",
"except",
"IOError",
":",
"abort",
"(",
"\"Sorry, could not find '{0}.py' in the plugin directory\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"plugin"
] | Imports plugin python module | [
"Imports",
"plugin",
"python",
"module"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L482-L499 |
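
import_plugin loads plugins/<name>.py as a module. Judging from the save_xen_info plugin later in this file, such a module is expected to expose an execute(node) function; the following plugin is purely hypothetical.

    # plugins/dump_hostname.py -- hypothetical littlechef plugin
    from fabric.api import sudo


    def execute(node):
        """Print the hostname of the node currently being configured"""
        print("Running on {0}".format(node["name"]))
        sudo("hostname")
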
5,162 | tobami/littlechef | littlechef/lib.py | get_cookbook_path | def get_cookbook_path(cookbook_name):
"""Returns path to the cookbook for the given cookbook name"""
for cookbook_path in cookbook_paths:
path = os.path.join(cookbook_path, cookbook_name)
if os.path.exists(path):
return path
raise IOError('Can\'t find cookbook with name "{0}"'.format(cookbook_name)) | python | def get_cookbook_path(cookbook_name):
"""Returns path to the cookbook for the given cookbook name"""
for cookbook_path in cookbook_paths:
path = os.path.join(cookbook_path, cookbook_name)
if os.path.exists(path):
return path
raise IOError('Can\'t find cookbook with name "{0}"'.format(cookbook_name)) | [
"def",
"get_cookbook_path",
"(",
"cookbook_name",
")",
":",
"for",
"cookbook_path",
"in",
"cookbook_paths",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cookbook_path",
",",
"cookbook_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"path",
"raise",
"IOError",
"(",
"'Can\\'t find cookbook with name \"{0}\"'",
".",
"format",
"(",
"cookbook_name",
")",
")"
] | Returns path to the cookbook for the given cookbook name | [
"Returns",
"path",
"to",
"the",
"cookbook",
"for",
"the",
"given",
"cookbook",
"name"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L502-L508 |
5,163 | tobami/littlechef | littlechef/lib.py | global_confirm | def global_confirm(question, default=True):
"""Shows a confirmation that applies to all hosts
by temporarily disabling parallel execution in Fabric
"""
if env.abort_on_prompts:
return True
original_parallel = env.parallel
env.parallel = False
result = confirm(question, default)
env.parallel = original_parallel
return result | python | def global_confirm(question, default=True):
"""Shows a confirmation that applies to all hosts
by temporarily disabling parallel execution in Fabric
"""
if env.abort_on_prompts:
return True
original_parallel = env.parallel
env.parallel = False
result = confirm(question, default)
env.parallel = original_parallel
return result | [
"def",
"global_confirm",
"(",
"question",
",",
"default",
"=",
"True",
")",
":",
"if",
"env",
".",
"abort_on_prompts",
":",
"return",
"True",
"original_parallel",
"=",
"env",
".",
"parallel",
"env",
".",
"parallel",
"=",
"False",
"result",
"=",
"confirm",
"(",
"question",
",",
"default",
")",
"env",
".",
"parallel",
"=",
"original_parallel",
"return",
"result"
] | Shows a confirmation that applies to all hosts
by temporarily disabling parallel execution in Fabric | [
"Shows",
"a",
"confirmation",
"that",
"applies",
"to",
"all",
"hosts",
"by",
"temporarily",
"disabling",
"parallel",
"execution",
"in",
"Fabric"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L511-L521 |
5,164 | tobami/littlechef | littlechef/lib.py | _pprint | def _pprint(dic):
"""Prints a dictionary with one indentation level"""
for key, value in dic.items():
print(" {0}: {1}".format(key, value)) | python | def _pprint(dic):
"""Prints a dictionary with one indentation level"""
for key, value in dic.items():
print(" {0}: {1}".format(key, value)) | [
"def",
"_pprint",
"(",
"dic",
")",
":",
"for",
"key",
",",
"value",
"in",
"dic",
".",
"items",
"(",
")",
":",
"print",
"(",
"\" {0}: {1}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")"
] | Prints a dictionary with one indentation level | [
"Prints",
"a",
"dictionary",
"with",
"one",
"indentation",
"level"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L524-L527 |
5,165 | tobami/littlechef | littlechef/lib.py | get_margin | def get_margin(length):
"""Add enough tabs to align in two columns"""
if length > 23:
margin_left = "\t"
chars = 1
elif length > 15:
margin_left = "\t\t"
chars = 2
elif length > 7:
margin_left = "\t\t\t"
chars = 3
else:
margin_left = "\t\t\t\t"
chars = 4
return margin_left | python | def get_margin(length):
"""Add enough tabs to align in two columns"""
if length > 23:
margin_left = "\t"
chars = 1
elif length > 15:
margin_left = "\t\t"
chars = 2
elif length > 7:
margin_left = "\t\t\t"
chars = 3
else:
margin_left = "\t\t\t\t"
chars = 4
return margin_left | [
"def",
"get_margin",
"(",
"length",
")",
":",
"if",
"length",
">",
"23",
":",
"margin_left",
"=",
"\"\\t\"",
"chars",
"=",
"1",
"elif",
"length",
">",
"15",
":",
"margin_left",
"=",
"\"\\t\\t\"",
"chars",
"=",
"2",
"elif",
"length",
">",
"7",
":",
"margin_left",
"=",
"\"\\t\\t\\t\"",
"chars",
"=",
"3",
"else",
":",
"margin_left",
"=",
"\"\\t\\t\\t\\t\"",
"chars",
"=",
"4",
"return",
"margin_left"
] | Add enough tabs to align in two columns | [
"Add",
"enough",
"tabs",
"to",
"align",
"in",
"two",
"columns"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L535-L549 |
5,166 | tobami/littlechef | littlechef/solo.py | configure | def configure(current_node=None):
"""Deploy chef-solo specific files"""
current_node = current_node or {}
# Ensure that the /tmp/chef-solo/cache directory exist
cache_dir = "{0}/cache".format(env.node_work_path)
# First remote call, could go wrong
try:
cache_exists = exists(cache_dir)
except EOFError as e:
abort("Could not login to node, got: {0}".format(e))
if not cache_exists:
with settings(hide('running', 'stdout'), warn_only=True):
output = sudo('mkdir -p {0}'.format(cache_dir))
if output.failed:
error = "Could not create {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Change ownership of /tmp/chef-solo/ so that we can rsync
with hide('running', 'stdout'):
with settings(warn_only=True):
output = sudo(
'chown -R {0} {1}'.format(env.user, env.node_work_path))
if output.failed:
error = "Could not modify {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Set up chef solo configuration
logging_path = os.path.dirname(LOGFILE)
if not exists(logging_path):
sudo('mkdir -p {0}'.format(logging_path))
if not exists('/etc/chef'):
sudo('mkdir -p /etc/chef')
# Set parameters and upload solo.rb template
reversed_cookbook_paths = cookbook_paths[:]
reversed_cookbook_paths.reverse()
cookbook_paths_list = '[{0}]'.format(', '.join(
['"{0}/{1}"'.format(env.node_work_path, x)
for x in reversed_cookbook_paths]))
data = {
'node_work_path': env.node_work_path,
'cookbook_paths_list': cookbook_paths_list,
'environment': current_node.get('chef_environment', '_default'),
'verbose': "true" if env.verbose else "false",
'http_proxy': env.http_proxy,
'https_proxy': env.https_proxy
}
with settings(hide('everything')):
try:
upload_template('solo.rb.j2', '/etc/chef/solo.rb',
context=data, use_sudo=True, backup=False,
template_dir=BASEDIR, use_jinja=True, mode=0400)
except SystemExit:
error = ("Failed to upload '/etc/chef/solo.rb'\nThis "
"can happen when the deployment user does not have a "
"home directory, which is needed as a temporary location")
abort(error)
with hide('stdout'):
sudo('chown root:$(id -g -n root) {0}'.format('/etc/chef/solo.rb')) | python | def configure(current_node=None):
"""Deploy chef-solo specific files"""
current_node = current_node or {}
# Ensure that the /tmp/chef-solo/cache directory exist
cache_dir = "{0}/cache".format(env.node_work_path)
# First remote call, could go wrong
try:
cache_exists = exists(cache_dir)
except EOFError as e:
abort("Could not login to node, got: {0}".format(e))
if not cache_exists:
with settings(hide('running', 'stdout'), warn_only=True):
output = sudo('mkdir -p {0}'.format(cache_dir))
if output.failed:
error = "Could not create {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Change ownership of /tmp/chef-solo/ so that we can rsync
with hide('running', 'stdout'):
with settings(warn_only=True):
output = sudo(
'chown -R {0} {1}'.format(env.user, env.node_work_path))
if output.failed:
error = "Could not modify {0} dir. ".format(env.node_work_path)
error += "Do you have sudo rights?"
abort(error)
# Set up chef solo configuration
logging_path = os.path.dirname(LOGFILE)
if not exists(logging_path):
sudo('mkdir -p {0}'.format(logging_path))
if not exists('/etc/chef'):
sudo('mkdir -p /etc/chef')
# Set parameters and upload solo.rb template
reversed_cookbook_paths = cookbook_paths[:]
reversed_cookbook_paths.reverse()
cookbook_paths_list = '[{0}]'.format(', '.join(
['"{0}/{1}"'.format(env.node_work_path, x)
for x in reversed_cookbook_paths]))
data = {
'node_work_path': env.node_work_path,
'cookbook_paths_list': cookbook_paths_list,
'environment': current_node.get('chef_environment', '_default'),
'verbose': "true" if env.verbose else "false",
'http_proxy': env.http_proxy,
'https_proxy': env.https_proxy
}
with settings(hide('everything')):
try:
upload_template('solo.rb.j2', '/etc/chef/solo.rb',
context=data, use_sudo=True, backup=False,
template_dir=BASEDIR, use_jinja=True, mode=0400)
except SystemExit:
error = ("Failed to upload '/etc/chef/solo.rb'\nThis "
"can happen when the deployment user does not have a "
"home directory, which is needed as a temporary location")
abort(error)
with hide('stdout'):
sudo('chown root:$(id -g -n root) {0}'.format('/etc/chef/solo.rb')) | [
"def",
"configure",
"(",
"current_node",
"=",
"None",
")",
":",
"current_node",
"=",
"current_node",
"or",
"{",
"}",
"# Ensure that the /tmp/chef-solo/cache directory exist",
"cache_dir",
"=",
"\"{0}/cache\"",
".",
"format",
"(",
"env",
".",
"node_work_path",
")",
"# First remote call, could go wrong",
"try",
":",
"cache_exists",
"=",
"exists",
"(",
"cache_dir",
")",
"except",
"EOFError",
"as",
"e",
":",
"abort",
"(",
"\"Could not login to node, got: {0}\"",
".",
"format",
"(",
"e",
")",
")",
"if",
"not",
"cache_exists",
":",
"with",
"settings",
"(",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"output",
"=",
"sudo",
"(",
"'mkdir -p {0}'",
".",
"format",
"(",
"cache_dir",
")",
")",
"if",
"output",
".",
"failed",
":",
"error",
"=",
"\"Could not create {0} dir. \"",
".",
"format",
"(",
"env",
".",
"node_work_path",
")",
"error",
"+=",
"\"Do you have sudo rights?\"",
"abort",
"(",
"error",
")",
"# Change ownership of /tmp/chef-solo/ so that we can rsync",
"with",
"hide",
"(",
"'running'",
",",
"'stdout'",
")",
":",
"with",
"settings",
"(",
"warn_only",
"=",
"True",
")",
":",
"output",
"=",
"sudo",
"(",
"'chown -R {0} {1}'",
".",
"format",
"(",
"env",
".",
"user",
",",
"env",
".",
"node_work_path",
")",
")",
"if",
"output",
".",
"failed",
":",
"error",
"=",
"\"Could not modify {0} dir. \"",
".",
"format",
"(",
"env",
".",
"node_work_path",
")",
"error",
"+=",
"\"Do you have sudo rights?\"",
"abort",
"(",
"error",
")",
"# Set up chef solo configuration",
"logging_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"LOGFILE",
")",
"if",
"not",
"exists",
"(",
"logging_path",
")",
":",
"sudo",
"(",
"'mkdir -p {0}'",
".",
"format",
"(",
"logging_path",
")",
")",
"if",
"not",
"exists",
"(",
"'/etc/chef'",
")",
":",
"sudo",
"(",
"'mkdir -p /etc/chef'",
")",
"# Set parameters and upload solo.rb template",
"reversed_cookbook_paths",
"=",
"cookbook_paths",
"[",
":",
"]",
"reversed_cookbook_paths",
".",
"reverse",
"(",
")",
"cookbook_paths_list",
"=",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"[",
"'\"{0}/{1}\"'",
".",
"format",
"(",
"env",
".",
"node_work_path",
",",
"x",
")",
"for",
"x",
"in",
"reversed_cookbook_paths",
"]",
")",
")",
"data",
"=",
"{",
"'node_work_path'",
":",
"env",
".",
"node_work_path",
",",
"'cookbook_paths_list'",
":",
"cookbook_paths_list",
",",
"'environment'",
":",
"current_node",
".",
"get",
"(",
"'chef_environment'",
",",
"'_default'",
")",
",",
"'verbose'",
":",
"\"true\"",
"if",
"env",
".",
"verbose",
"else",
"\"false\"",
",",
"'http_proxy'",
":",
"env",
".",
"http_proxy",
",",
"'https_proxy'",
":",
"env",
".",
"https_proxy",
"}",
"with",
"settings",
"(",
"hide",
"(",
"'everything'",
")",
")",
":",
"try",
":",
"upload_template",
"(",
"'solo.rb.j2'",
",",
"'/etc/chef/solo.rb'",
",",
"context",
"=",
"data",
",",
"use_sudo",
"=",
"True",
",",
"backup",
"=",
"False",
",",
"template_dir",
"=",
"BASEDIR",
",",
"use_jinja",
"=",
"True",
",",
"mode",
"=",
"0400",
")",
"except",
"SystemExit",
":",
"error",
"=",
"(",
"\"Failed to upload '/etc/chef/solo.rb'\\nThis \"",
"\"can happen when the deployment user does not have a \"",
"\"home directory, which is needed as a temporary location\"",
")",
"abort",
"(",
"error",
")",
"with",
"hide",
"(",
"'stdout'",
")",
":",
"sudo",
"(",
"'chown root:$(id -g -n root) {0}'",
".",
"format",
"(",
"'/etc/chef/solo.rb'",
")",
")"
] | Deploy chef-solo specific files | [
"Deploy",
"chef",
"-",
"solo",
"specific",
"files"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/solo.py#L42-L99 |
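
A small sketch of what the cookbook_paths_list expression above produces. It assumes a node_work_path of /tmp/chef-solo (as the cache-directory comment suggests) and cookbook_paths of ['site-cookbooks', 'cookbooks'] (as the preference comment in lib.py suggests); both values are assumptions.

    node_work_path = "/tmp/chef-solo"                          # assumed default
    reversed_cookbook_paths = ["cookbooks", "site-cookbooks"]  # assumed order
    cookbook_paths_list = '[{0}]'.format(', '.join(
        ['"{0}/{1}"'.format(node_work_path, x) for x in reversed_cookbook_paths]))
    print(cookbook_paths_list)
    # -> ["/tmp/chef-solo/cookbooks", "/tmp/chef-solo/site-cookbooks"]
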
5,167 | tobami/littlechef | plugins/save_xen_info.py | execute | def execute(node):
"""Uses ohai to get virtualization information which is then saved to then
node file
"""
with hide('everything'):
virt = json.loads(sudo('ohai virtualization'))
if not len(virt) or virt[0][1] != "host":
# It may work for virtualization solutions other than Xen
print("This node is not a Xen host, doing nothing")
return
node['virtualization'] = {
'role': 'host',
'system': 'xen',
'vms': [],
}
# VMs
with hide('everything'):
vm_list = sudo("xm list")
for vm in vm_list.split("\n")[2:]:
data = vm.split()
if len(data) != 6:
break
node['virtualization']['vms'].append({
'fqdn': data[0], 'RAM': data[2], 'cpus': data[3]})
print("Found {0} VMs for this Xen host".format(
len(node['virtualization']['vms'])))
# Save node file and remove the returned temp file
del node['name']
os.remove(chef.save_config(node, True)) | python | def execute(node):
"""Uses ohai to get virtualization information which is then saved to then
node file
"""
with hide('everything'):
virt = json.loads(sudo('ohai virtualization'))
if not len(virt) or virt[0][1] != "host":
# It may work for virtualization solutions other than Xen
print("This node is not a Xen host, doing nothing")
return
node['virtualization'] = {
'role': 'host',
'system': 'xen',
'vms': [],
}
# VMs
with hide('everything'):
vm_list = sudo("xm list")
for vm in vm_list.split("\n")[2:]:
data = vm.split()
if len(data) != 6:
break
node['virtualization']['vms'].append({
'fqdn': data[0], 'RAM': data[2], 'cpus': data[3]})
print("Found {0} VMs for this Xen host".format(
len(node['virtualization']['vms'])))
# Save node file and remove the returned temp file
del node['name']
os.remove(chef.save_config(node, True)) | [
"def",
"execute",
"(",
"node",
")",
":",
"with",
"hide",
"(",
"'everything'",
")",
":",
"virt",
"=",
"json",
".",
"loads",
"(",
"sudo",
"(",
"'ohai virtualization'",
")",
")",
"if",
"not",
"len",
"(",
"virt",
")",
"or",
"virt",
"[",
"0",
"]",
"[",
"1",
"]",
"!=",
"\"host\"",
":",
"# It may work for virtualization solutions other than Xen",
"print",
"(",
"\"This node is not a Xen host, doing nothing\"",
")",
"return",
"node",
"[",
"'virtualization'",
"]",
"=",
"{",
"'role'",
":",
"'host'",
",",
"'system'",
":",
"'xen'",
",",
"'vms'",
":",
"[",
"]",
",",
"}",
"# VMs",
"with",
"hide",
"(",
"'everything'",
")",
":",
"vm_list",
"=",
"sudo",
"(",
"\"xm list\"",
")",
"for",
"vm",
"in",
"vm_list",
".",
"split",
"(",
"\"\\n\"",
")",
"[",
"2",
":",
"]",
":",
"data",
"=",
"vm",
".",
"split",
"(",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"6",
":",
"break",
"node",
"[",
"'virtualization'",
"]",
"[",
"'vms'",
"]",
".",
"append",
"(",
"{",
"'fqdn'",
":",
"data",
"[",
"0",
"]",
",",
"'RAM'",
":",
"data",
"[",
"2",
"]",
",",
"'cpus'",
":",
"data",
"[",
"3",
"]",
"}",
")",
"print",
"(",
"\"Found {0} VMs for this Xen host\"",
".",
"format",
"(",
"len",
"(",
"node",
"[",
"'virtualization'",
"]",
"[",
"'vms'",
"]",
")",
")",
")",
"# Save node file and remove the returned temp file",
"del",
"node",
"[",
"'name'",
"]",
"os",
".",
"remove",
"(",
"chef",
".",
"save_config",
"(",
"node",
",",
"True",
")",
")"
] | Uses ohai to get virtualization information which is then saved to then
node file | [
"Uses",
"ohai",
"to",
"get",
"virtualization",
"information",
"which",
"is",
"then",
"saved",
"to",
"then",
"node",
"file"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/plugins/save_xen_info.py#L11-L40 |
5,168 | tobami/littlechef | littlechef/runner.py | nodes_with_role | def nodes_with_role(rolename):
"""Configures a list of nodes that have the given role in their run list"""
nodes = [n['name'] for n in
lib.get_nodes_with_role(rolename, env.chef_environment)]
if not len(nodes):
print("No nodes found with role '{0}'".format(rolename))
sys.exit(0)
return node(*nodes) | python | def nodes_with_role(rolename):
"""Configures a list of nodes that have the given role in their run list"""
nodes = [n['name'] for n in
lib.get_nodes_with_role(rolename, env.chef_environment)]
if not len(nodes):
print("No nodes found with role '{0}'".format(rolename))
sys.exit(0)
return node(*nodes) | [
"def",
"nodes_with_role",
"(",
"rolename",
")",
":",
"nodes",
"=",
"[",
"n",
"[",
"'name'",
"]",
"for",
"n",
"in",
"lib",
".",
"get_nodes_with_role",
"(",
"rolename",
",",
"env",
".",
"chef_environment",
")",
"]",
"if",
"not",
"len",
"(",
"nodes",
")",
":",
"print",
"(",
"\"No nodes found with role '{0}'\"",
".",
"format",
"(",
"rolename",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"node",
"(",
"*",
"nodes",
")"
] | Configures a list of nodes that have the given role in their run list | [
"Configures",
"a",
"list",
"of",
"nodes",
"that",
"have",
"the",
"given",
"role",
"in",
"their",
"run",
"list"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L92-L99 |
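
A sketch of driving this task programmatically; in practice it is usually reached through littlechef's command line (typically something like "fix nodes_with_role:webserver", though that command name is an assumption here). Role and environment names are placeholders.

    from littlechef import runner

    runner.env.chef_environment = "staging"  # optional environment filter
    runner.nodes_with_role("webserver")      # resolves names, then calls node()
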
5,169 | tobami/littlechef | littlechef/runner.py | nodes_with_recipe | def nodes_with_recipe(recipename):
"""Configures a list of nodes that have the given recipe in their run list
"""
nodes = [n['name'] for n in
lib.get_nodes_with_recipe(recipename, env.chef_environment)]
if not len(nodes):
print("No nodes found with recipe '{0}'".format(recipename))
sys.exit(0)
return node(*nodes) | python | def nodes_with_recipe(recipename):
"""Configures a list of nodes that have the given recipe in their run list
"""
nodes = [n['name'] for n in
lib.get_nodes_with_recipe(recipename, env.chef_environment)]
if not len(nodes):
print("No nodes found with recipe '{0}'".format(recipename))
sys.exit(0)
return node(*nodes) | [
"def",
"nodes_with_recipe",
"(",
"recipename",
")",
":",
"nodes",
"=",
"[",
"n",
"[",
"'name'",
"]",
"for",
"n",
"in",
"lib",
".",
"get_nodes_with_recipe",
"(",
"recipename",
",",
"env",
".",
"chef_environment",
")",
"]",
"if",
"not",
"len",
"(",
"nodes",
")",
":",
"print",
"(",
"\"No nodes found with recipe '{0}'\"",
".",
"format",
"(",
"recipename",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"node",
"(",
"*",
"nodes",
")"
] | Configures a list of nodes that have the given recipe in their run list | [
"Configures",
"a",
"list",
"of",
"nodes",
"that",
"have",
"the",
"given",
"recipe",
"in",
"their",
"run",
"list"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L102-L110 |
5,170 | tobami/littlechef | littlechef/runner.py | node | def node(*nodes):
"""Selects and configures a list of nodes. 'all' configures all nodes"""
chef.build_node_data_bag()
if not len(nodes) or nodes[0] == '':
abort('No node was given')
elif nodes[0] == 'all':
# Fetch all nodes and add them to env.hosts
for node in lib.get_nodes(env.chef_environment):
env.hosts.append(node['name'])
if not len(env.hosts):
abort('No nodes found in /nodes/')
message = "Are you sure you want to configure all nodes ({0})".format(
len(env.hosts))
if env.chef_environment:
message += " in the {0} environment".format(env.chef_environment)
message += "?"
if not __testing__:
if not lib.global_confirm(message):
abort('Aborted by user')
else:
# A list of nodes was given
env.hosts = list(nodes)
env.all_hosts = list(env.hosts) # Shouldn't be needed
# Check whether another command was given in addition to "node:"
if not(littlechef.__cooking__ and
'node:' not in sys.argv[-1] and
'nodes_with_role:' not in sys.argv[-1] and
'nodes_with_recipe:' not in sys.argv[-1] and
'nodes_with_tag:' not in sys.argv[-1]):
# If user didn't type recipe:X, role:Y or deploy_chef,
# configure the nodes
with settings():
execute(_node_runner)
chef.remove_local_node_data_bag() | python | def node(*nodes):
"""Selects and configures a list of nodes. 'all' configures all nodes"""
chef.build_node_data_bag()
if not len(nodes) or nodes[0] == '':
abort('No node was given')
elif nodes[0] == 'all':
# Fetch all nodes and add them to env.hosts
for node in lib.get_nodes(env.chef_environment):
env.hosts.append(node['name'])
if not len(env.hosts):
abort('No nodes found in /nodes/')
message = "Are you sure you want to configure all nodes ({0})".format(
len(env.hosts))
if env.chef_environment:
message += " in the {0} environment".format(env.chef_environment)
message += "?"
if not __testing__:
if not lib.global_confirm(message):
abort('Aborted by user')
else:
# A list of nodes was given
env.hosts = list(nodes)
env.all_hosts = list(env.hosts) # Shouldn't be needed
# Check whether another command was given in addition to "node:"
if not(littlechef.__cooking__ and
'node:' not in sys.argv[-1] and
'nodes_with_role:' not in sys.argv[-1] and
'nodes_with_recipe:' not in sys.argv[-1] and
'nodes_with_tag:' not in sys.argv[-1]):
# If user didn't type recipe:X, role:Y or deploy_chef,
# configure the nodes
with settings():
execute(_node_runner)
chef.remove_local_node_data_bag() | [
"def",
"node",
"(",
"*",
"nodes",
")",
":",
"chef",
".",
"build_node_data_bag",
"(",
")",
"if",
"not",
"len",
"(",
"nodes",
")",
"or",
"nodes",
"[",
"0",
"]",
"==",
"''",
":",
"abort",
"(",
"'No node was given'",
")",
"elif",
"nodes",
"[",
"0",
"]",
"==",
"'all'",
":",
"# Fetch all nodes and add them to env.hosts",
"for",
"node",
"in",
"lib",
".",
"get_nodes",
"(",
"env",
".",
"chef_environment",
")",
":",
"env",
".",
"hosts",
".",
"append",
"(",
"node",
"[",
"'name'",
"]",
")",
"if",
"not",
"len",
"(",
"env",
".",
"hosts",
")",
":",
"abort",
"(",
"'No nodes found in /nodes/'",
")",
"message",
"=",
"\"Are you sure you want to configure all nodes ({0})\"",
".",
"format",
"(",
"len",
"(",
"env",
".",
"hosts",
")",
")",
"if",
"env",
".",
"chef_environment",
":",
"message",
"+=",
"\" in the {0} environment\"",
".",
"format",
"(",
"env",
".",
"chef_environment",
")",
"message",
"+=",
"\"?\"",
"if",
"not",
"__testing__",
":",
"if",
"not",
"lib",
".",
"global_confirm",
"(",
"message",
")",
":",
"abort",
"(",
"'Aborted by user'",
")",
"else",
":",
"# A list of nodes was given",
"env",
".",
"hosts",
"=",
"list",
"(",
"nodes",
")",
"env",
".",
"all_hosts",
"=",
"list",
"(",
"env",
".",
"hosts",
")",
"# Shouldn't be needed",
"# Check whether another command was given in addition to \"node:\"",
"if",
"not",
"(",
"littlechef",
".",
"__cooking__",
"and",
"'node:'",
"not",
"in",
"sys",
".",
"argv",
"[",
"-",
"1",
"]",
"and",
"'nodes_with_role:'",
"not",
"in",
"sys",
".",
"argv",
"[",
"-",
"1",
"]",
"and",
"'nodes_with_recipe:'",
"not",
"in",
"sys",
".",
"argv",
"[",
"-",
"1",
"]",
"and",
"'nodes_with_tag:'",
"not",
"in",
"sys",
".",
"argv",
"[",
"-",
"1",
"]",
")",
":",
"# If user didn't type recipe:X, role:Y or deploy_chef,",
"# configure the nodes",
"with",
"settings",
"(",
")",
":",
"execute",
"(",
"_node_runner",
")",
"chef",
".",
"remove_local_node_data_bag",
"(",
")"
] | Selects and configures a list of nodes. 'all' configures all nodes | [
"Selects",
"and",
"configures",
"a",
"list",
"of",
"nodes",
".",
"all",
"configures",
"all",
"nodes"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L124-L158 |
5,171 | tobami/littlechef | littlechef/runner.py | _node_runner | def _node_runner():
"""This is only used by node so that we can execute in parallel"""
env.host_string = lib.get_env_host_string()
node = lib.get_node(env.host_string)
_configure_fabric_for_platform(node.get("platform"))
if __testing__:
print "TEST: would now configure {0}".format(env.host_string)
else:
lib.print_header("Configuring {0}".format(env.host_string))
if env.autodeploy_chef and not chef.chef_test():
deploy_chef(ask="no")
chef.sync_node(node) | python | def _node_runner():
"""This is only used by node so that we can execute in parallel"""
env.host_string = lib.get_env_host_string()
node = lib.get_node(env.host_string)
_configure_fabric_for_platform(node.get("platform"))
if __testing__:
print "TEST: would now configure {0}".format(env.host_string)
else:
lib.print_header("Configuring {0}".format(env.host_string))
if env.autodeploy_chef and not chef.chef_test():
deploy_chef(ask="no")
chef.sync_node(node) | [
"def",
"_node_runner",
"(",
")",
":",
"env",
".",
"host_string",
"=",
"lib",
".",
"get_env_host_string",
"(",
")",
"node",
"=",
"lib",
".",
"get_node",
"(",
"env",
".",
"host_string",
")",
"_configure_fabric_for_platform",
"(",
"node",
".",
"get",
"(",
"\"platform\"",
")",
")",
"if",
"__testing__",
":",
"print",
"\"TEST: would now configure {0}\"",
".",
"format",
"(",
"env",
".",
"host_string",
")",
"else",
":",
"lib",
".",
"print_header",
"(",
"\"Configuring {0}\"",
".",
"format",
"(",
"env",
".",
"host_string",
")",
")",
"if",
"env",
".",
"autodeploy_chef",
"and",
"not",
"chef",
".",
"chef_test",
"(",
")",
":",
"deploy_chef",
"(",
"ask",
"=",
"\"no\"",
")",
"chef",
".",
"sync_node",
"(",
"node",
")"
] | This is only used by node so that we can execute in parallel | [
"This",
"is",
"only",
"used",
"by",
"node",
"so",
"that",
"we",
"can",
"execute",
"in",
"parallel"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L167-L180 |
5,172 | tobami/littlechef | littlechef/runner.py | deploy_chef | def deploy_chef(ask="yes", version="11"):
"""Install chef-solo on a node"""
env.host_string = lib.get_env_host_string()
if ask == "no" or littlechef.noninteractive:
print("Deploying Chef using omnibus installer version: ...".format(version))
else:
message = ('\nAre you sure you want to install Chef version:'
'{0} on node {1}?'.format(version, env.host_string))
if not confirm(message):
abort('Aborted by user')
lib.print_header("Configuring Chef Solo on {0}".format(env.host_string))
if not __testing__:
solo.install(version)
solo.configure()
# Build a basic node file if there isn't one already
# with some properties from ohai
with settings(hide('stdout'), warn_only=True):
output = sudo('ohai -l warn')
if output.succeeded:
try:
ohai = json.loads(output)
except ValueError:
abort("Could not parse ohai's output"
":\n {0}".format(output))
node = {"run_list": []}
for attribute in ["ipaddress", "platform", "platform_family",
"platform_version"]:
if ohai.get(attribute):
node[attribute] = ohai[attribute]
chef.save_config(node) | python | def deploy_chef(ask="yes", version="11"):
"""Install chef-solo on a node"""
env.host_string = lib.get_env_host_string()
if ask == "no" or littlechef.noninteractive:
print("Deploying Chef using omnibus installer version: ...".format(version))
else:
message = ('\nAre you sure you want to install Chef version:'
'{0} on node {1}?'.format(version, env.host_string))
if not confirm(message):
abort('Aborted by user')
lib.print_header("Configuring Chef Solo on {0}".format(env.host_string))
if not __testing__:
solo.install(version)
solo.configure()
# Build a basic node file if there isn't one already
# with some properties from ohai
with settings(hide('stdout'), warn_only=True):
output = sudo('ohai -l warn')
if output.succeeded:
try:
ohai = json.loads(output)
except ValueError:
abort("Could not parse ohai's output"
":\n {0}".format(output))
node = {"run_list": []}
for attribute in ["ipaddress", "platform", "platform_family",
"platform_version"]:
if ohai.get(attribute):
node[attribute] = ohai[attribute]
chef.save_config(node) | [
"def",
"deploy_chef",
"(",
"ask",
"=",
"\"yes\"",
",",
"version",
"=",
"\"11\"",
")",
":",
"env",
".",
"host_string",
"=",
"lib",
".",
"get_env_host_string",
"(",
")",
"if",
"ask",
"==",
"\"no\"",
"or",
"littlechef",
".",
"noninteractive",
":",
"print",
"(",
"\"Deploying Chef using omnibus installer version: ...\"",
".",
"format",
"(",
"version",
")",
")",
"else",
":",
"message",
"=",
"(",
"'\\nAre you sure you want to install Chef version:'",
"'{0} on node {1}?'",
".",
"format",
"(",
"version",
",",
"env",
".",
"host_string",
")",
")",
"if",
"not",
"confirm",
"(",
"message",
")",
":",
"abort",
"(",
"'Aborted by user'",
")",
"lib",
".",
"print_header",
"(",
"\"Configuring Chef Solo on {0}\"",
".",
"format",
"(",
"env",
".",
"host_string",
")",
")",
"if",
"not",
"__testing__",
":",
"solo",
".",
"install",
"(",
"version",
")",
"solo",
".",
"configure",
"(",
")",
"# Build a basic node file if there isn't one already",
"# with some properties from ohai",
"with",
"settings",
"(",
"hide",
"(",
"'stdout'",
")",
",",
"warn_only",
"=",
"True",
")",
":",
"output",
"=",
"sudo",
"(",
"'ohai -l warn'",
")",
"if",
"output",
".",
"succeeded",
":",
"try",
":",
"ohai",
"=",
"json",
".",
"loads",
"(",
"output",
")",
"except",
"ValueError",
":",
"abort",
"(",
"\"Could not parse ohai's output\"",
"\":\\n {0}\"",
".",
"format",
"(",
"output",
")",
")",
"node",
"=",
"{",
"\"run_list\"",
":",
"[",
"]",
"}",
"for",
"attribute",
"in",
"[",
"\"ipaddress\"",
",",
"\"platform\"",
",",
"\"platform_family\"",
",",
"\"platform_version\"",
"]",
":",
"if",
"ohai",
".",
"get",
"(",
"attribute",
")",
":",
"node",
"[",
"attribute",
"]",
"=",
"ohai",
"[",
"attribute",
"]",
"chef",
".",
"save_config",
"(",
"node",
")"
] | Install chef-solo on a node | [
"Install",
"chef",
"-",
"solo",
"on",
"a",
"node"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L183-L215 |
5,173 | tobami/littlechef | littlechef/runner.py | plugin | def plugin(name):
"""Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory
"""
env.host_string = lib.get_env_host_string()
plug = lib.import_plugin(name)
lib.print_header("Executing plugin '{0}' on "
"{1}".format(name, env.host_string))
node = lib.get_node(env.host_string)
if node == {'run_list': []}:
node['name'] = env.host_string
plug.execute(node)
print("Finished executing plugin") | python | def plugin(name):
"""Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory
"""
env.host_string = lib.get_env_host_string()
plug = lib.import_plugin(name)
lib.print_header("Executing plugin '{0}' on "
"{1}".format(name, env.host_string))
node = lib.get_node(env.host_string)
if node == {'run_list': []}:
node['name'] = env.host_string
plug.execute(node)
print("Finished executing plugin") | [
"def",
"plugin",
"(",
"name",
")",
":",
"env",
".",
"host_string",
"=",
"lib",
".",
"get_env_host_string",
"(",
")",
"plug",
"=",
"lib",
".",
"import_plugin",
"(",
"name",
")",
"lib",
".",
"print_header",
"(",
"\"Executing plugin '{0}' on \"",
"\"{1}\"",
".",
"format",
"(",
"name",
",",
"env",
".",
"host_string",
")",
")",
"node",
"=",
"lib",
".",
"get_node",
"(",
"env",
".",
"host_string",
")",
"if",
"node",
"==",
"{",
"'run_list'",
":",
"[",
"]",
"}",
":",
"node",
"[",
"'name'",
"]",
"=",
"env",
".",
"host_string",
"plug",
".",
"execute",
"(",
"node",
")",
"print",
"(",
"\"Finished executing plugin\"",
")"
] | Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory | [
"Executes",
"the",
"selected",
"plugin",
"Plugins",
"are",
"expected",
"to",
"be",
"found",
"in",
"the",
"kitchen",
"s",
"plugins",
"directory"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L269-L282 |
5,174 | tobami/littlechef | littlechef/runner.py | list_envs | def list_envs():
"""List all environments"""
for env in lib.get_environments():
margin_left = lib.get_margin(len(env['name']))
print("{0}{1}{2}".format(
env['name'], margin_left,
env.get('description', '(no description)'))) | python | def list_envs():
"""List all environments"""
for env in lib.get_environments():
margin_left = lib.get_margin(len(env['name']))
print("{0}{1}{2}".format(
env['name'], margin_left,
env.get('description', '(no description)'))) | [
"def",
"list_envs",
"(",
")",
":",
"for",
"env",
"in",
"lib",
".",
"get_environments",
"(",
")",
":",
"margin_left",
"=",
"lib",
".",
"get_margin",
"(",
"len",
"(",
"env",
"[",
"'name'",
"]",
")",
")",
"print",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"env",
"[",
"'name'",
"]",
",",
"margin_left",
",",
"env",
".",
"get",
"(",
"'description'",
",",
"'(no description)'",
")",
")",
")"
] | List all environments | [
"List",
"all",
"environments"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L310-L316 |
5,175 | tobami/littlechef | littlechef/runner.py | list_nodes_with_tag | def list_nodes_with_tag(tag):
"""Show all nodes which have assigned a given tag"""
lib.print_nodes(lib.get_nodes_with_tag(tag, env.chef_environment,
littlechef.include_guests)) | python | def list_nodes_with_tag(tag):
"""Show all nodes which have assigned a given tag"""
lib.print_nodes(lib.get_nodes_with_tag(tag, env.chef_environment,
littlechef.include_guests)) | [
"def",
"list_nodes_with_tag",
"(",
"tag",
")",
":",
"lib",
".",
"print_nodes",
"(",
"lib",
".",
"get_nodes_with_tag",
"(",
"tag",
",",
"env",
".",
"chef_environment",
",",
"littlechef",
".",
"include_guests",
")",
")"
] | Show all nodes which have assigned a given tag | [
"Show",
"all",
"nodes",
"which",
"have",
"assigned",
"a",
"given",
"tag"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L320-L323 |
5,176 | tobami/littlechef | littlechef/runner.py | list_recipes | def list_recipes():
"""Show a list of all available recipes"""
for recipe in lib.get_recipes():
margin_left = lib.get_margin(len(recipe['name']))
print("{0}{1}{2}".format(
recipe['name'], margin_left, recipe['description'])) | python | def list_recipes():
"""Show a list of all available recipes"""
for recipe in lib.get_recipes():
margin_left = lib.get_margin(len(recipe['name']))
print("{0}{1}{2}".format(
recipe['name'], margin_left, recipe['description'])) | [
"def",
"list_recipes",
"(",
")",
":",
"for",
"recipe",
"in",
"lib",
".",
"get_recipes",
"(",
")",
":",
"margin_left",
"=",
"lib",
".",
"get_margin",
"(",
"len",
"(",
"recipe",
"[",
"'name'",
"]",
")",
")",
"print",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"recipe",
"[",
"'name'",
"]",
",",
"margin_left",
",",
"recipe",
"[",
"'description'",
"]",
")",
")"
] | Show a list of all available recipes | [
"Show",
"a",
"list",
"of",
"all",
"available",
"recipes"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L327-L332 |
5,177 | tobami/littlechef | littlechef/runner.py | list_roles | def list_roles():
"""Show a list of all available roles"""
for role in lib.get_roles():
margin_left = lib.get_margin(len(role['fullname']))
print("{0}{1}{2}".format(
role['fullname'], margin_left,
role.get('description', '(no description)'))) | python | def list_roles():
"""Show a list of all available roles"""
for role in lib.get_roles():
margin_left = lib.get_margin(len(role['fullname']))
print("{0}{1}{2}".format(
role['fullname'], margin_left,
role.get('description', '(no description)'))) | [
"def",
"list_roles",
"(",
")",
":",
"for",
"role",
"in",
"lib",
".",
"get_roles",
"(",
")",
":",
"margin_left",
"=",
"lib",
".",
"get_margin",
"(",
"len",
"(",
"role",
"[",
"'fullname'",
"]",
")",
")",
"print",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"role",
"[",
"'fullname'",
"]",
",",
"margin_left",
",",
"role",
".",
"get",
"(",
"'description'",
",",
"'(no description)'",
")",
")",
")"
] | Show a list of all available roles | [
"Show",
"a",
"list",
"of",
"all",
"available",
"roles"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L343-L349 |
5,178 | tobami/littlechef | littlechef/runner.py | _check_appliances | def _check_appliances():
"""Looks around and return True or False based on whether we are in a
kitchen
"""
filenames = os.listdir(os.getcwd())
missing = []
for dirname in ['nodes', 'environments', 'roles', 'cookbooks', 'data_bags']:
if (dirname not in filenames) or (not os.path.isdir(dirname)):
missing.append(dirname)
return (not bool(missing)), missing | python | def _check_appliances():
"""Looks around and return True or False based on whether we are in a
kitchen
"""
filenames = os.listdir(os.getcwd())
missing = []
for dirname in ['nodes', 'environments', 'roles', 'cookbooks', 'data_bags']:
if (dirname not in filenames) or (not os.path.isdir(dirname)):
missing.append(dirname)
return (not bool(missing)), missing | [
"def",
"_check_appliances",
"(",
")",
":",
"filenames",
"=",
"os",
".",
"listdir",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"missing",
"=",
"[",
"]",
"for",
"dirname",
"in",
"[",
"'nodes'",
",",
"'environments'",
",",
"'roles'",
",",
"'cookbooks'",
",",
"'data_bags'",
"]",
":",
"if",
"(",
"dirname",
"not",
"in",
"filenames",
")",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
")",
":",
"missing",
".",
"append",
"(",
"dirname",
")",
"return",
"(",
"not",
"bool",
"(",
"missing",
")",
")",
",",
"missing"
] | Looks around and returns True or False based on whether we are in a
kitchen | [
"Looks",
"around",
"and",
"return",
"True",
"or",
"False",
"based",
"on",
"whether",
"we",
"are",
"in",
"a",
"kitchen"
] | aab8c94081b38100a69cc100bc4278ae7419c58e | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L365-L374 |
5,179 | jbittel/django-mama-cas | mama_cas/models.py | TicketManager.create_ticket_str | def create_ticket_str(self, prefix=None):
"""
Generate a sufficiently opaque ticket string to ensure the ticket is
not guessable. If a prefix is provided, prepend it to the string.
"""
if not prefix:
prefix = self.model.TICKET_PREFIX
return "%s-%d-%s" % (prefix, int(time.time()),
get_random_string(length=self.model.TICKET_RAND_LEN)) | python | def create_ticket_str(self, prefix=None):
"""
Generate a sufficiently opaque ticket string to ensure the ticket is
not guessable. If a prefix is provided, prepend it to the string.
"""
if not prefix:
prefix = self.model.TICKET_PREFIX
return "%s-%d-%s" % (prefix, int(time.time()),
get_random_string(length=self.model.TICKET_RAND_LEN)) | [
"def",
"create_ticket_str",
"(",
"self",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"not",
"prefix",
":",
"prefix",
"=",
"self",
".",
"model",
".",
"TICKET_PREFIX",
"return",
"\"%s-%d-%s\"",
"%",
"(",
"prefix",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
",",
"get_random_string",
"(",
"length",
"=",
"self",
".",
"model",
".",
"TICKET_RAND_LEN",
")",
")"
] | Generate a sufficiently opaque ticket string to ensure the ticket is
not guessable. If a prefix is provided, prepend it to the string. | [
"Generate",
"a",
"sufficiently",
"opaque",
"ticket",
"string",
"to",
"ensure",
"the",
"ticket",
"is",
"not",
"guessable",
".",
"If",
"a",
"prefix",
"is",
"provided",
"prepend",
"it",
"to",
"the",
"string",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L58-L66 |
5,180 | jbittel/django-mama-cas | mama_cas/models.py | TicketManager.validate_ticket | def validate_ticket(self, ticket, service, renew=False, require_https=False):
"""
Given a ticket string and service identifier, validate the
corresponding ``Ticket``. If validation succeeds, return the
``Ticket``. If validation fails, raise an appropriate error.
If ``renew`` is ``True``, ``ServiceTicket`` validation will
only succeed if the ticket was issued from the presentation
of the user's primary credentials.
If ``require_https`` is ``True``, ``ServiceTicket`` validation
will only succeed if the service URL scheme is HTTPS.
"""
if not ticket:
raise InvalidRequest("No ticket string provided")
if not self.model.TICKET_RE.match(ticket):
raise InvalidTicket("Ticket string %s is invalid" % ticket)
try:
t = self.get(ticket=ticket)
except self.model.DoesNotExist:
raise InvalidTicket("Ticket %s does not exist" % ticket)
if t.is_consumed():
raise InvalidTicket("%s %s has already been used" %
(t.name, ticket))
if t.is_expired():
raise InvalidTicket("%s %s has expired" % (t.name, ticket))
if not service:
raise InvalidRequest("No service identifier provided")
if require_https and not is_scheme_https(service):
raise InvalidService("Service %s is not HTTPS" % service)
if not service_allowed(service):
raise InvalidService("Service %s is not a valid %s URL" %
(service, t.name))
try:
if not match_service(t.service, service):
raise InvalidService("%s %s for service %s is invalid for "
"service %s" % (t.name, ticket, t.service, service))
except AttributeError:
pass
try:
if renew and not t.is_primary():
raise InvalidTicket("%s %s was not issued via primary "
"credentials" % (t.name, ticket))
except AttributeError:
pass
logger.debug("Validated %s %s" % (t.name, ticket))
return t | python | def validate_ticket(self, ticket, service, renew=False, require_https=False):
"""
Given a ticket string and service identifier, validate the
corresponding ``Ticket``. If validation succeeds, return the
``Ticket``. If validation fails, raise an appropriate error.
If ``renew`` is ``True``, ``ServiceTicket`` validation will
only succeed if the ticket was issued from the presentation
of the user's primary credentials.
If ``require_https`` is ``True``, ``ServiceTicket`` validation
will only succeed if the service URL scheme is HTTPS.
"""
if not ticket:
raise InvalidRequest("No ticket string provided")
if not self.model.TICKET_RE.match(ticket):
raise InvalidTicket("Ticket string %s is invalid" % ticket)
try:
t = self.get(ticket=ticket)
except self.model.DoesNotExist:
raise InvalidTicket("Ticket %s does not exist" % ticket)
if t.is_consumed():
raise InvalidTicket("%s %s has already been used" %
(t.name, ticket))
if t.is_expired():
raise InvalidTicket("%s %s has expired" % (t.name, ticket))
if not service:
raise InvalidRequest("No service identifier provided")
if require_https and not is_scheme_https(service):
raise InvalidService("Service %s is not HTTPS" % service)
if not service_allowed(service):
raise InvalidService("Service %s is not a valid %s URL" %
(service, t.name))
try:
if not match_service(t.service, service):
raise InvalidService("%s %s for service %s is invalid for "
"service %s" % (t.name, ticket, t.service, service))
except AttributeError:
pass
try:
if renew and not t.is_primary():
raise InvalidTicket("%s %s was not issued via primary "
"credentials" % (t.name, ticket))
except AttributeError:
pass
logger.debug("Validated %s %s" % (t.name, ticket))
return t | [
"def",
"validate_ticket",
"(",
"self",
",",
"ticket",
",",
"service",
",",
"renew",
"=",
"False",
",",
"require_https",
"=",
"False",
")",
":",
"if",
"not",
"ticket",
":",
"raise",
"InvalidRequest",
"(",
"\"No ticket string provided\"",
")",
"if",
"not",
"self",
".",
"model",
".",
"TICKET_RE",
".",
"match",
"(",
"ticket",
")",
":",
"raise",
"InvalidTicket",
"(",
"\"Ticket string %s is invalid\"",
"%",
"ticket",
")",
"try",
":",
"t",
"=",
"self",
".",
"get",
"(",
"ticket",
"=",
"ticket",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"raise",
"InvalidTicket",
"(",
"\"Ticket %s does not exist\"",
"%",
"ticket",
")",
"if",
"t",
".",
"is_consumed",
"(",
")",
":",
"raise",
"InvalidTicket",
"(",
"\"%s %s has already been used\"",
"%",
"(",
"t",
".",
"name",
",",
"ticket",
")",
")",
"if",
"t",
".",
"is_expired",
"(",
")",
":",
"raise",
"InvalidTicket",
"(",
"\"%s %s has expired\"",
"%",
"(",
"t",
".",
"name",
",",
"ticket",
")",
")",
"if",
"not",
"service",
":",
"raise",
"InvalidRequest",
"(",
"\"No service identifier provided\"",
")",
"if",
"require_https",
"and",
"not",
"is_scheme_https",
"(",
"service",
")",
":",
"raise",
"InvalidService",
"(",
"\"Service %s is not HTTPS\"",
"%",
"service",
")",
"if",
"not",
"service_allowed",
"(",
"service",
")",
":",
"raise",
"InvalidService",
"(",
"\"Service %s is not a valid %s URL\"",
"%",
"(",
"service",
",",
"t",
".",
"name",
")",
")",
"try",
":",
"if",
"not",
"match_service",
"(",
"t",
".",
"service",
",",
"service",
")",
":",
"raise",
"InvalidService",
"(",
"\"%s %s for service %s is invalid for \"",
"\"service %s\"",
"%",
"(",
"t",
".",
"name",
",",
"ticket",
",",
"t",
".",
"service",
",",
"service",
")",
")",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"if",
"renew",
"and",
"not",
"t",
".",
"is_primary",
"(",
")",
":",
"raise",
"InvalidTicket",
"(",
"\"%s %s was not issued via primary \"",
"\"credentials\"",
"%",
"(",
"t",
".",
"name",
",",
"ticket",
")",
")",
"except",
"AttributeError",
":",
"pass",
"logger",
".",
"debug",
"(",
"\"Validated %s %s\"",
"%",
"(",
"t",
".",
"name",
",",
"ticket",
")",
")",
"return",
"t"
] | Given a ticket string and service identifier, validate the
corresponding ``Ticket``. If validation succeeds, return the
``Ticket``. If validation fails, raise an appropriate error.
If ``renew`` is ``True``, ``ServiceTicket`` validation will
only succeed if the ticket was issued from the presentation
of the user's primary credentials.
If ``require_https`` is ``True``, ``ServiceTicket`` validation
will only succeed if the service URL scheme is HTTPS. | [
"Given",
"a",
"ticket",
"string",
"and",
"service",
"identifier",
"validate",
"the",
"corresponding",
"Ticket",
".",
"If",
"validation",
"succeeds",
"return",
"the",
"Ticket",
".",
"If",
"validation",
"fails",
"raise",
"an",
"appropriate",
"error",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L68-L123 |
5,181 | jbittel/django-mama-cas | mama_cas/models.py | TicketManager.delete_invalid_tickets | def delete_invalid_tickets(self):
"""
Delete consumed or expired ``Ticket``s that are not referenced
by other ``Ticket``s. Invalid tickets are no longer valid for
authentication and can be safely deleted.
A custom management command is provided that executes this method
on all applicable models by running ``manage.py cleanupcas``.
"""
for ticket in self.filter(Q(consumed__isnull=False) |
Q(expires__lte=now())).order_by('-expires'):
try:
ticket.delete()
except models.ProtectedError:
pass | python | def delete_invalid_tickets(self):
"""
Delete consumed or expired ``Ticket``s that are not referenced
by other ``Ticket``s. Invalid tickets are no longer valid for
authentication and can be safely deleted.
A custom management command is provided that executes this method
on all applicable models by running ``manage.py cleanupcas``.
"""
for ticket in self.filter(Q(consumed__isnull=False) |
Q(expires__lte=now())).order_by('-expires'):
try:
ticket.delete()
except models.ProtectedError:
pass | [
"def",
"delete_invalid_tickets",
"(",
"self",
")",
":",
"for",
"ticket",
"in",
"self",
".",
"filter",
"(",
"Q",
"(",
"consumed__isnull",
"=",
"False",
")",
"|",
"Q",
"(",
"expires__lte",
"=",
"now",
"(",
")",
")",
")",
".",
"order_by",
"(",
"'-expires'",
")",
":",
"try",
":",
"ticket",
".",
"delete",
"(",
")",
"except",
"models",
".",
"ProtectedError",
":",
"pass"
] | Delete consumed or expired ``Ticket``s that are not referenced
by other ``Ticket``s. Invalid tickets are no longer valid for
authentication and can be safely deleted.
A custom management command is provided that executes this method
on all applicable models by running ``manage.py cleanupcas``. | [
"Delete",
"consumed",
"or",
"expired",
"Ticket",
"s",
"that",
"are",
"not",
"referenced",
"by",
"other",
"Ticket",
"s",
".",
"Invalid",
"tickets",
"are",
"no",
"longer",
"valid",
"for",
"authentication",
"and",
"can",
"be",
"safely",
"deleted",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L125-L139 |
5,182 | jbittel/django-mama-cas | mama_cas/models.py | TicketManager.consume_tickets | def consume_tickets(self, user):
"""
Consume all valid ``Ticket``s for a specified user. This is run
when the user logs out to ensure all issued tickets are no longer
valid for future authentication attempts.
"""
for ticket in self.filter(user=user, consumed__isnull=True,
expires__gt=now()):
ticket.consume() | python | def consume_tickets(self, user):
"""
Consume all valid ``Ticket``s for a specified user. This is run
when the user logs out to ensure all issued tickets are no longer
valid for future authentication attempts.
"""
for ticket in self.filter(user=user, consumed__isnull=True,
expires__gt=now()):
ticket.consume() | [
"def",
"consume_tickets",
"(",
"self",
",",
"user",
")",
":",
"for",
"ticket",
"in",
"self",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"consumed__isnull",
"=",
"True",
",",
"expires__gt",
"=",
"now",
"(",
")",
")",
":",
"ticket",
".",
"consume",
"(",
")"
] | Consume all valid ``Ticket``s for a specified user. This is run
when the user logs out to ensure all issued tickets are no longer
valid for future authentication attempts. | [
"Consume",
"all",
"valid",
"Ticket",
"s",
"for",
"a",
"specified",
"user",
".",
"This",
"is",
"run",
"when",
"the",
"user",
"logs",
"out",
"to",
"ensure",
"all",
"issued",
"tickets",
"are",
"no",
"longer",
"valid",
"for",
"future",
"authentication",
"attempts",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L141-L149 |
5,183 | jbittel/django-mama-cas | mama_cas/models.py | ServiceTicketManager.request_sign_out | def request_sign_out(self, user):
"""
Send a single logout request to each service accessed by a
specified user. This is called at logout when single logout
is enabled.
If requests-futures is installed, asynchronous requests will
be sent. Otherwise, synchronous requests will be sent.
"""
session = Session()
for ticket in self.filter(user=user, consumed__gte=user.last_login):
ticket.request_sign_out(session=session) | python | def request_sign_out(self, user):
"""
Send a single logout request to each service accessed by a
specified user. This is called at logout when single logout
is enabled.
If requests-futures is installed, asynchronous requests will
be sent. Otherwise, synchronous requests will be sent.
"""
session = Session()
for ticket in self.filter(user=user, consumed__gte=user.last_login):
ticket.request_sign_out(session=session) | [
"def",
"request_sign_out",
"(",
"self",
",",
"user",
")",
":",
"session",
"=",
"Session",
"(",
")",
"for",
"ticket",
"in",
"self",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"consumed__gte",
"=",
"user",
".",
"last_login",
")",
":",
"ticket",
".",
"request_sign_out",
"(",
"session",
"=",
"session",
")"
] | Send a single logout request to each service accessed by a
specified user. This is called at logout when single logout
is enabled.
If requests-futures is installed, asynchronous requests will
be sent. Otherwise, synchronous requests will be sent. | [
"Send",
"a",
"single",
"logout",
"request",
"to",
"each",
"service",
"accessed",
"by",
"a",
"specified",
"user",
".",
"This",
"is",
"called",
"at",
"logout",
"when",
"single",
"logout",
"is",
"enabled",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L207-L218 |
5,184 | jbittel/django-mama-cas | mama_cas/models.py | ServiceTicket.request_sign_out | def request_sign_out(self, session=requests):
"""
Send a POST request to the ``ServiceTicket``s logout URL to
request sign-out.
"""
if logout_allowed(self.service):
request = SingleSignOutRequest(context={'ticket': self})
url = get_logout_url(self.service) or self.service
session.post(url, data={'logoutRequest': request.render_content()})
logger.info("Single sign-out request sent to %s" % url) | python | def request_sign_out(self, session=requests):
"""
Send a POST request to the ``ServiceTicket``s logout URL to
request sign-out.
"""
if logout_allowed(self.service):
request = SingleSignOutRequest(context={'ticket': self})
url = get_logout_url(self.service) or self.service
session.post(url, data={'logoutRequest': request.render_content()})
logger.info("Single sign-out request sent to %s" % url) | [
"def",
"request_sign_out",
"(",
"self",
",",
"session",
"=",
"requests",
")",
":",
"if",
"logout_allowed",
"(",
"self",
".",
"service",
")",
":",
"request",
"=",
"SingleSignOutRequest",
"(",
"context",
"=",
"{",
"'ticket'",
":",
"self",
"}",
")",
"url",
"=",
"get_logout_url",
"(",
"self",
".",
"service",
")",
"or",
"self",
".",
"service",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"{",
"'logoutRequest'",
":",
"request",
".",
"render_content",
"(",
")",
"}",
")",
"logger",
".",
"info",
"(",
"\"Single sign-out request sent to %s\"",
"%",
"url",
")"
] | Send a POST request to the ``ServiceTicket``s logout URL to
request sign-out. | [
"Send",
"a",
"POST",
"request",
"to",
"the",
"ServiceTicket",
"s",
"logout",
"URL",
"to",
"request",
"sign",
"-",
"out",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L248-L257 |
5,185 | jbittel/django-mama-cas | mama_cas/models.py | ProxyGrantingTicketManager.validate_callback | def validate_callback(self, service, pgturl, pgtid, pgtiou):
"""Verify the provided proxy callback URL."""
if not proxy_allowed(service):
raise UnauthorizedServiceProxy("%s is not authorized to use proxy authentication" % service)
if not is_scheme_https(pgturl):
raise InvalidProxyCallback("Proxy callback %s is not HTTPS" % pgturl)
if not proxy_callback_allowed(service, pgturl):
raise InvalidProxyCallback("%s is not an authorized proxy callback URL" % pgturl)
# Verify that the SSL certificate is valid
verify = os.environ.get('REQUESTS_CA_BUNDLE', True)
try:
requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.SSLError:
raise InvalidProxyCallback("SSL certificate validation failed for proxy callback %s" % pgturl)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
# Callback certificate appears valid, so send the ticket strings
pgturl = add_query_params(pgturl, {'pgtId': pgtid, 'pgtIou': pgtiou})
try:
response = requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise InvalidProxyCallback("Proxy callback %s returned %s" % (pgturl, e)) | python | def validate_callback(self, service, pgturl, pgtid, pgtiou):
"""Verify the provided proxy callback URL."""
if not proxy_allowed(service):
raise UnauthorizedServiceProxy("%s is not authorized to use proxy authentication" % service)
if not is_scheme_https(pgturl):
raise InvalidProxyCallback("Proxy callback %s is not HTTPS" % pgturl)
if not proxy_callback_allowed(service, pgturl):
raise InvalidProxyCallback("%s is not an authorized proxy callback URL" % pgturl)
# Verify that the SSL certificate is valid
verify = os.environ.get('REQUESTS_CA_BUNDLE', True)
try:
requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.SSLError:
raise InvalidProxyCallback("SSL certificate validation failed for proxy callback %s" % pgturl)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
# Callback certificate appears valid, so send the ticket strings
pgturl = add_query_params(pgturl, {'pgtId': pgtid, 'pgtIou': pgtiou})
try:
response = requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise InvalidProxyCallback("Proxy callback %s returned %s" % (pgturl, e)) | [
"def",
"validate_callback",
"(",
"self",
",",
"service",
",",
"pgturl",
",",
"pgtid",
",",
"pgtiou",
")",
":",
"if",
"not",
"proxy_allowed",
"(",
"service",
")",
":",
"raise",
"UnauthorizedServiceProxy",
"(",
"\"%s is not authorized to use proxy authentication\"",
"%",
"service",
")",
"if",
"not",
"is_scheme_https",
"(",
"pgturl",
")",
":",
"raise",
"InvalidProxyCallback",
"(",
"\"Proxy callback %s is not HTTPS\"",
"%",
"pgturl",
")",
"if",
"not",
"proxy_callback_allowed",
"(",
"service",
",",
"pgturl",
")",
":",
"raise",
"InvalidProxyCallback",
"(",
"\"%s is not an authorized proxy callback URL\"",
"%",
"pgturl",
")",
"# Verify that the SSL certificate is valid",
"verify",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'REQUESTS_CA_BUNDLE'",
",",
"True",
")",
"try",
":",
"requests",
".",
"get",
"(",
"pgturl",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"5",
")",
"except",
"requests",
".",
"exceptions",
".",
"SSLError",
":",
"raise",
"InvalidProxyCallback",
"(",
"\"SSL certificate validation failed for proxy callback %s\"",
"%",
"pgturl",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"raise",
"InvalidProxyCallback",
"(",
"e",
")",
"# Callback certificate appears valid, so send the ticket strings",
"pgturl",
"=",
"add_query_params",
"(",
"pgturl",
",",
"{",
"'pgtId'",
":",
"pgtid",
",",
"'pgtIou'",
":",
"pgtiou",
"}",
")",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"pgturl",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"5",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"raise",
"InvalidProxyCallback",
"(",
"e",
")",
"try",
":",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"raise",
"InvalidProxyCallback",
"(",
"\"Proxy callback %s returned %s\"",
"%",
"(",
"pgturl",
",",
"e",
")",
")"
] | Verify the provided proxy callback URL. | [
"Verify",
"the",
"provided",
"proxy",
"callback",
"URL",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L299-L329 |
5,186 | jbittel/django-mama-cas | mama_cas/services/__init__.py | _get_backends | def _get_backends():
"""Retrieve the list of configured service backends."""
backends = []
backend_paths = getattr(
settings, 'MAMA_CAS_SERVICE_BACKENDS',
['mama_cas.services.backends.SettingsBackend']
)
for backend_path in backend_paths:
backend = import_string(backend_path)()
backends.append(backend)
return backends | python | def _get_backends():
"""Retrieve the list of configured service backends."""
backends = []
backend_paths = getattr(
settings, 'MAMA_CAS_SERVICE_BACKENDS',
['mama_cas.services.backends.SettingsBackend']
)
for backend_path in backend_paths:
backend = import_string(backend_path)()
backends.append(backend)
return backends | [
"def",
"_get_backends",
"(",
")",
":",
"backends",
"=",
"[",
"]",
"backend_paths",
"=",
"getattr",
"(",
"settings",
",",
"'MAMA_CAS_SERVICE_BACKENDS'",
",",
"[",
"'mama_cas.services.backends.SettingsBackend'",
"]",
")",
"for",
"backend_path",
"in",
"backend_paths",
":",
"backend",
"=",
"import_string",
"(",
"backend_path",
")",
"(",
")",
"backends",
".",
"append",
"(",
"backend",
")",
"return",
"backends"
] | Retrieve the list of configured service backends. | [
"Retrieve",
"the",
"list",
"of",
"configured",
"service",
"backends",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L8-L18 |
5,187 | jbittel/django-mama-cas | mama_cas/services/__init__.py | _is_allowed | def _is_allowed(attr, *args):
"""
Test if a given attribute is allowed according to the
current set of configured service backends.
"""
for backend in _get_backends():
try:
if getattr(backend, attr)(*args):
return True
except AttributeError:
raise NotImplementedError("%s.%s.%s() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__, attr)
)
return False | python | def _is_allowed(attr, *args):
"""
Test if a given attribute is allowed according to the
current set of configured service backends.
"""
for backend in _get_backends():
try:
if getattr(backend, attr)(*args):
return True
except AttributeError:
raise NotImplementedError("%s.%s.%s() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__, attr)
)
return False | [
"def",
"_is_allowed",
"(",
"attr",
",",
"*",
"args",
")",
":",
"for",
"backend",
"in",
"_get_backends",
"(",
")",
":",
"try",
":",
"if",
"getattr",
"(",
"backend",
",",
"attr",
")",
"(",
"*",
"args",
")",
":",
"return",
"True",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
"\"%s.%s.%s() not implemented\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
",",
"attr",
")",
")",
"return",
"False"
] | Test if a given attribute is allowed according to the
current set of configured service backends. | [
"Test",
"if",
"a",
"given",
"attribute",
"is",
"allowed",
"according",
"to",
"the",
"current",
"set",
"of",
"configured",
"service",
"backends",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L21-L34 |
5,188 | jbittel/django-mama-cas | mama_cas/services/__init__.py | _is_valid_service_url | def _is_valid_service_url(url):
"""Access services list from ``MAMA_CAS_VALID_SERVICES``."""
valid_services = getattr(settings, 'MAMA_CAS_VALID_SERVICES', ())
if not valid_services:
return True
warnings.warn(
'The MAMA_CAS_VALID_SERVICES setting is deprecated. Services '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
for service in [re.compile(s) for s in valid_services]:
if service.match(url):
return True
return False | python | def _is_valid_service_url(url):
"""Access services list from ``MAMA_CAS_VALID_SERVICES``."""
valid_services = getattr(settings, 'MAMA_CAS_VALID_SERVICES', ())
if not valid_services:
return True
warnings.warn(
'The MAMA_CAS_VALID_SERVICES setting is deprecated. Services '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
for service in [re.compile(s) for s in valid_services]:
if service.match(url):
return True
return False | [
"def",
"_is_valid_service_url",
"(",
"url",
")",
":",
"valid_services",
"=",
"getattr",
"(",
"settings",
",",
"'MAMA_CAS_VALID_SERVICES'",
",",
"(",
")",
")",
"if",
"not",
"valid_services",
":",
"return",
"True",
"warnings",
".",
"warn",
"(",
"'The MAMA_CAS_VALID_SERVICES setting is deprecated. Services '",
"'should be configured using MAMA_CAS_SERVICES.'",
",",
"DeprecationWarning",
")",
"for",
"service",
"in",
"[",
"re",
".",
"compile",
"(",
"s",
")",
"for",
"s",
"in",
"valid_services",
"]",
":",
"if",
"service",
".",
"match",
"(",
"url",
")",
":",
"return",
"True",
"return",
"False"
] | Access services list from ``MAMA_CAS_VALID_SERVICES``. | [
"Access",
"services",
"list",
"from",
"MAMA_CAS_VALID_SERVICES",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L37-L48 |
5,189 | jbittel/django-mama-cas | mama_cas/services/__init__.py | get_backend_path | def get_backend_path(service):
"""Return the dotted path of the matching backend."""
for backend in _get_backends():
try:
if backend.service_allowed(service):
return "%s.%s" % (backend.__class__.__module__, backend.__class__.__name__)
except AttributeError:
raise NotImplementedError("%s.%s.service_allowed() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None | python | def get_backend_path(service):
"""Return the dotted path of the matching backend."""
for backend in _get_backends():
try:
if backend.service_allowed(service):
return "%s.%s" % (backend.__class__.__module__, backend.__class__.__name__)
except AttributeError:
raise NotImplementedError("%s.%s.service_allowed() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None | [
"def",
"get_backend_path",
"(",
"service",
")",
":",
"for",
"backend",
"in",
"_get_backends",
"(",
")",
":",
"try",
":",
"if",
"backend",
".",
"service_allowed",
"(",
"service",
")",
":",
"return",
"\"%s.%s\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
")",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
"\"%s.%s.service_allowed() not implemented\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"None"
] | Return the dotted path of the matching backend. | [
"Return",
"the",
"dotted",
"path",
"of",
"the",
"matching",
"backend",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L51-L61 |
5,190 | jbittel/django-mama-cas | mama_cas/services/__init__.py | get_callbacks | def get_callbacks(service):
"""Get configured callbacks list for a given service identifier."""
callbacks = list(getattr(settings, 'MAMA_CAS_ATTRIBUTE_CALLBACKS', []))
if callbacks:
warnings.warn(
'The MAMA_CAS_ATTRIBUTE_CALLBACKS setting is deprecated. Service callbacks '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
for backend in _get_backends():
try:
callbacks.extend(backend.get_callbacks(service))
except AttributeError:
raise NotImplementedError("%s.%s.get_callbacks() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return callbacks | python | def get_callbacks(service):
"""Get configured callbacks list for a given service identifier."""
callbacks = list(getattr(settings, 'MAMA_CAS_ATTRIBUTE_CALLBACKS', []))
if callbacks:
warnings.warn(
'The MAMA_CAS_ATTRIBUTE_CALLBACKS setting is deprecated. Service callbacks '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
for backend in _get_backends():
try:
callbacks.extend(backend.get_callbacks(service))
except AttributeError:
raise NotImplementedError("%s.%s.get_callbacks() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return callbacks | [
"def",
"get_callbacks",
"(",
"service",
")",
":",
"callbacks",
"=",
"list",
"(",
"getattr",
"(",
"settings",
",",
"'MAMA_CAS_ATTRIBUTE_CALLBACKS'",
",",
"[",
"]",
")",
")",
"if",
"callbacks",
":",
"warnings",
".",
"warn",
"(",
"'The MAMA_CAS_ATTRIBUTE_CALLBACKS setting is deprecated. Service callbacks '",
"'should be configured using MAMA_CAS_SERVICES.'",
",",
"DeprecationWarning",
")",
"for",
"backend",
"in",
"_get_backends",
"(",
")",
":",
"try",
":",
"callbacks",
".",
"extend",
"(",
"backend",
".",
"get_callbacks",
"(",
"service",
")",
")",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
"\"%s.%s.get_callbacks() not implemented\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"callbacks"
] | Get configured callbacks list for a given service identifier. | [
"Get",
"configured",
"callbacks",
"list",
"for",
"a",
"given",
"service",
"identifier",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L64-L79 |
5,191 | jbittel/django-mama-cas | mama_cas/services/__init__.py | get_logout_url | def get_logout_url(service):
"""Get the configured logout URL for a given service identifier, if any."""
for backend in _get_backends():
try:
return backend.get_logout_url(service)
except AttributeError:
raise NotImplementedError("%s.%s.get_logout_url() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None | python | def get_logout_url(service):
"""Get the configured logout URL for a given service identifier, if any."""
for backend in _get_backends():
try:
return backend.get_logout_url(service)
except AttributeError:
raise NotImplementedError("%s.%s.get_logout_url() not implemented" % (
backend.__class__.__module__, backend.__class__.__name__)
)
return None | [
"def",
"get_logout_url",
"(",
"service",
")",
":",
"for",
"backend",
"in",
"_get_backends",
"(",
")",
":",
"try",
":",
"return",
"backend",
".",
"get_logout_url",
"(",
"service",
")",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
"\"%s.%s.get_logout_url() not implemented\"",
"%",
"(",
"backend",
".",
"__class__",
".",
"__module__",
",",
"backend",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"None"
] | Get the configured logout URL for a given service identifier, if any. | [
"Get",
"the",
"configured",
"logout",
"URL",
"for",
"a",
"given",
"service",
"identifier",
"if",
"any",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L82-L91 |
5,192 | jbittel/django-mama-cas | mama_cas/services/__init__.py | logout_allowed | def logout_allowed(service):
"""Check if a given service identifier should be sent a logout request."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('logout_allowed', service)
if hasattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'):
warnings.warn(
'The MAMA_CAS_ENABLE_SINGLE_SIGN_OUT setting is deprecated. SLO '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
return getattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT', False) | python | def logout_allowed(service):
"""Check if a given service identifier should be sent a logout request."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('logout_allowed', service)
if hasattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'):
warnings.warn(
'The MAMA_CAS_ENABLE_SINGLE_SIGN_OUT setting is deprecated. SLO '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
return getattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT', False) | [
"def",
"logout_allowed",
"(",
"service",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'MAMA_CAS_SERVICES'",
")",
":",
"return",
"_is_allowed",
"(",
"'logout_allowed'",
",",
"service",
")",
"if",
"hasattr",
"(",
"settings",
",",
"'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'",
")",
":",
"warnings",
".",
"warn",
"(",
"'The MAMA_CAS_ENABLE_SINGLE_SIGN_OUT setting is deprecated. SLO '",
"'should be configured using MAMA_CAS_SERVICES.'",
",",
"DeprecationWarning",
")",
"return",
"getattr",
"(",
"settings",
",",
"'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'",
",",
"False",
")"
] | Check if a given service identifier should be sent a logout request. | [
"Check",
"if",
"a",
"given",
"service",
"identifier",
"should",
"be",
"sent",
"a",
"logout",
"request",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L94-L103 |
5,193 | jbittel/django-mama-cas | mama_cas/services/__init__.py | proxy_callback_allowed | def proxy_callback_allowed(service, pgturl):
"""Check if a given proxy callback is allowed for the given service identifier."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('proxy_callback_allowed', service, pgturl)
return _is_valid_service_url(service) | python | def proxy_callback_allowed(service, pgturl):
"""Check if a given proxy callback is allowed for the given service identifier."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('proxy_callback_allowed', service, pgturl)
return _is_valid_service_url(service) | [
"def",
"proxy_callback_allowed",
"(",
"service",
",",
"pgturl",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'MAMA_CAS_SERVICES'",
")",
":",
"return",
"_is_allowed",
"(",
"'proxy_callback_allowed'",
",",
"service",
",",
"pgturl",
")",
"return",
"_is_valid_service_url",
"(",
"service",
")"
] | Check if a given proxy callback is allowed for the given service identifier. | [
"Check",
"if",
"a",
"given",
"proxy",
"callback",
"is",
"allowed",
"for",
"the",
"given",
"service",
"identifier",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/__init__.py#L111-L115 |
5,194 | jbittel/django-mama-cas | mama_cas/forms.py | LoginForm.clean | def clean(self):
"""
Pass the provided username and password to the active
authentication backends and verify the user account is
not disabled. If authentication succeeds, the ``User`` object
is assigned to the form so it can be accessed in the view.
"""
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
try:
self.user = authenticate(request=self.request, username=username, password=password)
except Exception:
logger.exception("Error authenticating %s" % username)
error_msg = _('Internal error while authenticating user')
raise forms.ValidationError(error_msg)
if self.user is None:
logger.warning("Failed authentication for %s" % username)
error_msg = _('The username or password is not correct')
raise forms.ValidationError(error_msg)
else:
if not self.user.is_active:
logger.warning("User account %s is disabled" % username)
error_msg = _('This user account is disabled')
raise forms.ValidationError(error_msg)
return self.cleaned_data | python | def clean(self):
"""
Pass the provided username and password to the active
authentication backends and verify the user account is
not disabled. If authentication succeeds, the ``User`` object
is assigned to the form so it can be accessed in the view.
"""
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
try:
self.user = authenticate(request=self.request, username=username, password=password)
except Exception:
logger.exception("Error authenticating %s" % username)
error_msg = _('Internal error while authenticating user')
raise forms.ValidationError(error_msg)
if self.user is None:
logger.warning("Failed authentication for %s" % username)
error_msg = _('The username or password is not correct')
raise forms.ValidationError(error_msg)
else:
if not self.user.is_active:
logger.warning("User account %s is disabled" % username)
error_msg = _('This user account is disabled')
raise forms.ValidationError(error_msg)
return self.cleaned_data | [
"def",
"clean",
"(",
"self",
")",
":",
"username",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'username'",
")",
"password",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'password'",
")",
"if",
"username",
"and",
"password",
":",
"try",
":",
"self",
".",
"user",
"=",
"authenticate",
"(",
"request",
"=",
"self",
".",
"request",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"\"Error authenticating %s\"",
"%",
"username",
")",
"error_msg",
"=",
"_",
"(",
"'Internal error while authenticating user'",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"error_msg",
")",
"if",
"self",
".",
"user",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Failed authentication for %s\"",
"%",
"username",
")",
"error_msg",
"=",
"_",
"(",
"'The username or password is not correct'",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"error_msg",
")",
"else",
":",
"if",
"not",
"self",
".",
"user",
".",
"is_active",
":",
"logger",
".",
"warning",
"(",
"\"User account %s is disabled\"",
"%",
"username",
")",
"error_msg",
"=",
"_",
"(",
"'This user account is disabled'",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"error_msg",
")",
"return",
"self",
".",
"cleaned_data"
] | Pass the provided username and password to the active
authentication backends and verify the user account is
not disabled. If authentication succeeds, the ``User`` object
is assigned to the form so it can be accessed in the view. | [
"Pass",
"the",
"provided",
"username",
"and",
"password",
"to",
"the",
"active",
"authentication",
"backends",
"and",
"verify",
"the",
"user",
"account",
"is",
"not",
"disabled",
".",
"If",
"authentication",
"succeeds",
"the",
"User",
"object",
"is",
"assigned",
"to",
"the",
"form",
"so",
"it",
"can",
"be",
"accessed",
"in",
"the",
"view",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/forms.py#L34-L62 |
5,195 | jbittel/django-mama-cas | mama_cas/request.py | CasRequestBase.ns | def ns(self, prefix, tag):
"""
Given a prefix and an XML tag, output the qualified name
for proper namespace handling on output.
"""
return etree.QName(self.prefixes[prefix], tag) | python | def ns(self, prefix, tag):
"""
Given a prefix and an XML tag, output the qualified name
for proper namespace handling on output.
"""
return etree.QName(self.prefixes[prefix], tag) | [
"def",
"ns",
"(",
"self",
",",
"prefix",
",",
"tag",
")",
":",
"return",
"etree",
".",
"QName",
"(",
"self",
".",
"prefixes",
"[",
"prefix",
"]",
",",
"tag",
")"
] | Given a prefix and an XML tag, output the qualified name
for proper namespace handling on output. | [
"Given",
"a",
"prefix",
"and",
"an",
"XML",
"tag",
"output",
"the",
"qualified",
"name",
"for",
"proper",
"namespace",
"handling",
"on",
"output",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/request.py#L19-L24 |
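The ns() helper above is a thin wrapper around ``etree.QName``. A standalone illustration of the same qualified-name pattern follows; it assumes the etree module in question is ``lxml.etree`` and substitutes a made-up prefix map for the class's ``self.prefixes`` attribute.

```python
# Standalone illustration of the qualified-name pattern; the prefix map below
# stands in for the class's self.prefixes attribute, and lxml is assumed.
from lxml import etree

prefixes = {'cas': 'http://www.yale.edu/tp/cas'}
qname = etree.QName(prefixes['cas'], 'serviceResponse')
root = etree.Element(qname, nsmap=prefixes)
print(etree.tostring(root))
# b'<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas"/>'
```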
5,196 | jbittel/django-mama-cas | mama_cas/cas.py | validate_service_ticket | def validate_service_ticket(service, ticket, pgturl=None, renew=False, require_https=False):
"""
Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Service validation request received for %s" % ticket)
# Check for proxy tickets passed to /serviceValidate
if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
raise InvalidTicketSpec('Proxy tickets cannot be validated with /serviceValidate')
st = ServiceTicket.objects.validate_ticket(ticket, service, renew=renew, require_https=require_https)
attributes = get_attributes(st.user, st.service)
if pgturl is not None:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=st.user, granted_by_st=st)
else:
pgt = None
return st, attributes, pgt | python | def validate_service_ticket(service, ticket, pgturl=None, renew=False, require_https=False):
"""
Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Service validation request received for %s" % ticket)
# Check for proxy tickets passed to /serviceValidate
if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
raise InvalidTicketSpec('Proxy tickets cannot be validated with /serviceValidate')
st = ServiceTicket.objects.validate_ticket(ticket, service, renew=renew, require_https=require_https)
attributes = get_attributes(st.user, st.service)
if pgturl is not None:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=st.user, granted_by_st=st)
else:
pgt = None
return st, attributes, pgt | [
"def",
"validate_service_ticket",
"(",
"service",
",",
"ticket",
",",
"pgturl",
"=",
"None",
",",
"renew",
"=",
"False",
",",
"require_https",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Service validation request received for %s\"",
"%",
"ticket",
")",
"# Check for proxy tickets passed to /serviceValidate",
"if",
"ticket",
"and",
"ticket",
".",
"startswith",
"(",
"ProxyTicket",
".",
"TICKET_PREFIX",
")",
":",
"raise",
"InvalidTicketSpec",
"(",
"'Proxy tickets cannot be validated with /serviceValidate'",
")",
"st",
"=",
"ServiceTicket",
".",
"objects",
".",
"validate_ticket",
"(",
"ticket",
",",
"service",
",",
"renew",
"=",
"renew",
",",
"require_https",
"=",
"require_https",
")",
"attributes",
"=",
"get_attributes",
"(",
"st",
".",
"user",
",",
"st",
".",
"service",
")",
"if",
"pgturl",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Proxy-granting ticket request received for %s\"",
"%",
"pgturl",
")",
"pgt",
"=",
"ProxyGrantingTicket",
".",
"objects",
".",
"create_ticket",
"(",
"service",
",",
"pgturl",
",",
"user",
"=",
"st",
".",
"user",
",",
"granted_by_st",
"=",
"st",
")",
"else",
":",
"pgt",
"=",
"None",
"return",
"st",
",",
"attributes",
",",
"pgt"
] | Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed. | [
"Validate",
"a",
"service",
"ticket",
"string",
".",
"Return",
"a",
"triplet",
"containing",
"a",
"ServiceTicket",
"and",
"an",
"optional",
"ProxyGrantingTicket",
"or",
"a",
"ValidationError",
"if",
"ticket",
"validation",
"failed",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L18-L38 |
5,197 | jbittel/django-mama-cas | mama_cas/cas.py | validate_proxy_ticket | def validate_proxy_ticket(service, ticket, pgturl=None):
"""
Validate a proxy ticket string. Return a 4-tuple containing a
``ProxyTicket``, an optional ``ProxyGrantingTicket`` and a list
of proxies through which authentication proceeded, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Proxy validation request received for %s" % ticket)
pt = ProxyTicket.objects.validate_ticket(ticket, service)
attributes = get_attributes(pt.user, pt.service)
# Build a list of all services that proxied authentication,
# in reverse order of which they were traversed
proxies = [pt.service]
prior_pt = pt.granted_by_pgt.granted_by_pt
while prior_pt:
proxies.append(prior_pt.service)
prior_pt = prior_pt.granted_by_pgt.granted_by_pt
if pgturl is not None:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=pt.user, granted_by_pt=pt)
else:
pgt = None
return pt, attributes, pgt, proxies | python | def validate_proxy_ticket(service, ticket, pgturl=None):
"""
Validate a proxy ticket string. Return a 4-tuple containing a
``ProxyTicket``, an optional ``ProxyGrantingTicket`` and a list
of proxies through which authentication proceeded, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Proxy validation request received for %s" % ticket)
pt = ProxyTicket.objects.validate_ticket(ticket, service)
attributes = get_attributes(pt.user, pt.service)
# Build a list of all services that proxied authentication,
# in reverse order of which they were traversed
proxies = [pt.service]
prior_pt = pt.granted_by_pgt.granted_by_pt
while prior_pt:
proxies.append(prior_pt.service)
prior_pt = prior_pt.granted_by_pgt.granted_by_pt
if pgturl is not None:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=pt.user, granted_by_pt=pt)
else:
pgt = None
return pt, attributes, pgt, proxies | [
"def",
"validate_proxy_ticket",
"(",
"service",
",",
"ticket",
",",
"pgturl",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"Proxy validation request received for %s\"",
"%",
"ticket",
")",
"pt",
"=",
"ProxyTicket",
".",
"objects",
".",
"validate_ticket",
"(",
"ticket",
",",
"service",
")",
"attributes",
"=",
"get_attributes",
"(",
"pt",
".",
"user",
",",
"pt",
".",
"service",
")",
"# Build a list of all services that proxied authentication,",
"# in reverse order of which they were traversed",
"proxies",
"=",
"[",
"pt",
".",
"service",
"]",
"prior_pt",
"=",
"pt",
".",
"granted_by_pgt",
".",
"granted_by_pt",
"while",
"prior_pt",
":",
"proxies",
".",
"append",
"(",
"prior_pt",
".",
"service",
")",
"prior_pt",
"=",
"prior_pt",
".",
"granted_by_pgt",
".",
"granted_by_pt",
"if",
"pgturl",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Proxy-granting ticket request received for %s\"",
"%",
"pgturl",
")",
"pgt",
"=",
"ProxyGrantingTicket",
".",
"objects",
".",
"create_ticket",
"(",
"service",
",",
"pgturl",
",",
"user",
"=",
"pt",
".",
"user",
",",
"granted_by_pt",
"=",
"pt",
")",
"else",
":",
"pgt",
"=",
"None",
"return",
"pt",
",",
"attributes",
",",
"pgt",
",",
"proxies"
] | Validate a proxy ticket string. Return a 4-tuple containing a
``ProxyTicket``, an optional ``ProxyGrantingTicket`` and a list
of proxies through which authentication proceeded, or a
``ValidationError`` if ticket validation failed. | [
"Validate",
"a",
"proxy",
"ticket",
"string",
".",
"Return",
"a",
"4",
"-",
"tuple",
"containing",
"a",
"ProxyTicket",
"an",
"optional",
"ProxyGrantingTicket",
"and",
"a",
"list",
"of",
"proxies",
"through",
"which",
"authentication",
"proceeded",
"or",
"a",
"ValidationError",
"if",
"ticket",
"validation",
"failed",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L41-L66 |
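The proxy flow looks much the same from the caller's side; the 4-tuple unpacking below mirrors the return value described in the docstring, with placeholder ticket and URL values.

```python
# Sketch only; the ticket string and backend URL are placeholders.
from mama_cas.cas import validate_proxy_ticket

pt, attributes, pgt, proxies = validate_proxy_ticket(
    'https://backend.example.com/api', 'PT-1234567890-abcdefghijklmnop')
# `proxies` lists the services that proxied authentication, most recently
# traversed first.
```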
5,198 | jbittel/django-mama-cas | mama_cas/cas.py | validate_proxy_granting_ticket | def validate_proxy_granting_ticket(pgt, target_service):
"""
Validate a proxy granting ticket string. Return an ordered pair
containing a ``ProxyTicket``, or a ``ValidationError`` if ticket
validation failed.
"""
logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt))
pgt = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service)
pt = ProxyTicket.objects.create_ticket(service=target_service, user=pgt.user, granted_by_pgt=pgt)
return pt | python | def validate_proxy_granting_ticket(pgt, target_service):
"""
Validate a proxy granting ticket string. Return an ordered pair
containing a ``ProxyTicket``, or a ``ValidationError`` if ticket
validation failed.
"""
logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt))
pgt = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service)
pt = ProxyTicket.objects.create_ticket(service=target_service, user=pgt.user, granted_by_pgt=pgt)
return pt | [
"def",
"validate_proxy_granting_ticket",
"(",
"pgt",
",",
"target_service",
")",
":",
"logger",
".",
"debug",
"(",
"\"Proxy ticket request received for %s using %s\"",
"%",
"(",
"target_service",
",",
"pgt",
")",
")",
"pgt",
"=",
"ProxyGrantingTicket",
".",
"objects",
".",
"validate_ticket",
"(",
"pgt",
",",
"target_service",
")",
"pt",
"=",
"ProxyTicket",
".",
"objects",
".",
"create_ticket",
"(",
"service",
"=",
"target_service",
",",
"user",
"=",
"pgt",
".",
"user",
",",
"granted_by_pgt",
"=",
"pgt",
")",
"return",
"pt"
] | Validate a proxy granting ticket string. Return an ordered pair
containing a ``ProxyTicket``, or a ``ValidationError`` if ticket
validation failed. | [
"Validate",
"a",
"proxy",
"granting",
"ticket",
"string",
".",
"Return",
"an",
"ordered",
"pair",
"containing",
"a",
"ProxyTicket",
"or",
"a",
"ValidationError",
"if",
"ticket",
"validation",
"failed",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L69-L79 |
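And for the /proxy endpoint, which exchanges a proxy-granting ticket for a new proxy ticket; both arguments below are placeholders, and the ``ticket`` attribute on the returned object is an assumption about the model's field name.

```python
# Sketch only; both arguments are placeholders.
from mama_cas.cas import validate_proxy_granting_ticket

pt = validate_proxy_granting_ticket('PGT-1234567890-abcdefghijklmnop',
                                    'https://backend.example.com/api')
# The new ticket string (pt.ticket, assuming the model stores it in a
# `ticket` field) would be returned to the caller as cas:proxyTicket.
```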
5,199 | jbittel/django-mama-cas | mama_cas/cas.py | get_attributes | def get_attributes(user, service):
"""
Return a dictionary of user attributes from the set of configured
callback functions.
"""
attributes = {}
for path in get_callbacks(service):
callback = import_string(path)
attributes.update(callback(user, service))
return attributes | python | def get_attributes(user, service):
"""
Return a dictionary of user attributes from the set of configured
callback functions.
"""
attributes = {}
for path in get_callbacks(service):
callback = import_string(path)
attributes.update(callback(user, service))
return attributes | [
"def",
"get_attributes",
"(",
"user",
",",
"service",
")",
":",
"attributes",
"=",
"{",
"}",
"for",
"path",
"in",
"get_callbacks",
"(",
"service",
")",
":",
"callback",
"=",
"import_string",
"(",
"path",
")",
"attributes",
".",
"update",
"(",
"callback",
"(",
"user",
",",
"service",
")",
")",
"return",
"attributes"
] | Return a dictionary of user attributes from the set of configured
callback functions. | [
"Return",
"a",
"dictionary",
"of",
"user",
"attributes",
"from",
"the",
"set",
"of",
"configured",
"callback",
"functions",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L82-L91 |
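get_attributes() resolves its callback paths from per-service configuration; a callback is just a callable taking (user, service) and returning a dict. The callback body and the settings snippet below are illustrative assumptions modeled on django-mama-cas's documented MAMA_CAS_SERVICES setting, not values taken from the records above.

```python
# Illustrative custom attribute callback; the function name, module path and
# settings snippet are assumptions, not taken from the records above.
def email_attributes(user, service):
    return {'email': user.email, 'is_staff': str(user.is_staff)}

# settings.py (sketch):
# MAMA_CAS_SERVICES = [{
#     'SERVICE': r'https://app\.example\.com/',
#     'CALLBACKS': ['myproject.callbacks.email_attributes'],
# }]
```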