Code | Summary
---|---|
Please provide a description of the function:def __auth_descriptor(self, api_info):
if api_info.auth is None:
return None
auth_descriptor = {}
if api_info.auth.allow_cookie_auth is not None:
auth_descriptor['allowCookieAuth'] = api_info.auth.allow_cookie_auth
if api_info.auth.blocked_regions:
auth_descriptor['blockedRegions'] = api_info.auth.blocked_regions
return auth_descriptor | [
"Builds an auth descriptor from API info.\n\n Args:\n api_info: An _ApiInfo object.\n\n Returns:\n A dictionary with 'allowCookieAuth' and/or 'blockedRegions' keys.\n "
] |
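For illustration, here is a minimal standalone sketch of the mapping above, using a namedtuple to stand in for the auth section of an _ApiInfo object (attribute names follow the code; the values are made up):

import collections

Auth = collections.namedtuple('Auth', ['allow_cookie_auth', 'blocked_regions'])
auth = Auth(allow_cookie_auth=True, blocked_regions=['CU', 'SY'])

auth_descriptor = {}
if auth.allow_cookie_auth is not None:
    auth_descriptor['allowCookieAuth'] = auth.allow_cookie_auth
if auth.blocked_regions:
    auth_descriptor['blockedRegions'] = auth.blocked_regions
print(auth_descriptor)  # {'allowCookieAuth': True, 'blockedRegions': ['CU', 'SY']} (key order may vary)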
Please provide a description of the function:def __frontend_limit_descriptor(self, api_info):
if api_info.frontend_limits is None:
return None
descriptor = {}
for propname, descname in (('unregistered_user_qps', 'unregisteredUserQps'),
('unregistered_qps', 'unregisteredQps'),
('unregistered_daily', 'unregisteredDaily')):
if getattr(api_info.frontend_limits, propname) is not None:
descriptor[descname] = getattr(api_info.frontend_limits, propname)
rules = self.__frontend_limit_rules_descriptor(api_info)
if rules:
descriptor['rules'] = rules
return descriptor | [
"Builds a frontend limit descriptor from API info.\n\n Args:\n api_info: An _ApiInfo object.\n\n Returns:\n A dictionary with frontend limit information.\n "
] |
Please provide a description of the function:def __frontend_limit_rules_descriptor(self, api_info):
if not api_info.frontend_limits.rules:
return None
rules = []
for rule in api_info.frontend_limits.rules:
descriptor = {}
for propname, descname in (('match', 'match'),
('qps', 'qps'),
('user_qps', 'userQps'),
('daily', 'daily'),
('analytics_id', 'analyticsId')):
if getattr(rule, propname) is not None:
descriptor[descname] = getattr(rule, propname)
if descriptor:
rules.append(descriptor)
return rules | [
"Builds a frontend limit rules descriptor from API info.\n\n Args:\n api_info: An _ApiInfo object.\n\n Returns:\n A list of dictionaries with frontend limit rules information.\n "
] |
Please provide a description of the function:def __api_descriptor(self, services, hostname=None):
merged_api_info = self.__get_merged_api_info(services)
descriptor = self.get_descriptor_defaults(merged_api_info,
hostname=hostname)
description = merged_api_info.description
if not description and len(services) == 1:
description = services[0].__doc__
if description:
descriptor['description'] = description
auth_descriptor = self.__auth_descriptor(merged_api_info)
if auth_descriptor:
descriptor['auth'] = auth_descriptor
frontend_limit_descriptor = self.__frontend_limit_descriptor(
merged_api_info)
if frontend_limit_descriptor:
descriptor['frontendLimits'] = frontend_limit_descriptor
method_map = {}
method_collision_tracker = {}
rest_collision_tracker = {}
for service in services:
remote_methods = service.all_remote_methods()
for protorpc_meth_name, protorpc_meth_info in remote_methods.iteritems():
method_info = getattr(protorpc_meth_info, 'method_info', None)
# Skip methods that are not decorated with @method
if method_info is None:
continue
method_id = method_info.method_id(service.api_info)
rosy_method = '%s.%s' % (service.__name__, protorpc_meth_name)
self.__id_from_name[rosy_method] = method_id
method_map[method_id] = self.__method_descriptor(
service, method_info, rosy_method, protorpc_meth_info)
# Make sure the same method name isn't repeated.
if method_id in method_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'Method %s used multiple times, in classes %s and %s' %
(method_id, method_collision_tracker[method_id],
service.__name__))
else:
method_collision_tracker[method_id] = service.__name__
# Make sure the same HTTP method & path aren't repeated.
rest_identifier = (method_info.http_method,
method_info.get_path(service.api_info))
if rest_identifier in rest_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'%s path "%s" used multiple times, in classes %s and %s' %
(method_info.http_method, method_info.get_path(service.api_info),
rest_collision_tracker[rest_identifier],
service.__name__))
else:
rest_collision_tracker[rest_identifier] = service.__name__
if method_map:
descriptor['methods'] = method_map
descriptor['descriptor'] = self.__schema_descriptor(services)
return descriptor | [
"Builds a description of an API.\n\n Args:\n services: List of protorpc.remote.Service instances implementing an\n api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary that can be deserialized into JSON and stored as an API\n description document.\n\n Raises:\n ApiConfigurationError: If there's something wrong with the API\n configuration, such as a multiclass API decorated with different API\n descriptors (see the docstring for api()), or a repeated method\n signature.\n "
] |
Please provide a description of the function:def get_descriptor_defaults(self, api_info, hostname=None):
hostname = (hostname or endpoints_util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
endpoints_util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path.strip('/')
defaults = {
'extends': 'thirdParty.api',
'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'name': api_info.name,
'version': api_info.api_version,
'api_version': api_info.api_version,
'path_version': api_info.path_version,
'defaultVersion': True,
'abstract': False,
'adapter': {
'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'type': 'lily',
'deadline': 10.0
}
}
if api_info.canonical_name:
defaults['canonicalName'] = api_info.canonical_name
if api_info.owner_domain:
defaults['ownerDomain'] = api_info.owner_domain
if api_info.owner_name:
defaults['ownerName'] = api_info.owner_name
if api_info.package_path:
defaults['packagePath'] = api_info.package_path
if api_info.title:
defaults['title'] = api_info.title
if api_info.documentation:
defaults['documentation'] = api_info.documentation
return defaults | [
"Gets a default configuration for a service.\n\n Args:\n api_info: _ApiInfo object for this service.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary with the default configuration.\n "
] |
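The 'root' and adapter 'bns' values are plain string formatting over the protocol, hostname and stripped base path; a quick sketch with illustrative values:

protocol, hostname, base_path = 'https', 'my-app.appspot.com', '_ah/api'
print('{0}://{1}/{2}'.format(protocol, hostname, base_path))
# https://my-app.appspot.com/_ah/api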
Please provide a description of the function:def get_config_dict(self, services, hostname=None):
if not isinstance(services, (tuple, list)):
services = [services]
# The type of a class that inherits from remote.Service is actually
# remote._ServiceClass, thanks to metaclass strangeness.
# pylint: disable=protected-access
endpoints_util.check_list_type(services, remote._ServiceClass, 'services',
allow_none=False)
return self.__api_descriptor(services, hostname=hostname) | [
"JSON dict description of a protorpc.remote.Service in API format.\n\n Args:\n services: Either a single protorpc.remote.Service or a list of them\n that implements an api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n dict, The API descriptor document as a JSON dict.\n "
] |
Please provide a description of the function:def pretty_print_config_to_json(self, services, hostname=None):
descriptor = self.get_config_dict(services, hostname)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': ')) | [
"JSON string description of a protorpc.remote.Service in API format.\n\n Args:\n services: Either a single protorpc.remote.Service or a list of them\n that implements an api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n string, The API descriptor document as a JSON string.\n "
] |
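The serialization step is plain json.dumps with stable key ordering; for example, with a small illustrative descriptor:

import json

descriptor = {'name': 'myapi', 'version': 'v1', 'abstract': False}
print(json.dumps(descriptor, sort_keys=True, indent=2, separators=(',', ': ')))
# {
#   "abstract": false,
#   "name": "myapi",
#   "version": "v1"
# }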
Please provide a description of the function:def __field_to_parameter_type_and_format(self, field):
# We use lowercase values for types (e.g. 'string' instead of 'STRING').
variant = field.variant
if variant == messages.Variant.MESSAGE:
raise TypeError('A message variant cannot be used in a parameter.')
# Note that the 64-bit integers are marked as strings -- this is to
# accommodate JavaScript, which would otherwise demote them to 32-bit
# integers.
return CUSTOM_VARIANT_MAP.get(variant) or (variant.name.lower(), None) | [
"Converts the field variant type into a tuple describing the parameter.\n\n Args:\n field: An instance of a subclass of messages.Field.\n\n Returns:\n A tuple with the type and format of the field, respectively.\n\n Raises:\n TypeError: if the field variant is a message variant.\n "
] |
Please provide a description of the function:def __parameter_default(self, field):
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name
elif isinstance(field, messages.BooleanField):
# The Python standard representation of a boolean value causes problems
# when generating client code.
return 'true' if field.default else 'false'
else:
return str(field.default) | [
"Returns default value of field if it has one.\n\n Args:\n field: A simple field.\n\n Returns:\n The default value of the field, if any exists, with the exception of an\n enum field, which will have its value cast to a string.\n "
] |
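A standalone sketch of the stringification rules described above (the helper name and flags are illustrative; the real method receives a messages.Field instance):

def stringify_default(default, is_enum=False, is_bool=False):
    # Mirrors the rules above: enum -> its name, bool -> 'true'/'false', else str().
    if is_enum:
        return default.name
    if is_bool:
        return 'true' if default else 'false'
    return str(default)

print(stringify_default(True, is_bool=True))  # true
print(stringify_default(42))                  # 42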
Please provide a description of the function:def __parameter_enum(self, param):
if isinstance(param, messages.EnumField):
return [enum_entry[0] for enum_entry in sorted(
param.type.to_dict().items(), key=lambda v: v[1])] | [
"Returns enum descriptor of a parameter if it is an enum.\n\n An enum descriptor is a list of keys.\n\n Args:\n param: A simple field.\n\n Returns:\n The enum descriptor for the field, if it's an enum descriptor, else\n returns None.\n "
] |
Please provide a description of the function:def __parameter_descriptor(self, param):
descriptor = {}
param_type, param_format = self.__field_to_parameter_type_and_format(param)
# Required
if param.required:
descriptor['required'] = True
# Type
descriptor['type'] = param_type
# Format (optional)
if param_format:
descriptor['format'] = param_format
# Default
default = self.__parameter_default(param)
if default is not None:
descriptor['default'] = default
# Repeated
if param.repeated:
descriptor['repeated'] = True
# Enum
# Note that enumDescriptions are not currently supported using the
# framework's annotations, so just insert blank strings.
enum_descriptor = self.__parameter_enum(param)
if enum_descriptor is not None:
descriptor['enum'] = enum_descriptor
descriptor['enumDescriptions'] = [''] * len(enum_descriptor)
return descriptor | [
"Creates descriptor for a parameter.\n\n Args:\n param: The parameter to be described.\n\n Returns:\n Dictionary containing a descriptor for the parameter.\n "
] |
Please provide a description of the function:def __add_parameter(self, param, path_parameters, params):
# If this is a simple field, just build the descriptor and append it.
# Otherwise, build a schema and assign it to this descriptor
descriptor = None
if not isinstance(param, messages.MessageField):
name = param.name
descriptor = self.__parameter_descriptor(param)
descriptor['location'] = 'path' if name in path_parameters else 'query'
if descriptor:
params[name] = descriptor
else:
for subfield_list in self.__field_to_subfields(param):
name = '.'.join(subfield.name for subfield in subfield_list)
descriptor = self.__parameter_descriptor(subfield_list[-1])
if name in path_parameters:
descriptor['required'] = True
descriptor['location'] = 'path'
else:
descriptor.pop('required', None)
descriptor['location'] = 'query'
if descriptor:
params[name] = descriptor | [
"Adds all parameters in a field to a method parameters descriptor.\n\n Simple fields will only have one parameter, but a message field 'x' that\n corresponds to a message class with fields 'y' and 'z' will result in\n parameters 'x.y' and 'x.z', for example. The mapping from field to\n parameters is mostly handled by __field_to_subfields.\n\n Args:\n param: Parameter to be added to the descriptor.\n path_parameters: A list of parameters matched from a path for this field.\n For example for the hypothetical 'x' from above if the path was\n '/a/{x.z}/b/{other}' then this list would contain only the element\n 'x.z' since 'other' does not match to this field.\n params: List of parameters. Each parameter in the field.\n "
] |
Please provide a description of the function:def __params_descriptor_without_container(self, message_type,
request_kind, path):
params = {}
path_parameter_dict = self.__get_path_parameters(path)
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__validate_path_parameters(field, matched_path_parameters)
if matched_path_parameters or request_kind == self.__NO_BODY:
self.__add_parameter(field, matched_path_parameters, params)
return params | [
"Describe parameters of a method which does not use a ResourceContainer.\n\n Makes sure that the path parameters are included in the message definition\n and adds any required fields and URL query parameters.\n\n This method is to preserve backwards compatibility and will be removed in\n a future release.\n\n Args:\n message_type: messages.Message class, Message with parameters to describe.\n request_kind: The type of request being made.\n path: string, HTTP path to method.\n\n Returns:\n A list of dicts: Descriptors of the parameters\n "
] |
Please provide a description of the function:def __params_descriptor(self, message_type, request_kind, path, method_id,
request_params_class):
path_parameter_dict = self.__get_path_parameters(path)
if request_params_class is None:
if path_parameter_dict:
_logger.warning('Method %s specifies path parameters but you are not '
'using a ResourceContainer; instead, you are using %r. '
'This will fail in future releases; please switch to '
'using ResourceContainer as soon as possible.',
method_id, type(message_type))
return self.__params_descriptor_without_container(
message_type, request_kind, path)
# From here, we can assume message_type is from a ResourceContainer.
message_type = request_params_class
params = {}
# Make sure all path parameters are covered.
for field_name, matched_path_parameters in path_parameter_dict.iteritems():
field = message_type.field_by_name(field_name)
self.__validate_path_parameters(field, matched_path_parameters)
# Add all fields, sort by field.number since we have parameterOrder.
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__add_parameter(field, matched_path_parameters, params)
return params | [
"Describe the parameters of a method.\n\n If the message_type is not a ResourceContainer, will fall back to\n __params_descriptor_without_container (which will eventually be deprecated).\n\n If the message type is a ResourceContainer, then all path/query parameters\n will come from the ResourceContainer. This method will also make sure all\n path parameters are covered by the message fields.\n\n Args:\n message_type: messages.Message or ResourceContainer class, Message with\n parameters to describe.\n request_kind: The type of request being made.\n path: string, HTTP path to method.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n request_params_class: messages.Message, the original params message when\n using a ResourceContainer. Otherwise, this should be null.\n\n Returns:\n A tuple (dict, list of string): Descriptor of the parameters, Order of the\n parameters.\n "
] |
Please provide a description of the function:def __params_order_descriptor(self, message_type, path, is_params_class=False):
path_params = []
query_params = []
path_parameter_dict = self.__get_path_parameters(path)
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
if not isinstance(field, messages.MessageField):
name = field.name
if name in matched_path_parameters:
path_params.append(name)
elif is_params_class and field.required:
query_params.append(name)
else:
for subfield_list in self.__field_to_subfields(field):
name = '.'.join(subfield.name for subfield in subfield_list)
if name in matched_path_parameters:
path_params.append(name)
elif is_params_class and field.required:
query_params.append(name)
return path_params + sorted(query_params) | [
"Describe the order of path parameters.\n\n Args:\n message_type: messages.Message class, Message with parameters to describe.\n path: string, HTTP path to method.\n is_params_class: boolean, Whether the message represents URL parameters.\n\n Returns:\n Descriptor list for the parameter order.\n "
] |
Please provide a description of the function:def __schemas_descriptor(self):
# Filter out any keys that aren't 'properties', 'type', or 'id'
result = {}
for schema_key, schema_value in self.__parser.schemas().iteritems():
field_keys = schema_value.keys()
key_result = {}
# Some special processing for the properties value
if 'properties' in field_keys:
key_result['properties'] = schema_value['properties'].copy()
# Add in enumDescriptions for any enum properties and strip out
# the required tag for consistency with Java framework
for prop_key, prop_value in schema_value['properties'].iteritems():
if 'enum' in prop_value:
num_enums = len(prop_value['enum'])
key_result['properties'][prop_key]['enumDescriptions'] = (
[''] * num_enums)
elif 'default' in prop_value:
# stringify default values
if prop_value.get('type') == 'boolean':
prop_value['default'] = 'true' if prop_value['default'] else 'false'
else:
prop_value['default'] = str(prop_value['default'])
key_result['properties'][prop_key].pop('required', None)
for key in ('type', 'id', 'description'):
if key in field_keys:
key_result[key] = schema_value[key]
if key_result:
result[schema_key] = key_result
# Add 'type': 'object' to all object properties
for schema_value in result.itervalues():
for field_value in schema_value.itervalues():
if isinstance(field_value, dict):
if '$ref' in field_value:
field_value['type'] = 'object'
return result | [
"Describes the schemas section of the discovery document.\n\n Returns:\n Dictionary describing the schemas of the document.\n "
] |
Please provide a description of the function:def __request_message_descriptor(self, request_kind, message_type, method_id,
request_body_class):
if request_body_class:
message_type = request_body_class
if (request_kind != self.__NO_BODY and
message_type != message_types.VoidMessage()):
self.__request_schema[method_id] = self.__parser.add_message(
message_type.__class__)
return {
'$ref': self.__request_schema[method_id],
'parameterName': 'resource',
} | [
"Describes the parameters and body of the request.\n\n Args:\n request_kind: The type of request being made.\n message_type: messages.Message or ResourceContainer class. The message to\n describe.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n request_body_class: messages.Message of the original body when using\n a ResourceContainer. Otherwise, this should be null.\n\n Returns:\n Dictionary describing the request.\n\n Raises:\n ValueError: if the method path and request required fields do not match\n "
] |
Please provide a description of the function:def __method_descriptor(self, service, method_info,
protorpc_method_info):
descriptor = {}
request_message_type = (resource_container.ResourceContainer.
get_request_message(protorpc_method_info.remote))
request_kind = self.__get_request_kind(method_info)
remote_method = protorpc_method_info.remote
method_id = method_info.method_id(service.api_info)
path = method_info.get_path(service.api_info)
description = protorpc_method_info.remote.method.__doc__
descriptor['id'] = method_id
descriptor['path'] = path
descriptor['httpMethod'] = method_info.http_method
if description:
descriptor['description'] = description
descriptor['scopes'] = [
'https://www.googleapis.com/auth/userinfo.email'
]
parameters = self.__params_descriptor(
request_message_type, request_kind, path, method_id,
method_info.request_params_class)
if parameters:
descriptor['parameters'] = parameters
if method_info.request_params_class:
parameter_order = self.__params_order_descriptor(
method_info.request_params_class, path, is_params_class=True)
else:
parameter_order = self.__params_order_descriptor(
request_message_type, path, is_params_class=False)
if parameter_order:
descriptor['parameterOrder'] = parameter_order
request_descriptor = self.__request_message_descriptor(
request_kind, request_message_type, method_id,
method_info.request_body_class)
if request_descriptor is not None:
descriptor['request'] = request_descriptor
response_descriptor = self.__response_message_descriptor(
remote_method.response_type(), method_info.method_id(service.api_info))
if response_descriptor is not None:
descriptor['response'] = response_descriptor
return descriptor | [
"Describes a method.\n\n Args:\n service: endpoints.Service, Implementation of the API as a service.\n method_info: _MethodInfo, Configuration for the method.\n protorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC\n description of the method.\n\n Returns:\n Dictionary describing the method.\n "
] |
Please provide a description of the function:def __resource_descriptor(self, resource_path, methods):
descriptor = {}
method_map = {}
sub_resource_index = collections.defaultdict(list)
sub_resource_map = {}
resource_path_tokens = resource_path.split('.')
for service, protorpc_meth_info in methods:
method_info = getattr(protorpc_meth_info, 'method_info', None)
path = method_info.get_path(service.api_info)
method_id = method_info.method_id(service.api_info)
canonical_method_id = self._get_canonical_method_id(method_id)
current_resource_path = self._get_resource_path(method_id)
# Sanity-check that this method belongs to the resource path
if (current_resource_path[:len(resource_path_tokens)] !=
resource_path_tokens):
raise api_exceptions.ToolError(
'Internal consistency error in resource path {0}'.format(
current_resource_path))
# Remove the portion of the current method's resource path that's already
# part of the resource path at this level.
effective_resource_path = current_resource_path[
len(resource_path_tokens):]
# If this method is part of a sub-resource, note it and skip it for now
if effective_resource_path:
sub_resource_name = effective_resource_path[0]
new_resource_path = '.'.join([resource_path, sub_resource_name])
sub_resource_index[new_resource_path].append(
(service, protorpc_meth_info))
else:
method_map[canonical_method_id] = self.__method_descriptor(
service, method_info, protorpc_meth_info)
# Process any sub-resources
for sub_resource, sub_resource_methods in sub_resource_index.items():
sub_resource_name = sub_resource.split('.')[-1]
sub_resource_map[sub_resource_name] = self.__resource_descriptor(
sub_resource, sub_resource_methods)
if method_map:
descriptor['methods'] = method_map
if sub_resource_map:
descriptor['resources'] = sub_resource_map
return descriptor | [
"Describes a resource.\n\n Args:\n resource_path: string, the path of the resource (e.g., 'entries.items')\n methods: list of tuples of type\n (endpoints.Service, protorpc.remote._RemoteMethodInfo), the methods\n that serve this resource.\n\n Returns:\n Dictionary describing the resource.\n "
] |
Please provide a description of the function:def __get_merged_api_info(self, services):
base_paths = sorted(set(s.api_info.base_path for s in services))
if len(base_paths) != 1:
raise api_exceptions.ApiConfigurationError(
'Multiple base_paths found: {!r}'.format(base_paths))
names_versions = sorted(set(
(s.api_info.name, s.api_info.api_version) for s in services))
if len(names_versions) != 1:
raise api_exceptions.ApiConfigurationError(
'Multiple apis/versions found: {!r}'.format(names_versions))
return services[0].api_info | [
"Builds a description of an API.\n\n Args:\n services: List of protorpc.remote.Service instances implementing an\n api/version.\n\n Returns:\n The _ApiInfo object to use for the API that the given services implement.\n "
] |
Please provide a description of the function:def __discovery_doc_descriptor(self, services, hostname=None):
merged_api_info = self.__get_merged_api_info(services)
descriptor = self.get_descriptor_defaults(merged_api_info,
hostname=hostname)
description = merged_api_info.description
if not description and len(services) == 1:
description = services[0].__doc__
if description:
descriptor['description'] = description
descriptor['parameters'] = self.__standard_parameters_descriptor()
descriptor['auth'] = self.__standard_auth_descriptor(services)
# Add namespace information, if provided
if merged_api_info.namespace:
descriptor['ownerDomain'] = merged_api_info.namespace.owner_domain
descriptor['ownerName'] = merged_api_info.namespace.owner_name
descriptor['packagePath'] = merged_api_info.namespace.package_path or ''
else:
if merged_api_info.owner_domain is not None:
descriptor['ownerDomain'] = merged_api_info.owner_domain
if merged_api_info.owner_name is not None:
descriptor['ownerName'] = merged_api_info.owner_name
if merged_api_info.package_path is not None:
descriptor['packagePath'] = merged_api_info.package_path
method_map = {}
method_collision_tracker = {}
rest_collision_tracker = {}
resource_index = collections.defaultdict(list)
resource_map = {}
# For the first pass, only process top-level methods (that is, those methods
# that are unattached to a resource).
for service in services:
remote_methods = service.all_remote_methods()
for protorpc_meth_name, protorpc_meth_info in remote_methods.iteritems():
method_info = getattr(protorpc_meth_info, 'method_info', None)
# Skip methods that are not decorated with @method
if method_info is None:
continue
path = method_info.get_path(service.api_info)
method_id = method_info.method_id(service.api_info)
canonical_method_id = self._get_canonical_method_id(method_id)
resource_path = self._get_resource_path(method_id)
# Make sure the same method name isn't repeated.
if method_id in method_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'Method %s used multiple times, in classes %s and %s' %
(method_id, method_collision_tracker[method_id],
service.__name__))
else:
method_collision_tracker[method_id] = service.__name__
# Make sure the same HTTP method & path aren't repeated.
rest_identifier = (method_info.http_method, path)
if rest_identifier in rest_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'%s path "%s" used multiple times, in classes %s and %s' %
(method_info.http_method, path,
rest_collision_tracker[rest_identifier],
service.__name__))
else:
rest_collision_tracker[rest_identifier] = service.__name__
# If this method is part of a resource, note it and skip it for now
if resource_path:
resource_index[resource_path[0]].append((service, protorpc_meth_info))
else:
method_map[canonical_method_id] = self.__method_descriptor(
service, method_info, protorpc_meth_info)
# Do another pass for methods attached to resources
for resource, resource_methods in resource_index.items():
resource_map[resource] = self.__resource_descriptor(resource,
resource_methods)
if method_map:
descriptor['methods'] = method_map
if resource_map:
descriptor['resources'] = resource_map
# Add schemas, if any
schemas = self.__schemas_descriptor()
if schemas:
descriptor['schemas'] = schemas
return descriptor | [
"Builds a discovery doc for an API.\n\n Args:\n services: List of protorpc.remote.Service instances implementing an\n api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary that can be deserialized into JSON in discovery doc format.\n\n Raises:\n ApiConfigurationError: If there's something wrong with the API\n configuration, such as a multiclass API decorated with different API\n descriptors (see the docstring for api()), or a repeated method\n signature.\n "
] |
Please provide a description of the function:def get_descriptor_defaults(self, api_info, hostname=None):
if self.__request:
hostname = self.__request.reconstruct_hostname()
protocol = self.__request.url_scheme
else:
hostname = (hostname or util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
util.is_running_on_devserver()) else 'https'
full_base_path = '{0}{1}/{2}/'.format(api_info.base_path,
api_info.name,
api_info.path_version)
base_url = '{0}://{1}{2}'.format(protocol, hostname, full_base_path)
root_url = '{0}://{1}{2}'.format(protocol, hostname, api_info.base_path)
defaults = {
'kind': 'discovery#restDescription',
'discoveryVersion': 'v1',
'id': '{0}:{1}'.format(api_info.name, api_info.path_version),
'name': api_info.name,
'version': api_info.api_version,
'icons': {
'x16': 'https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png',
'x32': 'https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png'
},
'protocol': 'rest',
'servicePath': '{0}/{1}/'.format(api_info.name, api_info.path_version),
'batchPath': 'batch',
'basePath': full_base_path,
'rootUrl': root_url,
'baseUrl': base_url,
'description': 'This is an API',
}
if api_info.description:
defaults['description'] = api_info.description
if api_info.title:
defaults['title'] = api_info.title
if api_info.documentation:
defaults['documentationLink'] = api_info.documentation
if api_info.canonical_name:
defaults['canonicalName'] = api_info.canonical_name
return defaults | [
"Gets a default configuration for a service.\n\n Args:\n api_info: _ApiInfo object for this service.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary with the default configuration.\n "
] |
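The discovery URL fields are built by simple concatenation; a sketch with illustrative values:

base_path, name, path_version = '/_ah/api/', 'myapi', 'v1'
full_base_path = '{0}{1}/{2}/'.format(base_path, name, path_version)
print(full_base_path)                                  # /_ah/api/myapi/v1/
print('https://my-app.appspot.com' + full_base_path)   # the resulting baseUrl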
Please provide a description of the function:def get_discovery_doc(self, services, hostname=None):
if not isinstance(services, (tuple, list)):
services = [services]
# The type of a class that inherits from remote.Service is actually
# remote._ServiceClass, thanks to metaclass strangeness.
# pylint: disable=protected-access
util.check_list_type(services, remote._ServiceClass, 'services',
allow_none=False)
return self.__discovery_doc_descriptor(services, hostname=hostname) | [
"JSON dict description of a protorpc.remote.Service in discovery format.\n\n Args:\n services: Either a single protorpc.remote.Service or a list of them\n that implements an api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n dict, The discovery document as a JSON dict.\n "
] |
Please provide a description of the function:def send_wsgi_response(status, headers, content, start_response,
cors_handler=None):
if cors_handler:
cors_handler.update_headers(headers)
# Update content length.
content_len = len(content) if content else 0
headers = [(header, value) for header, value in headers
if header.lower() != 'content-length']
headers.append(('Content-Length', '%s' % content_len))
start_response(status, headers)
return content | [
"Dump reformatted response to CGI start_response.\n\n This calls start_response and returns the response body.\n\n Args:\n status: A string containing the HTTP status code to send.\n headers: A list of (header, value) tuples, the headers to send in the\n response.\n content: A string containing the body content to write.\n start_response: A function with semantics defined in PEP-333.\n cors_handler: A handler to process CORS request headers and update the\n headers in the response. Or this can be None, to bypass CORS checks.\n\n Returns:\n A string containing the response body.\n "
] |
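A minimal usage sketch with a stand-in start_response, assuming send_wsgi_response is in scope (it lives in the framework's util module); note how the stale Content-Length header is replaced:

captured = {}

def fake_start_response(status, headers):
    # Records what would be sent to the WSGI server.
    captured['status'] = status
    captured['headers'] = headers

body = send_wsgi_response(
    '200 OK',
    [('Content-Type', 'text/plain'), ('Content-Length', '999')],
    'hello', fake_start_response)
print(captured['headers'])  # [('Content-Type', 'text/plain'), ('Content-Length', '5')]
print(body)                 # hello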
Please provide a description of the function:def get_headers_from_environ(environ):
headers = wsgiref.headers.Headers([])
for header, value in environ.iteritems():
if header.startswith('HTTP_'):
headers[header[5:].replace('_', '-')] = value
# Content-Type is special; it does not start with 'HTTP_'.
if 'CONTENT_TYPE' in environ:
headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']
return headers | [
"Get a wsgiref.headers.Headers object with headers from the environment.\n\n Headers in environ are prefixed with 'HTTP_', are all uppercase, and have\n had dashes replaced with underscores. This strips the HTTP_ prefix and\n changes underscores back to dashes before adding them to the returned set\n of headers.\n\n Args:\n environ: An environ dict for the request as defined in PEP-333.\n\n Returns:\n A wsgiref.headers.Headers object that's been filled in with any HTTP\n headers found in environ.\n "
] |
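A quick check of the header extraction, assuming the function above is in scope:

environ = {
    'HTTP_X_FORWARDED_FOR': '10.0.0.1',
    'HTTP_ACCEPT': 'application/json',
    'CONTENT_TYPE': 'application/json',
    'REQUEST_METHOD': 'GET',  # ignored: no HTTP_ prefix and not CONTENT_TYPE
}
headers = get_headers_from_environ(environ)
print(headers['X-Forwarded-For'])  # 10.0.0.1
print(headers['Content-Type'])     # application/json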
Please provide a description of the function:def put_headers_in_environ(headers, environ):
for key, value in headers:
environ['HTTP_%s' % key.upper().replace('-', '_')] = value | [
"Given a list of headers, put them into environ based on PEP-333.\n\n This converts headers to uppercase, prefixes them with 'HTTP_', and\n converts dashes to underscores before adding them to the environ dict.\n\n Args:\n headers: A list of (header, value) tuples. The HTTP headers to add to the\n environment.\n environ: An environ dict for the request as defined in PEP-333.\n "
] |
Please provide a description of the function:def get_hostname_prefix():
parts = []
# Check if this is the default version
version = modules.get_current_version_name()
default_version = modules.get_default_version()
if version != default_version:
parts.append(version)
# Check if this is the default module
module = modules.get_current_module_name()
if module != 'default':
parts.append(module)
# If there is anything to prepend, add an extra blank entry for the trailing
# -dot-
if parts:
parts.append('')
return '-dot-'.join(parts) | [
"Returns the hostname prefix of a running Endpoints service.\n\n The prefix is the portion of the hostname that comes before the API name.\n For example, if a non-default version and a non-default service are in use,\n the returned result would be '{VERSION}-dot-{SERVICE}-'.\n\n Returns:\n str, the hostname prefix.\n "
] |
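On App Engine the version and module names come from the modules API; the joining itself is simple. A standalone sketch with illustrative values:

parts = ['v2', 'backend']   # non-default version and non-default service
parts.append('')            # blank entry yields the trailing '-dot-'
print('-dot-'.join(parts))  # v2-dot-backend-dot-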
Please provide a description of the function:def get_app_hostname():
if not is_running_on_app_engine() or is_running_on_localhost():
return None
app_id = app_identity.get_application_id()
prefix = get_hostname_prefix()
suffix = 'appspot.com'
if ':' in app_id:
tokens = app_id.split(':')
api_name = tokens[1]
if tokens[0] == 'google.com':
suffix = 'googleplex.com'
else:
api_name = app_id
return '{0}{1}.{2}'.format(prefix, api_name, suffix) | [
"Return hostname of a running Endpoints service.\n\n Returns hostname of an running Endpoints API. It can be 1) \"localhost:PORT\"\n if running on development server, or 2) \"app_id.appspot.com\" if running on\n external app engine prod, or \"app_id.googleplex.com\" if running as Google\n first-party Endpoints API, or 4) None if not running on App Engine\n (e.g. Tornado Endpoints API).\n\n Returns:\n A string representing the hostname of the service.\n "
] |
Please provide a description of the function:def check_list_type(objects, allowed_type, name, allow_none=True):
if objects is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % name)
return objects
if not isinstance(objects, (tuple, list)):
raise TypeError('%s is not a list.' % name)
if not all(isinstance(i, allowed_type) for i in objects):
type_list = sorted(list(set(type(obj) for obj in objects)))
raise TypeError('%s contains types that don\'t match %s: %s' %
(name, allowed_type.__name__, type_list))
return objects | [
"Verify that objects in list are of the allowed type or raise TypeError.\n\n Args:\n objects: The list of objects to check.\n allowed_type: The allowed type of items in 'settings'.\n name: Name of the list of objects, added to the exception.\n allow_none: If set, None is also allowed.\n\n Raises:\n TypeError: if object is not of the allowed type.\n\n Returns:\n The list of objects, for convenient use in assignment.\n "
] |
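Usage sketch for the validator above, assuming it is in scope:

check_list_type(['a', 'b'], str, 'names')   # returns ['a', 'b'] unchanged
check_list_type(None, str, 'names')         # None is allowed by default

try:
    check_list_type(['a', 1], str, 'names')
except TypeError as e:
    print(e)  # names contains types that don't match str: [...]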
Please provide a description of the function:def snake_case_to_headless_camel_case(snake_string):
return ''.join([snake_string.split('_')[0]] +
list(sub_string.capitalize()
for sub_string in snake_string.split('_')[1:])) | [
"Convert snake_case to headlessCamelCase.\n\n Args:\n snake_string: The string to be converted.\n Returns:\n The input string converted to headlessCamelCase.\n "
] |
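For example, assuming the helper above is in scope:

print(snake_case_to_headless_camel_case('unregistered_user_qps'))  # unregisteredUserQps
print(snake_case_to_headless_camel_case('daily'))                  # daily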
Please provide a description of the function:def Proxy(self, status, headers, exc_info=None):
self.call_context['status'] = status
self.call_context['headers'] = headers
self.call_context['exc_info'] = exc_info
return self.body_buffer.write | [
"Save args, defer start_response until response body is parsed.\n\n Create output buffer for body to be written into.\n Note: this is not quite WSGI compliant: The body should come back as an\n iterator returned from calling service_app() but instead, StartResponse\n returns a writer that will be later called to output the body.\n See google/appengine/ext/webapp/__init__.py::Response.wsgi_write()\n write = start_response('%d %s' % self.__status, self.__wsgi_headers)\n write(body)\n\n Args:\n status: Http status to be sent with this response\n headers: Http headers to be sent with this response\n exc_info: Exception info to be displayed for this response\n Returns:\n callable that takes as an argument the body content\n "
] |
Please provide a description of the function:def _send_success_response(self, response, start_response):
headers = [('Content-Type', 'application/json; charset=UTF-8')]
return util.send_wsgi_response('200 OK', headers, response, start_response) | [
"Sends an HTTP 200 json success response.\n\n This calls start_response and returns the response body.\n\n Args:\n response: A string containing the response body to return.\n start_response: A function with semantics defined in PEP-333.\n\n Returns:\n A string, the response body.\n "
] |
Please provide a description of the function:def _get_rest_doc(self, request, start_response):
api = request.body_json['api']
version = request.body_json['version']
generator = discovery_generator.DiscoveryGenerator(request=request)
services = [s for s in self._backend.api_services if
s.api_info.name == api and s.api_info.api_version == version]
doc = generator.pretty_print_config_to_json(services)
if not doc:
error_msg = ('Failed to convert .api to discovery doc for '
'version %s of api %s') % (version, api)
_logger.error('%s', error_msg)
return util.send_wsgi_error_response(error_msg, start_response)
return self._send_success_response(doc, start_response) | [
"Sends back HTTP response with API directory.\n\n This calls start_response and returns the response body. It will return\n the discovery doc for the requested api/version.\n\n Args:\n request: An ApiRequest, the transformed request sent to the Discovery API.\n start_response: A function with semantics defined in PEP-333.\n\n Returns:\n A string, the response body.\n "
] |
Please provide a description of the function:def _generate_api_config_with_root(self, request):
actual_root = self._get_actual_root(request)
generator = api_config.ApiConfigGenerator()
api = request.body_json['api']
version = request.body_json['version']
lookup_key = (api, version)
service_factories = self._backend.api_name_version_map.get(lookup_key)
if not service_factories:
return None
service_classes = [service_factory.service_class
for service_factory in service_factories]
config_dict = generator.get_config_dict(
service_classes, hostname=actual_root)
# Save to cache
for config in config_dict.get('items', []):
lookup_key_with_root = (
config.get('name', ''), config.get('version', ''), actual_root)
self._config_manager.save_config(lookup_key_with_root, config)
return config_dict | [
"Generate an API config with a specific root hostname.\n\n This uses the backend object and the ApiConfigGenerator to create an API\n config specific to the hostname of the incoming request. This allows for\n flexible API configs for non-standard environments, such as localhost.\n\n Args:\n request: An ApiRequest, the transformed request sent to the Discovery API.\n\n Returns:\n A string representation of the generated API config.\n "
] |
Please provide a description of the function:def _list(self, request, start_response):
configs = []
generator = directory_list_generator.DirectoryListGenerator(request)
for config in self._config_manager.configs.itervalues():
if config != self.API_CONFIG:
configs.append(config)
directory = generator.pretty_print_config_to_json(configs)
if not directory:
_logger.error('Failed to get API directory')
# By returning a 404, code explorer still works if you select the
# API in the URL
return util.send_wsgi_not_found_response(start_response)
return self._send_success_response(directory, start_response) | [
"Sends HTTP response containing the API directory.\n\n This calls start_response and returns the response body.\n\n Args:\n request: An ApiRequest, the transformed request sent to the Discovery API.\n start_response: A function with semantics defined in PEP-333.\n\n Returns:\n A string containing the response body.\n "
] |
Please provide a description of the function:def handle_discovery_request(self, path, request, start_response):
if path == self._GET_REST_API:
return self._get_rest_doc(request, start_response)
elif path == self._GET_RPC_API:
error_msg = ('RPC format documents are no longer supported with the '
'Endpoints Framework for Python. Please use the REST '
'format.')
_logger.error('%s', error_msg)
return util.send_wsgi_error_response(error_msg, start_response)
elif path == self._LIST_API:
return self._list(request, start_response)
return False | [
"Returns the result of a discovery service request.\n\n This calls start_response and returns the response body.\n\n Args:\n path: A string containing the API path (the portion of the path\n after /_ah/api/).\n request: An ApiRequest, the transformed request sent to the Discovery API.\n start_response: A function with semantics defined in PEP-333.\n\n Returns:\n The response body. Or returns False if the request wasn't handled by\n DiscoveryService.\n "
] |
Please provide a description of the function:def _process_req_body(self, body):
try:
return json.loads(body)
except ValueError:
return urlparse.parse_qs(body, keep_blank_values=True) | [
"Process the body of the HTTP request.\n\n If the body is valid JSON, return the JSON as a dict.\n Else, convert the key=value format to a dict and return that.\n\n Args:\n body: The body of the HTTP request.\n "
] |
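A standalone sketch of the JSON-then-form-data fallback (module names as in Python 2, which this codebase targets):

import json
import urlparse  # Python 2; urllib.parse in Python 3

def process_req_body(body):
    # Same fallback as above: valid JSON wins, otherwise parse as key=value pairs.
    try:
        return json.loads(body)
    except ValueError:
        return urlparse.parse_qs(body, keep_blank_values=True)

print(process_req_body('{"api": "myapi", "version": "v1"}'))  # dict parsed from JSON
print(process_req_body('api=myapi&version=v1'))               # {'api': ['myapi'], 'version': ['v1']} (values are lists)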
Please provide a description of the function:def _reconstruct_relative_url(self, environ):
url = urllib.quote(environ.get('SCRIPT_NAME', ''))
url += urllib.quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return url | [
"Reconstruct the relative URL of this request.\n\n This is based on the URL reconstruction code in Python PEP 333:\n http://www.python.org/dev/peps/pep-0333/#url-reconstruction. Rebuild the\n URL from the pieces available in the environment.\n\n Args:\n environ: An environ dict for the request as defined in PEP-333\n\n Returns:\n The portion of the URL from the request after the server and port.\n "
] |
Please provide a description of the function:def reconstruct_hostname(self, port_override=None):
url = self.server
port = port_override or self.port
if port and ((self.url_scheme == 'https' and str(port) != '443') or
(self.url_scheme != 'https' and str(port) != '80')):
url += ':{0}'.format(port)
return url | [
"Reconstruct the hostname of a request.\n\n This is based on the URL reconstruction code in Python PEP 333:\n http://www.python.org/dev/peps/pep-0333/#url-reconstruction. Rebuild the\n hostname from the pieces available in the environment.\n\n Args:\n port_override: str, An override for the port on the returned hostname.\n\n Returns:\n The hostname portion of the URL from the request, not including the\n URL scheme.\n "
] |
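A standalone sketch of the default-port suppression above (illustrative values; the real method reads self.server, self.port and self.url_scheme):

def hostname_for(server, port, url_scheme):
    url = server
    if port and ((url_scheme == 'https' and str(port) != '443') or
                 (url_scheme != 'https' and str(port) != '80')):
        url += ':{0}'.format(port)
    return url

print(hostname_for('my-app.appspot.com', 443, 'https'))  # my-app.appspot.com
print(hostname_for('localhost', 8080, 'http'))           # localhost:8080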
Please provide a description of the function:def reconstruct_full_url(self, port_override=None):
return '{0}://{1}{2}'.format(self.url_scheme,
self.reconstruct_hostname(port_override),
self.relative_url) | [
"Reconstruct the full URL of a request.\n\n This is based on the URL reconstruction code in Python PEP 333:\n http://www.python.org/dev/peps/pep-0333/#url-reconstruction. Rebuild the\n hostname from the pieces available in the environment.\n\n Args:\n port_override: str, An override for the port on the returned full URL.\n\n Returns:\n The full URL from the request, including the URL scheme.\n "
] |
Please provide a description of the function:def _add_def_paths(self, prop_dict):
for prop_key, prop_value in prop_dict.iteritems():
if prop_key == '$ref' and not prop_value.startswith('#'):
prop_dict[prop_key] = '#/definitions/' + prop_dict[prop_key]
elif isinstance(prop_value, dict):
self._add_def_paths(prop_value) | [
"Recursive method to add relative paths for any $ref objects.\n\n Args:\n prop_dict: The property dict to alter.\n\n Side Effects:\n Alters prop_dict in-place.\n "
] |
Please provide a description of the function:def _construct_operation_id(self, service_name, protorpc_method_name):
# camelCase the ProtoRPC method name
method_name_camel = util.snake_case_to_headless_camel_case(
protorpc_method_name)
return '{0}_{1}'.format(service_name, method_name_camel) | [
"Return an operation id for a service method.\n\n Args:\n service_name: The name of the service.\n protorpc_method_name: The ProtoRPC method name.\n\n Returns:\n A string representing the operation id.\n "
] |
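For example, assuming the camel-case helper shown earlier is in scope, a service class named 'Items' and a ProtoRPC method 'list_greetings' produce:

print('{0}_{1}'.format('Items', snake_case_to_headless_camel_case('list_greetings')))
# Items_listGreetings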
Please provide a description of the function:def __field_to_parameter_type_and_format(self, field):
# We use lowercase values for types (e.g. 'string' instead of 'STRING').
variant = field.variant
if variant == messages.Variant.MESSAGE:
raise TypeError('A message variant can\'t be used in a parameter.')
# Note that the 64-bit integers are marked as strings -- this is to
# accommodate JavaScript, which would otherwise demote them to 32-bit
# integers.
custom_variant_map = {
messages.Variant.DOUBLE: ('number', 'double'),
messages.Variant.FLOAT: ('number', 'float'),
messages.Variant.INT64: ('string', 'int64'),
messages.Variant.SINT64: ('string', 'int64'),
messages.Variant.UINT64: ('string', 'uint64'),
messages.Variant.INT32: ('integer', 'int32'),
messages.Variant.SINT32: ('integer', 'int32'),
messages.Variant.UINT32: ('integer', 'uint32'),
messages.Variant.BOOL: ('boolean', None),
messages.Variant.STRING: ('string', None),
messages.Variant.BYTES: ('string', 'byte'),
messages.Variant.ENUM: ('string', None),
}
return custom_variant_map.get(variant) or (variant.name.lower(), None) | [
"Converts the field variant type into a tuple describing the parameter.\n\n Args:\n field: An instance of a subclass of messages.Field.\n\n Returns:\n A tuple with the type and format of the field, respectively.\n\n Raises:\n TypeError: if the field variant is a message variant.\n "
] |
Please provide a description of the function:def __parameter_default(self, field):
if field.default:
if isinstance(field, messages.EnumField):
return field.default.name
else:
return field.default | [
"Returns default value of field if it has one.\n\n Args:\n field: A simple field.\n\n Returns:\n The default value of the field, if any exists, with the exception of an\n enum field, which will have its value cast to a string.\n "
] |
Please provide a description of the function:def __non_body_parameter_descriptor(self, param):
descriptor = {}
descriptor['name'] = param.name
param_type, param_format = self.__field_to_parameter_type_and_format(param)
# Required
if param.required:
descriptor['required'] = True
# Type
descriptor['type'] = param_type
# Format (optional)
if param_format:
descriptor['format'] = param_format
# Default
default = self.__parameter_default(param)
if default is not None:
descriptor['default'] = default
# Repeated
if param.repeated:
descriptor['repeated'] = True
# Enum
enum_descriptor = self.__parameter_enum(param)
if enum_descriptor is not None:
descriptor['enum'] = enum_descriptor
return descriptor | [
"Creates descriptor for a parameter.\n\n Args:\n param: The parameter to be described.\n\n Returns:\n Dictionary containing a descriptor for the parameter.\n "
] |
Please provide a description of the function:def __add_parameter(self, param, path_parameters, params):
# If this is a simple field, just build the descriptor and append it.
# Otherwise, build a schema and assign it to this descriptor
if not isinstance(param, messages.MessageField):
if param.name in path_parameters:
descriptor = self.__path_parameter_descriptor(param)
else:
descriptor = self.__query_parameter_descriptor(param)
params.append(descriptor)
else:
# If a subfield of a MessageField is found in the path, build a descriptor
# for the path parameter.
for subfield_list in self.__field_to_subfields(param):
qualified_name = '.'.join(subfield.name for subfield in subfield_list)
if qualified_name in path_parameters:
descriptor = self.__path_parameter_descriptor(subfield_list[-1])
descriptor['required'] = True
params.append(descriptor) | [
"Adds all parameters in a field to a method parameters descriptor.\n\n Simple fields will only have one parameter, but a message field 'x' that\n corresponds to a message class with fields 'y' and 'z' will result in\n parameters 'x.y' and 'x.z', for example. The mapping from field to\n parameters is mostly handled by __field_to_subfields.\n\n Args:\n param: Parameter to be added to the descriptor.\n path_parameters: A list of parameters matched from a path for this field.\n For example for the hypothetical 'x' from above if the path was\n '/a/{x.z}/b/{other}' then this list would contain only the element\n 'x.z' since 'other' does not match to this field.\n params: List of parameters. Each parameter in the field.\n "
] |
Please provide a description of the function:def __params_descriptor_without_container(self, message_type,
request_kind, method_id, path):
params = []
path_parameter_dict = self.__get_path_parameters(path)
for field in sorted(message_type.all_fields(), key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__validate_path_parameters(field, matched_path_parameters)
if matched_path_parameters or request_kind == self.__NO_BODY:
self.__add_parameter(field, matched_path_parameters, params)
# If the request has a body, add the body parameter
if (message_type != message_types.VoidMessage() and
request_kind == self.__HAS_BODY):
params.append(self.__body_parameter_descriptor(method_id))
return params | [
"Describe parameters of a method which does not use a ResourceContainer.\n\n Makes sure that the path parameters are included in the message definition\n and adds any required fields and URL query parameters.\n\n This method is to preserve backwards compatibility and will be removed in\n a future release.\n\n Args:\n message_type: messages.Message class, Message with parameters to describe.\n request_kind: The type of request being made.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n path: string, HTTP path to method.\n\n Returns:\n A list of dicts: Descriptors of the parameters\n "
] |
Please provide a description of the function:def __params_descriptor(self, message_type, request_kind, path, method_id):
path_parameter_dict = self.__get_path_parameters(path)
if not isinstance(message_type, resource_container.ResourceContainer):
if path_parameter_dict:
_logger.warning('Method %s specifies path parameters but you are not '
'using a ResourceContainer; instead, you are using %r. '
'This will fail in future releases; please switch to '
'using ResourceContainer as soon as possible.',
method_id, type(message_type))
return self.__params_descriptor_without_container(
message_type, request_kind, method_id, path)
# From here, we can assume message_type is a ResourceContainer.
params = []
# Process body parameter, if any
if message_type.body_message_class != message_types.VoidMessage:
params.append(self.__body_parameter_descriptor(method_id))
# Process path/querystring parameters
params_message_type = message_type.parameters_message_class()
# Make sure all path parameters are covered.
for field_name, matched_path_parameters in path_parameter_dict.iteritems():
field = params_message_type.field_by_name(field_name)
self.__validate_path_parameters(field, matched_path_parameters)
# Add all fields, sort by field.number since we have parameterOrder.
for field in sorted(params_message_type.all_fields(),
key=lambda f: f.number):
matched_path_parameters = path_parameter_dict.get(field.name, [])
self.__add_parameter(field, matched_path_parameters, params)
return params | [
"Describe the parameters of a method.\n\n If the message_type is not a ResourceContainer, will fall back to\n __params_descriptor_without_container (which will eventually be deprecated).\n\n If the message type is a ResourceContainer, then all path/query parameters\n will come from the ResourceContainer. This method will also make sure all\n path parameters are covered by the message fields.\n\n Args:\n message_type: messages.Message or ResourceContainer class, Message with\n parameters to describe.\n request_kind: The type of request being made.\n path: string, HTTP path to method.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n\n Returns:\n A tuple (dict, list of string): Descriptor of the parameters, Order of the\n parameters.\n "
] |
Please provide a description of the function:def __request_message_descriptor(self, request_kind, message_type, method_id,
path):
if isinstance(message_type, resource_container.ResourceContainer):
base_message_type = message_type.body_message_class()
if (request_kind == self.__NO_BODY and
base_message_type != message_types.VoidMessage()):
msg = ('Method %s specifies a body message in its ResourceContainer, but '
'is a HTTP method type that cannot accept a body.') % method_id
raise api_exceptions.ApiConfigurationError(msg)
else:
base_message_type = message_type
if (request_kind != self.__NO_BODY and
base_message_type != message_types.VoidMessage()):
self.__request_schema[method_id] = self.__parser.add_message(
base_message_type.__class__)
params = self.__params_descriptor(message_type, request_kind, path,
method_id)
return params | [
"Describes the parameters and body of the request.\n\n Args:\n request_kind: The type of request being made.\n message_type: messages.Message or ResourceContainer class. The message to\n describe.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n path: string, HTTP path to method.\n\n Returns:\n Dictionary describing the request.\n\n Raises:\n ValueError: if the method path and request required fields do not match\n "
] |
Please provide a description of the function:def __definitions_descriptor(self):
# Filter out any keys that aren't 'properties' or 'type'
result = {}
for def_key, def_value in self.__parser.schemas().iteritems():
if 'properties' in def_value or 'type' in def_value:
key_result = {}
required_keys = set()
if 'type' in def_value:
key_result['type'] = def_value['type']
if 'properties' in def_value:
for prop_key, prop_value in def_value['properties'].items():
if isinstance(prop_value, dict) and 'required' in prop_value:
required_keys.add(prop_key)
del prop_value['required']
key_result['properties'] = def_value['properties']
# Add in the required fields, if any
if required_keys:
key_result['required'] = sorted(required_keys)
result[def_key] = key_result
# Add 'type': 'object' to all object properties
# Also, recursively add relative path to all $ref values
for def_value in result.itervalues():
for prop_value in def_value.itervalues():
if isinstance(prop_value, dict):
if '$ref' in prop_value:
prop_value['type'] = 'object'
self._add_def_paths(prop_value)
return result | [
"Describes the definitions section of the OpenAPI spec.\n\n Returns:\n Dictionary describing the definitions of the spec.\n "
] |
Please provide a description of the function:def __response_message_descriptor(self, message_type, method_id):
# Skeleton response descriptor, common to all response objects
descriptor = {'200': {'description': 'A successful response'}}
if message_type != message_types.VoidMessage():
self.__parser.add_message(message_type.__class__)
self.__response_schema[method_id] = self.__parser.ref_for_message_type(
message_type.__class__)
descriptor['200']['schema'] = {'$ref': '#/definitions/{0}'.format(
self.__response_schema[method_id])}
return dict(descriptor) | [
"Describes the response.\n\n Args:\n message_type: messages.Message class, The message to describe.\n method_id: string, Unique method identifier (e.g. 'myapi.items.method')\n\n Returns:\n Dictionary describing the response.\n "
] |
Please provide a description of the function:def __x_google_quota_descriptor(self, metric_costs):
return {
'metricCosts': {
metric: cost for (metric, cost) in metric_costs.items()
}
} if metric_costs else None | [
"Describes the metric costs for a call.\n\n Args:\n metric_costs: Dict of metric definitions to the integer cost value against\n that metric.\n\n Returns:\n A dict descriptor describing the Quota limits for the endpoint.\n "
] |
Please provide a description of the function:def __x_google_quota_definitions_descriptor(self, limit_definitions):
if not limit_definitions:
return None
definitions_list = [{
'name': ld.metric_name,
'metric': ld.metric_name,
'unit': '1/min/{project}',
'values': {'STANDARD': ld.default_limit},
'displayName': ld.display_name,
} for ld in limit_definitions]
metrics = [{
'name': ld.metric_name,
'valueType': 'INT64',
'metricKind': 'GAUGE',
} for ld in limit_definitions]
return {
'quota': {'limits': definitions_list},
'metrics': metrics,
} | [
"Describes the quota limit definitions for an API.\n\n Args:\n limit_definitions: List of endpoints.LimitDefinition tuples\n\n Returns:\n A dict descriptor of the API's quota limit definitions.\n "
] |
Please provide a description of the function:def __method_descriptor(self, service, method_info, operation_id,
protorpc_method_info, security_definitions):
descriptor = {}
request_message_type = (resource_container.ResourceContainer.
get_request_message(protorpc_method_info.remote))
request_kind = self.__get_request_kind(method_info)
remote_method = protorpc_method_info.remote
path = method_info.get_path(service.api_info)
descriptor['parameters'] = self.__request_message_descriptor(
request_kind, request_message_type,
method_info.method_id(service.api_info),
path)
descriptor['responses'] = self.__response_message_descriptor(
remote_method.response_type(), method_info.method_id(service.api_info))
descriptor['operationId'] = operation_id
# Insert the auth audiences, if any
api_key_required = method_info.is_api_key_required(service.api_info)
if method_info.audiences is not None:
descriptor['security'] = self.__security_descriptor(
method_info.audiences, security_definitions,
api_key_required=api_key_required)
elif service.api_info.audiences is not None or api_key_required:
descriptor['security'] = self.__security_descriptor(
service.api_info.audiences, security_definitions,
api_key_required=api_key_required)
# Insert the metric costs, if any
if method_info.metric_costs:
descriptor['x-google-quota'] = self.__x_google_quota_descriptor(
method_info.metric_costs)
return descriptor | [
"Describes a method.\n\n Args:\n service: endpoints.Service, Implementation of the API as a service.\n method_info: _MethodInfo, Configuration for the method.\n operation_id: string, Operation ID of the method\n protorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC\n description of the method.\n security_definitions: list of dicts, security definitions for the API.\n\n Returns:\n Dictionary describing the method.\n "
] |
Please provide a description of the function:def __security_definitions_descriptor(self, issuers):
if not issuers:
result = {
_DEFAULT_SECURITY_DEFINITION: {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': 'https://accounts.google.com',
'x-google-jwks_uri': 'https://www.googleapis.com/oauth2/v3/certs',
}
}
return result
result = {}
for issuer_key, issuer_value in issuers.items():
result[issuer_key] = {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-google-issuer': issuer_value.issuer,
}
# If jwks_uri is omitted, the auth library will use OpenID discovery
# to find it. Otherwise, include it in the descriptor explicitly.
if issuer_value.jwks_uri:
result[issuer_key]['x-google-jwks_uri'] = issuer_value.jwks_uri
return result | [
"Create a descriptor for the security definitions.\n\n Args:\n issuers: dict, mapping issuer names to Issuer tuples\n\n Returns:\n The dict representing the security definitions descriptor.\n "
] |
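A sketch of the issuer input and the resulting entry, using made-up issuer data; endpoints.Issuer is effectively an (issuer, jwks_uri) named tuple:

    import collections

    Issuer = collections.namedtuple('Issuer', ['issuer', 'jwks_uri'])
    issuers = {'auth0': Issuer('https://example.auth0.com',
                               'https://example.auth0.com/.well-known/jwks.json')}
    # Expected descriptor entry for 'auth0':
    #   {'authorizationUrl': '', 'flow': 'implicit', 'type': 'oauth2',
    #    'x-google-issuer': 'https://example.auth0.com',
    #    'x-google-jwks_uri': 'https://example.auth0.com/.well-known/jwks.json'}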
Please provide a description of the function:def __api_openapi_descriptor(self, services, hostname=None, x_google_api_name=False):
merged_api_info = self.__get_merged_api_info(services)
descriptor = self.get_descriptor_defaults(merged_api_info,
hostname=hostname,
x_google_api_name=x_google_api_name)
description = merged_api_info.description
if not description and len(services) == 1:
description = services[0].__doc__
if description:
descriptor['info']['description'] = description
security_definitions = self.__security_definitions_descriptor(
merged_api_info.issuers)
method_map = {}
method_collision_tracker = {}
rest_collision_tracker = {}
for service in services:
remote_methods = service.all_remote_methods()
for protorpc_meth_name in sorted(remote_methods.iterkeys()):
protorpc_meth_info = remote_methods[protorpc_meth_name]
method_info = getattr(protorpc_meth_info, 'method_info', None)
# Skip methods that are not decorated with @method
if method_info is None:
continue
method_id = method_info.method_id(service.api_info)
is_api_key_required = method_info.is_api_key_required(service.api_info)
path = '/{0}/{1}/{2}'.format(merged_api_info.name,
merged_api_info.path_version,
method_info.get_path(service.api_info))
verb = method_info.http_method.lower()
if path not in method_map:
method_map[path] = {}
# If an API key is required and the security definitions don't already
# have the apiKey issuer, add the appropriate notation now
if is_api_key_required and _API_KEY not in security_definitions:
security_definitions[_API_KEY] = {
'type': 'apiKey',
'name': _API_KEY_PARAM,
'in': 'query'
}
# Derive an OperationId from the method name data
operation_id = self._construct_operation_id(
service.__name__, protorpc_meth_name)
method_map[path][verb] = self.__method_descriptor(
service, method_info, operation_id, protorpc_meth_info,
security_definitions)
# Make sure the same method name isn't repeated.
if method_id in method_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'Method %s used multiple times, in classes %s and %s' %
(method_id, method_collision_tracker[method_id],
service.__name__))
else:
method_collision_tracker[method_id] = service.__name__
# Make sure the same HTTP method & path aren't repeated.
rest_identifier = (method_info.http_method,
method_info.get_path(service.api_info))
if rest_identifier in rest_collision_tracker:
raise api_exceptions.ApiConfigurationError(
'%s path "%s" used multiple times, in classes %s and %s' %
(method_info.http_method, method_info.get_path(service.api_info),
rest_collision_tracker[rest_identifier],
service.__name__))
else:
rest_collision_tracker[rest_identifier] = service.__name__
if method_map:
descriptor['paths'] = method_map
# Add request and/or response definitions, if any
definitions = self.__definitions_descriptor()
if definitions:
descriptor['definitions'] = definitions
descriptor['securityDefinitions'] = security_definitions
# Add quota limit metric definitions, if any
limit_definitions = self.__x_google_quota_definitions_descriptor(
merged_api_info.limit_definitions)
if limit_definitions:
descriptor['x-google-management'] = limit_definitions
return descriptor | [
"Builds an OpenAPI description of an API.\n\n Args:\n services: List of protorpc.remote.Service instances implementing an\n api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary that can be deserialized into JSON and stored as an API\n description document in OpenAPI format.\n\n Raises:\n ApiConfigurationError: If there's something wrong with the API\n configuration, such as a multiclass API decorated with different API\n descriptors (see the docstring for api()), or a repeated method\n signature.\n "
] |
Please provide a description of the function:def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):
hostname = (hostname or util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path
if base_path != '/':
base_path = base_path.rstrip('/')
defaults = {
'swagger': '2.0',
'info': {
'version': api_info.api_version,
'title': api_info.name
},
'host': hostname,
'consumes': ['application/json'],
'produces': ['application/json'],
'schemes': [protocol],
'basePath': base_path,
}
if x_google_api_name:
defaults['x-google-api-name'] = _validate_api_name(api_info.name)
return defaults | [
"Gets a default configuration for a service.\n\n Args:\n api_info: _ApiInfo object for this service.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n A dictionary with the default configuration.\n "
] |
Please provide a description of the function:def get_openapi_dict(self, services, hostname=None, x_google_api_name=False):
if not isinstance(services, (tuple, list)):
services = [services]
# The type of a class that inherits from remote.Service is actually
# remote._ServiceClass, thanks to metaclass strangeness.
# pylint: disable=protected-access
util.check_list_type(services, remote._ServiceClass, 'services',
allow_none=False)
return self.__api_openapi_descriptor(services, hostname=hostname, x_google_api_name=x_google_api_name) | [
"JSON dict description of a protorpc.remote.Service in OpenAPI format.\n\n Args:\n services: Either a single protorpc.remote.Service or a list of them\n that implements an api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n dict, The OpenAPI descriptor document as a JSON dict.\n "
] |
Please provide a description of the function:def pretty_print_config_to_json(self, services, hostname=None, x_google_api_name=False):
descriptor = self.get_openapi_dict(services, hostname, x_google_api_name=x_google_api_name)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': ')) | [
"JSON string description of a protorpc.remote.Service in OpenAPI format.\n\n Args:\n services: Either a single protorpc.remote.Service or a list of them\n that implements an api/version.\n hostname: string, Hostname of the API, to override the value set on the\n current service. Defaults to None.\n\n Returns:\n string, The OpenAPI descriptor document as a JSON string.\n "
] |
Please provide a description of the function:def encode_field(self, field, value):
# Override the handling of 64-bit integers, so they're always encoded
# as strings.
if (isinstance(field, messages.IntegerField) and
field.variant in (messages.Variant.INT64,
messages.Variant.UINT64,
messages.Variant.SINT64)):
if value not in (None, [], ()):
# Convert and replace the value.
if isinstance(value, list):
value = [str(subvalue) for subvalue in value]
else:
value = str(value)
return value
return super(EndpointsProtoJson, self).encode_field(field, value) | [
"Encode a python field value to a JSON value.\n\n Args:\n field: A ProtoRPC field instance.\n value: A python value supported by field.\n\n Returns:\n A JSON serializable value appropriate for field.\n "
] |
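A small standalone illustration of why 64-bit integers are emitted as strings; this uses the plain json module, not library code:

    import json

    big = 2 ** 53 + 1                    # exceeds the exact range of a double
    assert float(big) != big             # would lose precision as a JS-style number
    assert json.loads(json.dumps(str(big))) == '9007199254740993'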
Please provide a description of the function:def __pad_value(value, pad_len_multiple, pad_char):
assert pad_len_multiple > 0
assert len(pad_char) == 1
padding_length = (pad_len_multiple -
(len(value) % pad_len_multiple)) % pad_len_multiple
return value + pad_char * padding_length | [
"Add padding characters to the value if needed.\n\n Args:\n value: The string value to be padded.\n pad_len_multiple: Pad the result so its length is a multiple\n of pad_len_multiple.\n pad_char: The character to use for padding.\n\n Returns:\n The string value with padding characters added.\n "
] |
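The padding arithmetic as a standalone sketch with illustrative values:

    def pad_value(value, pad_len_multiple, pad_char):
        # Characters needed so len(result) is a multiple of pad_len_multiple.
        padding_length = (pad_len_multiple -
                          (len(value) % pad_len_multiple)) % pad_len_multiple
        return value + pad_char * padding_length

    assert pad_value('abcde', 4, '=') == 'abcde==='   # 5 -> 8 characters
    assert pad_value('abcd', 4, '=') == 'abcd'        # already a multiple of 4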
Please provide a description of the function:def decode_field(self, field, value):
# Override BytesField handling. Client libraries typically use a url-safe
# encoding. b64decode doesn't handle these gracefully. urlsafe_b64decode
# handles both cases safely. Also add padding if the padding is incorrect.
if isinstance(field, messages.BytesField):
try:
# Need to call str(value) because ProtoRPC likes to pass values
# as unicode, and urlsafe_b64decode can only handle bytes.
padded_value = self.__pad_value(str(value), 4, '=')
return base64.urlsafe_b64decode(padded_value)
except (TypeError, UnicodeEncodeError), err:
raise messages.DecodeError('Base64 decoding error: %s' % err)
return super(EndpointsProtoJson, self).decode_field(field, value) | [
"Decode a JSON value to a python value.\n\n Args:\n field: A ProtoRPC field instance.\n value: A serialized JSON value.\n\n Returns:\n A Python value compatible with field.\n "
] |
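A round-trip sketch of the decoding path, assuming a client that strips '=' padding and uses the URL-safe alphabet:

    import base64

    encoded = base64.urlsafe_b64encode('\xfb\xff')     # '-_8=' (uses '-' and '_')
    stripped = encoded.rstrip('=')                     # what clients often send
    padded = stripped + '=' * ((4 - len(stripped) % 4) % 4)
    assert base64.urlsafe_b64decode(padded) == '\xfb\xff'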
Please provide a description of the function:def add_message(self, message_type):
name = self.__normalized_name(message_type)
if name not in self.__schemas:
# Set a placeholder to prevent infinite recursion.
self.__schemas[name] = None
schema = self.__message_to_schema(message_type)
self.__schemas[name] = schema
return name | [
"Add a new message.\n\n Args:\n message_type: protorpc.message.Message class to be parsed.\n\n Returns:\n string, The JSON Schema id.\n\n Raises:\n KeyError if the Schema id for this message_type would collide with the\n Schema id of a different message_type that was already added.\n "
] |
Please provide a description of the function:def ref_for_message_type(self, message_type):
name = self.__normalized_name(message_type)
if name not in self.__schemas:
      raise KeyError('Message has not been parsed: %s' % name)
return name | [
"Returns the JSON Schema id for the given message.\n\n Args:\n message_type: protorpc.message.Message class to be parsed.\n\n Returns:\n string, The JSON Schema id.\n\n Raises:\n KeyError: if the message hasn't been parsed via add_message().\n "
] |
Please provide a description of the function:def __normalized_name(self, message_type):
# Normalization is applied to match the constraints that Discovery applies
# to Schema names.
name = message_type.definition_name()
split_name = re.split(r'[^0-9a-zA-Z]', name)
normalized = ''.join(
part[0].upper() + part[1:] for part in split_name if part)
previous = self.__normalized_names.get(normalized)
if previous:
if previous != name:
raise KeyError('Both %s and %s normalize to the same schema name: %s' %
(name, previous, normalized))
else:
self.__normalized_names[normalized] = name
return normalized | [
"Normalized schema name.\n\n Generate a normalized schema name, taking the class name and stripping out\n everything but alphanumerics, and camel casing the remaining words.\n A normalized schema name is a name that matches [a-zA-Z][a-zA-Z0-9]*\n\n Args:\n message_type: protorpc.message.Message class being parsed.\n\n Returns:\n A string, the normalized schema name.\n\n Raises:\n KeyError: A collision was found between normalized names.\n "
] |
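The normalization step in isolation, with an illustrative definition name:

    import re

    def normalize(name):
        # Keep alphanumerics only and camel-case the pieces, so the result
        # matches [a-zA-Z][a-zA-Z0-9]* as required for schema names.
        parts = re.split(r'[^0-9a-zA-Z]', name)
        return ''.join(part[0].upper() + part[1:] for part in parts if part)

    assert normalize('my_module.MyMessage') == 'MyModuleMyMessage'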
Please provide a description of the function:def __message_to_schema(self, message_type):
name = self.__normalized_name(message_type)
schema = {
'id': name,
'type': 'object',
}
if message_type.__doc__:
schema['description'] = message_type.__doc__
properties = {}
for field in message_type.all_fields():
descriptor = {}
# Info about the type of this field. This is either merged with
# the descriptor or it's placed within the descriptor's 'items'
# property, depending on whether this is a repeated field or not.
type_info = {}
if type(field) == messages.MessageField:
field_type = field.type().__class__
type_info['$ref'] = self.add_message(field_type)
if field_type.__doc__:
descriptor['description'] = field_type.__doc__
else:
schema_type = self.__FIELD_TO_SCHEMA_TYPE_MAP.get(
type(field), self.__DEFAULT_SCHEMA_TYPE)
# If the map pointed to a dictionary, check if the field's variant
# is in that dictionary and use the type specified there.
if isinstance(schema_type, dict):
variant_map = schema_type
variant = getattr(field, 'variant', None)
if variant in variant_map:
schema_type = variant_map[variant]
else:
# The variant map needs to specify a default value, mapped by None.
schema_type = variant_map[None]
type_info['type'] = schema_type[0]
if schema_type[1]:
type_info['format'] = schema_type[1]
if type(field) == messages.EnumField:
sorted_enums = sorted([enum_info for enum_info in field.type],
key=lambda enum_info: enum_info.number)
type_info['enum'] = [enum_info.name for enum_info in sorted_enums]
if field.required:
descriptor['required'] = True
if field.default:
if type(field) == messages.EnumField:
descriptor['default'] = str(field.default)
else:
descriptor['default'] = field.default
if field.repeated:
descriptor['items'] = type_info
descriptor['type'] = 'array'
else:
descriptor.update(type_info)
properties[field.name] = descriptor
schema['properties'] = properties
return schema | [
"Parse a single message into JSON Schema.\n\n Will recursively descend the message structure\n and also parse other messages references via MessageFields.\n\n Args:\n message_type: protorpc.messages.Message class to parse.\n\n Returns:\n An object representation of the schema.\n "
] |
Please provide a description of the function:def _check_enum(parameter_name, value, parameter_config):
enum_values = [enum['backendValue']
for enum in parameter_config['enum'].values()
if 'backendValue' in enum]
if value not in enum_values:
raise errors.EnumRejectionError(parameter_name, value, enum_values) | [
"Checks if an enum value is valid.\n\n This is called by the transform_parameter_value function and shouldn't be\n called directly.\n\n This verifies that the value of an enum parameter is valid.\n\n Args:\n parameter_name: A string containing the name of the parameter, which is\n either just a variable name or the name with the index appended. For\n example 'var' or 'var[2]'.\n value: A string containing the value passed in for the parameter.\n parameter_config: The dictionary containing information specific to the\n parameter in question. This is retrieved from request.parameters in\n the method config.\n\n Raises:\n EnumRejectionError: If the given value is not among the accepted\n enum values in the field parameter.\n "
] |
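An illustrative parameter_config for an enum parameter; the keys mirror the method-config format, the values are made up:

    parameter_config = {
        'enum': {
            'RED':  {'backendValue': 'RED'},
            'BLUE': {'backendValue': 'BLUE'},
        },
    }
    # _check_enum('color', 'RED', parameter_config) passes silently;
    # _check_enum('color', 'GREEN', parameter_config) raises EnumRejectionError.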
Please provide a description of the function:def _check_boolean(parameter_name, value, parameter_config):
if parameter_config.get('type') != 'boolean':
return
if value.lower() not in ('1', 'true', '0', 'false'):
raise errors.BasicTypeParameterError(parameter_name, value, 'boolean') | [
"Checks if a boolean value is valid.\n\n This is called by the transform_parameter_value function and shouldn't be\n called directly.\n\n This checks that the string value passed in can be converted to a valid\n boolean value.\n\n Args:\n parameter_name: A string containing the name of the parameter, which is\n either just a variable name or the name with the index appended. For\n example 'var' or 'var[2]'.\n value: A string containing the value passed in for the parameter.\n parameter_config: The dictionary containing information specific to the\n parameter in question. This is retrieved from request.parameters in\n the method config.\n\n Raises:\n BasicTypeParameterError: If the given value is not a valid boolean\n value.\n "
] |
Please provide a description of the function:def _get_parameter_conversion_entry(parameter_config):
entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))
# Special handling for enum parameters. An enum's type is 'string', so we
# need to detect them by the presence of an 'enum' property in their
# configuration.
if entry is None and 'enum' in parameter_config:
entry = _PARAM_CONVERSION_MAP['enum']
return entry | [
"Get information needed to convert the given parameter to its API type.\n\n Args:\n parameter_config: The dictionary containing information specific to the\n parameter in question. This is retrieved from request.parameters in the\n method config.\n\n Returns:\n The entry from _PARAM_CONVERSION_MAP with functions/information needed to\n validate and convert the given parameter from a string to the type expected\n by the API.\n "
] |
Please provide a description of the function:def transform_parameter_value(parameter_name, value, parameter_config):
if isinstance(value, list):
# We're only expecting to handle path and query string parameters here.
# The way path and query string parameters are passed in, they'll likely
# only be single values or singly-nested lists (no lists nested within
# lists). But even if there are nested lists, we'd want to preserve that
# structure. These recursive calls should preserve it and convert all
# parameter values. See the docstring for information about the parameter
# renaming done here.
return [transform_parameter_value('%s[%d]' % (parameter_name, index),
element, parameter_config)
for index, element in enumerate(value)]
# Validate and convert the parameter value.
entry = _get_parameter_conversion_entry(parameter_config)
if entry:
validation_func, conversion_func, type_name = entry
if validation_func:
validation_func(parameter_name, value, parameter_config)
if conversion_func:
try:
return conversion_func(value)
except ValueError:
raise errors.BasicTypeParameterError(parameter_name, value, type_name)
return value | [
"Validates and transforms parameters to the type expected by the API.\n\n If the value is a list this will recursively call _transform_parameter_value\n on the values in the list. Otherwise, it checks all parameter rules for the\n the current value and converts its type from a string to whatever format\n the API expects.\n\n In the list case, '[index-of-value]' is appended to the parameter name for\n error reporting purposes.\n\n Args:\n parameter_name: A string containing the name of the parameter, which is\n either just a variable name or the name with the index appended, in the\n recursive case. For example 'var' or 'var[2]'.\n value: A string or list of strings containing the value(s) passed in for\n the parameter. These are the values from the request, to be validated,\n transformed, and passed along to the backend.\n parameter_config: The dictionary containing information specific to the\n parameter in question. This is retrieved from request.parameters in the\n method config.\n\n Returns:\n The converted parameter value(s). Not all types are converted, so this\n may be the same string that's passed in.\n "
] |
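A simplified sketch of the recursion and the '[index]' naming, assuming an integer parameter; the real conversion table and error classes are omitted:

    def transform(parameter_name, value):
        if isinstance(value, list):
            return [transform('%s[%d]' % (parameter_name, index), element)
                    for index, element in enumerate(value)]
        try:
            return int(value)
        except ValueError:
            raise ValueError('%s: %r is not a valid integer' % (parameter_name, value))

    assert transform('ids', ['1', '2', '3']) == [1, 2, 3]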
Please provide a description of the function:def sort_dependencies(app_list):
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK or M2M relation with
# a model that defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
for field in model._meta.many_to_many:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
      # then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise CommandError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list | [
"Sort a list of (app_config, models) pairs into a single list of models.\n The single list of models is sorted so that any model with a natural key\n is serialized before a normal model, and any model with a natural key\n dependency has it's dependencies serialized first.\n "
] |
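A sketch of the natural-key dependency declaration this function consumes; the app label and model names are hypothetical:

    from django.db import models

    class Author(models.Model):
        name = models.CharField(max_length=100, unique=True)

        def natural_key(self):
            return (self.name,)

    class Book(models.Model):
        title = models.CharField(max_length=100)
        author = models.ForeignKey(Author)

        def natural_key(self):
            return (self.title,)
        # Ensures Author instances are serialized before Book instances.
        natural_key.dependencies = ['myapp.Author']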
Please provide a description of the function:def filter_items(self, items):
    '''Filter items by specific criteria.'''
items = self._filter_active(items)
items = self._filter_in_nav(items)
return items | [] |
Please provide a description of the function:def get_formset(self):
if self.folder:
queryset = self.folder.files.all()
else:
queryset = File.objects.none()
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name,
queryset=queryset)
return self._formset | [
"Provide the formset corresponding to this DataTable.\n\n Use this to validate the formset and to get the submitted data back.\n "
] |
Please provide a description of the function:def import_file(self, file_obj, folder):
created = False
for cls in MEDIA_MODELS:
if cls.matches_file_type(file_obj.name):
obj, created = cls.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
if not created:
obj, created = File.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print("file_created #%s / image_created #%s -- file : %s -- created : %s" % (self.file_created,
self.image_created,
obj, created))
return obj | [
"\n Create a File or an Image into the given folder\n "
] |
Please provide a description of the function:def is_leonardo_module(mod):
if hasattr(mod, 'default') \
or hasattr(mod, 'leonardo_module_conf'):
return True
for key in dir(mod):
if 'LEONARDO' in key:
return True
return False | [
"returns True if is leonardo module\n "
] |
Please provide a description of the function:def _translate_page_into(page, language, default=None):
    # Optimisation shortcut: no need to dive into translations if the page is already in the requested language
if page.language == language:
return page
translations = dict((t.language, t) for t in page.available_translations())
translations[page.language] = page
if language in translations:
return translations[language]
else:
if hasattr(default, '__call__'):
return default(page=page)
return default | [
"\n Return the translation for a given page\n "
] |
Please provide a description of the function:def feincms_breadcrumbs(page, include_self=True):
if not page or not isinstance(page, Page):
raise ValueError("feincms_breadcrumbs must be called with a valid Page object")
ancs = page.get_ancestors()
bc = [(anc.get_absolute_url(), anc.short_title()) for anc in ancs]
if include_self:
bc.append((None, page.short_title()))
return {"trail": bc} | [
"\n Generate a list of the page's ancestors suitable for use as breadcrumb navigation.\n\n By default, generates an unordered list with the id \"breadcrumbs\" -\n override breadcrumbs.html to change this.\n\n ::\n\n {% feincms_breadcrumbs feincms_page %}\n "
] |
Please provide a description of the function:def is_parent_of(page1, page2):
try:
return page1.tree_id == page2.tree_id and page1.lft < page2.lft and page1.rght > page2.rght
except AttributeError:
return False | [
"\n Determines whether a given page is the parent of another page\n\n Example::\n\n {% if page|is_parent_of:feincms_page %} ... {% endif %}\n "
] |
Please provide a description of the function:def parent(self):
'''We use parent for some initial data'''
if not hasattr(self, '_parent'):
if 'parent' in self.kwargs:
try:
self._parent = Page.objects.get(id=self.kwargs["parent"])
except Exception as e:
raise e
else:
if hasattr(self.request, 'leonardo_page'):
self._parent = self.request.leonardo_page
else:
return None
return self._parent | [] |
Please provide a description of the function:def head_title(request):
try:
fragments = request._feincms_fragments
except:
fragments = {}
if '_head_title' in fragments and fragments.get("_head_title"):
return fragments.get("_head_title")
else:
# append site name
site_name = getattr(settings, 'LEONARDO_SITE_NAME', '')
if site_name != '':
return getattr(request.leonardo_page,
"page_title", request.leonardo_page.title) \
+ ' | ' + site_name
return getattr(request.leonardo_page,
"page_title", request.leonardo_page.title) | [
"\n {% head_title request %}\n "
] |
Please provide a description of the function:def meta_description(request):
try:
fragments = request._feincms_fragments
except:
fragments = {}
if '_meta_description' in fragments and fragments.get("_meta_description"):
return fragments.get("_meta_description")
else:
# append desc
site_desc = getattr(settings, 'META_DESCRIPTION', '')
if site_desc != '':
return getattr(request.leonardo_page,
"meta_description", request.leonardo_page.meta_description) \
+ ' - ' + site_desc
return getattr(request.leonardo_page,
"meta_description", request.leonardo_page.meta_description) | [
"\n {% meta_description request %}\n "
] |
Please provide a description of the function:def render_region_tools(context, feincms_object, region, request=None):
if context.get('standalone', False) or not feincms_object:
return {}
edit = False
if getattr(settings, 'LEONARDO_USE_PAGE_ADMIN', False):
request = context.get('request', None)
frontend_edit = request.COOKIES.get(
'frontend_editing', False)
if frontend_edit:
edit = True
return {
'edit': edit,
'feincms_object': feincms_object,
'region': region,
'region_name': get_page_region(region),
'widget_add_url': reverse_lazy(
'widget_create',
args=[feincms_object.id,
region,
'%s.%s' % (feincms_object._meta.app_label,
feincms_object.__class__.__name__)
])
} | [
"\n {% render_region_tools feincms_page \"main\" request %}\n\n skip rendering in standalone mode\n "
] |
Please provide a description of the function:def feincms_render_region(context, feincms_object, region, request=None,
classes='', wrapper=True):
if not feincms_object:
return ''
if not context.get('standalone', False) or region in STANDALONE_REGIONS:
region_content = ''.join(
_render_content(content, request=request, context=context)
for content in getattr(feincms_object.content, region))
else:
region_content = ''
if not wrapper:
return region_content
_classes = "leonardo-region leonardo-region-%(region)s %(classes)s" % {
'region': region,
'classes': classes
}
_id = "%(region)s-%(id)s" % {
'id': feincms_object.id,
'region': region,
}
return '<div class="%(classes)s" id=%(id)s>%(content)s</div>' % {
'id': _id,
'classes': _classes,
'content': region_content
} | [
"\n {% feincms_render_region feincms_page \"main\" request %}\n\n Support for rendering Page without some regions especialy for modals\n this feature is driven by context variable\n "
] |
Please provide a description of the function:def app_reverse(parser, token):
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError(
"'%s' takes at least two arguments"
" (path to a view and a urlconf)" % bits[0])
viewname = parser.compile_filter(bits[1])
urlconf = parser.compile_filter(bits[2])
args = []
kwargs = {}
asvar = None
bits = bits[3:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError(
"Malformed arguments to app_reverse tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return AppReverseNode(viewname, urlconf, args, kwargs, asvar) | [
"\n Returns an absolute URL for applications integrated with ApplicationContent\n The tag mostly works the same way as Django's own {% url %} tag::\n {% load leonardo_tags %}\n {% app_reverse \"mymodel_detail\" \"myapp.urls\" arg1 arg2 %}\n or\n {% load leonardo_tags %}\n {% app_reverse \"mymodel_detail\" \"myapp.urls\" name1=value1 %}\n The first argument is a path to a view. The second argument is the URLconf\n under which this app is known to the ApplicationContent. The second\n argument may also be a request object if you want to reverse an URL\n belonging to the current application content.\n Other arguments are space-separated values that will be filled in place of\n positional and keyword arguments in the URL. Don't mix positional and\n keyword arguments.\n If you want to store the URL in a variable instead of showing it right away\n you can do so too::\n {% app_reverse \"mymodel_detail\" \"myapp.urls\" arg1 arg2 as url %}\n "
] |
Please provide a description of the function:def feincms_object_tools(context, cls_name):
if context.get('standalone', False):
return {}
edit = False
if getattr(settings, 'LEONARDO_USE_PAGE_ADMIN', False):
request = context.get('request', None)
frontend_edit = request.COOKIES.get(
'frontend_editing', False)
if frontend_edit:
edit = True
return {
'edit': edit,
'add_entry_url': reverse_lazy(
'horizon:contrib:forms:create',
args=[cls_name])
} | [
"\n {% feincms_object_tools 'article' %}\n {% feincms_object_tools 'web.page' %}\n\n render add feincms object entry\n "
] |
Please provide a description of the function:def image_name(image, key='name', clear=True):
if hasattr(image, 'translation') and image.translation:
return getattr(image.translation, key)
if hasattr(image, key) and getattr(image, key):
return getattr(image, key)
try:
name = IMAGE_NAME.match(image.original_filename).group()
except IndexError:
return ''
else:
name = name[:-1]
if clear:
return name.replace("_", " ").replace("-", " ")
return name | [
"\n {{ image|image_name }}\n {{ image|image_name:\"description\" }}\n {{ image|image_name:\"default_caption\" }}\n {{ image|image_name:\"default_caption\" False }}\n\n Return translation or image name\n "
] |
Please provide a description of the function:def tree_label(self):
    '''Render a tree label like `root > child > child`.'''
titles = []
page = self
while page:
titles.append(page.title)
page = page.parent
return smart_text(' > '.join(reversed(titles))) | [] |
Please provide a description of the function:def flush_ct_inventory(self):
if hasattr(self, '_ct_inventory'):
# skip self from update
self._ct_inventory = None
self.update_view = False
self.save() | [
"internal method used only if ct_inventory is enabled\n "
] |
Please provide a description of the function:def register_default_processors(cls, frontend_editing=None):
super(Page, cls).register_default_processors()
if frontend_editing:
cls.register_request_processor(
edit_processors.frontendediting_request_processor,
key='frontend_editing')
cls.register_response_processor(
edit_processors.frontendediting_response_processor,
key='frontend_editing') | [
"\n Register our default request processors for the out-of-the-box\n Page experience.\n\n Since FeinCMS 1.11 was removed from core.\n\n "
] |
Please provide a description of the function:def run_request_processors(self, request):
if not getattr(self, 'request_processors', None):
return
for fn in reversed(list(self.request_processors.values())):
r = fn(self, request)
if r:
return r | [
"\n Before rendering a page, run all registered request processors. A\n request processor may peruse and modify the page or the request. It can\n also return a ``HttpResponse`` for shortcutting the rendering and\n returning that response immediately to the client.\n "
] |
Please provide a description of the function:def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
feincms_page = slug = template = None
try:
from leonardo.module.web.models import Page
feincms_page = Page.objects.for_request(request, best_match=True)
template = feincms_page.theme.template
except:
if Page.objects.exists():
feincms_page = Page.objects.filter(parent=None).first()
template = feincms_page.theme.template
else:
            # nested paths are not allowed at this time
try:
slug = request.path_info.split("/")[-2:-1][0]
except KeyError:
raise Exception("Nested path is not allowed !")
c = RequestContext(request, {
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
'feincms_page': feincms_page,
'template': template or 'base.html',
'standalone': True,
'slug': slug,
})
try:
t = render_to_string('404_technical.html', c)
except:
from django.views.debug import TECHNICAL_404_TEMPLATE
t = Template(TECHNICAL_404_TEMPLATE).render(c)
return HttpResponseNotFound(t, content_type='text/html') | [] |
Please provide a description of the function:def response_change(self, request, obj):
r = super(FileAdmin, self).response_change(request, obj)
if 'Location' in r and r['Location']:
# it was a successful save
if (r['Location'] in ['../'] or
r['Location'] == self._get_post_url(obj)):
# this means it was a save: redirect to the directory view
if obj.folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.folder.id})
else:
url = reverse(
'admin:filer-directory_listing-unfiled_images')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
else:
# this means it probably was a save_and_continue_editing
pass
else:
# this means it was a save: redirect to the directory view
if obj.folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.folder.id})
else:
url = reverse(
'admin:filer-directory_listing-unfiled_images')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r | [
"\n Overrides the default to be able to forward to the directory listing\n instead of the default change_list_view\n "
] |
Please provide a description of the function:def delete_view(self, request, object_id, extra_context=None):
parent_folder = None
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
parent_folder = obj.folder
except self.model.DoesNotExist:
obj = None
r = super(FileAdmin, self).delete_view(
request=request, object_id=object_id,
extra_context=extra_context)
url = r.get("Location", None)
# Check against filer_file_changelist as file deletion is always made by
# the base class
if (url in ["../../../../", "../../"] or
url == reverse("admin:media_file_changelist") or
url == reverse("admin:media_image_changelist")):
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-unfiled_images')
url = "%s%s%s" % (url, popup_param(request),
selectfolder_param(request, "&"))
return HttpResponseRedirect(url)
return r | [
"\n Overrides the default to enable redirecting to the directory view after\n deletion of a image.\n\n we need to fetch the object and find out who the parent is\n before super, because super will delete the object and make it\n impossible to find out the parent folder to redirect to.\n "
] |
Please provide a description of the function:def items(self):
'''access for filtered items'''
if hasattr(self, '_items'):
return self.filter_items(self._items)
self._items = self.get_items()
return self.filter_items(self._items) | [] |
Please provide a description of the function:def populate_items(self, request):
    '''Populate and return filtered items.'''
self._items = self.get_items(request)
return self.items | [] |
Please provide a description of the function:def get_rows(self):
'''returns rows with items
        [[item1, item2], [item3, item4], [item5]]'''
rows = []
row = []
for i, item in enumerate(self.items):
if i > 0 and i % self.objects_per_row == 0:
rows.append(row)
row = []
row.append(item)
rows.append(row)
return rows | [] |
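The same chunking, written as a plain slice-based sketch with three objects per row:

    items = ['a', 'b', 'c', 'd', 'e']
    rows = [items[i:i + 3] for i in range(0, len(items), 3)]
    assert rows == [['a', 'b', 'c'], ['d', 'e']]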
Please provide a description of the function:def columns_classes(self):
    '''returns column widths for the grid'''
md = 12 / self.objects_per_row
sm = None
if self.objects_per_row > 2:
sm = 12 / (self.objects_per_row / 2)
return md, (sm or md), 12 | [] |
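The column arithmetic with an illustrative value; mapping the md/sm/12 triple onto Bootstrap-style grid classes is an assumption, not stated in the code:

    objects_per_row = 4
    md = 12 / objects_per_row          # 3 (integer division under Python 2)
    sm = 12 / (objects_per_row / 2)    # 6
    # -> (3, 6, 12), e.g. col-md-3 col-sm-6 col-xs-12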
Please provide a description of the function:def get_pages(self):
'''returns pages with rows'''
pages = []
page = []
for i, item in enumerate(self.get_rows):
if i > 0 and i % self.objects_per_page == 0:
pages.append(page)
page = []
page.append(item)
pages.append(page)
return pages | [] |
Please provide a description of the function:def needs_pagination(self):
if self.objects_per_page == 0:
return False
if len(self.items) > self.objects_per_page \
or len(self.get_pages[0]) > self.objects_per_page:
return True
return False | [
"Calculate needs pagination"
] |
Please provide a description of the function:def get_item_template(self):
    '''returns template for a single object from the queryset
        If you have a template named my_list_template.html
        then the template for a single object will be
        _my_list_template.html
        Currently only the default theme generates _item.html
        _item.html is obsolete; use _default.html
        '''
content_template = self.content_theme.name
# _item.html is obsolete use _default.html
# TODO: remove this condition after all _item.html will be converted
if content_template == "default":
return "widget/%s/_item.html" % self.widget_name
# TODO: support more template suffixes
return "widget/%s/_%s.html" % (self.widget_name, content_template) | [] |