<SYSTEM_TASK:> From the given socket, reads and yields payload of the given size. With sockets, we don't receive all data at <END_TASK> <USER_TASK:> Description: def _read_payload(socket, payload_size):
    """
    From the given socket, reads and yields payload of the given size. With sockets, we don't receive all data at
    once. Therefore this method will yield each time we read some data from the socket, until the payload_size has
    been reached or the socket has no more data.

    Parameters
    ----------
    socket
        Socket to read from

    payload_size : int
        Size of the payload to read. Exactly this many bytes are read from the socket before the yield stops.

    Yields
    ------
    int
        Type of the stream (1 => stdout, 2 => stderr)
    str
        Data in the stream
    """
    remaining = payload_size

    while remaining > 0:

        # Try and read as much as possible
        data = read(socket, remaining)
        if data is None:
            # ``read`` will terminate with an empty string. This is just a transient state where we didn't get
            # any data
            continue

        if len(data) == 0:  # pylint: disable=C1801
            # Empty string. Socket does not have any more data. We are done here even if we haven't read the
            # full payload
            break

        remaining -= len(data)
        yield data
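As an illustration of the read-until-complete loop above, here is a minimal, self-contained sketch that swaps the socket for an in-memory stream; `read_payload` and its `chunk` parameter are hypothetical stand-ins for the real `read` helper:

import io

def read_payload(stream, payload_size, chunk=4):
    # Yield data until `payload_size` bytes are read or the stream runs dry,
    # mimicking the partial reads we get from a real socket.
    remaining = payload_size
    while remaining > 0:
        data = stream.read(min(chunk, remaining))
        if not data:  # EOF before the full payload arrived
            break
        remaining -= len(data)
        yield data

stream = io.BytesIO(b"hello world")
print(b"".join(read_payload(stream, 5)))  # b'hello'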
<SYSTEM_TASK:> Creates a Lambda Service ResourceNotFound Response <END_TASK> <USER_TASK:> Description: def resource_not_found(function_name):
    """
    Creates a Lambda Service ResourceNotFound Response

    Parameters
    ----------
    function_name : str
        Name of the function that was requested to be invoked

    Returns
    -------
    Flask.Response
        A response object representing the ResourceNotFound Error
    """
    exception_tuple = LambdaErrorResponses.ResourceNotFoundException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(
            LambdaErrorResponses.USER_ERROR,
            "Function not found: arn:aws:lambda:us-west-2:012345678901:function:{}".format(function_name)
        ),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Creates a Lambda Service InvalidRequestContent Response <END_TASK> <USER_TASK:> Description: def invalid_request_content(message):
    """
    Creates a Lambda Service InvalidRequestContent Response

    Parameters
    ----------
    message : str
        Message to be added to the body of the response

    Returns
    -------
    Flask.Response
        A response object representing the InvalidRequestContent Error
    """
    exception_tuple = LambdaErrorResponses.InvalidRequestContentException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR, message),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Creates a Lambda Service UnsupportedMediaType Response <END_TASK> <USER_TASK:> Description: def unsupported_media_type(content_type):
    """
    Creates a Lambda Service UnsupportedMediaType Response

    Parameters
    ----------
    content_type : str
        Content Type of the request that was made

    Returns
    -------
    Flask.Response
        A response object representing the UnsupportedMediaType Error
    """
    exception_tuple = LambdaErrorResponses.UnsupportedMediaTypeException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.USER_ERROR,
                                                            "Unsupported content type: {}".format(content_type)),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Creates a Lambda Service Generic ServiceException Response <END_TASK> <USER_TASK:> Description: def generic_service_exception(*args):
    """
    Creates a Lambda Service Generic ServiceException Response

    Parameters
    ----------
    args : list
        List of arguments Flask passes to the method

    Returns
    -------
    Flask.Response
        A response object representing the GenericServiceException Error
    """
    exception_tuple = LambdaErrorResponses.ServiceException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.SERVICE_ERROR, "ServiceException"),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Creates a Lambda Service Generic PathNotFound Response <END_TASK> <USER_TASK:> Description: def generic_path_not_found(*args):
    """
    Creates a Lambda Service Generic PathNotFound Response

    Parameters
    ----------
    args : list
        List of arguments Flask passes to the method

    Returns
    -------
    Flask.Response
        A response object representing the GenericPathNotFound Error
    """
    exception_tuple = LambdaErrorResponses.PathNotFoundException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(
            LambdaErrorResponses.LOCAL_SERVICE_ERROR, "PathNotFoundException"),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Creates a Lambda Service Generic MethodNotAllowed Response <END_TASK> <USER_TASK:> Description: def generic_method_not_allowed(*args):
    """
    Creates a Lambda Service Generic MethodNotAllowed Response

    Parameters
    ----------
    args : list
        List of arguments Flask passes to the method

    Returns
    -------
    Flask.Response
        A response object representing the GenericMethodNotAllowed Error
    """
    exception_tuple = LambdaErrorResponses.MethodNotAllowedException

    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(LambdaErrorResponses.LOCAL_SERVICE_ERROR,
                                                            "MethodNotAllowedException"),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1]
    )
<SYSTEM_TASK:> Returns path to the function code resolved based on current working directory. <END_TASK> <USER_TASK:> Description: def resolve_code_path(cwd, codeuri):
    """
    Returns path to the function code resolved based on current working directory.

    Parameters
    ----------
    cwd : str
        Current working directory
    codeuri
        CodeURI of the function. This should contain the path to the function code

    Returns
    -------
    str
        Absolute path to the function code
    """
    LOG.debug("Resolving code path. Cwd=%s, CodeUri=%s", cwd, codeuri)

    # First, let us figure out the current working directory.
    # If current working directory is not provided, then default to the directory where the CLI is running from
    if not cwd or cwd == PRESENT_DIR:
        cwd = os.getcwd()

    # Make sure cwd is an absolute path
    cwd = os.path.abspath(cwd)

    # Next, let us get absolute path of function code.
    # Codepath is always relative to current working directory
    # If the path is relative, then construct the absolute version
    if not os.path.isabs(codeuri):
        codeuri = os.path.normpath(os.path.join(cwd, codeuri))

    return codeuri
<SYSTEM_TASK:> Converts a Path from an Api Gateway defined path to one that is accepted by Flask <END_TASK> <USER_TASK:> Description: def convert_path_to_flask(path):
    """
    Converts a Path from an Api Gateway defined path to one that is accepted by Flask

    Examples:

    '/id/{id}' => '/id/<id>'
    '/{proxy+}' => '/<path:proxy>'

    :param str path: Path to convert to Flask defined path
    :return str: Path representing a Flask path
    """
    proxy_sub_path = APIGW_TO_FLASK_REGEX.sub(FLASK_CAPTURE_ALL_PATH, path)

    # Replace the '{' and '}' with '<' and '>' respectively
    return proxy_sub_path.replace(LEFT_BRACKET, LEFT_ANGLE_BRACKET).replace(RIGHT_BRACKET, RIGHT_ANGLE_BRACKET)
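A standalone sketch of the same conversion, with hypothetical stand-ins for the module constants (the exact regex used by SAM CLI may differ):

import re

APIGW_TO_FLASK_REGEX = re.compile(r"{(\w+)\+}")  # assumed pattern matching '{proxy+}'
FLASK_CAPTURE_ALL_PATH = r"<path:\1>"            # assumed replacement template

def convert_path_to_flask(path):
    proxy_sub_path = APIGW_TO_FLASK_REGEX.sub(FLASK_CAPTURE_ALL_PATH, path)
    # Replace the remaining '{' and '}' with '<' and '>'
    return proxy_sub_path.replace("{", "<").replace("}", ">")

print(convert_path_to_flask("/id/{id}"))   # /id/<id>
print(convert_path_to_flask("/{proxy+}"))  # /<path:proxy>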
<SYSTEM_TASK:> Converts a Path from a Flask defined path to one that is accepted by Api Gateway <END_TASK> <USER_TASK:> Description: def convert_path_to_api_gateway(path):
    """
    Converts a Path from a Flask defined path to one that is accepted by Api Gateway

    Examples:

    '/id/<id>' => '/id/{id}'
    '/<path:proxy>' => '/{proxy+}'

    :param str path: Path to convert to Api Gateway defined path
    :return str: Path representing an Api Gateway path
    """
    proxy_sub_path = FLASK_TO_APIGW_REGEX.sub(PROXY_PATH_PARAMS, path)

    # Replace the '<' and '>' with '{' and '}' respectively
    return proxy_sub_path.replace(LEFT_ANGLE_BRACKET, LEFT_BRACKET).replace(RIGHT_ANGLE_BRACKET, RIGHT_BRACKET)
<SYSTEM_TASK:> Gets the name of the function from the Integration URI ARN. This is a best effort service which returns None <END_TASK> <USER_TASK:> Description: def get_function_name(integration_uri):
    """
    Gets the name of the function from the Integration URI ARN. This is a best effort service which returns None
    if the function name could not be parsed. This can happen when the ARN is an intrinsic function which is too
    complex or the ARN is not a Lambda integration.

    Parameters
    ----------
    integration_uri : basestring or dict
        Integration URI data extracted from the Swagger dictionary. This could be a string of the ARN or an
        intrinsic function that will resolve to the ARN

    Returns
    -------
    basestring or None
        If the function name could be parsed out of the Integration URI ARN. None, otherwise
    """
    arn = LambdaUri._get_function_arn(integration_uri)

    LOG.debug("Extracted Function ARN: %s", arn)

    return LambdaUri._get_function_name_from_arn(arn)
<SYSTEM_TASK:> Given the integration ARN, extract the Lambda function name from the ARN. If there <END_TASK> <USER_TASK:> Description: def _get_function_name_from_arn(function_arn):
    """
    Given the integration ARN, extract the Lambda function name from the ARN. If there
    are stage variables, or other unsupported formats, this function will return None.

    Parameters
    ----------
    function_arn : basestring or None
        Function ARN from the swagger document

    Returns
    -------
    basestring or None
        Function name of this integration. None if the ARN is not parsable
    """

    if not function_arn:
        return None

    matches = re.match(LambdaUri._REGEX_GET_FUNCTION_NAME, function_arn)
    if not matches or not matches.groups():
        LOG.debug("No Lambda function ARN defined for integration containing ARN %s", function_arn)
        return None

    groups = matches.groups()
    maybe_function_name = groups[0]  # This regex has only one group match

    # Function name could be a real name, a stage variable, or some unknown format
    if re.match(LambdaUri._REGEX_STAGE_VARIABLE, maybe_function_name):
        # Yes, this is a stage variable
        LOG.debug("Stage variables are not supported. Ignoring integration with function ARN %s", function_arn)
        return None

    elif re.match(LambdaUri._REGEX_VALID_FUNCTION_NAME, maybe_function_name):
        # Yes, this is a real function name
        return maybe_function_name

    # Some unknown format
    LOG.debug("Ignoring integration ARN. Unable to parse Function Name from function arn %s",
              function_arn)
<SYSTEM_TASK:> Read the event JSON data from the given file. If no file is provided, read the event from stdin. <END_TASK> <USER_TASK:> Description: def _get_event(event_file_name):
    """
    Read the event JSON data from the given file. If no file is provided, read the event from stdin.

    :param string event_file_name: Path to event file, or '-' for stdin
    :return string: Contents of the event file or stdin
    """

    if event_file_name == STDIN_FILE_NAME:
        # If event is empty, listen to stdin for event data until EOF
        LOG.info("Reading invoke payload from stdin (you can also pass it from file with --event)")

    # click.open_file knows to open stdin when filename is '-'. This is safer than manually opening streams and
    # risking accidentally closing a standard stream.
    with click.open_file(event_file_name, 'r') as fp:
        return fp.read()
<SYSTEM_TASK:> Normalize all Resources in the template with the Metadata Key on the resource. <END_TASK> <USER_TASK:> Description: def normalize(template_dict):
    """
    Normalize all Resources in the template with the Metadata Key on the resource.

    This method will mutate the template

    Parameters
    ----------
    template_dict : dict
        Dictionary representing the template
    """
    resources = template_dict.get(RESOURCES_KEY, {})

    for logical_id, resource in resources.items():
        resource_metadata = resource.get(METADATA_KEY, {})
        asset_path = resource_metadata.get(ASSET_PATH_METADATA_KEY)
        asset_property = resource_metadata.get(ASSET_PROPERTY_METADATA_KEY)

        ResourceMetadataNormalizer._replace_property(asset_property, asset_path, resource, logical_id)
<SYSTEM_TASK:> Replace a property with an asset on a given resource <END_TASK> <USER_TASK:> Description: def _replace_property(property_key, property_value, resource, logical_id):
    """
    Replace a property with an asset on a given resource

    This method will mutate the template

    Parameters
    ----------
    property_key : str
        The property to replace on the resource
    property_value : str
        The new value of the property
    resource : dict
        Dictionary representing the Resource to change
    logical_id : str
        LogicalId of the Resource
    """
    if property_key and property_value:
        resource.get(PROPERTIES_KEY, {})[property_key] = property_value
    elif property_key or property_value:
        LOG.info("WARNING: Ignoring Metadata for Resource %s. Metadata contains only aws:asset:path or "
                 "aws:asset:property but not both", logical_id)
<SYSTEM_TASK:> Extract the command name from package name. Last part of the module path is the command <END_TASK> <USER_TASK:> Description: def _set_commands(package_names):
    """
    Extract the command name from the package name. The last part of the module path is the command name,
    ie. if the path is foo.bar.baz, then "baz" is the command name.

    :param package_names: List of package names
    :return: Dictionary with command name as key and the package name as value.
    """

    commands = {}

    for pkg_name in package_names:
        cmd_name = pkg_name.split('.')[-1]
        commands[cmd_name] = pkg_name

    return commands
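For example, assuming package names follow a samcli.commands.* layout (the names below are made up), the mapping comes out as:

def set_commands(package_names):
    # the last dotted segment of each package path becomes the command name
    return {pkg.split('.')[-1]: pkg for pkg in package_names}

print(set_commands(["samcli.commands.validate", "samcli.commands.deploy"]))
# {'validate': 'samcli.commands.validate', 'deploy': 'samcli.commands.deploy'}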
<SYSTEM_TASK:> Overrides method from ``click.MultiCommand`` that returns Click CLI object for given command name, if found. <END_TASK> <USER_TASK:> Description: def get_command(self, ctx, cmd_name):
    """
    Overrides method from ``click.MultiCommand`` that returns Click CLI object for given command name, if found.

    :param ctx: Click context
    :param cmd_name: Top-level command name
    :return: Click object representing the command
    """
    if cmd_name not in self._commands:
        logger.error("Command %s not available", cmd_name)
        return

    pkg_name = self._commands[cmd_name]

    try:
        mod = importlib.import_module(pkg_name)
    except ImportError:
        logger.exception("Command '%s' is not configured correctly. Unable to import '%s'", cmd_name, pkg_name)
        return

    if not hasattr(mod, "cli"):
        logger.error("Command %s is not configured correctly. It must expose a function called 'cli'", cmd_name)
        return

    return mod.cli
<SYSTEM_TASK:> Writes specified text to the underlying stream <END_TASK> <USER_TASK:> Description: def write(self, output):
    """
    Writes specified text to the underlying stream

    Parameters
    ----------
    output : bytes-like object
        Bytes to write
    """
    self._stream.write(output)

    if self._auto_flush:
        self._stream.flush()
<SYSTEM_TASK:> Get a workflow config that corresponds to the runtime provided. This method examines contents of the project <END_TASK> <USER_TASK:> Description: def get_workflow_config(runtime, code_dir, project_dir):
    """
    Get a workflow config that corresponds to the runtime provided. This method examines contents of the project
    and code directories to determine the most appropriate workflow for the given runtime. Currently the decision
    is based on the presence of a supported manifest file. For runtimes that have more than one workflow, we
    choose a workflow by examining ``code_dir`` followed by ``project_dir`` for presence of a supported manifest.

    Parameters
    ----------
    runtime : str
        The runtime of the config

    code_dir : str
        Directory where Lambda function code is present

    project_dir : str
        Root of the Serverless application project.

    Returns
    -------
    namedtuple(Capability)
        namedtuple that represents the Builder Workflow Config
    """

    selectors_by_runtime = {
        "python2.7": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "python3.6": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "python3.7": BasicWorkflowSelector(PYTHON_PIP_CONFIG),
        "nodejs4.3": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "nodejs6.10": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "nodejs8.10": BasicWorkflowSelector(NODEJS_NPM_CONFIG),
        "ruby2.5": BasicWorkflowSelector(RUBY_BUNDLER_CONFIG),
        "dotnetcore2.0": BasicWorkflowSelector(DOTNET_CLIPACKAGE_CONFIG),
        "dotnetcore2.1": BasicWorkflowSelector(DOTNET_CLIPACKAGE_CONFIG),

        # When a Maven builder exists, add it to this list so we can automatically choose a builder based on the
        # supported manifest
        "java8": ManifestWorkflowSelector([
            # Gradle builder needs custom executable paths to find `gradlew` binary
            JAVA_GRADLE_CONFIG._replace(executable_search_paths=[code_dir, project_dir]),
            JAVA_KOTLIN_GRADLE_CONFIG._replace(executable_search_paths=[code_dir, project_dir]),
            JAVA_MAVEN_CONFIG
        ]),
    }

    if runtime not in selectors_by_runtime:
        raise UnsupportedRuntimeException("'{}' runtime is not supported".format(runtime))

    selector = selectors_by_runtime[runtime]

    try:
        config = selector.get_config(code_dir, project_dir)
        return config
    except ValueError as ex:
        raise UnsupportedRuntimeException("Unable to find a supported build workflow for runtime '{}'. Reason: {}"
                                          .format(runtime, str(ex)))
<SYSTEM_TASK:> Given a workflow config, this method provides a boolean on whether the workflow can run within a container or not. <END_TASK> <USER_TASK:> Description: def supports_build_in_container(config):
    """
    Given a workflow config, this method provides a boolean on whether the workflow can run within a container
    or not.

    Parameters
    ----------
    config : namedtuple(Capability)
        Config specifying the particular build workflow

    Returns
    -------
    tuple(bool, str)
        True, if this workflow can be built inside a container. False, along with a reason message if it cannot be.
    """

    def _key(c):
        return str(c.language) + str(c.dependency_manager) + str(c.application_framework)

    # This information could have been bundled inside the Workflow Config object. But we do it this way because
    # ultimately the workflow's implementation dictates whether it can run within a container or not.
    # A "workflow config" is like a primary key to identify the workflow. So we use the config as a key in the
    # map to identify which workflows can support building within a container.
    unsupported = {
        _key(DOTNET_CLIPACKAGE_CONFIG): "We do not support building .NET Core Lambda functions within a container. "
                                        "Try building without the container. Most .NET Core functions will build "
                                        "successfully.",
    }

    thiskey = _key(config)
    if thiskey in unsupported:
        return False, unsupported[thiskey]

    return True, None
<SYSTEM_TASK:> Finds a configuration by looking for a manifest in the given directories. <END_TASK> <USER_TASK:> Description: def get_config(self, code_dir, project_dir):
    """
    Finds a configuration by looking for a manifest in the given directories.

    Returns
    -------
    samcli.lib.build.workflow_config.CONFIG
        A supported configuration if one is found

    Raises
    ------
    ValueError
        If none of the supported manifest files are found
    """

    # Search for the manifest first in the code directory and then in the project directory.
    # Search order is important here because we want to prefer a manifest present within the code directory over
    # a manifest present in the project directory.
    search_dirs = [code_dir, project_dir]
    LOG.debug("Looking for a supported build workflow in following directories: %s", search_dirs)

    for config in self.configs:

        if any([self._has_manifest(config, directory) for directory in search_dirs]):
            return config

    raise ValueError("None of the supported manifests '{}' were found in the following paths '{}'".format(
        [config.manifest_name for config in self.configs],
        search_dirs))
<SYSTEM_TASK:> YAML constructor to parse CloudFormation intrinsics. <END_TASK> <USER_TASK:> Description: def intrinsics_multi_constructor(loader, tag_prefix, node):
    """
    YAML constructor to parse CloudFormation intrinsics.
    This will return a dictionary with the key being the intrinsic name
    """

    # Get the actual tag name excluding the first exclamation
    tag = node.tag[1:]

    # Some intrinsic functions don't support the prefix "Fn::"
    prefix = "Fn::"
    if tag in ["Ref", "Condition"]:
        prefix = ""

    cfntag = prefix + tag

    if tag == "GetAtt" and isinstance(node.value, six.string_types):
        # ShortHand notation for !GetAtt accepts Resource.Attribute format
        # while the standard notation is to use an array
        # [Resource, Attribute]. Convert shorthand to standard format
        value = node.value.split(".", 1)

    elif isinstance(node, ScalarNode):
        # Value of this node is scalar
        value = loader.construct_scalar(node)

    elif isinstance(node, SequenceNode):
        # Value of this node is an array (Ex: [1,2])
        value = loader.construct_sequence(node)

    else:
        # Value of this node is a mapping (Ex: {foo: bar})
        value = loader.construct_mapping(node)

    return {cfntag: value}
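A runnable sketch of this constructor registered against PyYAML (Python 3 only, so `str` stands in for `six.string_types`):

import yaml

def intrinsics_multi_constructor(loader, tag_prefix, node):
    tag = node.tag[1:]
    prefix = "" if tag in ["Ref", "Condition"] else "Fn::"
    if tag == "GetAtt" and isinstance(node.value, str):
        value = node.value.split(".", 1)  # shorthand Resource.Attribute -> [Resource, Attribute]
    elif isinstance(node, yaml.ScalarNode):
        value = loader.construct_scalar(node)
    elif isinstance(node, yaml.SequenceNode):
        value = loader.construct_sequence(node)
    else:
        value = loader.construct_mapping(node)
    return {prefix + tag: value}

yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)

print(yaml.safe_load("Arn: !GetAtt MyFunction.Arn"))
# {'Arn': {'Fn::GetAtt': ['MyFunction', 'Arn']}}
print(yaml.safe_load("Name: !Ref MyFunction"))
# {'Name': {'Ref': 'MyFunction'}}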
<SYSTEM_TASK:> reads the encoding type from the event-mapping.json <END_TASK> <USER_TASK:> Description: def encode(self, tags, encoding, values_to_sub):
    """
    Reads the encoding type from the event-mapping.json
    and determines whether a value needs encoding

    Parameters
    ----------
    tags : dict
        The values of a particular event that can be substituted
        within the event json
    encoding : string
        String that helps navigate to the encoding field of the json
    values_to_sub : dict
        Key/value pairs that will be substituted into the json

    Returns
    -------
    values_to_sub : dict
        The encoded (if need be) values to substitute into the json.
    """

    for tag in tags:
        if tags[tag].get(encoding) != "None":
            if tags[tag].get(encoding) == "url":
                values_to_sub[tag] = self.url_encode(values_to_sub[tag])
            if tags[tag].get(encoding) == "base64":
                values_to_sub[tag] = self.base64_utf_encode(values_to_sub[tag])
    return values_to_sub
<SYSTEM_TASK:> opens the event json, substitutes the values in, and <END_TASK> <USER_TASK:> Description: def generate_event(self, service_name, event_type, values_to_sub):
    """
    Opens the event json, substitutes the values in, and
    returns the customized event json

    Parameters
    ----------
    service_name : string
        Name of the top level service (S3, apigateway, etc)
    event_type : string
        Name of the event underneath the service
    values_to_sub : dict
        Key/value pairs to substitute into the json

    Returns
    -------
    renderer.render() : string
        String version of the custom event json
    """

    # set variables for easy calling
    tags = self.event_mapping[service_name][event_type]['tags']
    values_to_sub = self.encode(tags, 'encoding', values_to_sub)

    # construct the path to the Events json file
    this_folder = os.path.dirname(os.path.abspath(__file__))
    file_name = self.event_mapping[service_name][event_type]['filename'] + ".json"
    file_path = os.path.join(this_folder, "events", service_name, file_name)

    # open the file
    with open(file_path) as f:
        data = json.load(f)

    data = json.dumps(data, indent=2)

    # return the substituted file
    return renderer.render(data, values_to_sub)
<SYSTEM_TASK:> Underline the input <END_TASK> <USER_TASK:> Description: def underline(self, msg):
    """Underline the input"""
    return click.style(msg, underline=True) if self.colorize else msg
<SYSTEM_TASK:> Internal helper method to add colors to input <END_TASK> <USER_TASK:> Description: def _color(self, msg, color):
    """Internal helper method to add colors to input"""
    kwargs = {'fg': color}
    return click.style(msg, **kwargs) if self.colorize else msg
<SYSTEM_TASK:> Parses out the Layer version from the arn <END_TASK> <USER_TASK:> Description: def _compute_layer_version(is_defined_within_template, arn):
    """
    Parses out the Layer version from the arn

    Parameters
    ----------
    is_defined_within_template : bool
        True if the resource is a Ref to a resource, otherwise False
    arn : str
        ARN of the Resource

    Returns
    -------
    int
        The Version of the LayerVersion
    """

    if is_defined_within_template:
        return None

    try:
        _, layer_version = arn.rsplit(':', 1)
        layer_version = int(layer_version)
    except ValueError:
        raise InvalidLayerVersionArn(arn + " is an Invalid Layer Arn.")

    return layer_version
<SYSTEM_TASK:> Computes a unique name based on the LayerVersion Arn <END_TASK> <USER_TASK:> Description: def _compute_layer_name(is_defined_within_template, arn):
    """
    Computes a unique name based on the LayerVersion Arn

    Format:
    <Name of the LayerVersion>-<Version of the LayerVersion>-<sha256 of the arn>

    Parameters
    ----------
    is_defined_within_template : bool
        True if the resource is a Ref to a resource, otherwise False
    arn : str
        ARN of the Resource

    Returns
    -------
    str
        A unique name that represents the LayerVersion
    """

    # If the Layer is defined in the template, the arn will represent the LogicalId of the LayerVersion Resource,
    # which does not require creating a name based on the arn.
    if is_defined_within_template:
        return arn

    try:
        _, layer_name, layer_version = arn.rsplit(':', 2)
    except ValueError:
        raise InvalidLayerVersionArn(arn + " is an Invalid Layer Arn.")

    return LayerVersion.LAYER_NAME_DELIMETER.join([layer_name,
                                                   layer_version,
                                                   hashlib.sha256(arn.encode('utf-8')).hexdigest()[0:10]])
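To make the naming scheme concrete, here is a standalone sketch assuming the delimiter (LAYER_NAME_DELIMETER, not shown in the snippet) is '-':

import hashlib

def compute_layer_name(arn):
    _, layer_name, layer_version = arn.rsplit(':', 2)
    digest = hashlib.sha256(arn.encode('utf-8')).hexdigest()[0:10]
    return '-'.join([layer_name, layer_version, digest])

arn = "arn:aws:lambda:us-west-2:111122223333:layer:my-layer:3"
print(compute_layer_name(arn))
# my-layer-3-<first 10 hex chars of sha256(arn)>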
<SYSTEM_TASK:> Context manager that makes a temporary directory and yields it name. Directory is deleted <END_TASK> <USER_TASK:> Description: def mkdir_temp(mode=0o755):
    """
    Context manager that makes a temporary directory and yields its name. The directory is deleted
    after the context exits.

    Parameters
    ----------
    mode : octal
        Permissions to apply to the directory. Defaults to '755' because we don't want directories
        to be world writable

    Returns
    -------
    str
        Path to the directory
    """

    temp_dir = None
    try:
        temp_dir = tempfile.mkdtemp()
        os.chmod(temp_dir, mode)

        yield temp_dir

    finally:
        if temp_dir:
            shutil.rmtree(temp_dir)
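The docstring implies a @contextmanager decorator that the stripped snippet does not show; a self-contained version with hypothetical usage:

import os
import shutil
import tempfile
from contextlib import contextmanager

@contextmanager
def mkdir_temp(mode=0o755):
    temp_dir = None
    try:
        temp_dir = tempfile.mkdtemp()
        os.chmod(temp_dir, mode)
        yield temp_dir
    finally:
        if temp_dir:
            shutil.rmtree(temp_dir)

with mkdir_temp(mode=0o700) as temp_dir:
    open(os.path.join(temp_dir, "scratch.txt"), "w").close()
# the directory and its contents are gone once the block exits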
<SYSTEM_TASK:> Download a list of layers to the cache <END_TASK> <USER_TASK:> Description: def download_all(self, layers, force=False):
    """
    Download a list of layers to the cache

    Parameters
    ----------
    layers : list(samcli.commands.local.lib.provider.Layer)
        List of Layers representing the layers to be downloaded
    force : bool
        True to download a layer even if it exists already on the system

    Returns
    -------
    List(Path)
        List of Paths to where the layers were cached
    """
    layer_dirs = []
    for layer in layers:
        layer_dirs.append(self.download(layer, force))

    return layer_dirs
<SYSTEM_TASK:> Download a given layer to the local cache. <END_TASK> <USER_TASK:> Description: def download(self, layer, force=False):
    """
    Download a given layer to the local cache.

    Parameters
    ----------
    layer : samcli.commands.local.lib.provider.Layer
        Layer representing the layer to be downloaded.
    force : bool
        True to download the layer even if it exists already on the system

    Returns
    -------
    Path
        Path object that represents where the layer is downloaded to
    """
    if layer.is_defined_within_template:
        LOG.info("%s is a local Layer in the template", layer.name)
        layer.codeuri = resolve_code_path(self.cwd, layer.codeuri)
        return layer

    # disabling no-member due to https://github.com/PyCQA/pylint/issues/1660
    layer_path = Path(self.layer_cache).joinpath(layer.name).resolve()  # pylint: disable=no-member
    is_layer_downloaded = self._is_layer_cached(layer_path)
    layer.codeuri = str(layer_path)

    if is_layer_downloaded and not force:
        LOG.info("%s is already cached. Skipping download", layer.arn)
        return layer

    layer_zip_path = layer.codeuri + '.zip'
    layer_zip_uri = self._fetch_layer_uri(layer)
    unzip_from_uri(layer_zip_uri,
                   layer_zip_path,
                   unzip_output_dir=layer.codeuri,
                   progressbar_label='Downloading {}'.format(layer.layer_arn))

    return layer
<SYSTEM_TASK:> Fetch the Layer Uri based on the LayerVersion Arn <END_TASK> <USER_TASK:> Description: def _fetch_layer_uri(self, layer):
    """
    Fetch the Layer Uri based on the LayerVersion Arn

    Parameters
    ----------
    layer : samcli.commands.local.lib.provider.LayerVersion
        LayerVersion to fetch

    Returns
    -------
    str
        The Uri to download the LayerVersion Content from

    Raises
    ------
    samcli.commands.local.cli_common.user_exceptions.NoCredentialsError
        When the Credentials given are not sufficient to call AWS Lambda
    """
    try:
        layer_version_response = self.lambda_client.get_layer_version(LayerName=layer.layer_arn,
                                                                      VersionNumber=layer.version)
    except NoCredentialsError:
        raise CredentialsRequired("Layers require credentials to download the layers locally.")
    except ClientError as e:
        error_code = e.response.get('Error').get('Code')
        error_exc = {
            'AccessDeniedException': CredentialsRequired(
                "Credentials provided are missing the lambda:GetLayerVersion policy that is needed to download "
                "the layer, or you do not have permission to download the layer"),
            'ResourceNotFoundException': ResourceNotFound("{} was not found.".format(layer.arn))
        }

        if error_code in error_exc:
            raise error_exc[error_code]

        # If it was not 'AccessDeniedException' or 'ResourceNotFoundException', re-raise
        raise e

    return layer_version_response.get("Content").get("Location")
<SYSTEM_TASK:> Create the Cache directory if it does not exist. <END_TASK> <USER_TASK:> Description: def _create_cache(layer_cache):
    """
    Create the cache directory if it does not exist.

    Parameters
    ----------
    layer_cache
        Directory where the layers should be cached

    Returns
    -------
    None
    """
    Path(layer_cache).mkdir(mode=0o700, parents=True, exist_ok=True)
<SYSTEM_TASK:> Runs the SAM Translator to determine if the template provided is valid. This is similar to running a <END_TASK> <USER_TASK:> Description: def is_valid(self):
    """
    Runs the SAM Translator to determine if the template provided is valid. This is similar to running a
    ChangeSet in CloudFormation for a SAM Template

    Raises
    ------
    InvalidSamDocumentException
        If the template is not valid, an InvalidSamDocumentException is raised
    """
    managed_policy_map = self.managed_policy_loader.load()

    sam_translator = Translator(managed_policy_map=managed_policy_map,
                                sam_parser=self.sam_parser,
                                plugins=[])

    self._replace_local_codeuri()

    try:
        template = sam_translator.translate(sam_template=self.sam_template, parameter_values={})
        LOG.debug("Translated template is:\n%s", yaml_dump(template))
    except InvalidDocumentException as e:
        raise InvalidSamDocumentException(
            functools.reduce(lambda message, error: message + ' ' + str(error), e.causes, str(e)))
<SYSTEM_TASK:> Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value' <END_TASK> <USER_TASK:> Description: def _update_to_s3_uri(property_key, resource_property_dict, s3_uri_value="s3://bucket/value"):
    """
    Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value'

    Note: the function will mutate the resource_property_dict that is passed in

    Parameters
    ----------
    property_key : str, required
        Key in the resource_property_dict
    resource_property_dict : dict, required
        Property dictionary of a Resource in the template to replace
    s3_uri_value : str, optional
        Value to update the value of the property_key to
    """
    uri_property = resource_property_dict.get(property_key, ".")

    # ignore if dict or already an S3 Uri
    if isinstance(uri_property, dict) or SamTemplateValidator.is_s3_uri(uri_property):
        return

    resource_property_dict[property_key] = s3_uri_value
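A minimal sketch of the substitution behavior, with a simple startswith check standing in for SamTemplateValidator.is_s3_uri:

def update_to_s3_uri(property_key, resource_property_dict, s3_uri_value="s3://bucket/value"):
    uri_property = resource_property_dict.get(property_key, ".")
    # ignore if dict or already an S3 Uri
    if isinstance(uri_property, dict) or str(uri_property).startswith("s3://"):
        return
    resource_property_dict[property_key] = s3_uri_value

props = {"CodeUri": "./hello_world"}
update_to_s3_uri("CodeUri", props)
print(props)  # {'CodeUri': 's3://bucket/value'}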
<SYSTEM_TASK:> Creates and returns a Formatter capable of nicely formatting Lambda function logs <END_TASK> <USER_TASK:> Description: def formatter(self):
    """
    Creates and returns a Formatter capable of nicely formatting Lambda function logs

    Returns
    -------
    LogsFormatter
    """
    formatter_chain = [
        LambdaLogMsgFormatters.colorize_errors,

        # Format JSON "before" highlighting the keywords. Otherwise, JSON will be invalid from all the
        # ANSI color codes and fail to pretty print
        JSONMsgFormatter.format_json,

        KeywordHighlighter(self._filter_pattern).highlight_keywords,
    ]

    return LogsFormatter(self.colored, formatter_chain)
<SYSTEM_TASK:> Name of the AWS CloudWatch Log Group that we will be querying. It generates the name based on the <END_TASK> <USER_TASK:> Description: def log_group_name(self):
    """
    Name of the AWS CloudWatch Log Group that we will be querying. It generates the name based on the
    Lambda Function name and stack name provided.

    Returns
    -------
    str
        Name of the CloudWatch Log Group
    """

    function_id = self._function_name
    if self._stack_name:
        function_id = self._get_resource_id_from_stack(self._cfn_client, self._stack_name, self._function_name)
        LOG.debug("Function with LogicalId '%s' in stack '%s' resolves to actual physical ID '%s'",
                  self._function_name, self._stack_name, function_id)

    return LogGroupProvider.for_lambda_function(function_id)
<SYSTEM_TASK:> Parse the time from the given string, convert to UTC, and return the datetime object <END_TASK> <USER_TASK:> Description: def _parse_time(time_str, property_name):
    """
    Parse the time from the given string, convert to UTC, and return the datetime object

    Parameters
    ----------
    time_str : str
        The time to parse

    property_name : str
        Name of the property where this time came from. Used in the exception raised if time is not parseable

    Returns
    -------
    datetime.datetime
        Parsed datetime object

    Raises
    ------
    samcli.commands.exceptions.UserException
        If the string cannot be parsed as a timestamp
    """
    if not time_str:
        return

    parsed = parse_date(time_str)
    if not parsed:
        raise UserException("Unable to parse the time provided by '{}'".format(property_name))

    return to_utc(parsed)
<SYSTEM_TASK:> Given the LogicalID of a resource, call AWS CloudFormation to get physical ID of the resource within <END_TASK> <USER_TASK:> Description: def _get_resource_id_from_stack(cfn_client, stack_name, logical_id):
    """
    Given the LogicalID of a resource, call AWS CloudFormation to get the physical ID of the resource within
    the specified stack.

    Parameters
    ----------
    cfn_client
        CloudFormation client provided by AWS SDK

    stack_name : str
        Name of the stack to query

    logical_id : str
        LogicalId of the resource

    Returns
    -------
    str
        Physical ID of the resource

    Raises
    ------
    samcli.commands.exceptions.UserException
        If the stack or resource does not exist
    """

    LOG.debug("Getting resource's PhysicalId from AWS CloudFormation stack. StackName=%s, LogicalId=%s",
              stack_name, logical_id)

    try:
        response = cfn_client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logical_id)

        LOG.debug("Response from AWS CloudFormation %s", response)
        return response["StackResourceDetail"]["PhysicalResourceId"]

    except botocore.exceptions.ClientError as ex:
        LOG.debug("Unable to fetch resource name from CloudFormation Stack: "
                  "StackName=%s, ResourceLogicalId=%s, Response=%s", stack_name, logical_id, ex.response)

        # The exception message already has a well formatted error message that we can surface to the user
        raise UserException(str(ex))
<SYSTEM_TASK:> Given a SAM template dictionary, return a cleaned copy of the template where SAM plugins have been run <END_TASK> <USER_TASK:> Description: def get_template(template_dict, parameter_overrides=None):
    """
    Given a SAM template dictionary, return a cleaned copy of the template where SAM plugins have been run
    and parameter values have been substituted.

    Parameters
    ----------
    template_dict : dict
        Unprocessed SAM template dictionary

    parameter_overrides : dict
        Optional dictionary of values for template parameters

    Returns
    -------
    dict
        Processed SAM template
    """
    template_dict = template_dict or {}
    if template_dict:
        template_dict = SamTranslatorWrapper(template_dict).run_plugins()

    template_dict = SamBaseProvider._resolve_parameters(template_dict, parameter_overrides)
    ResourceMetadataNormalizer.normalize(template_dict)
    return template_dict
<SYSTEM_TASK:> In the given template, apply parameter values to resolve intrinsic functions <END_TASK> <USER_TASK:> Description: def _resolve_parameters(template_dict, parameter_overrides):
    """
    In the given template, apply parameter values to resolve intrinsic functions

    Parameters
    ----------
    template_dict : dict
        SAM Template

    parameter_overrides : dict
        Values for template parameters provided by the user

    Returns
    -------
    dict
        Resolved SAM template
    """
    parameter_values = SamBaseProvider._get_parameter_values(template_dict, parameter_overrides)

    supported_intrinsics = {action.intrinsic_name: action() for action in SamBaseProvider._SUPPORTED_INTRINSICS}

    # Intrinsics resolver will mutate the original template
    return IntrinsicsResolver(parameters=parameter_values, supported_intrinsics=supported_intrinsics) \
        .resolve_parameter_refs(template_dict)
<SYSTEM_TASK:> Construct a final list of values for CloudFormation template parameters based on user-supplied values, <END_TASK> <USER_TASK:> Description: def _get_parameter_values(template_dict, parameter_overrides):
    """
    Construct a final list of values for CloudFormation template parameters based on user-supplied values,
    default values provided in the template, and sane defaults for pseudo-parameters.

    Parameters
    ----------
    template_dict : dict
        SAM template dictionary

    parameter_overrides : dict
        User-supplied values for CloudFormation template parameters

    Returns
    -------
    dict
        Values for template parameters to substitute into the template
    """

    default_values = SamBaseProvider._get_default_parameter_values(template_dict)

    # NOTE: Ordering of the following statements is important. It makes sure that any user-supplied values
    # override the defaults
    parameter_values = {}
    parameter_values.update(SamBaseProvider._DEFAULT_PSEUDO_PARAM_VALUES)
    parameter_values.update(default_values)
    parameter_values.update(parameter_overrides or {})

    return parameter_values
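The precedence comes entirely from the order of the dict.update() calls; a sketch with made-up values:

# Later update() calls win: pseudo-parameter defaults < template defaults < user overrides
pseudo_defaults = {"AWS::Region": "us-east-1", "AWS::AccountId": "123456789012"}
template_defaults = {"Stage": "dev"}
overrides = {"Stage": "prod"}

parameter_values = {}
parameter_values.update(pseudo_defaults)
parameter_values.update(template_defaults)
parameter_values.update(overrides)

print(parameter_values["Stage"])  # prod -- the user-supplied value wins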
<SYSTEM_TASK:> Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path. <END_TASK> <USER_TASK:> Description: def to_posix_path(code_path):
    """
    Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path.

    Parameters
    ----------
    code_path : str
        Directory in the host operating system that should be mounted within the container.

    Returns
    -------
    str
        Posix equivalent of absolute windows style path.

    Examples
    --------
    >>> to_posix_path('/Users/UserName/sam-app')
    /Users/UserName/sam-app
    >>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir')
    /c/Users/UserName/AppData/Local/Temp/mydir
    """

    return re.sub("^([A-Za-z])+:",
                  lambda match: posixpath.sep + match.group().replace(":", "").lower(),
                  pathlib.PureWindowsPath(code_path).as_posix()) if os.name == "nt" else code_path
<SYSTEM_TASK:> Returns the entry point for the container. The default value for the entry point is already configured in the <END_TASK> <USER_TASK:> Description: def _get_entry_point(runtime, debug_options=None):  # pylint: disable=too-many-branches
    """
    Returns the entry point for the container. The default value for the entry point is already configured in the
    Dockerfile. We override this default specifically when enabling debugging. The overridden entry point includes
    a few extra flags to start the runtime in debug mode.

    :param string runtime: Lambda function runtime name
    :param int debug_port: Optional, port for debugger
    :param string debug_args: Optional additional arguments passed to the entry point.
    :return list: List containing the new entry points. Each element in the list is one portion of the command.
        ie. if command is ``node index.js arg1 arg2``, then this list will be ["node", "index.js", "arg1", "arg2"]
    """

    if not debug_options:
        return None

    if runtime not in LambdaContainer._supported_runtimes():
        raise DebuggingNotSupported(
            "Debugging is not currently supported for {}".format(runtime))

    debug_port = debug_options.debug_port
    debug_args_list = []

    if debug_options.debug_args:
        debug_args_list = debug_options.debug_args.split(" ")

    # configs from: https://github.com/lambci/docker-lambda
    # to which we add the extra debug mode options
    entrypoint = None
    if runtime == Runtime.java8.value:

        entrypoint = ["/usr/bin/java"] \
            + debug_args_list \
            + [
                "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,quiet=y,address=" + str(debug_port),
                "-XX:MaxHeapSize=2834432k",
                "-XX:MaxMetaspaceSize=163840k",
                "-XX:ReservedCodeCacheSize=81920k",
                "-XX:+UseSerialGC",
                # "-Xshare:on", doesn't work in conjunction with the debug options
                "-XX:-TieredCompilation",
                "-Djava.net.preferIPv4Stack=true",
                "-jar",
                "/var/runtime/lib/LambdaJavaRTEntry-1.0.jar",
            ]

    elif runtime in (Runtime.dotnetcore20.value, Runtime.dotnetcore21.value):
        entrypoint = ["/var/lang/bin/dotnet"] \
            + debug_args_list \
            + [
                "/var/runtime/MockBootstraps.dll",
                "--debugger-spin-wait"
            ]

    elif runtime == Runtime.go1x.value:
        entrypoint = ["/var/runtime/aws-lambda-go"] \
            + debug_args_list \
            + [
                "-debug=true",
                "-delvePort=" + str(debug_port),
                "-delvePath=" + LambdaContainer._DEFAULT_CONTAINER_DBG_GO_PATH,
            ]

    elif runtime == Runtime.nodejs.value:
        entrypoint = ["/usr/bin/node"] \
            + debug_args_list \
            + [
                "--debug-brk=" + str(debug_port),
                "--nolazy",
                "--max-old-space-size=1229",
                "--max-new-space-size=153",
                "--max-executable-size=153",
                "--expose-gc",
                "/var/runtime/node_modules/awslambda/bin/awslambda",
            ]

    elif runtime == Runtime.nodejs43.value:
        entrypoint = ["/usr/local/lib64/node-v4.3.x/bin/node"] \
            + debug_args_list \
            + [
                "--debug-brk=" + str(debug_port),
                "--nolazy",
                "--max-old-space-size=2547",
                "--max-semi-space-size=150",
                "--max-executable-size=160",
                "--expose-gc",
                "/var/runtime/node_modules/awslambda/index.js",
            ]

    elif runtime == Runtime.nodejs610.value:
        entrypoint = ["/var/lang/bin/node"] \
            + debug_args_list \
            + [
                "--debug-brk=" + str(debug_port),
                "--nolazy",
                "--max-old-space-size=2547",
                "--max-semi-space-size=150",
                "--max-executable-size=160",
                "--expose-gc",
                "/var/runtime/node_modules/awslambda/index.js",
            ]

    elif runtime == Runtime.nodejs810.value:
        entrypoint = ["/var/lang/bin/node"] \
            + debug_args_list \
            + [
                # Node8 requires the host to be explicitly set in order to bind to localhost
                # instead of 127.0.0.1. https://github.com/nodejs/node/issues/11591#issuecomment-283110138
                "--inspect-brk=0.0.0.0:" + str(debug_port),
                "--nolazy",
                "--expose-gc",
                "--max-semi-space-size=150",
                "--max-old-space-size=2707",
                "/var/runtime/node_modules/awslambda/index.js",
            ]

    elif runtime == Runtime.python27.value:
        entrypoint = ["/usr/bin/python2.7"] \
            + debug_args_list \
            + [
                "/var/runtime/awslambda/bootstrap.py"
            ]

    elif runtime == Runtime.python36.value:
        entrypoint = ["/var/lang/bin/python3.6"] \
            + debug_args_list \
            + [
                "/var/runtime/awslambda/bootstrap.py"
            ]

    elif runtime == Runtime.python37.value:
        entrypoint = ["/var/rapid/init",
                      "--bootstrap",
                      "/var/lang/bin/python3.7",
                      "--bootstrap-args",
                      json.dumps(debug_args_list + ["/var/runtime/bootstrap"])
                      ]

    return entrypoint
<SYSTEM_TASK:> Extract all Implicit Apis (Apis defined through Serverless Function with an Api Event <END_TASK> <USER_TASK:> Description: def _extract_apis(self, resources):
    """
    Extract all Implicit Apis (Apis defined through Serverless Function with an Api Event)

    :param dict resources: Dictionary of SAM/CloudFormation resources
    :return: List of namedtuple Api
    """

    # Some properties like BinaryMediaTypes, Cors are set once on the resource but need to be applied to each API.
    # For Implicit APIs, which are defined on the Function resource, these properties
    # are defined on a AWS::Serverless::Api resource with logical ID "ServerlessRestApi". Therefore, no matter
    # if it is an implicit API or an explicit API, there is a corresponding resource of type AWS::Serverless::Api
    # that contains these additional configurations.
    #
    # We use this assumption in the following loop to collect information from resources of type
    # AWS::Serverless::Api. We also extract APIs from Serverless::Function resources and add them to the
    # corresponding Serverless::Api resource. This is all done using the ``collector``.

    collector = ApiCollector()

    for logical_id, resource in resources.items():

        resource_type = resource.get(SamApiProvider._TYPE)

        if resource_type == SamApiProvider._SERVERLESS_FUNCTION:
            self._extract_apis_from_function(logical_id, resource, collector)

        if resource_type == SamApiProvider._SERVERLESS_API:
            self._extract_from_serverless_api(logical_id, resource, collector)

    apis = SamApiProvider._merge_apis(collector)
    return self._normalize_apis(apis)
<SYSTEM_TASK:> Quite often, an API is defined both in Implicit and Explicit API definitions. In such cases, Implicit API <END_TASK> <USER_TASK:> Description: def _merge_apis(collector):
    """
    Quite often, an API is defined both in Implicit and Explicit API definitions. In such cases, the Implicit API
    definition wins because that conveys clear intent that the API is backed by a function. This method will merge
    two such lists of Apis with the right order of precedence. If a Path+Method combination is defined in both
    places, only one wins.

    Parameters
    ----------
    collector : ApiCollector
        Collector object that holds all the APIs specified in the template

    Returns
    -------
    list of samcli.commands.local.lib.provider.Api
        List of APIs obtained by combining both the input lists.
    """

    implicit_apis = []
    explicit_apis = []

    # Store implicit and explicit APIs separately in order to merge them later in the correct order
    # Implicit APIs are defined on a resource with logicalID ServerlessRestApi
    for logical_id, apis in collector:
        if logical_id == SamApiProvider._IMPLICIT_API_RESOURCE_ID:
            implicit_apis.extend(apis)
        else:
            explicit_apis.extend(apis)

    # We will use the "path+method" combination as the key to this dictionary and store the Api config for this
    # combination. If a path+method combo already exists, then overwrite it if and only if this is an implicit API
    all_apis = {}

    # By adding implicit APIs to the end of the list, they will be iterated last. If a configuration was already
    # written by an explicit API, it will be overridden by an implicit API, just by virtue of order of iteration.
    all_configs = explicit_apis + implicit_apis

    for config in all_configs:
        # Normalize the methods before de-duping to allow an ANY method in an implicit API to override a regular
        # HTTP method on an explicit API.
        for normalized_method in SamApiProvider._normalize_http_methods(config.method):
            key = config.path + normalized_method
            all_apis[key] = config

    result = set(all_apis.values())  # Assign to a set() to de-dupe

    LOG.debug("Removed duplicates from '%d' Explicit APIs and '%d' Implicit APIs to produce '%d' APIs",
              len(explicit_apis), len(implicit_apis), len(result))

    return list(result)
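The implicit-wins rule boils down to iteration order over a dict keyed by path+method; a simplified sketch with made-up entries:

# explicit APIs first, implicit APIs last, so implicit entries overwrite on key collision
explicit = [("/users", "GET", "explicit"), ("/users", "POST", "explicit")]
implicit = [("/users", "GET", "implicit")]

merged = {}
for path, method, source in explicit + implicit:
    merged[(path, method)] = source

print(merged)
# {('/users', 'GET'): 'implicit', ('/users', 'POST'): 'explicit'}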
<SYSTEM_TASK:> Normalize the APIs to use standard method name <END_TASK> <USER_TASK:> Description: def _normalize_apis(apis):
    """
    Normalize the APIs to use a standard method name

    Parameters
    ----------
    apis : list of samcli.commands.local.lib.provider.Api
        List of APIs to normalize

    Returns
    -------
    list of samcli.commands.local.lib.provider.Api
        List of normalized APIs
    """

    result = list()
    for api in apis:
        for normalized_method in SamApiProvider._normalize_http_methods(api.method):
            # _replace returns a copy of the namedtuple. This is the official way of creating copies of namedtuple
            result.append(api._replace(method=normalized_method))

    return result
<SYSTEM_TASK:> Fetches a list of APIs configured for this SAM Function resource. <END_TASK> <USER_TASK:> Description: def _extract_apis_from_function(logical_id, function_resource, collector):
    """
    Fetches a list of APIs configured for this SAM Function resource.

    Parameters
    ----------
    logical_id : str
        Logical ID of the resource

    function_resource : dict
        Contents of the function resource including its properties

    collector : ApiCollector
        Instance of the API collector where we will save the API information
    """

    resource_properties = function_resource.get("Properties", {})
    serverless_function_events = resource_properties.get(SamApiProvider._FUNCTION_EVENT, {})
    SamApiProvider._extract_apis_from_events(logical_id, serverless_function_events, collector)
<SYSTEM_TASK:> Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all <END_TASK> <USER_TASK:> Description: def _normalize_http_methods(http_method):
    """
    Normalizes Http Methods. Api Gateway allows an Http Method of ANY. This is a special verb to denote all
    supported Http Methods on Api Gateway.

    :param str http_method: Http method
    :yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
    """

    if http_method.upper() == 'ANY':
        for method in SamApiProvider._ANY_HTTP_METHODS:
            yield method.upper()
    else:
        yield http_method.upper()
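A standalone sketch of the generator, with an assumed list of verbs standing in for _ANY_HTTP_METHODS:

ANY_HTTP_METHODS = ["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"]  # assumed

def normalize_http_methods(http_method):
    if http_method.upper() == 'ANY':
        for method in ANY_HTTP_METHODS:
            yield method.upper()
    else:
        yield http_method.upper()

print(list(normalize_http_methods("get")))  # ['GET']
print(list(normalize_http_methods("any")))  # all seven verbs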
<SYSTEM_TASK:> Stores the given APIs tagged under the given logicalId <END_TASK> <USER_TASK:> Description: def add_apis(self, logical_id, apis):
    """
    Stores the given APIs tagged under the given logicalId

    Parameters
    ----------
    logical_id : str
        LogicalId of the AWS::Serverless::Api resource

    apis : list of samcli.commands.local.lib.provider.Api
        List of APIs available in this resource
    """
    properties = self._get_properties(logical_id)
    properties.apis.extend(apis)
<SYSTEM_TASK:> Stores the binary media type configuration for the API with given logical ID <END_TASK> <USER_TASK:> Description: def add_binary_media_types(self, logical_id, binary_media_types):
    """
    Stores the binary media type configuration for the API with given logical ID

    Parameters
    ----------
    logical_id : str
        LogicalId of the AWS::Serverless::Api resource

    binary_media_types : list of str
        List of binary media types supported by this resource
    """
    properties = self._get_properties(logical_id)

    binary_media_types = binary_media_types or []
    for value in binary_media_types:
        normalized_value = self._normalize_binary_media_type(value)

        # If the value is not supported, then just skip it.
        if normalized_value:
            properties.binary_media_types.add(normalized_value)
        else:
            LOG.debug("Unsupported data type of binary media type value of resource '%s'", logical_id)
<SYSTEM_TASK:> Returns the list of APIs in this resource along with other extra configuration such as binary media types, <END_TASK> <USER_TASK:> Description: def _get_apis_with_config(self, logical_id):
    """
    Returns the list of APIs in this resource along with other extra configuration such as binary media types,
    cors etc. Additional configuration is merged directly into the API data because these properties, although
    defined globally, actually apply to each API.

    Parameters
    ----------
    logical_id : str
        Logical ID of the resource to fetch data for

    Returns
    -------
    list of samcli.commands.local.lib.provider.Api
        List of APIs with additional configurations for the resource with given logicalId. If there are no APIs,
        then it returns an empty list
    """

    properties = self._get_properties(logical_id)

    # These configs need to be applied to each API
    binary_media = sorted(list(properties.binary_media_types))  # Also sort the list to keep the ordering stable
    cors = properties.cors

    result = []
    for api in properties.apis:
        # Create a copy of the API with updated configuration
        updated_api = api._replace(binary_media_types=binary_media,
                                   cors=cors)
        result.append(updated_api)

    return result
<SYSTEM_TASK:> Returns the properties of resource with given logical ID. If a resource is not found, then it returns an <END_TASK> <USER_TASK:> Description: def _get_properties(self, logical_id):
    """
    Returns the properties of the resource with the given logical ID. If the resource is not found, then it
    returns empty data.

    Parameters
    ----------
    logical_id : str
        Logical ID of the resource

    Returns
    -------
    samcli.commands.local.lib.sam_api_provider.ApiCollector.Properties
        Properties object for this resource.
    """

    if logical_id not in self.by_resource:
        self.by_resource[logical_id] = self.Properties(apis=[],
                                                       # Use a set() to be able to easily de-dupe
                                                       binary_media_types=set(),
                                                       cors=None)

    return self.by_resource[logical_id]
<SYSTEM_TASK:> Helper method to unzip a file to a temporary directory <END_TASK> <USER_TASK:> Description: def _unzip_file(filepath):
    """
    Helper method to unzip a file to a temporary directory

    :param string filepath: Absolute path to this file
    :return string: Path to the temporary directory where it was unzipped
    """

    temp_dir = tempfile.mkdtemp()

    if os.name == 'posix':
        os.chmod(temp_dir, 0o755)

    LOG.info("Decompressing %s", filepath)

    unzip(filepath, temp_dir)

    # The directory that Python returns might have symlinks. The Docker File sharing settings will not resolve
    # symlinks. Hence get the real path before passing to Docker.
    # Especially useful on Mac OSX, which returns /var/folders, a symlink to /private/var/folders that is a
    # part of Docker's Shared Files directories
    return os.path.realpath(temp_dir)
<SYSTEM_TASK:> Invoke the given Lambda function locally. <END_TASK> <USER_TASK:> Description: def invoke(self, function_config, event, debug_context=None, stdout=None, stderr=None):
    """
    Invoke the given Lambda function locally.

    ##### NOTE: THIS IS A LONG BLOCKING CALL #####
    This method will block until either the Lambda function completes or times out, which could be seconds.
    A blocking call will block the thread, preventing any other operations from happening. If you are using this
    method in a web-server or in contexts where your application needs to be responsive while the function is
    running, take care to invoke the function in a separate thread. Co-Routines or micro-threads might not perform
    well because the underlying implementation essentially blocks on a socket, which is synchronous.

    :param FunctionConfig function_config: Configuration of the function to invoke
    :param event: String input event passed to Lambda function
    :param DebugContext debug_context: Debugging context for the function (includes port, args, and path)
    :param io.IOBase stdout: Optional. IO Stream that receives stdout text from container.
    :param io.IOBase stderr: Optional. IO Stream that receives stderr text from container
    :raises Keyboard
    """
    timer = None

    # Update with event input
    environ = function_config.env_vars
    environ.add_lambda_event_body(event)

    # Generate a dictionary of environment variable key:values
    env_vars = environ.resolve()

    with self._get_code_dir(function_config.code_abs_path) as code_dir:
        container = LambdaContainer(function_config.runtime,
                                    function_config.handler,
                                    code_dir,
                                    function_config.layers,
                                    self._image_builder,
                                    memory_mb=function_config.memory,
                                    env_vars=env_vars,
                                    debug_options=debug_context)

        try:

            # Start the container. This call returns immediately after the container starts
            self._container_manager.run(container)

            # Setup appropriate interrupt - timeout or Ctrl+C - before function starts executing.
            #
            # Start the timer **after** the container starts. Container startup takes several seconds, only after
            # which our Lambda function code will run. Starting the timer is a reasonable approximation that the
            # function has started running.
            timer = self._configure_interrupt(function_config.name,
                                              function_config.timeout,
                                              container,
                                              bool(debug_context))

            # NOTE: BLOCKING METHOD
            # Block the thread waiting to fetch logs from the container. This method will return after the
            # container terminates, either successfully or killed by one of the interrupt handlers above.
            container.wait_for_logs(stdout=stdout, stderr=stderr)

        except KeyboardInterrupt:
            # When the user presses Ctrl+C, we receive a Keyboard Interrupt. This is especially common when the
            # container is in debugging mode. We have special handling of Ctrl+C, so handle KeyboardInterrupt
            # and swallow the exception. The ``finally`` block will also take care of cleaning it up.
            LOG.debug("Ctrl+C was pressed. Aborting Lambda execution")

        finally:
            # We will be done with execution if either the execution completed or an interrupt was fired.
            # Either way, clean up the timer and container.
            #
            # If we are in debugging mode, the timer would not have been created. So skip cleanup of the timer.
            if timer:
                timer.cancel()
            self._container_manager.stop(container)
<SYSTEM_TASK:> When a Lambda function is executing, we setup certain interrupt handlers to stop the execution. <END_TASK> <USER_TASK:> Description: def _configure_interrupt(self, function_name, timeout, container, is_debugging):
    """
    When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
    Usually, we setup a function timeout interrupt to kill the container after the timeout expires. If debugging
    though, we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the
    container.

    :param string function_name: Name of the function we are running
    :param integer timeout: Timeout in seconds
    :param samcli.local.docker.container.Container container: Instance of a container to terminate
    :param bool is_debugging: Are we debugging?
    :return threading.Timer: Timer object, if we setup a timer. None otherwise
    """

    def timer_handler():
        # NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
        LOG.info("Function '%s' timed out after %d seconds", function_name, timeout)
        self._container_manager.stop(container)

    def signal_handler(sig, frame):
        # NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
        LOG.info("Execution of function %s was interrupted", function_name)
        self._container_manager.stop(container)

    if is_debugging:
        LOG.debug("Setting up SIGTERM interrupt handler")
        signal.signal(signal.SIGTERM, signal_handler)
    else:
        # Start a timer, we'll use this to abort the function if it runs beyond the specified timeout
        LOG.debug("Starting a timer for %s seconds for function '%s'", timeout, function_name)
        timer = threading.Timer(timeout, timer_handler, ())
        timer.start()
        return timer
<SYSTEM_TASK:> Method to get a path to a directory where the Lambda function code is available. This directory will <END_TASK> <USER_TASK:> Description: def _get_code_dir(self, code_path): """ Method to get a path to a directory where the Lambda function code is available. This directory will be mounted directly inside the Docker container. This method handles a few different cases for ``code_path``: - ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory - ``code_path`` is a existent directory: Return this immediately - ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to detect the existence of the path :param string code_path: Path to the code. This could be pointing at a file or folder either on a local disk or in some network file system :return string: Directory containing Lambda function code. It can be mounted directly in container """
decompressed_dir = None try: if os.path.isfile(code_path) and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS): decompressed_dir = _unzip_file(code_path) yield decompressed_dir else: LOG.debug("Code %s is not a zip/jar file", code_path) yield code_path finally: if decompressed_dir: shutil.rmtree(decompressed_dir)
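Since this method ``yield``s, it is presumably decorated with ``contextlib.contextmanager`` so it can back the ``with self._get_code_dir(...)`` statement in the invoke flow above. A self-contained sketch of the same unzip-then-cleanup pattern, using ``tempfile`` and ``zipfile`` in place of the ``_unzip_file`` helper:

```
import shutil
import tempfile
import zipfile
from contextlib import contextmanager

@contextmanager
def code_dir(code_path):
    # Hypothetical simplification of _get_code_dir: unzip archives to a
    # temp directory and clean up afterwards; pass directories through untouched
    if code_path.endswith(".zip"):
        tmp_dir = tempfile.mkdtemp()
        try:
            with zipfile.ZipFile(code_path) as z:
                z.extractall(tmp_dir)
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir)
    else:
        yield code_path
```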
<SYSTEM_TASK:> Build the image if one is not already on the system that matches the runtime and layers <END_TASK> <USER_TASK:> Description: def build(self, runtime, layers): """ Build the image if one is not already on the system that matches the runtime and layers Parameters ---------- runtime str Name of the Lambda runtime layers list(samcli.commands.local.lib.provider.Layer) List of layers Returns ------- str The image to be used (REPOSITORY:TAG) """
base_image = "{}:{}".format(self._DOCKER_LAMBDA_REPO_NAME, runtime) # Don't build the image if there are no layers. if not layers: LOG.debug("Skipping building an image since no layers were defined") return base_image downloaded_layers = self.layer_downloader.download_all(layers, self.force_image_build) docker_image_version = self._generate_docker_image_version(downloaded_layers, runtime) image_tag = "{}:{}".format(self._SAM_CLI_REPO_NAME, docker_image_version) image_not_found = False try: self.docker_client.images.get(image_tag) except docker.errors.ImageNotFound: LOG.info("Image was not found.") image_not_found = True if self.force_image_build or \ image_not_found or \ any(layer.is_defined_within_template for layer in downloaded_layers): LOG.info("Building image...") self._build_image(base_image, image_tag, downloaded_layers) return image_tag
<SYSTEM_TASK:> Generate the Docker TAG that will be used to create the image <END_TASK> <USER_TASK:> Description: def _generate_docker_image_version(layers, runtime): """ Generate the Docker TAG that will be used to create the image Parameters ---------- layers list(samcli.commands.local.lib.provider.Layer) List of the layers runtime str Runtime of the image to create Returns ------- str String representing the TAG to be attached to the image """
# Docker has a concept of a TAG on an image. This, together with the REPOSITORY, identifies a
        # version of the image. We will produce a TAG for a combination of the runtime with the layers
        # specified in the template. This will allow reuse of the runtime and layers across different
        # functions that are defined. If two functions use the same runtime with the same layers (in the
        # same order), SAM CLI will only produce one image and use this image across both functions for invoke.
        return runtime + '-' + hashlib.sha256(
            "-".join([layer.name for layer in layers]).encode('utf-8')).hexdigest()[0:25]
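A quick illustration of how the tag is derived; the layer names here are made up:

```
import hashlib

layer_names = ["CommonUtilsLayer", "MyLocalLayer"]
digest = hashlib.sha256("-".join(layer_names).encode('utf-8')).hexdigest()[0:25]
tag = "python3.6" + "-" + digest
# Same layers in the same order always yield the same tag, so the image is reused
print(tag)
```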
<SYSTEM_TASK:>
Builds the image
<END_TASK>
<USER_TASK:>
Description:
def _build_image(self, base_image, docker_tag, layers):
        """
        Builds the image

        Parameters
        ----------
        base_image str
            Base Image to use for the new image
        docker_tag
            Docker tag (REPOSITORY:TAG) to use when building the image
        layers list(samcli.commands.local.lib.provider.Layer)
            List of Layers to be mounted into the image

        Returns
        -------
        None

        Raises
        ------
        samcli.commands.local.cli_common.user_exceptions.ImageBuildException
            When docker fails to build the image
        """
dockerfile_content = self._generate_dockerfile(base_image, layers) # Create dockerfile in the same directory of the layer cache dockerfile_name = "dockerfile_" + str(uuid.uuid4()) full_dockerfile_path = Path(self.layer_downloader.layer_cache, dockerfile_name) try: with open(str(full_dockerfile_path), "w") as dockerfile: dockerfile.write(dockerfile_content) tar_paths = {str(full_dockerfile_path): "Dockerfile"} for layer in layers: tar_paths[layer.codeuri] = '/' + layer.name with create_tarball(tar_paths) as tarballfile: try: self.docker_client.images.build(fileobj=tarballfile, custom_context=True, rm=True, tag=docker_tag, pull=not self.skip_pull_image) except (docker.errors.BuildError, docker.errors.APIError): LOG.exception("Failed to build Docker Image") raise ImageBuildException("Building Image failed.") finally: if full_dockerfile_path.exists(): full_dockerfile_path.unlink()
<SYSTEM_TASK:>
Generate the Dockerfile contents
<END_TASK>
<USER_TASK:>
Description:
def _generate_dockerfile(base_image, layers):
        """
        Generate the Dockerfile contents

        A generated Dockerfile will look like the following:
        ```
        FROM lambci/lambda:python3.6

        ADD --chown=sbx_user1051:495 layer1 /opt
        ADD --chown=sbx_user1051:495 layer2 /opt
        ```

        Parameters
        ----------
        base_image str
            Base Image to use for the new image
        layers list(samcli.commands.local.lib.provider.Layer)
            List of Layers to be mounted into the image

        Returns
        -------
        str
            String representing the Dockerfile contents for the image
        """
dockerfile_content = "FROM {}\n".format(base_image) for layer in layers: dockerfile_content = dockerfile_content + \ "ADD --chown=sbx_user1051:495 {} {}\n".format(layer.name, LambdaImage._LAYERS_DIR) return dockerfile_content
<SYSTEM_TASK:> Creates and starts the local API Gateway service. This method will block until the service is stopped <END_TASK> <USER_TASK:> Description: def start(self): """ Creates and starts the local API Gateway service. This method will block until the service is stopped manually using an interrupt. After the service is started, callers can make HTTP requests to the endpoint to invoke the Lambda function and receive a response. NOTE: This is a blocking call that will not return until the thread is interrupted with SIGINT/SIGTERM """
routing_list = self._make_routing_list(self.api_provider) if not routing_list: raise NoApisDefined("No APIs available in SAM template") static_dir_path = self._make_static_dir_path(self.cwd, self.static_dir) # We care about passing only stderr to the Service and not stdout because stdout from Docker container # contains the response to the API which is sent out as HTTP response. Only stderr needs to be printed # to the console or a log file. stderr from Docker container contains runtime logs and output of print # statements from the Lambda function service = LocalApigwService(routing_list=routing_list, lambda_runner=self.lambda_runner, static_dir=static_dir_path, port=self.port, host=self.host, stderr=self.stderr_stream) service.create() # Print out the list of routes that will be mounted self._print_routes(self.api_provider, self.host, self.port) LOG.info("You can now browse to the above endpoints to invoke your functions. " "You do not need to restart/reload SAM CLI while working on your functions, " "changes will be reflected instantly/automatically. You only need to restart " "SAM CLI if you update your AWS SAM template") service.run()
<SYSTEM_TASK:> Returns a list of routes to configure the Local API Service based on the APIs configured in the template. <END_TASK> <USER_TASK:> Description: def _make_routing_list(api_provider): """ Returns a list of routes to configure the Local API Service based on the APIs configured in the template. Parameters ---------- api_provider : samcli.commands.local.lib.sam_api_provider.SamApiProvider Returns ------- list(samcli.local.apigw.service.Route) List of Routes to pass to the service """
routes = [] for api in api_provider.get_all(): route = Route(methods=[api.method], function_name=api.function_name, path=api.path, binary_types=api.binary_media_types) routes.append(route) return routes
<SYSTEM_TASK:> Helper method to print the APIs that will be mounted. This method is purely for printing purposes. <END_TASK> <USER_TASK:> Description: def _print_routes(api_provider, host, port): """ Helper method to print the APIs that will be mounted. This method is purely for printing purposes. This method takes in a list of Route Configurations and prints out the Routes grouped by path. Grouping routes by Function Name + Path is the bulk of the logic. Example output: Mounting Product at http://127.0.0.1:3000/path1/bar [GET, POST, DELETE] Mounting Product at http://127.0.0.1:3000/path2/bar [HEAD] :param samcli.commands.local.lib.provider.ApiProvider api_provider: API Provider that can return a list of APIs :param string host: Host name where the service is running :param int port: Port number where the service is running :returns list(string): List of lines that were printed to the console. Helps with testing """
grouped_api_configs = {} for api in api_provider.get_all(): key = "{}-{}".format(api.function_name, api.path) config = grouped_api_configs.get(key, {}) config.setdefault("methods", []) config["function_name"] = api.function_name config["path"] = api.path config["methods"].append(api.method) grouped_api_configs[key] = config print_lines = [] for _, config in grouped_api_configs.items(): methods_str = "[{}]".format(', '.join(config["methods"])) output = "Mounting {} at http://{}:{}{} {}".format( config["function_name"], host, port, config["path"], methods_str) print_lines.append(output) LOG.info(output) return print_lines
<SYSTEM_TASK:> This method returns the path to the directory where static files are to be served from. If static_dir is a <END_TASK> <USER_TASK:> Description: def _make_static_dir_path(cwd, static_dir): """ This method returns the path to the directory where static files are to be served from. If static_dir is a relative path, then it is resolved to be relative to the current working directory. If no static directory is provided, or if the resolved directory does not exist, this method will return None :param string cwd: Current working directory relative to which we will resolve the static directory :param string static_dir: Path to the static directory :return string: Path to the static directory, if it exists. None, otherwise """
if not static_dir: return None static_dir_path = os.path.join(cwd, static_dir) if os.path.exists(static_dir_path): LOG.info("Mounting static files from %s at /", static_dir_path) return static_dir_path
<SYSTEM_TASK:> Validates the incoming request <END_TASK> <USER_TASK:> Description: def validate_request(): """ Validates the incoming request The following are invalid 1. The Request data is not json serializable 2. Query Parameters are sent to the endpoint 3. The Request Content-Type is not application/json 4. 'X-Amz-Log-Type' header is not 'None' 5. 'X-Amz-Invocation-Type' header is not 'RequestResponse' Returns ------- flask.Response If the request is not valid a flask Response is returned None: If the request passes all validation """
flask_request = request
        request_data = flask_request.get_data()

        if not request_data:
            request_data = b'{}'

        request_data = request_data.decode('utf-8')

        try:
            json.loads(request_data)
        except ValueError as json_error:
            LOG.debug("Request body was not json. Exception: %s", str(json_error))
            return LambdaErrorResponses.invalid_request_content(
                "Could not parse request body into json: No JSON object could be decoded")

        if flask_request.args:
            LOG.debug("Query parameters are in the request but not supported")
            return LambdaErrorResponses.invalid_request_content("Query Parameters are not supported")

        request_headers = CaseInsensitiveDict(flask_request.headers)

        log_type = request_headers.get('X-Amz-Log-Type', 'None')
        if log_type != 'None':
            LOG.debug("log-type: %s is not supported. Only 'None' is supported.", log_type)
            return LambdaErrorResponses.not_implemented_locally(
                "log-type: {} is not supported. Only 'None' is supported.".format(log_type))

        invocation_type = request_headers.get('X-Amz-Invocation-Type', 'RequestResponse')
        if invocation_type != 'RequestResponse':
            LOG.warning("invocation-type: %s is not supported. Only 'RequestResponse' is supported.",
                        invocation_type)
            return LambdaErrorResponses.not_implemented_locally(
                "invocation-type: {} is not supported. Only 'RequestResponse' is supported.".format(
                    invocation_type))
<SYSTEM_TASK:> Request Handler for the Local Lambda Invoke path. This method is responsible for understanding the incoming <END_TASK> <USER_TASK:> Description: def _invoke_request_handler(self, function_name): """ Request Handler for the Local Lambda Invoke path. This method is responsible for understanding the incoming request and invoking the Local Lambda Function Parameters ---------- function_name str Name of the function to invoke Returns ------- A Flask Response response object as if it was returned from Lambda """
flask_request = request request_data = flask_request.get_data() if not request_data: request_data = b'{}' request_data = request_data.decode('utf-8') stdout_stream = io.BytesIO() stdout_stream_writer = StreamWriter(stdout_stream, self.is_debugging) try: self.lambda_runner.invoke(function_name, request_data, stdout=stdout_stream_writer, stderr=self.stderr) except FunctionNotFound: LOG.debug('%s was not found to invoke.', function_name) return LambdaErrorResponses.resource_not_found(function_name) lambda_response, lambda_logs, is_lambda_user_error_response = \ LambdaOutputParser.get_lambda_output(stdout_stream) if self.stderr and lambda_logs: # Write the logs to stderr if available. self.stderr.write(lambda_logs) if is_lambda_user_error_response: return self.service_response(lambda_response, {'Content-Type': 'application/json', 'x-amz-function-error': 'Unhandled'}, 200) return self.service_response(lambda_response, {'Content-Type': 'application/json'}, 200)
<SYSTEM_TASK:>
Unzip the given file into the given directory while preserving file permissions in the process.
<END_TASK>
<USER_TASK:>
Description:
def unzip(zip_file_path, output_dir, permission=None):
    """
    Unzip the given file into the given directory while preserving file permissions in the process.

    Parameters
    ----------
    zip_file_path : str
        Path to the zip file

    output_dir : str
        Path to the directory where it should be unzipped

    permission : octal int
        Permission to set
    """
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref: # For each item in the zip file, extract the file and set permissions if available for file_info in zip_ref.infolist(): name = file_info.filename extracted_path = os.path.join(output_dir, name) zip_ref.extract(name, output_dir) _set_permissions(file_info, extracted_path) _override_permissions(extracted_path, permission) _override_permissions(output_dir, permission)
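A brief usage sketch; the paths here are made up:

```
# Extract preserving the permissions recorded in the archive
unzip("/tmp/function.zip", "/tmp/function-code")

# Or clamp every extracted file and the output directory to owner-only access,
# as the layer-download path below does
unzip("/tmp/layer.zip", "/tmp/layer-code", permission=0o700)
```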
<SYSTEM_TASK:> Sets permissions on the extracted file by reading the ``external_attr`` property of given file info. <END_TASK> <USER_TASK:> Description: def _set_permissions(zip_file_info, extracted_path): """ Sets permissions on the extracted file by reading the ``external_attr`` property of given file info. Parameters ---------- zip_file_info : zipfile.ZipInfo Object containing information about a file within a zip archive extracted_path : str Path where the file has been extracted to """
# Permission information is stored in the upper two bytes of ``external_attr``.
    permission = zip_file_info.external_attr >> 16

    if not permission:
        # Zips created on certain Windows machines, however, might not have any permission information on them.
        # Skip setting a permission on these files.
        LOG.debug("File %s in zipfile does not have permission information", zip_file_info.filename)
        return

    os.chmod(extracted_path, permission)
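The upper 16 bits of ``external_attr`` hold the Unix ``st_mode``; a small sketch showing how a zip writer would pack them and how this reader recovers them (the file name is illustrative):

```
import zipfile

# Writing: shift the Unix mode into the high 16 bits of external_attr
info = zipfile.ZipInfo("script.sh")
info.external_attr = 0o755 << 16

# Reading: shift back down to recover the permission bits
recovered = info.external_attr >> 16
assert oct(recovered) == "0o755"
```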
<SYSTEM_TASK:> Download the LayerVersion Zip to the Layer Pkg Cache <END_TASK> <USER_TASK:> Description: def unzip_from_uri(uri, layer_zip_path, unzip_output_dir, progressbar_label): """ Download the LayerVersion Zip to the Layer Pkg Cache Parameters ---------- uri str Uri to download from layer_zip_path str Path to where the content from the uri should be downloaded to unzip_output_dir str Path to unzip the zip to progressbar_label str Label to use in the Progressbar """
try: get_request = requests.get(uri, stream=True, verify=os.environ.get('AWS_CA_BUNDLE', True)) with open(layer_zip_path, 'wb') as local_layer_file: file_length = int(get_request.headers['Content-length']) with progressbar(file_length, progressbar_label) as p_bar: # Set the chunk size to None. Since we are streaming the request, None will allow the data to be # read as it arrives in whatever size the chunks are received. for data in get_request.iter_content(chunk_size=None): local_layer_file.write(data) p_bar.update(len(data)) # Forcefully set the permissions to 700 on files and directories. This is to ensure the owner # of the files is the only one that can read, write, or execute the files. unzip(layer_zip_path, unzip_output_dir, permission=0o700) finally: # Remove the downloaded zip file path_to_layer = Path(layer_zip_path) if path_to_layer.exists(): path_to_layer.unlink()
<SYSTEM_TASK:> Generates the key to the _dict_of_routes based on the list of methods <END_TASK> <USER_TASK:> Description: def _generate_route_keys(self, methods, path): """ Generates the key to the _dict_of_routes based on the list of methods and path supplied :param list(str) methods: List of HTTP Methods :param str path: Path off the base url :return: str of Path:Method """
for method in methods: yield self._route_key(method, path)
<SYSTEM_TASK:>
Parses the output from the Lambda Container
<END_TASK>
<USER_TASK:>
Description:
def _parse_lambda_output(lambda_output, binary_types, flask_request):
        """
        Parses the output from the Lambda Container

        :param str lambda_output: Output from Lambda Invoke
        :return: Tuple(int, dict, str) of the status code, headers, and body
        """
json_output = json.loads(lambda_output)

        if not isinstance(json_output, dict):
            raise TypeError("Lambda returned {} instead of dict".format(type(json_output)))

        status_code = json_output.get("statusCode") or 200
        headers = CaseInsensitiveDict(json_output.get("headers") or {})
        body = json_output.get("body") or "no data"
        is_base_64_encoded = json_output.get("isBase64Encoded") or False

        try:
            status_code = int(status_code)
            if status_code <= 0:
                raise ValueError
        except ValueError:
            message = "statusCode must be a positive int"
            LOG.error(message)
            raise TypeError(message)

        # If the customer doesn't define Content-Type default to application/json
        if "Content-Type" not in headers:
            LOG.info("No Content-Type given. Defaulting to 'application/json'.")
            headers["Content-Type"] = "application/json"

        if LocalApigwService._should_base64_decode_body(binary_types, flask_request, headers, is_base_64_encoded):
            body = base64.b64decode(body)

        return status_code, headers, body
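For reference, a minimal proxy-style Lambda output and the values this parser would produce for it (no binary types configured, so the body stays a string):

```
import json

lambda_output = json.dumps({
    "statusCode": 201,
    "headers": {"X-Request-Id": "abc123"},
    "body": json.dumps({"created": True}),
    "isBase64Encoded": False,
})
# _parse_lambda_output(lambda_output, binary_types=[], flask_request=...) would yield:
#   status_code -> 201
#   headers     -> {"X-Request-Id": "abc123", "Content-Type": "application/json"}
#   body        -> '{"created": true}'
```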
<SYSTEM_TASK:>
Whether or not the body should be decoded from Base64 to Binary
<END_TASK>
<USER_TASK:>
Description:
def _should_base64_decode_body(binary_types, flask_request, lambda_response_headers, is_base_64_encoded):
        """
        Whether or not the body should be decoded from Base64 to Binary

        Parameters
        ----------
        binary_types list(basestring)
            Corresponds to self.binary_types (i.e. what is parsed from the SAM Template)
        flask_request flask.request
            Flask request
        lambda_response_headers dict
            Headers Lambda returns
        is_base_64_encoded bool
            True if the body is Base64 encoded

        Returns
        -------
        True if the body from the request should be converted to binary, otherwise false
        """
best_match_mimetype = flask_request.accept_mimetypes.best_match([lambda_response_headers["Content-Type"]])
        is_best_match_in_binary_types = best_match_mimetype in binary_types or '*/*' in binary_types

        return best_match_mimetype and is_best_match_in_binary_types and is_base_64_encoded
<SYSTEM_TASK:> Helper method that constructs the Event to be passed to Lambda <END_TASK> <USER_TASK:> Description: def _construct_event(flask_request, port, binary_types): """ Helper method that constructs the Event to be passed to Lambda :param request flask_request: Flask Request :return: String representing the event """
identity = ContextIdentity(source_ip=flask_request.remote_addr)

        endpoint = PathConverter.convert_path_to_api_gateway(flask_request.endpoint)
        method = flask_request.method

        request_data = flask_request.get_data()

        request_mimetype = flask_request.mimetype

        is_base_64 = LocalApigwService._should_base64_encode(binary_types, request_mimetype)

        if is_base_64:
            LOG.debug("Incoming Request seems to be binary. Base64 encoding the request data before sending to "
                      "Lambda.")
            request_data = base64.b64encode(request_data)

        if request_data:
            # Flask does not parse/decode the request data. We should do it ourselves
            request_data = request_data.decode('utf-8')

        context = RequestContext(resource_path=endpoint,
                                 http_method=method,
                                 stage="prod",
                                 identity=identity,
                                 path=endpoint)

        event_headers = dict(flask_request.headers)
        event_headers["X-Forwarded-Proto"] = flask_request.scheme
        event_headers["X-Forwarded-Port"] = str(port)

        # APIGW does not support duplicate query string parameters. Flask gives query params as a list per key,
        # so we grab a single value per key; when multiple values were given, we keep the last one, to be
        # consistent with APIGW
        query_string_dict = LocalApigwService._query_string_params(flask_request)

        event = ApiGatewayLambdaEvent(http_method=method,
                                      body=request_data,
                                      resource=endpoint,
                                      request_context=context,
                                      query_string_params=query_string_dict,
                                      headers=event_headers,
                                      path_parameters=flask_request.view_args,
                                      path=flask_request.path,
                                      is_base_64_encoded=is_base_64)

        event_str = json.dumps(event.to_dict())
        LOG.debug("Constructed String representation of Event to invoke Lambda. Event: %s", event_str)
        return event_str
<SYSTEM_TASK:>
Constructs an APIGW equivalent query string dictionary
<END_TASK>
<USER_TASK:>
Description:
def _query_string_params(flask_request):
        """
        Constructs an APIGW equivalent query string dictionary

        Parameters
        ----------
        flask_request request
            Request from Flask

        Returns dict (str: str)
        -------
            Empty dict if no query params were in the request, otherwise a dictionary of key to value
        """
query_string_dict = {} # Flask returns an ImmutableMultiDict so convert to a dictionary that becomes # a dict(str: list) then iterate over for query_string_key, query_string_list in flask_request.args.lists(): query_string_value_length = len(query_string_list) # if the list is empty, default to empty string if not query_string_value_length: query_string_dict[query_string_key] = "" else: # APIGW doesn't handle duplicate query string keys, picking the last one in the list query_string_dict[query_string_key] = query_string_list[-1] return query_string_dict
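The last-one-wins behaviour can be checked directly against werkzeug's ``MultiDict``, the structure Flask exposes as ``request.args``:

```
from werkzeug.datastructures import MultiDict

args = MultiDict([("tag", "a"), ("tag", "b"), ("flag", "")])
result = {key: (values[-1] if values else "") for key, values in args.lists()}
print(result)  # {'tag': 'b', 'flag': ''}
```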
<SYSTEM_TASK:>
Find the Lambda function with given name and invoke it. Pass the given event to the function and return
<END_TASK>
<USER_TASK:>
Description:
def invoke(self, function_name, event, stdout=None, stderr=None):
        """
        Find the Lambda function with given name and invoke it. Pass the given event to the function and return
        response through the given streams.

        This function will block until either the function completes or times out.

        Parameters
        ----------
        function_name str
            Name of the Lambda function to invoke
        event str
            Event data passed to the function. Must be a valid JSON String.
        stdout samcli.lib.utils.stream_writer.StreamWriter
            Stream writer to write the output of the Lambda function to.
        stderr samcli.lib.utils.stream_writer.StreamWriter
            Stream writer to write the Lambda runtime logs to.

        Raises
        ------
        FunctionNotFound
            When we cannot find a function with the given name
        """
# Generate the correct configuration based on given inputs
        function = self.provider.get(function_name)

        if not function:
            all_functions = [f.name for f in self.provider.get_all()]
            available_function_message = "{} not found. Possible options in your template: {}" \
                .format(function_name, all_functions)
            LOG.info(available_function_message)
            raise FunctionNotFound("Unable to find a Function with name '{}'".format(function_name))

        LOG.debug("Found one Lambda function with name '%s'", function_name)

        LOG.info("Invoking %s (%s)", function.handler, function.runtime)
        config = self._get_invoke_config(function)

        # Invoke the function
        self.local_runtime.invoke(config, event, debug_context=self.debug_context, stdout=stdout, stderr=stderr)
<SYSTEM_TASK:> Returns invoke configuration to pass to Lambda Runtime to invoke the given function <END_TASK> <USER_TASK:> Description: def _get_invoke_config(self, function): """ Returns invoke configuration to pass to Lambda Runtime to invoke the given function :param samcli.commands.local.lib.provider.Function function: Lambda function to generate the configuration for :return samcli.local.lambdafn.config.FunctionConfig: Function configuration to pass to Lambda runtime """
env_vars = self._make_env_vars(function) code_abs_path = resolve_code_path(self.cwd, function.codeuri) LOG.debug("Resolved absolute path to code is %s", code_abs_path) function_timeout = function.timeout # The Runtime container handles timeout inside the container. When debugging with short timeouts, this can # cause the container execution to stop. When in debug mode, we set the timeout in the container to a max 10 # hours. This will ensure the container doesn't unexpectedly stop while debugging function code if self.is_debugging(): function_timeout = self.MAX_DEBUG_TIMEOUT return FunctionConfig(name=function.name, runtime=function.runtime, handler=function.handler, code_abs_path=code_abs_path, layers=function.layers, memory=function.memory, timeout=function_timeout, env_vars=env_vars)
<SYSTEM_TASK:> Returns the environment variables configuration for this function <END_TASK> <USER_TASK:> Description: def _make_env_vars(self, function): """Returns the environment variables configuration for this function Parameters ---------- function : samcli.commands.local.lib.provider.Function Lambda function to generate the configuration for Returns ------- samcli.local.lambdafn.env_vars.EnvironmentVariables Environment variable configuration for this function Raises ------ samcli.commands.local.lib.exceptions.OverridesNotWellDefinedError If the environment dict is in the wrong format to process environment vars """
name = function.name variables = None if function.environment and isinstance(function.environment, dict) and "Variables" in function.environment: variables = function.environment["Variables"] else: LOG.debug("No environment variables found for function '%s'", name) # This could either be in standard format, or a CloudFormation parameter file format. # # Standard format is {FunctionName: {key:value}, FunctionName: {key:value}} # CloudFormation parameter file is {"Parameters": {key:value}} for env_var_value in self.env_vars_values.values(): if not isinstance(env_var_value, dict): reason = """ Environment variables must be in either CloudFormation parameter file format or in {FunctionName: {key:value}} JSON pairs """ LOG.debug(reason) raise OverridesNotWellDefinedError(reason) if "Parameters" in self.env_vars_values: LOG.debug("Environment variables overrides data is in CloudFormation parameter file format") # CloudFormation parameter file format overrides = self.env_vars_values["Parameters"] else: # Standard format LOG.debug("Environment variables overrides data is standard format") overrides = self.env_vars_values.get(name, None) shell_env = os.environ aws_creds = self.get_aws_creds() return EnvironmentVariables(function.memory, function.timeout, function.handler, variables=variables, shell_env_values=shell_env, override_values=overrides, aws_creds=aws_creds)
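Sketched as Python literals, the two shapes this method accepts for the overrides data (function and variable names are made up):

```
# Standard format: top-level keys are function names
standard = {
    "HelloWorldFunction": {"TABLE_NAME": "local-table"}
}

# CloudFormation parameter file format: a single "Parameters" mapping
# applied across functions
parameter_file = {
    "Parameters": {"TABLE_NAME": "local-table"}
}
```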
<SYSTEM_TASK:> Returns AWS credentials obtained from the shell environment or given profile <END_TASK> <USER_TASK:> Description: def get_aws_creds(self): """ Returns AWS credentials obtained from the shell environment or given profile :return dict: A dictionary containing credentials. This dict has the structure {"region": "", "key": "", "secret": "", "sessiontoken": ""}. If credentials could not be resolved, this returns None """
result = {} # to pass command line arguments for region & profile to setup boto3 default session if boto3.DEFAULT_SESSION: session = boto3.DEFAULT_SESSION else: session = boto3.session.Session() profile_name = session.profile_name if session else None LOG.debug("Loading AWS credentials from session with profile '%s'", profile_name) if not session: return result # Load the credentials from profile/environment creds = session.get_credentials() if not creds: # If we were unable to load credentials, then just return empty. We will use the default return result # After loading credentials, region name might be available here. if hasattr(session, 'region_name') and session.region_name: result["region"] = session.region_name # Only add the key, if its value is present if hasattr(creds, 'access_key') and creds.access_key: result["key"] = creds.access_key if hasattr(creds, 'secret_key') and creds.secret_key: result["secret"] = creds.secret_key if hasattr(creds, 'token') and creds.token: result["sessiontoken"] = creds.token return result
<SYSTEM_TASK:>
Constructs a dictionary representation of the Identity Object to be used in serializing to JSON
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
        """
        Constructs a dictionary representation of the Identity Object to be used in serializing to JSON

        :return: dict representing the object
        """
json_dict = {"apiKey": self.api_key, "userArn": self.user_arn, "cognitoAuthenticationType": self.cognito_authentication_type, "caller": self.caller, "userAgent": self.user_agent, "user": self.user, "cognitoIdentityPoolId": self.cognito_identity_pool_id, "cognitoAuthenticationProvider": self.cognito_authentication_provider, "sourceIp": self.source_ip, "accountId": self.account_id } return json_dict
<SYSTEM_TASK:>
Constructs a dictionary representation of the RequestContext Object to be used in serializing to JSON
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
        """
        Constructs a dictionary representation of the RequestContext Object to be used in serializing to JSON

        :return: dict representing the object
        """
identity_dict = {} if self.identity: identity_dict = self.identity.to_dict() json_dict = {"resourceId": self.resource_id, "apiId": self.api_id, "resourcePath": self.resource_path, "httpMethod": self.http_method, "requestId": self.request_id, "accountId": self.account_id, "stage": self.stage, "identity": identity_dict, "extendedRequestId": self.extended_request_id, "path": self.path } return json_dict
<SYSTEM_TASK:>
Constructs a dictionary representation of the ApiGatewayLambdaEvent Object to be used in serializing to JSON
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
        """
        Constructs a dictionary representation of the ApiGatewayLambdaEvent Object to be used in serializing
        to JSON

        :return: dict representing the object
        """
request_context_dict = {} if self.request_context: request_context_dict = self.request_context.to_dict() json_dict = {"httpMethod": self.http_method, "body": self.body if self.body else None, "resource": self.resource, "requestContext": request_context_dict, "queryStringParameters": dict(self.query_string_params) if self.query_string_params else None, "headers": dict(self.headers) if self.headers else None, "pathParameters": dict(self.path_parameters) if self.path_parameters else None, "stageVariables": dict(self.stage_variables) if self.stage_variables else None, "path": self.path, "isBase64Encoded": self.is_base_64_encoded } return json_dict
<SYSTEM_TASK:> Checks if Docker daemon is running. This is required for us to invoke the function locally <END_TASK> <USER_TASK:> Description: def is_docker_reachable(self): """ Checks if Docker daemon is running. This is required for us to invoke the function locally Returns ------- bool True, if Docker is available, False otherwise """
try:
            self.docker_client.ping()
            return True

        # When Docker is not installed, a requests.exceptions.ConnectionError is thrown.
        except (docker.errors.APIError, requests.exceptions.ConnectionError):
            LOG.debug("Docker is not reachable", exc_info=True)
            return False
<SYSTEM_TASK:>
Create and run a Docker container based on the given configuration.
<END_TASK>
<USER_TASK:>
Description:
def run(self, container, input_data=None, warm=False):
        """
        Create and run a Docker container based on the given configuration.

        :param samcli.local.docker.container.Container container: Container to create and run
        :param input_data: Optional. Input data sent to the container through container's stdin.
        :param bool warm: Indicates if an existing container can be reused. Defaults to False, i.e. a new
            container will be created for every request.
        :raises DockerImagePullFailedException: If the Docker image was not available in the server
        """
if warm:
            raise ValueError("The facility to invoke warm container does not exist")

        image_name = container.image

        is_image_local = self.has_image(image_name)

        # Skip pulling a new image if: a) the image name starts with samcli/lambda, OR
        # b) the image is already available locally AND we were asked to skip pulling it
        if (is_image_local and self.skip_pull_image) or image_name.startswith('samcli/lambda'):
            LOG.info("Requested to skip pulling images ...\n")
        else:
            try:
                self.pull_image(image_name)
            except DockerImagePullFailedException:
                if not is_image_local:
                    raise DockerImagePullFailedException(
                        "Could not find {} image locally and failed to pull it from docker.".format(image_name))

                LOG.info(
                    "Failed to download a new %s image. Invoking with the already downloaded image.", image_name)

        if not container.is_created():
            # Create the container first before running.
            # Create the container in appropriate Docker network
            container.network_id = self.docker_network_id
            container.create()

        container.start(input_data=input_data)
<SYSTEM_TASK:> Ask Docker to pull the container image with given name. <END_TASK> <USER_TASK:> Description: def pull_image(self, image_name, stream=None): """ Ask Docker to pull the container image with given name. Parameters ---------- image_name str Name of the image stream samcli.lib.utils.stream_writer.StreamWriter Optional stream writer to output to. Defaults to stderr Raises ------ DockerImagePullFailedException If the Docker image was not available in the server """
stream_writer = stream or StreamWriter(sys.stderr) try: result_itr = self.docker_client.api.pull(image_name, stream=True, decode=True) except docker.errors.APIError as ex: LOG.debug("Failed to download image with name %s", image_name) raise DockerImagePullFailedException(str(ex)) # io streams, especially StringIO, work only with unicode strings stream_writer.write(u"\nFetching {} Docker container image...".format(image_name)) # Each line contains information on progress of the pull. Each line is a JSON string for _ in result_itr: # For every line, print a dot to show progress stream_writer.write(u'.') stream_writer.flush() # We are done. Go to the next line stream_writer.write(u"\n")
<SYSTEM_TASK:> Is the container image with given name available? <END_TASK> <USER_TASK:> Description: def has_image(self, image_name): """ Is the container image with given name available? :param string image_name: Name of the image :return bool: True, if image is available. False, otherwise """
try: self.docker_client.images.get(image_name) return True except docker.errors.ImageNotFound: return False
<SYSTEM_TASK:> Publish the application based on command line inputs. <END_TASK> <USER_TASK:> Description: def do_cli(ctx, template, semantic_version): """Publish the application based on command line inputs."""
try: template_data = get_template_data(template) except ValueError as ex: click.secho("Publish Failed", fg='red') raise UserException(str(ex)) # Override SemanticVersion in template metadata when provided in command input if semantic_version and SERVERLESS_REPO_APPLICATION in template_data.get(METADATA, {}): template_data.get(METADATA).get(SERVERLESS_REPO_APPLICATION)[SEMANTIC_VERSION] = semantic_version try: publish_output = publish_application(template_data) click.secho("Publish Succeeded", fg="green") click.secho(_gen_success_message(publish_output)) except InvalidS3UriError: click.secho("Publish Failed", fg='red') raise UserException( "Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application " "artifacts to S3 by packaging the template. See more details in {}".format(SAM_PACKAGE_DOC)) except ServerlessRepoError as ex: click.secho("Publish Failed", fg='red') LOG.debug("Failed to publish application to serverlessrepo", exc_info=True) error_msg = '{}\nPlease follow the instructions in {}'.format(str(ex), SAM_PUBLISH_DOC) raise UserException(error_msg) application_id = publish_output.get('application_id') _print_console_link(ctx.region, application_id)
<SYSTEM_TASK:> Generate detailed success message for published applications. <END_TASK> <USER_TASK:> Description: def _gen_success_message(publish_output): """ Generate detailed success message for published applications. Parameters ---------- publish_output : dict Output from serverlessrepo publish_application Returns ------- str Detailed success message """
application_id = publish_output.get('application_id') details = json.dumps(publish_output.get('details'), indent=2) if CREATE_APPLICATION in publish_output.get('actions'): return "Created new application with the following metadata:\n{}".format(details) return 'The following metadata of application "{}" has been updated:\n{}'.format(application_id, details)
<SYSTEM_TASK:> Print link for the application in AWS Serverless Application Repository console. <END_TASK> <USER_TASK:> Description: def _print_console_link(region, application_id): """ Print link for the application in AWS Serverless Application Repository console. Parameters ---------- region : str AWS region name application_id : str The Amazon Resource Name (ARN) of the application """
if not region: region = boto3.Session().region_name console_link = SERVERLESSREPO_CONSOLE_URL.format(region, application_id.replace('/', '~')) msg = "Click the link below to view your application in AWS console:\n{}".format(console_link) click.secho(msg, fg="yellow")
<SYSTEM_TASK:> Helper function to create a Lambda Failure Response <END_TASK> <USER_TASK:> Description: def lambda_failure_response(*args): """ Helper function to create a Lambda Failure Response :return: A Flask Response """
response_data = jsonify(ServiceErrorResponses._LAMBDA_FAILURE) return make_response(response_data, ServiceErrorResponses.HTTP_STATUS_CODE_502)
<SYSTEM_TASK:> Constructs a Flask Response for when a Lambda function is not found for an endpoint <END_TASK> <USER_TASK:> Description: def lambda_not_found_response(*args): """ Constructs a Flask Response for when a Lambda function is not found for an endpoint :return: a Flask Response """
response_data = jsonify(ServiceErrorResponses._NO_LAMBDA_INTEGRATION) return make_response(response_data, ServiceErrorResponses.HTTP_STATUS_CODE_502)
<SYSTEM_TASK:> Creates a progressbar <END_TASK> <USER_TASK:> Description: def progressbar(length, label): """ Creates a progressbar Parameters ---------- length int Length of the ProgressBar label str Label to give to the progressbar Returns ------- click.progressbar Progressbar """
return click.progressbar(length=length, label=label, show_pos=True)
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def _unquote(value):
    r"""
    Removes wrapping double quotes and unescapes '\ ' sequences. They are usually added to preserve spaces when
    passing a value through a shell.

    Examples
    --------
    >>> _unquote('val\ ue')
    val ue

    >>> _unquote("hel\ lo")
    hel lo

    Parameters
    ----------
    value : str
        Input to unquote

    Returns
    -------
    Unquoted string
    """
if value and (value[0] == value[-1] == '"'): # Remove quotes only if the string is wrapped in quotes value = value.strip('"') return value.replace("\\ ", " ").replace('\\"', '"')
<SYSTEM_TASK:> Constructs a Flask Response from the body, headers, and status_code. <END_TASK> <USER_TASK:> Description: def service_response(body, headers, status_code): """ Constructs a Flask Response from the body, headers, and status_code. :param str body: Response body as a string :param dict headers: headers for the response :param int status_code: status_code for response :return: Flask Response """
response = Response(body) response.headers = headers response.status_code = status_code return response
<SYSTEM_TASK:>
This method will read the given stream and return the response from the Lambda function separated out
<END_TASK>
<USER_TASK:>
Description:
def get_lambda_output(stdout_stream):
        """
        This method will read the given stream and return the response from the Lambda function, separated out
        from any log statements it might have written. Logs end up in the stdout stream if the Lambda function
        wrote directly to stdout using System.out.println or equivalents.

        Parameters
        ----------
        stdout_stream : io.BaseIO
            Stream to fetch data from

        Returns
        -------
        str
            String data containing response from Lambda function
        str
            String data containing log statements, if any.
        bool
            If the response is an error/exception from the container
        """
# We only want the last line of stdout, because it's possible that
        # the function may have written directly to stdout using
        # System.out.println or similar, before docker-lambda output the result
        stdout_data = stdout_stream.getvalue().rstrip(b'\n')

        # Usually the output is just one line and contains the response as a JSON string, but if the Lambda
        # function wrote anything directly to stdout, there will be additional lines. So just extract the last
        # line as the response and everything else as log output.
        lambda_response = stdout_data
        lambda_logs = None

        last_line_position = stdout_data.rfind(b'\n')
        if last_line_position >= 0:
            # So there are multiple lines. Separate them out.
            # Everything but the last line are logs
            lambda_logs = stdout_data[:last_line_position]
            # Last line is the Lambda response. Make sure to strip() so we get rid of extra whitespace & newlines
            lambda_response = stdout_data[last_line_position:].strip()

        lambda_response = lambda_response.decode('utf-8')

        # When the Lambda function returns an Error/Exception, the output is added to the stdout of the
        # container. From our perspective, the container returned some value, which is not always true. Since
        # the output is the only information we have, we need to inspect it to understand whether the container
        # returned some data or raised an error
        is_lambda_user_error_response = LambdaOutputParser.is_lambda_error_response(lambda_response)

        return lambda_response, lambda_logs, is_lambda_user_error_response
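A worked example of the separation, assuming the function logged one line before docker-lambda wrote the JSON response:

```
import io

stdout = io.BytesIO(b'START RequestId: 1234 Version: $LATEST\n{"statusCode": 200}\n')
response, logs, is_error = LambdaOutputParser.get_lambda_output(stdout)
# response -> '{"statusCode": 200}'
# logs     -> b'START RequestId: 1234 Version: $LATEST'
# is_error -> depends on is_lambda_error_response's heuristics (not shown here)
```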
<SYSTEM_TASK:> Build the entire application <END_TASK> <USER_TASK:> Description: def build(self): """ Build the entire application Returns ------- dict Returns the path to where each resource was built as a map of resource's LogicalId to the path string """
result = {} for lambda_function in self._functions_to_build: LOG.info("Building resource '%s'", lambda_function.name) result[lambda_function.name] = self._build_function(lambda_function.name, lambda_function.codeuri, lambda_function.runtime) return result
<SYSTEM_TASK:>
Given the path to built artifacts, update the template to point appropriate resource CodeUris to the artifacts
<END_TASK>
<USER_TASK:>
Description:
def update_template(self, template_dict, original_template_path, built_artifacts):
        """
        Given the path to built artifacts, update the template to point appropriate resource CodeUris to the
        artifacts folder

        Parameters
        ----------
        template_dict
        original_template_path : str
            Path to the original template file; artifact paths are written relative to its directory
        built_artifacts : dict
            Map of LogicalId of a resource to the path where the built artifacts for this resource live

        Returns
        -------
        dict
            Updated template
        """
original_dir = os.path.dirname(original_template_path) for logical_id, resource in template_dict.get("Resources", {}).items(): if logical_id not in built_artifacts: # this resource was not built. So skip it continue # Artifacts are written relative to the template because it makes the template portable # Ex: A CI/CD pipeline build stage could zip the output folder and pass to a # package stage running on a different machine artifact_relative_path = os.path.relpath(built_artifacts[logical_id], original_dir) resource_type = resource.get("Type") properties = resource.setdefault("Properties", {}) if resource_type == "AWS::Serverless::Function": properties["CodeUri"] = artifact_relative_path if resource_type == "AWS::Lambda::Function": properties["Code"] = artifact_relative_path return template_dict
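A before/after sketch of the rewrite; the logical ID and paths are illustrative:

```
template = {
    "Resources": {
        "HelloWorldFunction": {
            "Type": "AWS::Serverless::Function",
            "Properties": {"CodeUri": "hello_world/"},
        }
    }
}
built_artifacts = {"HelloWorldFunction": "/proj/.aws-sam/build/HelloWorldFunction"}
# update_template(template, "/proj/template.yaml", built_artifacts) would set
# Properties.CodeUri to the relative path ".aws-sam/build/HelloWorldFunction"
```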
<SYSTEM_TASK:> Given the function information, this method will build the Lambda function. Depending on the configuration <END_TASK> <USER_TASK:> Description: def _build_function(self, function_name, codeuri, runtime): """ Given the function information, this method will build the Lambda function. Depending on the configuration it will either build the function in process or by spinning up a Docker container. Parameters ---------- function_name : str Name or LogicalId of the function codeuri : str Path to where the code lives runtime : str AWS Lambda function runtime Returns ------- str Path to the location where built artifacts are available """
# Create the arguments to pass to the builder # Code is always relative to the given base directory. code_dir = str(pathlib.Path(self._base_dir, codeuri).resolve()) config = get_workflow_config(runtime, code_dir, self._base_dir) # artifacts directory will be created by the builder artifacts_dir = str(pathlib.Path(self._build_dir, function_name)) with osutils.mkdir_temp() as scratch_dir: manifest_path = self._manifest_path_override or os.path.join(code_dir, config.manifest_name) # By default prefer to build in-process for speed build_method = self._build_function_in_process if self._container_manager: build_method = self._build_function_on_container return build_method(config, code_dir, artifacts_dir, scratch_dir, manifest_path, runtime)
<SYSTEM_TASK:> Provides paths to directories within the container that is required by the builder <END_TASK> <USER_TASK:> Description: def _get_container_dirs(source_dir, manifest_dir): """ Provides paths to directories within the container that is required by the builder Parameters ---------- source_dir : str Path to the function source code manifest_dir : str Path to the directory containing manifest Returns ------- dict Contains paths to source, artifacts, scratch & manifest directories """
base = "/tmp/samcli" result = { "source_dir": "{}/source".format(base), "artifacts_dir": "{}/artifacts".format(base), "scratch_dir": "{}/scratch".format(base), "manifest_dir": "{}/manifest".format(base) } if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir): # It is possible that the manifest resides within the source. In that case, we won't mount the manifest # directory separately. result["manifest_dir"] = result["source_dir"] return result
<SYSTEM_TASK:>
Use this method to convert a list of host paths to a list of equivalent paths within the container
<END_TASK>
<USER_TASK:>
Description:
def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):
        """
        Use this method to convert a list of host paths to a list of equivalent paths within the container
        where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to
        the Lambda Builder running within the container.

        If a host path is not mounted within the container, then this method simply passes the path to the result
        without any changes.

        Ex:
            [ "/home/foo", "/home/bar", "/home/not/mounted"] => ["/tmp/source", "/tmp/manifest", "/home/not/mounted"]

        Parameters
        ----------
        host_paths_to_convert : list
            List of paths on the host that need to be converted

        host_to_container_path_mapping : dict
            Mapping of paths on the host to the equivalent paths within the container

        Returns
        -------
        list
            Equivalent paths within the container
        """
if not host_paths_to_convert: # Nothing to do return host_paths_to_convert # Make sure the key is absolute host path. Relative paths are tricky to work with because two different # relative paths can point to the same directory ("../foo", "../../foo") mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()} result = [] for original_path in host_paths_to_convert: abspath = str(pathlib.Path(original_path).resolve()) if abspath in mapping: result.append(mapping[abspath]) else: result.append(original_path) LOG.debug("Cannot convert host path '%s' to its equivalent path within the container. " "Host path is not mounted within the container", abspath) return result
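A usage sketch with made-up mounts, mirroring the docstring example:

```
host_to_container = {
    "/home/user/project/src": "/tmp/samcli/source",
    "/home/user/project": "/tmp/samcli/manifest",
}
paths = ["/home/user/project/src", "/home/user/elsewhere"]
# _convert_to_container_dirs(paths, host_to_container) ->
#   ["/tmp/samcli/source", "/home/user/elsewhere"]
```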